diff --git a/sunbird-ai-assistant/DockerFile b/sunbird-ai-assistant/DockerFile
new file mode 100644
index 00000000..761d2db8
--- /dev/null
+++ b/sunbird-ai-assistant/DockerFile
@@ -0,0 +1,5 @@
+FROM python:3.10-slim
+WORKDIR /app
+COPY . .
+RUN pip install -r requirements.txt
+CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
diff --git a/sunbird-ai-assistant/README.MD b/sunbird-ai-assistant/README.MD
new file mode 100644
index 00000000..70608b64
--- /dev/null
+++ b/sunbird-ai-assistant/README.MD
@@ -0,0 +1,84 @@
+# 🧠 Sunbird AI Assistant
+
+A context-aware conversational agent built on top of the **Model Context Protocol (MCP)** and powered by **OpenAI’s function-calling tools**. This assistant interacts with **Sunbird Ed APIs**, adapting its capabilities to different user roles such as **learner**, **admin**, and **mentor**, to enhance the digital public good ecosystem in education.
+
+---
+
+## 🌐 Overview
+
+Sunbird Ed is a modular Digital Public Good (DPG) designed to support learning and skilling platforms. However, it lacks intelligent conversational capabilities. This project integrates a role-aware AI assistant that:
+
+- Understands the **context** of a specific installation
+- Personalizes responses based on **user persona**
+- Interacts with key **Sunbird Ed APIs** (mocked or real)
+- Is modular and extensible across various deployments
+
+---
+
+## 📌 Features
+
+- 🔧 Tool-based LangChain agent integration with role-aware access
+- 🔑 Authentication layer for API authorization
+- 💬 Conversational interface (CLI or REST)
+- 🧠 Session context manager per installation and user
+- 🧰 Supports key Sunbird Ed APIs:
+  - Course metadata
+  - User enrollments
+  - Learning progress
+  - Admin controls
+- 📦 Easily extendable for additional DPGs and APIs
+
+---
+
+## 🧱 Architecture
+
+```txt
+ +----------------+
+ |  CLI / Web UI  |
+ +--------+-------+
+          |
+          v
+ +--------+--------+
+ | FastAPI Backend |
+ +--------+--------+
+          |
+          v
+ +--------+---------------------------------------------------+
+ |                       LangChain Agent                      |
+ |         (Function Calling, Role-based Tool Access)         |
+ +--------+--------------------+--------------------+---------+
+          |                    |                    |
+ +--------+-----+  +------------+-----+  +-----------+--------+
+ | Course Tools |  | Enrollment Tools |  | Admin Tools (RBAC) |
+ +--------------+  +------------------+  +--------------------+
+```
+
+---
+
+## 📂 Project Structure
+
+```txt
+.
+├── app/
+│   ├── main.py
+│   ├── config.py
+│   ├── agent/
+│   │   ├── mcp_agent.py          # LangChain agent setup
+│   │   ├── session_manager.py    # Manages session/user context
+│   │   └── tool_registry.py      # Registers tools based on role
+│   ├── services/
+│   │   ├── auth.py               # Token generation and validation
+│   │   ├── context_loader.py     # Loads installation context
+│   │   └── mock_api.py           # Mocked Sunbird Ed endpoints
+│   ├── tool_schemas/
+│   │   ├── course_tools.py
+│   │   ├── enrollment_tools.py
+│   │   └── admin_tools.py
+│   └── utils/
+│       ├── loggers.py
+│       └── redis_cache.py
+├── cli/
+│   └── chatbot.py
+├── data/
+│   └── installation_contexts/
+│       └── demo_deployment.json
+├── requirements.txt
+└── README.md
+```
+
+---
+
+## 🧪 API Simulation & Mocking
+
+All Sunbird Ed APIs, such as `/course/v1/search` and `/user/enrollment/list`, are currently mocked.
+You can easily replace their logic in the tool schema files using actual API calls like:
+
+```python
+requests.get("https://<your-sunbird-host>/course/v1/search", headers={...})
+```
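+
+For example, a live version of the course-search call might look like the
+sketch below. The base URL and API-key environment variables, and the exact
+response shape, are assumptions; adjust them to your deployment:
+
+```python
+import os
+import requests
+
+def search_courses_live(query: str) -> dict:
+    """Query a real Sunbird Ed instance instead of the bundled mock."""
+    base_url = os.environ["SUNBIRD_BASE_URL"]  # assumed, e.g. https://your-instance.example.org
+    api_key = os.environ["SUNBIRD_API_KEY"]    # assumed: issued by your installation
+    response = requests.get(
+        f"{base_url}/course/v1/search",
+        params={"query": query},
+        headers={"Authorization": f"Bearer {api_key}"},
+        timeout=10,
+    )
+    response.raise_for_status()
+    return response.json()
+```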
diff --git a/sunbird-ai-assistant/app/agent/mcp_agent.py b/sunbird-ai-assistant/app/agent/mcp_agent.py
new file mode 100644
index 00000000..bdf5f6cd
--- /dev/null
+++ b/sunbird-ai-assistant/app/agent/mcp_agent.py
@@ -0,0 +1,32 @@
+from langchain.agents import AgentType, initialize_agent
+from langchain.chat_models import ChatOpenAI
+from app.agent.tool_registry import register_tools
+from app.agent.session_manager import SessionManager
+from app.config import OPENAI_MODEL, OPENAI_TEMPERATURE
+from app.utils.loggers import get_logger
+
+logger = get_logger()
+
+def setup_agent(installation_id: str, user_id: str, persona: str):
+    """
+    Initialize the LangChain agent with tools registered based on user persona.
+    """
+    logger.info(f"Setting up agent for user '{user_id}' with role '{persona}' on installation '{installation_id}'")
+
+    # Register tools based on user role/persona (e.g., learner, admin)
+    tools = register_tools(persona)
+
+    # Set up the LLM using the model and temperature from app.config
+    llm = ChatOpenAI(temperature=OPENAI_TEMPERATURE, model=OPENAI_MODEL)
+
+    # Create a session manager with contextual memory
+    session = SessionManager(installation_id, user_id, persona)
+
+    # Initialize the LangChain agent with tools and LLM
+    agent = initialize_agent(
+        tools=tools,
+        llm=llm,
+        agent=AgentType.OPENAI_FUNCTIONS,
+        verbose=True
+    )
+
+    return agent, session
diff --git a/sunbird-ai-assistant/app/agent/session_manager.py b/sunbird-ai-assistant/app/agent/session_manager.py
new file mode 100644
index 00000000..6377042e
--- /dev/null
+++ b/sunbird-ai-assistant/app/agent/session_manager.py
@@ -0,0 +1,23 @@
+from app.services.context_loader import get_installation_context
+from app.utils.loggers import get_logger
+
+logger = get_logger()
+
+class SessionManager:
+    def __init__(self, installation_id: str, user_id: str, persona: str):
+        self.installation_id = installation_id
+        self.user_id = user_id
+        self.persona = persona
+        self.context = self.load_context()
+
+    def load_context(self):
+        context = get_installation_context(self.installation_id)
+        logger.info(f"Loaded context for {self.installation_id}")
+        return {
+            "persona": self.persona,
+            "installation_context": context,
+            "user_id": self.user_id
+        }
+
+    def inject_context(self, prompt: str) -> str:
+        return f"{self.persona.upper()} CONTEXT: {self.context}\n\n{prompt}"
diff --git a/sunbird-ai-assistant/app/agent/tool_registry.py b/sunbird-ai-assistant/app/agent/tool_registry.py
new file mode 100644
index 00000000..1e44662c
--- /dev/null
+++ b/sunbird-ai-assistant/app/agent/tool_registry.py
@@ -0,0 +1,13 @@
+from app.tool_schemas.course_tools import get_course_metadata
+from app.tool_schemas.enrollment_tools import get_user_enrollments, get_course_progress
+
+def register_tools(user_role: str):
+    """Dynamically register tools based on the user's role/persona."""
+    tools = [get_course_metadata, get_user_enrollments, get_course_progress]
+
+    if user_role == "admin":
+        # Admin-only tools live in a separate schema file and are only
+        # imported (and exposed) for the admin persona.
+        from app.tool_schemas.admin_tools import manage_users, list_all_courses
+        tools += [manage_users, list_all_courses]
+
+    return tools
diff --git a/sunbird-ai-assistant/app/config.py b/sunbird-ai-assistant/app/config.py
new file mode 100644
index 00000000..af23a062
--- /dev/null
+++ b/sunbird-ai-assistant/app/config.py
@@ -0,0 +1,9 @@
+import os
+
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")
+OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", 0.7))
+OPENAI_MAX_TOKENS = int(os.getenv("OPENAI_MAX_TOKENS", 4096))
+OPENAI_TOP_P = float(os.getenv("OPENAI_TOP_P", 1.0))
+OPENAI_FREQUENCY_PENALTY = float(os.getenv("OPENAI_FREQUENCY_PENALTY", 0.0))
+OPENAI_PRESENCE_PENALTY = float(os.getenv("OPENAI_PRESENCE_PENALTY", 0.0))
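+
+# Redis connection settings: app/utils/redis_cache.py imports REDIS_HOST and
+# REDIS_PORT from app.config, but they were not defined here. The localhost
+# defaults below are assumptions for local development; override them via
+# environment variables in real deployments.
+REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
+REDIS_PORT = int(os.getenv("REDIS_PORT", 6379))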
diff --git a/sunbird-ai-assistant/app/main.py b/sunbird-ai-assistant/app/main.py
new file mode 100644
index 00000000..ce4881bd
--- /dev/null
+++ b/sunbird-ai-assistant/app/main.py
@@ -0,0 +1,21 @@
+from fastapi import FastAPI
+from app.services.mock_api import mock_router
+from app.utils.loggers import get_logger
+
+logger = get_logger()
+
+app = FastAPI()
+
+# Include mock API routes to simulate Sunbird Ed
+app.include_router(mock_router)
+
+
+@app.on_event("startup")
+async def startup_event():
+    # Agents are built per user/session via
+    # app.agent.mcp_agent.setup_agent(installation_id, user_id, persona),
+    # so there is nothing to construct eagerly at startup.
+    logger.info("Assistant initialized successfully.")
diff --git a/sunbird-ai-assistant/app/services/auth.py b/sunbird-ai-assistant/app/services/auth.py
new file mode 100644
index 00000000..b00e55ef
--- /dev/null
+++ b/sunbird-ai-assistant/app/services/auth.py
@@ -0,0 +1,59 @@
+import os
+import time
+import hashlib
+from typing import Optional
+from app.utils.loggers import get_logger
+
+logger = get_logger()
+
+# Simulate a user/token database
+MOCK_USERS = {
+    "user_001": "admin",
+    "user_002": "learner",
+    "user_003": "mentor"
+}
+
+# Simulated secret key (could be replaced with JWT secret or OAuth config)
+SECRET_KEY = os.getenv("AUTH_SECRET_KEY", "sunbird_secret_key")
+
+# Token TTL in seconds (e.g., 1 hour)
+TOKEN_TTL = 3600
+
+
+def generate_token(user_id: str) -> str:
+    """Generate a simple hashed token with TTL."""
+    if user_id not in MOCK_USERS:
+        raise ValueError("Invalid user ID")
+
+    timestamp = str(int(time.time()) + TOKEN_TTL)
+    raw = f"{user_id}:{timestamp}:{SECRET_KEY}"
+    token = hashlib.sha256(raw.encode()).hexdigest()
+    logger.info(f"Generated token for {user_id}")
+    return f"{user_id}:{timestamp}:{token}"
+
+
+def validate_token(token: str) -> Optional[str]:
+    """Validate token and return user_id if valid, else None."""
+    try:
+        user_id, timestamp, token_hash = token.split(":")
+        if time.time() > int(timestamp):
+            logger.warning("Token expired")
+            return None
+
+        expected_raw = f"{user_id}:{timestamp}:{SECRET_KEY}"
+        expected_hash = hashlib.sha256(expected_raw.encode()).hexdigest()
+
+        if expected_hash == token_hash:
+            logger.info(f"Validated token for {user_id}")
+            return user_id
+        else:
+            logger.warning("Token hash mismatch")
+            return None
+    except Exception as e:
+        logger.error(f"Token validation failed: {e}")
+        return None
+
+
+def get_user_role(user_id: str) -> Optional[str]:
+    """Get the role of the user from mock DB."""
+    return MOCK_USERS.get(user_id)
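+
+
+if __name__ == "__main__":
+    # Illustrative smoke test (not part of the service): round-trip a token
+    # for a known mock user and show the resolved role.
+    demo_token = generate_token("user_002")
+    assert validate_token(demo_token) == "user_002"
+    print(get_user_role("user_002"))  # -> "learner"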
diff --git a/sunbird-ai-assistant/app/services/context_loader.py b/sunbird-ai-assistant/app/services/context_loader.py
new file mode 100644
index 00000000..c9c8f550
--- /dev/null
+++ b/sunbird-ai-assistant/app/services/context_loader.py
@@ -0,0 +1,13 @@
+import json
+from app.utils.redis_cache import get_cache, set_cache
+
+def get_installation_context(installation_id: str):
+    key = f"context:{installation_id}"
+    cached = get_cache(key)
+    if cached:
+        return json.loads(cached)
+
+    # Map the installation ID to its context file, e.g. "demo" ->
+    # data/installation_contexts/demo_deployment.json
+    with open(f"data/installation_contexts/{installation_id}_deployment.json") as f:
+        context = json.load(f)
+    set_cache(key, json.dumps(context))
+    return context
diff --git a/sunbird-ai-assistant/app/services/mock_api.py b/sunbird-ai-assistant/app/services/mock_api.py
new file mode 100644
index 00000000..ccb81432
--- /dev/null
+++ b/sunbird-ai-assistant/app/services/mock_api.py
@@ -0,0 +1,21 @@
+from fastapi import APIRouter
+
+mock_router = APIRouter()
+
+@mock_router.get("/course/v1/search")
+def search_courses(query: str = ""):
+    return {
+        "courses": [
+            {"id": "c101", "title": "Python Basics"},
+            {"id": "c102", "title": "Data Science"},
+        ]
+    }
+
+@mock_router.get("/user/enrollment/list")
+def get_enrollments(user_id: str):
+    return {
+        "user_id": user_id,
+        "enrollments": [
+            {"course_id": "c101", "progress": "50%"},
+        ]
+    }
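+
+
+# Illustrative usage (assuming the app is running via `uvicorn app.main:app`
+# on localhost:8000, as in the Dockerfile):
+#   curl "http://localhost:8000/course/v1/search?query=python"
+#   curl "http://localhost:8000/user/enrollment/list?user_id=user_001"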
diff --git a/sunbird-ai-assistant/app/tool_schemas/admin_tools.py b/sunbird-ai-assistant/app/tool_schemas/admin_tools.py
new file mode 100644
index 00000000..6515a491
--- /dev/null
+++ b/sunbird-ai-assistant/app/tool_schemas/admin_tools.py
@@ -0,0 +1,41 @@
+from langchain.tools import tool
+from typing import List, Dict
+
+# Simulated Course and User DB
+ALL_COURSES = {
+    "python101": "Intro to Python",
+    "ml202": "Machine Learning Fundamentals",
+    "dl301": "Deep Learning with PyTorch"
+}
+
+ALL_USERS = {
+    "user_001": "admin",
+    "user_002": "learner",
+    "user_003": "mentor"
+}
+
+@tool
+def list_all_courses() -> List[str]:
+    """
+    List all available courses in the platform (Admin only).
+    """
+    return list(ALL_COURSES.values())
+
+@tool
+def manage_users(action: str, target_user: str, new_role: str = "") -> Dict:
+    """
+    Simulate user management operations (Admin only).
+    Supported actions: 'view', 'update'
+    """
+    if target_user not in ALL_USERS:
+        return {"error": "User not found"}
+
+    if action == "view":
+        return {target_user: ALL_USERS[target_user]}
+    elif action == "update":
+        if not new_role:
+            return {"error": "New role must be specified for update"}
+        ALL_USERS[target_user] = new_role
+        return {"message": f"Role updated to '{new_role}' for user '{target_user}'"}
+    else:
+        return {"error": "Unsupported action. Use 'view' or 'update'"}
diff --git a/sunbird-ai-assistant/app/tool_schemas/course_tools.py b/sunbird-ai-assistant/app/tool_schemas/course_tools.py
new file mode 100644
index 00000000..6955bd0e
--- /dev/null
+++ b/sunbird-ai-assistant/app/tool_schemas/course_tools.py
@@ -0,0 +1,23 @@
+from typing import Dict
+from langchain.tools import tool
+
+# Simulated Course Catalog
+COURSE_DB = {
+    "python101": {"title": "Intro to Python", "level": "Beginner", "duration": "4 weeks"},
+    "ml202": {"title": "Machine Learning Fundamentals", "level": "Intermediate", "duration": "6 weeks"}
+}
+
+@tool
+def get_course_metadata(course_id: str) -> Dict:
+    """
+    Fetch metadata about a specific course using its ID.
+    """
+    course = COURSE_DB.get(course_id)
+    if not course:
+        return {"error": f"No course found with ID '{course_id}'."}
+    return {
+        "course_id": course_id,
+        "title": course["title"],
+        "level": course["level"],
+        "duration": course["duration"]
+    }
diff --git a/sunbird-ai-assistant/app/tool_schemas/enrollment_tools.py b/sunbird-ai-assistant/app/tool_schemas/enrollment_tools.py
new file mode 100644
index 00000000..3481fa1a
--- /dev/null
+++ b/sunbird-ai-assistant/app/tool_schemas/enrollment_tools.py
@@ -0,0 +1,30 @@
+from typing import List, Dict
+from langchain.tools import tool
+
+# Simulated User Enrollment Data
+USER_ENROLLMENTS = {
+    "user_001": ["python101"],
+    "user_002": ["ml202"],
+}
+
+PROGRESS_TRACKER = {
+    "user_001": {"python101": "80%"},
+    "user_002": {"ml202": "40%"},
+}
+
+@tool
+def get_user_enrollments(user_id: str) -> List[str]:
+    """
+    Retrieve a list of course IDs that the user is enrolled in.
+    """
+    return USER_ENROLLMENTS.get(user_id, [])
+
+@tool
+def get_course_progress(user_id: str, course_id: str) -> Dict:
+    """
+    Get progress data for a specific user and course.
+    """
+    progress = PROGRESS_TRACKER.get(user_id, {}).get(course_id)
+    if not progress:
+        return {"error": "Progress data not found for given user and course."}
+    return {"user_id": user_id, "course_id": course_id, "progress": progress}
diff --git a/sunbird-ai-assistant/app/utils/loggers.py b/sunbird-ai-assistant/app/utils/loggers.py
new file mode 100644
index 00000000..8ced05fe
--- /dev/null
+++ b/sunbird-ai-assistant/app/utils/loggers.py
@@ -0,0 +1,21 @@
+import logging
+import sys
+
+def get_logger(name: str = "sunbird_ai_assistant") -> logging.Logger:
+    """Initializes and returns a configured logger instance."""
+    logger = logging.getLogger(name)
+    logger.setLevel(logging.DEBUG)
+
+    # Avoid duplicate handlers in case of reloads (e.g., FastAPI dev server)
+    if not logger.handlers:
+        formatter = logging.Formatter(
+            "[%(asctime)s] [%(levelname)s] %(name)s: %(message)s",
+            datefmt="%Y-%m-%d %H:%M:%S"
+        )
+
+        stream_handler = logging.StreamHandler(sys.stdout)
+        stream_handler.setFormatter(formatter)
+
+        logger.addHandler(stream_handler)
+
+    return logger
diff --git a/sunbird-ai-assistant/app/utils/redis_cache.py b/sunbird-ai-assistant/app/utils/redis_cache.py
new file mode 100644
index 00000000..8d70839c
--- /dev/null
+++ b/sunbird-ai-assistant/app/utils/redis_cache.py
@@ -0,0 +1,10 @@
+import redis
+from app.config import REDIS_HOST, REDIS_PORT
+
+r = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, decode_responses=True)
+
+def set_cache(key, value):
+    r.set(key, value)
+
+def get_cache(key):
+    return r.get(key)
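+
+# NOTE: these helpers assume a reachable Redis instance. For local development
+# without Redis, callers (e.g. context_loader) could catch
+# redis.exceptions.ConnectionError and fall back to reading the context JSON
+# directly from disk.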
diff --git a/sunbird-ai-assistant/cli/chatbot.py b/sunbird-ai-assistant/cli/chatbot.py
new file mode 100644
index 00000000..98419b06
--- /dev/null
+++ b/sunbird-ai-assistant/cli/chatbot.py
@@ -0,0 +1,36 @@
+from app.services.auth import generate_token, validate_token, get_user_role
+from app.agent.mcp_agent import setup_agent
+
+def run_chat():
+    print("=== Sunbird AI Assistant ===")
+    user_id = input("Enter User ID: ")
+
+    try:
+        token = generate_token(user_id)
+        print(f"🔐 Your session token: {token}")
+    except ValueError:
+        print("🚫 Invalid user ID")
+        return
+
+    installation_id = input("Enter Deployment ID (e.g., demo): ")
+
+    # Authenticate the token
+    validated_user = validate_token(token)
+    if not validated_user:
+        print("❌ Token expired or invalid.")
+        return
+
+    role = get_user_role(validated_user)
+    print(f"✅ Authenticated as {role} ({validated_user})")
+
+    agent, session = setup_agent(installation_id, validated_user, role)
+
+    print("\nYou can now start chatting with the AI Assistant.")
+    while True:
+        user_input = input(">> ")
+        if user_input.lower() in ("exit", "quit"):
+            break
+
+        prompt = session.inject_context(user_input)
+        result = agent.run(prompt)
+        print("🤖", result)
+
+
+if __name__ == "__main__":
+    run_chat()
diff --git a/sunbird-ai-assistant/data/installation_contexts/demo_deployment.json b/sunbird-ai-assistant/data/installation_contexts/demo_deployment.json
new file mode 100644
index 00000000..8b0ad2c3
--- /dev/null
+++ b/sunbird-ai-assistant/data/installation_contexts/demo_deployment.json
@@ -0,0 +1,18 @@
+[
+  {
+    "org_name": "Sunbird EDU West",
+    "courses": [
+      {"id": "c101", "title": "Python Basics"},
+      {"id": "c102", "title": "AI for Beginners"}
+    ],
+    "support_contacts": ["help@sunbird.edu"]
+  },
+  {
+    "org_name": "Sunbird EDU East",
+    "courses": [
+      {"id": "c201", "title": "Java Basics"},
+      {"id": "c202", "title": "Data Science with R"}
+    ],
+    "support_contacts": ["help@sunbird.edu"]
+  }
+]
diff --git a/sunbird-ai-assistant/requirements.txt b/sunbird-ai-assistant/requirements.txt
new file mode 100644
index 00000000..e0bc0ff2
--- /dev/null
+++ b/sunbird-ai-assistant/requirements.txt
@@ -0,0 +1,29 @@
+# Core LLM and LangChain dependencies
+openai>=1.10.0
+langchain>=0.1.8
+
+# Web framework
+fastapi>=0.109.0
+uvicorn[standard]>=0.25.0
+
+# Tooling and HTTP requests
+httpx>=0.25.1
+requests>=2.31.0
+
+# Redis (for session/context caching)
+redis>=5.0.1
+
+# Environment management
+python-dotenv>=1.0.0
+
+# Logging and utilities
+loguru>=0.7.2
+
+# Optional: CLI or local testing
+typer>=0.9.0
+
+# Note: asyncio is part of the Python standard library and must not be
+# installed from PyPI.
+
+# (Optional) Dev tools
+watchdog>=4.0.0
diff --git a/venv/Include/site/python3.12/greenlet/greenlet.h b/venv/Include/site/python3.12/greenlet/greenlet.h
new file mode 100644
index 00000000..d02a16e4
--- /dev/null
+++ b/venv/Include/site/python3.12/greenlet/greenlet.h
@@ -0,0 +1,164 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+
+/* Greenlet object interface */
+
+#ifndef Py_GREENLETOBJECT_H
+#define Py_GREENLETOBJECT_H
+
+
+#include <Python.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This is deprecated and undocumented. It does not change.
*/ +#define GREENLET_VERSION "1.0.0" + +#ifndef GREENLET_MODULE +#define implementation_ptr_t void* +#endif + +typedef struct _greenlet { + PyObject_HEAD + PyObject* weakreflist; + PyObject* dict; + implementation_ptr_t pimpl; +} PyGreenlet; + +#define PyGreenlet_Check(op) (op && PyObject_TypeCheck(op, &PyGreenlet_Type)) + + +/* C API functions */ + +/* Total number of symbols that are exported */ +#define PyGreenlet_API_pointers 12 + +#define PyGreenlet_Type_NUM 0 +#define PyExc_GreenletError_NUM 1 +#define PyExc_GreenletExit_NUM 2 + +#define PyGreenlet_New_NUM 3 +#define PyGreenlet_GetCurrent_NUM 4 +#define PyGreenlet_Throw_NUM 5 +#define PyGreenlet_Switch_NUM 6 +#define PyGreenlet_SetParent_NUM 7 + +#define PyGreenlet_MAIN_NUM 8 +#define PyGreenlet_STARTED_NUM 9 +#define PyGreenlet_ACTIVE_NUM 10 +#define PyGreenlet_GET_PARENT_NUM 11 + +#ifndef GREENLET_MODULE +/* This section is used by modules that uses the greenlet C API */ +static void** _PyGreenlet_API = NULL; + +# define PyGreenlet_Type \ + (*(PyTypeObject*)_PyGreenlet_API[PyGreenlet_Type_NUM]) + +# define PyExc_GreenletError \ + ((PyObject*)_PyGreenlet_API[PyExc_GreenletError_NUM]) + +# define PyExc_GreenletExit \ + ((PyObject*)_PyGreenlet_API[PyExc_GreenletExit_NUM]) + +/* + * PyGreenlet_New(PyObject *args) + * + * greenlet.greenlet(run, parent=None) + */ +# define PyGreenlet_New \ + (*(PyGreenlet * (*)(PyObject * run, PyGreenlet * parent)) \ + _PyGreenlet_API[PyGreenlet_New_NUM]) + +/* + * PyGreenlet_GetCurrent(void) + * + * greenlet.getcurrent() + */ +# define PyGreenlet_GetCurrent \ + (*(PyGreenlet * (*)(void)) _PyGreenlet_API[PyGreenlet_GetCurrent_NUM]) + +/* + * PyGreenlet_Throw( + * PyGreenlet *greenlet, + * PyObject *typ, + * PyObject *val, + * PyObject *tb) + * + * g.throw(...) + */ +# define PyGreenlet_Throw \ + (*(PyObject * (*)(PyGreenlet * self, \ + PyObject * typ, \ + PyObject * val, \ + PyObject * tb)) \ + _PyGreenlet_API[PyGreenlet_Throw_NUM]) + +/* + * PyGreenlet_Switch(PyGreenlet *greenlet, PyObject *args) + * + * g.switch(*args, **kwargs) + */ +# define PyGreenlet_Switch \ + (*(PyObject * \ + (*)(PyGreenlet * greenlet, PyObject * args, PyObject * kwargs)) \ + _PyGreenlet_API[PyGreenlet_Switch_NUM]) + +/* + * PyGreenlet_SetParent(PyObject *greenlet, PyObject *new_parent) + * + * g.parent = new_parent + */ +# define PyGreenlet_SetParent \ + (*(int (*)(PyGreenlet * greenlet, PyGreenlet * nparent)) \ + _PyGreenlet_API[PyGreenlet_SetParent_NUM]) + +/* + * PyGreenlet_GetParent(PyObject* greenlet) + * + * return greenlet.parent; + * + * This could return NULL even if there is no exception active. + * If it does not return NULL, you are responsible for decrementing the + * reference count. + */ +# define PyGreenlet_GetParent \ + (*(PyGreenlet* (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_GET_PARENT_NUM]) + +/* + * deprecated, undocumented alias. + */ +# define PyGreenlet_GET_PARENT PyGreenlet_GetParent + +# define PyGreenlet_MAIN \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_MAIN_NUM]) + +# define PyGreenlet_STARTED \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_STARTED_NUM]) + +# define PyGreenlet_ACTIVE \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_ACTIVE_NUM]) + + + + +/* Macro that imports greenlet and initializes C API */ +/* NOTE: This has actually moved to ``greenlet._greenlet._C_API``, but we + keep the older definition to be sure older code that might have a copy of + the header still works. 
*/ +# define PyGreenlet_Import() \ + { \ + _PyGreenlet_API = (void**)PyCapsule_Import("greenlet._C_API", 0); \ + } + +#endif /* GREENLET_MODULE */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_GREENLETOBJECT_H */ diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/INSTALLER b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/LICENSE b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/LICENSE new file mode 100644 index 00000000..2f1b8e15 --- /dev/null +++ b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2017-2021 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/METADATA b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/METADATA new file mode 100644 index 00000000..db029b77 --- /dev/null +++ b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/METADATA @@ -0,0 +1,46 @@ +Metadata-Version: 2.1 +Name: PyYAML +Version: 6.0.2 +Summary: YAML parser and emitter for Python +Home-page: https://pyyaml.org/ +Download-URL: https://pypi.org/project/PyYAML/ +Author: Kirill Simonov +Author-email: xi@resolvent.net +License: MIT +Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues +Project-URL: CI, https://github.com/yaml/pyyaml/actions +Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation +Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core +Project-URL: Source Code, https://github.com/yaml/pyyaml +Platform: Any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +Requires-Python: >=3.8 +License-File: LICENSE + +YAML is a data serialization format designed for human readability +and interaction with scripting languages. PyYAML is a YAML parser +and emitter for Python. + +PyYAML features a complete YAML 1.1 parser, Unicode support, pickle +support, capable extension API, and sensible error messages. PyYAML +supports standard YAML tags and provides Python-specific tags that +allow to represent an arbitrary Python object. + +PyYAML is applicable for a broad range of tasks from complex +configuration files to object serialization and persistence. 
diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/RECORD b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/RECORD new file mode 100644 index 00000000..61653255 --- /dev/null +++ b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/RECORD @@ -0,0 +1,43 @@ +PyYAML-6.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +PyYAML-6.0.2.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101 +PyYAML-6.0.2.dist-info/METADATA,sha256=9lwXqTOrXPts-jI2Lo5UwuaAYo0hiRA0BZqjch0WjAk,2106 +PyYAML-6.0.2.dist-info/RECORD,, +PyYAML-6.0.2.dist-info/WHEEL,sha256=c7SWG1_hRvc9HXHEkmWlTu1Jr4WpzRucfzqTP-_8q0s,102 +PyYAML-6.0.2.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11 +_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402 +_yaml/__pycache__/__init__.cpython-312.pyc,, +yaml/__init__.py,sha256=N35S01HMesFTe0aRRMWkPj0Pa8IEbHpE9FK7cr5Bdtw,12311 +yaml/__pycache__/__init__.cpython-312.pyc,, +yaml/__pycache__/composer.cpython-312.pyc,, +yaml/__pycache__/constructor.cpython-312.pyc,, +yaml/__pycache__/cyaml.cpython-312.pyc,, +yaml/__pycache__/dumper.cpython-312.pyc,, +yaml/__pycache__/emitter.cpython-312.pyc,, +yaml/__pycache__/error.cpython-312.pyc,, +yaml/__pycache__/events.cpython-312.pyc,, +yaml/__pycache__/loader.cpython-312.pyc,, +yaml/__pycache__/nodes.cpython-312.pyc,, +yaml/__pycache__/parser.cpython-312.pyc,, +yaml/__pycache__/reader.cpython-312.pyc,, +yaml/__pycache__/representer.cpython-312.pyc,, +yaml/__pycache__/resolver.cpython-312.pyc,, +yaml/__pycache__/scanner.cpython-312.pyc,, +yaml/__pycache__/serializer.cpython-312.pyc,, +yaml/__pycache__/tokens.cpython-312.pyc,, +yaml/_yaml.cp312-win_amd64.pyd,sha256=Bx7e_LEQx7cnd1_A9_nClp3X77g-_Lw1aoAAtYZbwWk,263680 +yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883 +yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639 +yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851 +yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837 +yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006 +yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533 +yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445 +yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061 +yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440 +yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495 +yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794 +yaml/representer.py,sha256=IuWP-cAW9sHKEnS0gCqSa894k1Bg4cgTxaDwIcbRQ-Y,14190 +yaml/resolver.py,sha256=9L-VYfm4mWHxUD1Vg4X7rjDRK_7VZd6b92wzq7Y2IKY,9004 +yaml/scanner.py,sha256=YEM3iLZSaQwXcQRg2l2R4MdT0zGP2F9eHkKGKnHyWQY,51279 +yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165 +yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573 diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/WHEEL b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/WHEEL new file mode 100644 index 00000000..a4e7d83d --- /dev/null +++ b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.44.0) +Root-Is-Purelib: false +Tag: cp312-cp312-win_amd64 + diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/top_level.txt b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/top_level.txt new file mode 100644 index 00000000..e6475e91 --- /dev/null +++ 
b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_yaml +yaml diff --git a/venv/Lib/site-packages/__pycache__/jsonpatch.cpython-312.pyc b/venv/Lib/site-packages/__pycache__/jsonpatch.cpython-312.pyc new file mode 100644 index 00000000..8dff36e5 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/jsonpatch.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/jsonpointer.cpython-312.pyc b/venv/Lib/site-packages/__pycache__/jsonpointer.cpython-312.pyc new file mode 100644 index 00000000..77d75e9e Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/jsonpointer.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/typing_extensions.cpython-312.pyc b/venv/Lib/site-packages/__pycache__/typing_extensions.cpython-312.pyc new file mode 100644 index 00000000..04b0fff0 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/typing_extensions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/_yaml/__init__.py b/venv/Lib/site-packages/_yaml/__init__.py new file mode 100644 index 00000000..7baa8c4b --- /dev/null +++ b/venv/Lib/site-packages/_yaml/__init__.py @@ -0,0 +1,33 @@ +# This is a stub package designed to roughly emulate the _yaml +# extension module, which previously existed as a standalone module +# and has been moved into the `yaml` package namespace. +# It does not perfectly mimic its old counterpart, but should get +# close enough for anyone who's relying on it even when they shouldn't. +import yaml + +# in some circumstances, the yaml module we imoprted may be from a different version, so we need +# to tread carefully when poking at it here (it may not have the attributes we expect) +if not getattr(yaml, '__with_libyaml__', False): + from sys import version_info + + exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError + raise exc("No module named '_yaml'") +else: + from yaml._yaml import * + import warnings + warnings.warn( + 'The _yaml extension module is now located at yaml._yaml' + ' and its location is subject to change. To use the' + ' LibYAML-based parser and emitter, import from `yaml`:' + ' `from yaml import CLoader as Loader, CDumper as Dumper`.', + DeprecationWarning + ) + del warnings + # Don't `del yaml` here because yaml is actually an existing + # namespace member of _yaml. + +__name__ = '_yaml' +# If the module is top-level (i.e. not a part of any specific package) +# then the attribute should be set to ''. 
+# https://docs.python.org/3.8/library/types.html +__package__ = '' diff --git a/venv/Lib/site-packages/_yaml/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/_yaml/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..4f537251 Binary files /dev/null and b/venv/Lib/site-packages/_yaml/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/INSTALLER b/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/METADATA b/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/METADATA new file mode 100644 index 00000000..3ac05cfd --- /dev/null +++ b/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/METADATA @@ -0,0 +1,295 @@ +Metadata-Version: 2.3 +Name: annotated-types +Version: 0.7.0 +Summary: Reusable constraint types to use with typing.Annotated +Project-URL: Homepage, https://github.com/annotated-types/annotated-types +Project-URL: Source, https://github.com/annotated-types/annotated-types +Project-URL: Changelog, https://github.com/annotated-types/annotated-types/releases +Author-email: Adrian Garcia Badaracco <1755071+adriangb@users.noreply.github.com>, Samuel Colvin , Zac Hatfield-Dodds +License-File: LICENSE +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Console +Classifier: Environment :: MacOS X +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Unix +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Typing :: Typed +Requires-Python: >=3.8 +Requires-Dist: typing-extensions>=4.0.0; python_version < '3.9' +Description-Content-Type: text/markdown + +# annotated-types + +[![CI](https://github.com/annotated-types/annotated-types/workflows/CI/badge.svg?event=push)](https://github.com/annotated-types/annotated-types/actions?query=event%3Apush+branch%3Amain+workflow%3ACI) +[![pypi](https://img.shields.io/pypi/v/annotated-types.svg)](https://pypi.python.org/pypi/annotated-types) +[![versions](https://img.shields.io/pypi/pyversions/annotated-types.svg)](https://github.com/annotated-types/annotated-types) +[![license](https://img.shields.io/github/license/annotated-types/annotated-types.svg)](https://github.com/annotated-types/annotated-types/blob/main/LICENSE) + +[PEP-593](https://peps.python.org/pep-0593/) added `typing.Annotated` as a way of +adding context-specific metadata to existing types, and specifies that +`Annotated[T, x]` _should_ be treated as `T` by any tool or library without special +logic for `x`. + +This package provides metadata objects which can be used to represent common +constraints such as upper and lower bounds on scalar values and collection sizes, +a `Predicate` marker for runtime checks, and +descriptions of how we intend these metadata to be interpreted. 
In some cases, +we also note alternative representations which do not require this package. + +## Install + +```bash +pip install annotated-types +``` + +## Examples + +```python +from typing import Annotated +from annotated_types import Gt, Len, Predicate + +class MyClass: + age: Annotated[int, Gt(18)] # Valid: 19, 20, ... + # Invalid: 17, 18, "19", 19.0, ... + factors: list[Annotated[int, Predicate(is_prime)]] # Valid: 2, 3, 5, 7, 11, ... + # Invalid: 4, 8, -2, 5.0, "prime", ... + + my_list: Annotated[list[int], Len(0, 10)] # Valid: [], [10, 20, 30, 40, 50] + # Invalid: (1, 2), ["abc"], [0] * 20 +``` + +## Documentation + +_While `annotated-types` avoids runtime checks for performance, users should not +construct invalid combinations such as `MultipleOf("non-numeric")` or `Annotated[int, Len(3)]`. +Downstream implementors may choose to raise an error, emit a warning, silently ignore +a metadata item, etc., if the metadata objects described below are used with an +incompatible type - or for any other reason!_ + +### Gt, Ge, Lt, Le + +Express inclusive and/or exclusive bounds on orderable values - which may be numbers, +dates, times, strings, sets, etc. Note that the boundary value need not be of the +same type that was annotated, so long as they can be compared: `Annotated[int, Gt(1.5)]` +is fine, for example, and implies that the value is an integer x such that `x > 1.5`. + +We suggest that implementors may also interpret `functools.partial(operator.le, 1.5)` +as being equivalent to `Gt(1.5)`, for users who wish to avoid a runtime dependency on +the `annotated-types` package. + +To be explicit, these types have the following meanings: + +* `Gt(x)` - value must be "Greater Than" `x` - equivalent to exclusive minimum +* `Ge(x)` - value must be "Greater than or Equal" to `x` - equivalent to inclusive minimum +* `Lt(x)` - value must be "Less Than" `x` - equivalent to exclusive maximum +* `Le(x)` - value must be "Less than or Equal" to `x` - equivalent to inclusive maximum + +### Interval + +`Interval(gt, ge, lt, le)` allows you to specify an upper and lower bound with a single +metadata object. `None` attributes should be ignored, and non-`None` attributes +treated as per the single bounds above. + +### MultipleOf + +`MultipleOf(multiple_of=x)` might be interpreted in two ways: + +1. Python semantics, implying `value % multiple_of == 0`, or +2. [JSONschema semantics](https://json-schema.org/draft/2020-12/json-schema-validation.html#rfc.section.6.2.1), + where `int(value / multiple_of) == value / multiple_of`. + +We encourage users to be aware of these two common interpretations and their +distinct behaviours, especially since very large or non-integer numbers make +it easy to cause silent data corruption due to floating-point imprecision. + +We encourage libraries to carefully document which interpretation they implement. + +### MinLen, MaxLen, Len + +`Len()` implies that `min_length <= len(value) <= max_length` - lower and upper bounds are inclusive. + +As well as `Len()` which can optionally include upper and lower bounds, we also +provide `MinLen(x)` and `MaxLen(y)` which are equivalent to `Len(min_length=x)` +and `Len(max_length=y)` respectively. + +`Len`, `MinLen`, and `MaxLen` may be used with any type which supports `len(value)`. 
+ +Examples of usage: + +* `Annotated[list, MaxLen(10)]` (or `Annotated[list, Len(max_length=10))`) - list must have a length of 10 or less +* `Annotated[str, MaxLen(10)]` - string must have a length of 10 or less +* `Annotated[list, MinLen(3))` (or `Annotated[list, Len(min_length=3))`) - list must have a length of 3 or more +* `Annotated[list, Len(4, 6)]` - list must have a length of 4, 5, or 6 +* `Annotated[list, Len(8, 8)]` - list must have a length of exactly 8 + +#### Changed in v0.4.0 + +* `min_inclusive` has been renamed to `min_length`, no change in meaning +* `max_exclusive` has been renamed to `max_length`, upper bound is now **inclusive** instead of **exclusive** +* The recommendation that slices are interpreted as `Len` has been removed due to ambiguity and different semantic + meaning of the upper bound in slices vs. `Len` + +See [issue #23](https://github.com/annotated-types/annotated-types/issues/23) for discussion. + +### Timezone + +`Timezone` can be used with a `datetime` or a `time` to express which timezones +are allowed. `Annotated[datetime, Timezone(None)]` must be a naive datetime. +`Timezone[...]` ([literal ellipsis](https://docs.python.org/3/library/constants.html#Ellipsis)) +expresses that any timezone-aware datetime is allowed. You may also pass a specific +timezone string or [`tzinfo`](https://docs.python.org/3/library/datetime.html#tzinfo-objects) +object such as `Timezone(timezone.utc)` or `Timezone("Africa/Abidjan")` to express that you only +allow a specific timezone, though we note that this is often a symptom of fragile design. + +#### Changed in v0.x.x + +* `Timezone` accepts [`tzinfo`](https://docs.python.org/3/library/datetime.html#tzinfo-objects) objects instead of + `timezone`, extending compatibility to [`zoneinfo`](https://docs.python.org/3/library/zoneinfo.html) and third party libraries. + +### Unit + +`Unit(unit: str)` expresses that the annotated numeric value is the magnitude of +a quantity with the specified unit. For example, `Annotated[float, Unit("m/s")]` +would be a float representing a velocity in meters per second. + +Please note that `annotated_types` itself makes no attempt to parse or validate +the unit string in any way. That is left entirely to downstream libraries, +such as [`pint`](https://pint.readthedocs.io) or +[`astropy.units`](https://docs.astropy.org/en/stable/units/). + +An example of how a library might use this metadata: + +```python +from annotated_types import Unit +from typing import Annotated, TypeVar, Callable, Any, get_origin, get_args + +# given a type annotated with a unit: +Meters = Annotated[float, Unit("m")] + + +# you can cast the annotation to a specific unit type with any +# callable that accepts a string and returns the desired type +T = TypeVar("T") +def cast_unit(tp: Any, unit_cls: Callable[[str], T]) -> T | None: + if get_origin(tp) is Annotated: + for arg in get_args(tp): + if isinstance(arg, Unit): + return unit_cls(arg.unit) + return None + + +# using `pint` +import pint +pint_unit = cast_unit(Meters, pint.Unit) + + +# using `astropy.units` +import astropy.units as u +astropy_unit = cast_unit(Meters, u.Unit) +``` + +### Predicate + +`Predicate(func: Callable)` expresses that `func(value)` is truthy for valid values. +Users should prefer the statically inspectable metadata above, but if you need +the full power and flexibility of arbitrary runtime predicates... here it is. 
+ +For some common constraints, we provide generic types: + +* `IsLower = Annotated[T, Predicate(str.islower)]` +* `IsUpper = Annotated[T, Predicate(str.isupper)]` +* `IsDigit = Annotated[T, Predicate(str.isdigit)]` +* `IsFinite = Annotated[T, Predicate(math.isfinite)]` +* `IsNotFinite = Annotated[T, Predicate(Not(math.isfinite))]` +* `IsNan = Annotated[T, Predicate(math.isnan)]` +* `IsNotNan = Annotated[T, Predicate(Not(math.isnan))]` +* `IsInfinite = Annotated[T, Predicate(math.isinf)]` +* `IsNotInfinite = Annotated[T, Predicate(Not(math.isinf))]` + +so that you can write e.g. `x: IsFinite[float] = 2.0` instead of the longer +(but exactly equivalent) `x: Annotated[float, Predicate(math.isfinite)] = 2.0`. + +Some libraries might have special logic to handle known or understandable predicates, +for example by checking for `str.isdigit` and using its presence to both call custom +logic to enforce digit-only strings, and customise some generated external schema. +Users are therefore encouraged to avoid indirection like `lambda s: s.lower()`, in +favor of introspectable methods such as `str.lower` or `re.compile("pattern").search`. + +To enable basic negation of commonly used predicates like `math.isnan` without introducing introspection that makes it impossible for implementers to introspect the predicate we provide a `Not` wrapper that simply negates the predicate in an introspectable manner. Several of the predicates listed above are created in this manner. + +We do not specify what behaviour should be expected for predicates that raise +an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently +skip invalid constraints, or statically raise an error; or it might try calling it +and then propagate or discard the resulting +`TypeError: descriptor 'isdigit' for 'str' objects doesn't apply to a 'int' object` +exception. We encourage libraries to document the behaviour they choose. + +### Doc + +`doc()` can be used to add documentation information in `Annotated`, for function and method parameters, variables, class attributes, return types, and any place where `Annotated` can be used. + +It expects a value that can be statically analyzed, as the main use case is for static analysis, editors, documentation generators, and similar tools. + +It returns a `DocInfo` class with a single attribute `documentation` containing the value passed to `doc()`. + +This is the early adopter's alternative form of the [`typing-doc` proposal](https://github.com/tiangolo/fastapi/blob/typing-doc/typing_doc.md). + +### Integrating downstream types with `GroupedMetadata` + +Implementers may choose to provide a convenience wrapper that groups multiple pieces of metadata. +This can help reduce verbosity and cognitive overhead for users. +For example, an implementer like Pydantic might provide a `Field` or `Meta` type that accepts keyword arguments and transforms these into low-level metadata: + +```python +from dataclasses import dataclass +from typing import Iterator +from annotated_types import GroupedMetadata, Ge + +@dataclass +class Field(GroupedMetadata): + ge: int | None = None + description: str | None = None + + def __iter__(self) -> Iterator[object]: + # Iterating over a GroupedMetadata object should yield annotated-types + # constraint metadata objects which describe it as fully as possible, + # and may include other unknown objects too. 
+ if self.ge is not None: + yield Ge(self.ge) + if self.description is not None: + yield Description(self.description) +``` + +Libraries consuming annotated-types constraints should check for `GroupedMetadata` and unpack it by iterating over the object and treating the results as if they had been "unpacked" in the `Annotated` type. The same logic should be applied to the [PEP 646 `Unpack` type](https://peps.python.org/pep-0646/), so that `Annotated[T, Field(...)]`, `Annotated[T, Unpack[Field(...)]]` and `Annotated[T, *Field(...)]` are all treated consistently. + +Libraries consuming annotated-types should also ignore any metadata they do not recongize that came from unpacking a `GroupedMetadata`, just like they ignore unrecognized metadata in `Annotated` itself. + +Our own `annotated_types.Interval` class is a `GroupedMetadata` which unpacks itself into `Gt`, `Lt`, etc., so this is not an abstract concern. Similarly, `annotated_types.Len` is a `GroupedMetadata` which unpacks itself into `MinLen` (optionally) and `MaxLen`. + +### Consuming metadata + +We intend to not be prescriptive as to _how_ the metadata and constraints are used, but as an example of how one might parse constraints from types annotations see our [implementation in `test_main.py`](https://github.com/annotated-types/annotated-types/blob/f59cf6d1b5255a0fe359b93896759a180bec30ae/tests/test_main.py#L94-L103). + +It is up to the implementer to determine how this metadata is used. +You could use the metadata for runtime type checking, for generating schemas or to generate example data, amongst other use cases. + +## Design & History + +This package was designed at the PyCon 2022 sprints by the maintainers of Pydantic +and Hypothesis, with the goal of making it as easy as possible for end-users to +provide more informative annotations for use by runtime libraries. + +It is deliberately minimal, and following PEP-593 allows considerable downstream +discretion in what (if anything!) they choose to support. Nonetheless, we expect +that staying simple and covering _only_ the most common use-cases will give users +and maintainers the best experience we can. If you'd like more constraints for your +types - follow our lead, by defining them and documenting them downstream! 
diff --git a/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/RECORD b/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/RECORD new file mode 100644 index 00000000..70457290 --- /dev/null +++ b/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/RECORD @@ -0,0 +1,10 @@ +annotated_types-0.7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +annotated_types-0.7.0.dist-info/METADATA,sha256=7ltqxksJJ0wCYFGBNIQCWTlWQGeAH0hRFdnK3CB895E,15046 +annotated_types-0.7.0.dist-info/RECORD,, +annotated_types-0.7.0.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87 +annotated_types-0.7.0.dist-info/licenses/LICENSE,sha256=_hBJiEsaDZNCkB6I4H8ykl0ksxIdmXK2poBfuYJLCV0,1083 +annotated_types/__init__.py,sha256=RynLsRKUEGI0KimXydlD1fZEfEzWwDo0Uon3zOKhG1Q,13819 +annotated_types/__pycache__/__init__.cpython-312.pyc,, +annotated_types/__pycache__/test_cases.cpython-312.pyc,, +annotated_types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +annotated_types/test_cases.py,sha256=zHFX6EpcMbGJ8FzBYDbO56bPwx_DYIVSKbZM-4B3_lg,6421 diff --git a/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/WHEEL b/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/WHEEL new file mode 100644 index 00000000..516596c7 --- /dev/null +++ b/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.24.2 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/licenses/LICENSE b/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/licenses/LICENSE new file mode 100644 index 00000000..d99323a9 --- /dev/null +++ b/venv/Lib/site-packages/annotated_types-0.7.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2022 the contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/venv/Lib/site-packages/annotated_types/__init__.py b/venv/Lib/site-packages/annotated_types/__init__.py new file mode 100644 index 00000000..74e0deea --- /dev/null +++ b/venv/Lib/site-packages/annotated_types/__init__.py @@ -0,0 +1,432 @@ +import math +import sys +import types +from dataclasses import dataclass +from datetime import tzinfo +from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, SupportsFloat, SupportsIndex, TypeVar, Union + +if sys.version_info < (3, 8): + from typing_extensions import Protocol, runtime_checkable +else: + from typing import Protocol, runtime_checkable + +if sys.version_info < (3, 9): + from typing_extensions import Annotated, Literal +else: + from typing import Annotated, Literal + +if sys.version_info < (3, 10): + EllipsisType = type(Ellipsis) + KW_ONLY = {} + SLOTS = {} +else: + from types import EllipsisType + + KW_ONLY = {"kw_only": True} + SLOTS = {"slots": True} + + +__all__ = ( + 'BaseMetadata', + 'GroupedMetadata', + 'Gt', + 'Ge', + 'Lt', + 'Le', + 'Interval', + 'MultipleOf', + 'MinLen', + 'MaxLen', + 'Len', + 'Timezone', + 'Predicate', + 'LowerCase', + 'UpperCase', + 'IsDigits', + 'IsFinite', + 'IsNotFinite', + 'IsNan', + 'IsNotNan', + 'IsInfinite', + 'IsNotInfinite', + 'doc', + 'DocInfo', + '__version__', +) + +__version__ = '0.7.0' + + +T = TypeVar('T') + + +# arguments that start with __ are considered +# positional only +# see https://peps.python.org/pep-0484/#positional-only-arguments + + +class SupportsGt(Protocol): + def __gt__(self: T, __other: T) -> bool: + ... + + +class SupportsGe(Protocol): + def __ge__(self: T, __other: T) -> bool: + ... + + +class SupportsLt(Protocol): + def __lt__(self: T, __other: T) -> bool: + ... + + +class SupportsLe(Protocol): + def __le__(self: T, __other: T) -> bool: + ... + + +class SupportsMod(Protocol): + def __mod__(self: T, __other: T) -> T: + ... + + +class SupportsDiv(Protocol): + def __div__(self: T, __other: T) -> T: + ... + + +class BaseMetadata: + """Base class for all metadata. + + This exists mainly so that implementers + can do `isinstance(..., BaseMetadata)` while traversing field annotations. + """ + + __slots__ = () + + +@dataclass(frozen=True, **SLOTS) +class Gt(BaseMetadata): + """Gt(gt=x) implies that the value must be greater than x. + + It can be used with any type that supports the ``>`` operator, + including numbers, dates and times, strings, sets, and so on. + """ + + gt: SupportsGt + + +@dataclass(frozen=True, **SLOTS) +class Ge(BaseMetadata): + """Ge(ge=x) implies that the value must be greater than or equal to x. + + It can be used with any type that supports the ``>=`` operator, + including numbers, dates and times, strings, sets, and so on. + """ + + ge: SupportsGe + + +@dataclass(frozen=True, **SLOTS) +class Lt(BaseMetadata): + """Lt(lt=x) implies that the value must be less than x. + + It can be used with any type that supports the ``<`` operator, + including numbers, dates and times, strings, sets, and so on. + """ + + lt: SupportsLt + + +@dataclass(frozen=True, **SLOTS) +class Le(BaseMetadata): + """Le(le=x) implies that the value must be less than or equal to x. + + It can be used with any type that supports the ``<=`` operator, + including numbers, dates and times, strings, sets, and so on. + """ + + le: SupportsLe + + +@runtime_checkable +class GroupedMetadata(Protocol): + """A grouping of multiple objects, like typing.Unpack. + + `GroupedMetadata` on its own is not metadata and has no meaning. 
+ All of the constraints and metadata should be fully expressable + in terms of the `BaseMetadata`'s returned by `GroupedMetadata.__iter__()`. + + Concrete implementations should override `GroupedMetadata.__iter__()` + to add their own metadata. + For example: + + >>> @dataclass + >>> class Field(GroupedMetadata): + >>> gt: float | None = None + >>> description: str | None = None + ... + >>> def __iter__(self) -> Iterable[object]: + >>> if self.gt is not None: + >>> yield Gt(self.gt) + >>> if self.description is not None: + >>> yield Description(self.gt) + + Also see the implementation of `Interval` below for an example. + + Parsers should recognize this and unpack it so that it can be used + both with and without unpacking: + + - `Annotated[int, Field(...)]` (parser must unpack Field) + - `Annotated[int, *Field(...)]` (PEP-646) + """ # noqa: trailing-whitespace + + @property + def __is_annotated_types_grouped_metadata__(self) -> Literal[True]: + return True + + def __iter__(self) -> Iterator[object]: + ... + + if not TYPE_CHECKING: + __slots__ = () # allow subclasses to use slots + + def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None: + # Basic ABC like functionality without the complexity of an ABC + super().__init_subclass__(*args, **kwargs) + if cls.__iter__ is GroupedMetadata.__iter__: + raise TypeError("Can't subclass GroupedMetadata without implementing __iter__") + + def __iter__(self) -> Iterator[object]: # noqa: F811 + raise NotImplementedError # more helpful than "None has no attribute..." type errors + + +@dataclass(frozen=True, **KW_ONLY, **SLOTS) +class Interval(GroupedMetadata): + """Interval can express inclusive or exclusive bounds with a single object. + + It accepts keyword arguments ``gt``, ``ge``, ``lt``, and/or ``le``, which + are interpreted the same way as the single-bound constraints. + """ + + gt: Union[SupportsGt, None] = None + ge: Union[SupportsGe, None] = None + lt: Union[SupportsLt, None] = None + le: Union[SupportsLe, None] = None + + def __iter__(self) -> Iterator[BaseMetadata]: + """Unpack an Interval into zero or more single-bounds.""" + if self.gt is not None: + yield Gt(self.gt) + if self.ge is not None: + yield Ge(self.ge) + if self.lt is not None: + yield Lt(self.lt) + if self.le is not None: + yield Le(self.le) + + +@dataclass(frozen=True, **SLOTS) +class MultipleOf(BaseMetadata): + """MultipleOf(multiple_of=x) might be interpreted in two ways: + + 1. Python semantics, implying ``value % multiple_of == 0``, or + 2. JSONschema semantics, where ``int(value / multiple_of) == value / multiple_of`` + + We encourage users to be aware of these two common interpretations, + and libraries to carefully document which they implement. + """ + + multiple_of: Union[SupportsDiv, SupportsMod] + + +@dataclass(frozen=True, **SLOTS) +class MinLen(BaseMetadata): + """ + MinLen() implies minimum inclusive length, + e.g. ``len(value) >= min_length``. + """ + + min_length: Annotated[int, Ge(0)] + + +@dataclass(frozen=True, **SLOTS) +class MaxLen(BaseMetadata): + """ + MaxLen() implies maximum inclusive length, + e.g. ``len(value) <= max_length``. + """ + + max_length: Annotated[int, Ge(0)] + + +@dataclass(frozen=True, **SLOTS) +class Len(GroupedMetadata): + """ + Len() implies that ``min_length <= len(value) <= max_length``. + + Upper bound may be omitted or ``None`` to indicate no upper length bound. 
+ """ + + min_length: Annotated[int, Ge(0)] = 0 + max_length: Optional[Annotated[int, Ge(0)]] = None + + def __iter__(self) -> Iterator[BaseMetadata]: + """Unpack a Len into zone or more single-bounds.""" + if self.min_length > 0: + yield MinLen(self.min_length) + if self.max_length is not None: + yield MaxLen(self.max_length) + + +@dataclass(frozen=True, **SLOTS) +class Timezone(BaseMetadata): + """Timezone(tz=...) requires a datetime to be aware (or ``tz=None``, naive). + + ``Annotated[datetime, Timezone(None)]`` must be a naive datetime. + ``Timezone[...]`` (the ellipsis literal) expresses that the datetime must be + tz-aware but any timezone is allowed. + + You may also pass a specific timezone string or tzinfo object such as + ``Timezone(timezone.utc)`` or ``Timezone("Africa/Abidjan")`` to express that + you only allow a specific timezone, though we note that this is often + a symptom of poor design. + """ + + tz: Union[str, tzinfo, EllipsisType, None] + + +@dataclass(frozen=True, **SLOTS) +class Unit(BaseMetadata): + """Indicates that the value is a physical quantity with the specified unit. + + It is intended for usage with numeric types, where the value represents the + magnitude of the quantity. For example, ``distance: Annotated[float, Unit('m')]`` + or ``speed: Annotated[float, Unit('m/s')]``. + + Interpretation of the unit string is left to the discretion of the consumer. + It is suggested to follow conventions established by python libraries that work + with physical quantities, such as + + - ``pint`` : + - ``astropy.units``: + + For indicating a quantity with a certain dimensionality but without a specific unit + it is recommended to use square brackets, e.g. `Annotated[float, Unit('[time]')]`. + Note, however, ``annotated_types`` itself makes no use of the unit string. + """ + + unit: str + + +@dataclass(frozen=True, **SLOTS) +class Predicate(BaseMetadata): + """``Predicate(func: Callable)`` implies `func(value)` is truthy for valid values. + + Users should prefer statically inspectable metadata, but if you need the full + power and flexibility of arbitrary runtime predicates... here it is. + + We provide a few predefined predicates for common string constraints: + ``IsLower = Predicate(str.islower)``, ``IsUpper = Predicate(str.isupper)``, and + ``IsDigits = Predicate(str.isdigit)``. Users are encouraged to use methods which + can be given special handling, and avoid indirection like ``lambda s: s.lower()``. + + Some libraries might have special logic to handle certain predicates, e.g. by + checking for `str.isdigit` and using its presence to both call custom logic to + enforce digit-only strings, and customise some generated external schema. + + We do not specify what behaviour should be expected for predicates that raise + an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently + skip invalid constraints, or statically raise an error; or it might try calling it + and then propagate or discard the resulting exception. 
+ """ + + func: Callable[[Any], bool] + + def __repr__(self) -> str: + if getattr(self.func, "__name__", "") == "": + return f"{self.__class__.__name__}({self.func!r})" + if isinstance(self.func, (types.MethodType, types.BuiltinMethodType)) and ( + namespace := getattr(self.func.__self__, "__name__", None) + ): + return f"{self.__class__.__name__}({namespace}.{self.func.__name__})" + if isinstance(self.func, type(str.isascii)): # method descriptor + return f"{self.__class__.__name__}({self.func.__qualname__})" + return f"{self.__class__.__name__}({self.func.__name__})" + + +@dataclass +class Not: + func: Callable[[Any], bool] + + def __call__(self, __v: Any) -> bool: + return not self.func(__v) + + +_StrType = TypeVar("_StrType", bound=str) + +LowerCase = Annotated[_StrType, Predicate(str.islower)] +""" +Return True if the string is a lowercase string, False otherwise. + +A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string. +""" # noqa: E501 +UpperCase = Annotated[_StrType, Predicate(str.isupper)] +""" +Return True if the string is an uppercase string, False otherwise. + +A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string. +""" # noqa: E501 +IsDigit = Annotated[_StrType, Predicate(str.isdigit)] +IsDigits = IsDigit # type: ignore # plural for backwards compatibility, see #63 +""" +Return True if the string is a digit string, False otherwise. + +A string is a digit string if all characters in the string are digits and there is at least one character in the string. +""" # noqa: E501 +IsAscii = Annotated[_StrType, Predicate(str.isascii)] +""" +Return True if all characters in the string are ASCII, False otherwise. + +ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too. +""" + +_NumericType = TypeVar('_NumericType', bound=Union[SupportsFloat, SupportsIndex]) +IsFinite = Annotated[_NumericType, Predicate(math.isfinite)] +"""Return True if x is neither an infinity nor a NaN, and False otherwise.""" +IsNotFinite = Annotated[_NumericType, Predicate(Not(math.isfinite))] +"""Return True if x is one of infinity or NaN, and False otherwise""" +IsNan = Annotated[_NumericType, Predicate(math.isnan)] +"""Return True if x is a NaN (not a number), and False otherwise.""" +IsNotNan = Annotated[_NumericType, Predicate(Not(math.isnan))] +"""Return True if x is anything but NaN (not a number), and False otherwise.""" +IsInfinite = Annotated[_NumericType, Predicate(math.isinf)] +"""Return True if x is a positive or negative infinity, and False otherwise.""" +IsNotInfinite = Annotated[_NumericType, Predicate(Not(math.isinf))] +"""Return True if x is neither a positive or negative infinity, and False otherwise.""" + +try: + from typing_extensions import DocInfo, doc # type: ignore [attr-defined] +except ImportError: + + @dataclass(frozen=True, **SLOTS) + class DocInfo: # type: ignore [no-redef] + """ " + The return value of doc(), mainly to be used by tools that want to extract the + Annotated documentation at runtime. + """ + + documentation: str + """The documentation string passed to doc().""" + + def doc( + documentation: str, + ) -> DocInfo: + """ + Add documentation to a type annotation inside of Annotated. + + For example: + + >>> def hi(name: Annotated[int, doc("The name of the user")]) -> None: ... 
+ """ + return DocInfo(documentation) diff --git a/venv/Lib/site-packages/annotated_types/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/annotated_types/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..5947e5b5 Binary files /dev/null and b/venv/Lib/site-packages/annotated_types/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/annotated_types/__pycache__/test_cases.cpython-312.pyc b/venv/Lib/site-packages/annotated_types/__pycache__/test_cases.cpython-312.pyc new file mode 100644 index 00000000..ada47d29 Binary files /dev/null and b/venv/Lib/site-packages/annotated_types/__pycache__/test_cases.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/annotated_types/py.typed b/venv/Lib/site-packages/annotated_types/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/annotated_types/test_cases.py b/venv/Lib/site-packages/annotated_types/test_cases.py new file mode 100644 index 00000000..d9164d68 --- /dev/null +++ b/venv/Lib/site-packages/annotated_types/test_cases.py @@ -0,0 +1,151 @@ +import math +import sys +from datetime import date, datetime, timedelta, timezone +from decimal import Decimal +from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Set, Tuple + +if sys.version_info < (3, 9): + from typing_extensions import Annotated +else: + from typing import Annotated + +import annotated_types as at + + +class Case(NamedTuple): + """ + A test case for `annotated_types`. + """ + + annotation: Any + valid_cases: Iterable[Any] + invalid_cases: Iterable[Any] + + +def cases() -> Iterable[Case]: + # Gt, Ge, Lt, Le + yield Case(Annotated[int, at.Gt(4)], (5, 6, 1000), (4, 0, -1)) + yield Case(Annotated[float, at.Gt(0.5)], (0.6, 0.7, 0.8, 0.9), (0.5, 0.0, -0.1)) + yield Case( + Annotated[datetime, at.Gt(datetime(2000, 1, 1))], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(2000, 1, 1), datetime(1999, 12, 31)], + ) + yield Case( + Annotated[datetime, at.Gt(date(2000, 1, 1))], + [date(2000, 1, 2), date(2000, 1, 3)], + [date(2000, 1, 1), date(1999, 12, 31)], + ) + yield Case( + Annotated[datetime, at.Gt(Decimal('1.123'))], + [Decimal('1.1231'), Decimal('123')], + [Decimal('1.123'), Decimal('0')], + ) + + yield Case(Annotated[int, at.Ge(4)], (4, 5, 6, 1000, 4), (0, -1)) + yield Case(Annotated[float, at.Ge(0.5)], (0.5, 0.6, 0.7, 0.8, 0.9), (0.4, 0.0, -0.1)) + yield Case( + Annotated[datetime, at.Ge(datetime(2000, 1, 1))], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(1998, 1, 1), datetime(1999, 12, 31)], + ) + + yield Case(Annotated[int, at.Lt(4)], (0, -1), (4, 5, 6, 1000, 4)) + yield Case(Annotated[float, at.Lt(0.5)], (0.4, 0.0, -0.1), (0.5, 0.6, 0.7, 0.8, 0.9)) + yield Case( + Annotated[datetime, at.Lt(datetime(2000, 1, 1))], + [datetime(1999, 12, 31), datetime(1999, 12, 31)], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + ) + + yield Case(Annotated[int, at.Le(4)], (4, 0, -1), (5, 6, 1000)) + yield Case(Annotated[float, at.Le(0.5)], (0.5, 0.0, -0.1), (0.6, 0.7, 0.8, 0.9)) + yield Case( + Annotated[datetime, at.Le(datetime(2000, 1, 1))], + [datetime(2000, 1, 1), datetime(1999, 12, 31)], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + ) + + # Interval + yield Case(Annotated[int, at.Interval(gt=4)], (5, 6, 1000), (4, 0, -1)) + yield Case(Annotated[int, at.Interval(gt=4, lt=10)], (5, 6), (4, 10, 1000, 0, -1)) + yield Case(Annotated[float, at.Interval(ge=0.5, le=1)], (0.5, 0.9, 1), (0.49, 1.1)) + yield Case( + Annotated[datetime, 
at.Interval(gt=datetime(2000, 1, 1), le=datetime(2000, 1, 3))], + [datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(2000, 1, 1), datetime(2000, 1, 4)], + ) + + yield Case(Annotated[int, at.MultipleOf(multiple_of=3)], (0, 3, 9), (1, 2, 4)) + yield Case(Annotated[float, at.MultipleOf(multiple_of=0.5)], (0, 0.5, 1, 1.5), (0.4, 1.1)) + + # lengths + + yield Case(Annotated[str, at.MinLen(3)], ('123', '1234', 'x' * 10), ('', '1', '12')) + yield Case(Annotated[str, at.Len(3)], ('123', '1234', 'x' * 10), ('', '1', '12')) + yield Case(Annotated[List[int], at.MinLen(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2])) + yield Case(Annotated[List[int], at.Len(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2])) + + yield Case(Annotated[str, at.MaxLen(4)], ('', '1234'), ('12345', 'x' * 10)) + yield Case(Annotated[str, at.Len(0, 4)], ('', '1234'), ('12345', 'x' * 10)) + yield Case(Annotated[List[str], at.MaxLen(4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10)) + yield Case(Annotated[List[str], at.Len(0, 4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10)) + + yield Case(Annotated[str, at.Len(3, 5)], ('123', '12345'), ('', '1', '12', '123456', 'x' * 10)) + yield Case(Annotated[str, at.Len(3, 3)], ('123',), ('12', '1234')) + + yield Case(Annotated[Dict[int, int], at.Len(2, 3)], [{1: 1, 2: 2}], [{}, {1: 1}, {1: 1, 2: 2, 3: 3, 4: 4}]) + yield Case(Annotated[Set[int], at.Len(2, 3)], ({1, 2}, {1, 2, 3}), (set(), {1}, {1, 2, 3, 4})) + yield Case(Annotated[Tuple[int, ...], at.Len(2, 3)], ((1, 2), (1, 2, 3)), ((), (1,), (1, 2, 3, 4))) + + # Timezone + + yield Case( + Annotated[datetime, at.Timezone(None)], [datetime(2000, 1, 1)], [datetime(2000, 1, 1, tzinfo=timezone.utc)] + ) + yield Case( + Annotated[datetime, at.Timezone(...)], [datetime(2000, 1, 1, tzinfo=timezone.utc)], [datetime(2000, 1, 1)] + ) + yield Case( + Annotated[datetime, at.Timezone(timezone.utc)], + [datetime(2000, 1, 1, tzinfo=timezone.utc)], + [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))], + ) + yield Case( + Annotated[datetime, at.Timezone('Europe/London')], + [datetime(2000, 1, 1, tzinfo=timezone(timedelta(0), name='Europe/London'))], + [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))], + ) + + # Quantity + + yield Case(Annotated[float, at.Unit(unit='m')], (5, 4.2), ('5m', '4.2m')) + + # predicate types + + yield Case(at.LowerCase[str], ['abc', 'foobar'], ['', 'A', 'Boom']) + yield Case(at.UpperCase[str], ['ABC', 'DEFO'], ['', 'a', 'abc', 'AbC']) + yield Case(at.IsDigit[str], ['123'], ['', 'ab', 'a1b2']) + yield Case(at.IsAscii[str], ['123', 'foo bar'], ['£100', '😊', 'whatever 👀']) + + yield Case(Annotated[int, at.Predicate(lambda x: x % 2 == 0)], [0, 2, 4], [1, 3, 5]) + + yield Case(at.IsFinite[float], [1.23], [math.nan, math.inf, -math.inf]) + yield Case(at.IsNotFinite[float], [math.nan, math.inf], [1.23]) + yield Case(at.IsNan[float], [math.nan], [1.23, math.inf]) + yield Case(at.IsNotNan[float], [1.23, math.inf], [math.nan]) + yield Case(at.IsInfinite[float], [math.inf], [math.nan, 1.23]) + yield Case(at.IsNotInfinite[float], [math.nan, 1.23], [math.inf]) + + # check stacked predicates + yield Case(at.IsInfinite[Annotated[float, at.Predicate(lambda x: x > 0)]], [math.inf], [-math.inf, 1.23, math.nan]) + + # doc + yield Case(Annotated[int, at.doc("A number")], [1, 2], []) + + # custom GroupedMetadata + class MyCustomGroupedMetadata(at.GroupedMetadata): + def __iter__(self) -> Iterator[at.Predicate]: + yield 
at.Predicate(lambda x: float(x).is_integer()) + + yield Case(Annotated[float, MyCustomGroupedMetadata()], [0, 2.0], [0.01, 1.5]) diff --git a/venv/Lib/site-packages/anyio-4.9.0.dist-info/INSTALLER b/venv/Lib/site-packages/anyio-4.9.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/anyio-4.9.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/anyio-4.9.0.dist-info/LICENSE b/venv/Lib/site-packages/anyio-4.9.0.dist-info/LICENSE new file mode 100644 index 00000000..104eebf5 --- /dev/null +++ b/venv/Lib/site-packages/anyio-4.9.0.dist-info/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2018 Alex Grönholm + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/venv/Lib/site-packages/anyio-4.9.0.dist-info/METADATA b/venv/Lib/site-packages/anyio-4.9.0.dist-info/METADATA new file mode 100644 index 00000000..9d87e1db --- /dev/null +++ b/venv/Lib/site-packages/anyio-4.9.0.dist-info/METADATA @@ -0,0 +1,105 @@ +Metadata-Version: 2.2 +Name: anyio +Version: 4.9.0 +Summary: High level compatibility layer for multiple asynchronous event loop implementations +Author-email: Alex Grönholm +License: MIT +Project-URL: Documentation, https://anyio.readthedocs.io/en/latest/ +Project-URL: Changelog, https://anyio.readthedocs.io/en/stable/versionhistory.html +Project-URL: Source code, https://github.com/agronholm/anyio +Project-URL: Issue tracker, https://github.com/agronholm/anyio/issues +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Framework :: AnyIO +Classifier: Typing :: Typed +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: exceptiongroup>=1.0.2; python_version < "3.11" +Requires-Dist: idna>=2.8 +Requires-Dist: sniffio>=1.1 +Requires-Dist: typing_extensions>=4.5; python_version < "3.13" +Provides-Extra: trio +Requires-Dist: trio>=0.26.1; extra == "trio" +Provides-Extra: test +Requires-Dist: anyio[trio]; extra == "test" +Requires-Dist: blockbuster>=1.5.23; extra == "test" +Requires-Dist: coverage[toml]>=7; extra == 
"test" +Requires-Dist: exceptiongroup>=1.2.0; extra == "test" +Requires-Dist: hypothesis>=4.0; extra == "test" +Requires-Dist: psutil>=5.9; extra == "test" +Requires-Dist: pytest>=7.0; extra == "test" +Requires-Dist: trustme; extra == "test" +Requires-Dist: truststore>=0.9.1; python_version >= "3.10" and extra == "test" +Requires-Dist: uvloop>=0.21; (platform_python_implementation == "CPython" and platform_system != "Windows" and python_version < "3.14") and extra == "test" +Provides-Extra: doc +Requires-Dist: packaging; extra == "doc" +Requires-Dist: Sphinx~=8.2; extra == "doc" +Requires-Dist: sphinx_rtd_theme; extra == "doc" +Requires-Dist: sphinx-autodoc-typehints>=1.2.0; extra == "doc" + +.. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg + :target: https://github.com/agronholm/anyio/actions/workflows/test.yml + :alt: Build Status +.. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master + :target: https://coveralls.io/github/agronholm/anyio?branch=master + :alt: Code Coverage +.. image:: https://readthedocs.org/projects/anyio/badge/?version=latest + :target: https://anyio.readthedocs.io/en/latest/?badge=latest + :alt: Documentation +.. image:: https://badges.gitter.im/gitterHQ/gitter.svg + :target: https://gitter.im/python-trio/AnyIO + :alt: Gitter chat + +AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_ or +trio_. It implements trio-like `structured concurrency`_ (SC) on top of asyncio and works in harmony +with the native SC of trio itself. + +Applications and libraries written against AnyIO's API will run unmodified on either asyncio_ or +trio_. AnyIO can also be adopted into a library or application incrementally – bit by bit, no full +refactoring necessary. It will blend in with the native libraries of your chosen backend. + +Documentation +------------- + +View full documentation at: https://anyio.readthedocs.io/ + +Features +-------- + +AnyIO offers the following functionality: + +* Task groups (nurseries_ in trio terminology) +* High-level networking (TCP, UDP and UNIX sockets) + + * `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on Python + 3.8) + * async/await style UDP sockets (unlike asyncio where you still have to use Transports and + Protocols) + +* A versatile API for byte streams and object streams +* Inter-task synchronization and communication (locks, conditions, events, semaphores, object + streams) +* Worker threads +* Subprocesses +* Asynchronous file I/O (using worker threads) +* Signal handling + +AnyIO also comes with its own pytest_ plugin which also supports asynchronous fixtures. +It even works with the popular Hypothesis_ library. + +.. _asyncio: https://docs.python.org/3/library/asyncio.html +.. _trio: https://github.com/python-trio/trio +.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency +.. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning +.. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs +.. _pytest: https://docs.pytest.org/en/latest/ +.. 
_Hypothesis: https://hypothesis.works/ diff --git a/venv/Lib/site-packages/anyio-4.9.0.dist-info/RECORD b/venv/Lib/site-packages/anyio-4.9.0.dist-info/RECORD new file mode 100644 index 00000000..971925ec --- /dev/null +++ b/venv/Lib/site-packages/anyio-4.9.0.dist-info/RECORD @@ -0,0 +1,88 @@ +anyio-4.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +anyio-4.9.0.dist-info/LICENSE,sha256=U2GsncWPLvX9LpsJxoKXwX8ElQkJu8gCO9uC6s8iwrA,1081 +anyio-4.9.0.dist-info/METADATA,sha256=vvkWPXXTbrpTCFK7zdcYwQcSQhx6Q4qITM9t_PEQCrY,4682 +anyio-4.9.0.dist-info/RECORD,, +anyio-4.9.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91 +anyio-4.9.0.dist-info/entry_points.txt,sha256=_d6Yu6uiaZmNe0CydowirE9Cmg7zUL2g08tQpoS3Qvc,39 +anyio-4.9.0.dist-info/top_level.txt,sha256=QglSMiWX8_5dpoVAEIHdEYzvqFMdSYWmCj6tYw2ITkQ,6 +anyio/__init__.py,sha256=t8bZuNXa5ncwXBaNKbv48BDgZt48RT_zCEtrnPmjNU8,4993 +anyio/__pycache__/__init__.cpython-312.pyc,, +anyio/__pycache__/from_thread.cpython-312.pyc,, +anyio/__pycache__/lowlevel.cpython-312.pyc,, +anyio/__pycache__/pytest_plugin.cpython-312.pyc,, +anyio/__pycache__/to_interpreter.cpython-312.pyc,, +anyio/__pycache__/to_process.cpython-312.pyc,, +anyio/__pycache__/to_thread.cpython-312.pyc,, +anyio/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/_backends/__pycache__/__init__.cpython-312.pyc,, +anyio/_backends/__pycache__/_asyncio.cpython-312.pyc,, +anyio/_backends/__pycache__/_trio.cpython-312.pyc,, +anyio/_backends/_asyncio.py,sha256=AT1oaTfCE-9YFxooMlvld2yDqY5U2A-ANMcBDh9eRfI,93455 +anyio/_backends/_trio.py,sha256=HVfDqRGQ7Xj3JfTcYdgzmC7pZEplqU4NOO5kxNNSZnk,40429 +anyio/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/_core/__pycache__/__init__.cpython-312.pyc,, +anyio/_core/__pycache__/_asyncio_selector_thread.cpython-312.pyc,, +anyio/_core/__pycache__/_eventloop.cpython-312.pyc,, +anyio/_core/__pycache__/_exceptions.cpython-312.pyc,, +anyio/_core/__pycache__/_fileio.cpython-312.pyc,, +anyio/_core/__pycache__/_resources.cpython-312.pyc,, +anyio/_core/__pycache__/_signals.cpython-312.pyc,, +anyio/_core/__pycache__/_sockets.cpython-312.pyc,, +anyio/_core/__pycache__/_streams.cpython-312.pyc,, +anyio/_core/__pycache__/_subprocesses.cpython-312.pyc,, +anyio/_core/__pycache__/_synchronization.cpython-312.pyc,, +anyio/_core/__pycache__/_tasks.cpython-312.pyc,, +anyio/_core/__pycache__/_tempfile.cpython-312.pyc,, +anyio/_core/__pycache__/_testing.cpython-312.pyc,, +anyio/_core/__pycache__/_typedattr.cpython-312.pyc,, +anyio/_core/_asyncio_selector_thread.py,sha256=2PdxFM3cs02Kp6BSppbvmRT7q7asreTW5FgBxEsflBo,5626 +anyio/_core/_eventloop.py,sha256=t_tAwBFPjF8jrZGjlJ6bbYy6KA3bjsbZxV9mvh9t1i0,4695 +anyio/_core/_exceptions.py,sha256=RlPRlwastdmfDPoskdXNO6SI8_l3fclA2wtW6cokU9I,3503 +anyio/_core/_fileio.py,sha256=qFZhkLIz0cGXluvih_vcPUTucgq8UFVgsTCtYbijZIg,23340 +anyio/_core/_resources.py,sha256=NbmU5O5UX3xEyACnkmYX28Fmwdl-f-ny0tHym26e0w0,435 +anyio/_core/_signals.py,sha256=vulT1M1xdLYtAR-eY5TamIgaf1WTlOwOrMGwswlTTr8,905 +anyio/_core/_sockets.py,sha256=5Okc_UThGDEN9KCnsIhqWPRHBNuSy6b4NmG1i51TVF4,27150 +anyio/_core/_streams.py,sha256=OnaKgoDD-FcMSwLvkoAUGP51sG2ZdRvMpxt9q2w1gYA,1804 +anyio/_core/_subprocesses.py,sha256=EXm5igL7dj55iYkPlbYVAqtbqxJxjU-6OndSTIx9SRg,8047 +anyio/_core/_synchronization.py,sha256=DwUh8Tl6cG_UMVC_GyzPoC_U9BpfDfjMl9SINSxcZN4,20320 +anyio/_core/_tasks.py,sha256=f3CuWwo06cCZ6jaOv-JHFKWkgpgf2cvaF25Oh4augMA,4757 
+anyio/_core/_tempfile.py,sha256=s-_ucacXbxBH5Bo5eo65lN0lPwZQd5B8yNN_9nARpCM,19696 +anyio/_core/_testing.py,sha256=YUGwA5cgFFbUTv4WFd7cv_BSVr4ryTtPp8owQA3JdWE,2118 +anyio/_core/_typedattr.py,sha256=P4ozZikn3-DbpoYcvyghS_FOYAgbmUxeoU8-L_07pZM,2508 +anyio/abc/__init__.py,sha256=c2OQbTCS_fQowviMXanLPh8m29ccwkXmpDr7uyNZYOo,2652 +anyio/abc/__pycache__/__init__.cpython-312.pyc,, +anyio/abc/__pycache__/_eventloop.cpython-312.pyc,, +anyio/abc/__pycache__/_resources.cpython-312.pyc,, +anyio/abc/__pycache__/_sockets.cpython-312.pyc,, +anyio/abc/__pycache__/_streams.cpython-312.pyc,, +anyio/abc/__pycache__/_subprocesses.cpython-312.pyc,, +anyio/abc/__pycache__/_tasks.cpython-312.pyc,, +anyio/abc/__pycache__/_testing.cpython-312.pyc,, +anyio/abc/_eventloop.py,sha256=UmL8DZCvQTgxzmyBZcGm9kWj9VQY8BMWueLh5S8yWN4,9682 +anyio/abc/_resources.py,sha256=DrYvkNN1hH6Uvv5_5uKySvDsnknGVDe8FCKfko0VtN8,783 +anyio/abc/_sockets.py,sha256=KhWtJxan8jpBXKwPaFeQzI4iRXdFaOIn0HXtDZnaO7U,6262 +anyio/abc/_streams.py,sha256=He_JpkAW2g5veOzcUq0XsRC2nId_i35L-d8cs7Uj1ZQ,6598 +anyio/abc/_subprocesses.py,sha256=cumAPJTktOQtw63IqG0lDpyZqu_l1EElvQHMiwJgL08,2067 +anyio/abc/_tasks.py,sha256=yJWbMwowvqjlAX4oJ3l9Is1w-zwynr2lX1Z02AWJqsY,3080 +anyio/abc/_testing.py,sha256=tBJUzkSfOXJw23fe8qSJ03kJlShOYjjaEyFB6k6MYT8,1821 +anyio/from_thread.py,sha256=MbXHZpgM9wgsRkbGhMNMomEGYj7Y_QYq6a5BZ3c5Ev8,17478 +anyio/lowlevel.py,sha256=nkgmW--SdxGVp0cmLUYazjkigveRm5HY7-gW8Bpp9oY,4169 +anyio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/pytest_plugin.py,sha256=qXNwk9Pa7hPQKWocgLl9qijqKGMkGzdH2wJa-jPkGUM,9375 +anyio/streams/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +anyio/streams/__pycache__/__init__.cpython-312.pyc,, +anyio/streams/__pycache__/buffered.cpython-312.pyc,, +anyio/streams/__pycache__/file.cpython-312.pyc,, +anyio/streams/__pycache__/memory.cpython-312.pyc,, +anyio/streams/__pycache__/stapled.cpython-312.pyc,, +anyio/streams/__pycache__/text.cpython-312.pyc,, +anyio/streams/__pycache__/tls.cpython-312.pyc,, +anyio/streams/buffered.py,sha256=UCldKC168YuLvT7n3HtNPnQ2iWAMSTYQWbZvzLwMwkM,4500 +anyio/streams/file.py,sha256=6uoTNb5KbMoj-6gS3_xrrL8uZN8Q4iIvOS1WtGyFfKw,4383 +anyio/streams/memory.py,sha256=o1OVVx0OooteTTe2GytJreum93Ucuw5s4cAsr3X0-Ag,10560 +anyio/streams/stapled.py,sha256=U09pCrmOw9kkNhe6tKopsm1QIMT1lFTFvtb-A7SIe4k,4302 +anyio/streams/text.py,sha256=6x8w8xlfCZKTUWQoJiMPoMhSSJFUBRKgoBNSBtbd9yg,5094 +anyio/streams/tls.py,sha256=HxzpVmUgo8SUSIBass_lvef1pAI1uRSrnysM3iEGzl4,13199 +anyio/to_interpreter.py,sha256=UhuNCIucCRN7ZtyJg35Mlamzs1JpgDvK4xnL4TDWrAo,6527 +anyio/to_process.py,sha256=ZvruelRM-HNmqDaql4sdNODg2QD_uSlwSCxnV4OhsfQ,9595 +anyio/to_thread.py,sha256=WM2JQ2MbVsd5D5CM08bQiTwzZIvpsGjfH1Fy247KoDQ,2396 diff --git a/venv/Lib/site-packages/anyio-4.9.0.dist-info/WHEEL b/venv/Lib/site-packages/anyio-4.9.0.dist-info/WHEEL new file mode 100644 index 00000000..9c3ae630 --- /dev/null +++ b/venv/Lib/site-packages/anyio-4.9.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (76.0.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/anyio-4.9.0.dist-info/entry_points.txt b/venv/Lib/site-packages/anyio-4.9.0.dist-info/entry_points.txt new file mode 100644 index 00000000..44dd9bdc --- /dev/null +++ b/venv/Lib/site-packages/anyio-4.9.0.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[pytest11] +anyio = anyio.pytest_plugin diff --git a/venv/Lib/site-packages/anyio-4.9.0.dist-info/top_level.txt 
b/venv/Lib/site-packages/anyio-4.9.0.dist-info/top_level.txt new file mode 100644 index 00000000..c77c069e --- /dev/null +++ b/venv/Lib/site-packages/anyio-4.9.0.dist-info/top_level.txt @@ -0,0 +1 @@ +anyio diff --git a/venv/Lib/site-packages/anyio/__init__.py b/venv/Lib/site-packages/anyio/__init__.py new file mode 100644 index 00000000..578cda6f --- /dev/null +++ b/venv/Lib/site-packages/anyio/__init__.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from ._core._eventloop import current_time as current_time +from ._core._eventloop import get_all_backends as get_all_backends +from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class +from ._core._eventloop import run as run +from ._core._eventloop import sleep as sleep +from ._core._eventloop import sleep_forever as sleep_forever +from ._core._eventloop import sleep_until as sleep_until +from ._core._exceptions import BrokenResourceError as BrokenResourceError +from ._core._exceptions import BrokenWorkerIntepreter as BrokenWorkerIntepreter +from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess +from ._core._exceptions import BusyResourceError as BusyResourceError +from ._core._exceptions import ClosedResourceError as ClosedResourceError +from ._core._exceptions import DelimiterNotFound as DelimiterNotFound +from ._core._exceptions import EndOfStream as EndOfStream +from ._core._exceptions import IncompleteRead as IncompleteRead +from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError +from ._core._exceptions import WouldBlock as WouldBlock +from ._core._fileio import AsyncFile as AsyncFile +from ._core._fileio import Path as Path +from ._core._fileio import open_file as open_file +from ._core._fileio import wrap_file as wrap_file +from ._core._resources import aclose_forcefully as aclose_forcefully +from ._core._signals import open_signal_receiver as open_signal_receiver +from ._core._sockets import connect_tcp as connect_tcp +from ._core._sockets import connect_unix as connect_unix +from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket +from ._core._sockets import ( + create_connected_unix_datagram_socket as create_connected_unix_datagram_socket, +) +from ._core._sockets import create_tcp_listener as create_tcp_listener +from ._core._sockets import create_udp_socket as create_udp_socket +from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket +from ._core._sockets import create_unix_listener as create_unix_listener +from ._core._sockets import getaddrinfo as getaddrinfo +from ._core._sockets import getnameinfo as getnameinfo +from ._core._sockets import wait_readable as wait_readable +from ._core._sockets import wait_socket_readable as wait_socket_readable +from ._core._sockets import wait_socket_writable as wait_socket_writable +from ._core._sockets import wait_writable as wait_writable +from ._core._streams import create_memory_object_stream as create_memory_object_stream +from ._core._subprocesses import open_process as open_process +from ._core._subprocesses import run_process as run_process +from ._core._synchronization import CapacityLimiter as CapacityLimiter +from ._core._synchronization import ( + CapacityLimiterStatistics as CapacityLimiterStatistics, +) +from ._core._synchronization import Condition as Condition +from ._core._synchronization import ConditionStatistics as ConditionStatistics +from ._core._synchronization import Event as Event +from ._core._synchronization import 
EventStatistics as EventStatistics +from ._core._synchronization import Lock as Lock +from ._core._synchronization import LockStatistics as LockStatistics +from ._core._synchronization import ResourceGuard as ResourceGuard +from ._core._synchronization import Semaphore as Semaphore +from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics +from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED +from ._core._tasks import CancelScope as CancelScope +from ._core._tasks import create_task_group as create_task_group +from ._core._tasks import current_effective_deadline as current_effective_deadline +from ._core._tasks import fail_after as fail_after +from ._core._tasks import move_on_after as move_on_after +from ._core._tempfile import NamedTemporaryFile as NamedTemporaryFile +from ._core._tempfile import SpooledTemporaryFile as SpooledTemporaryFile +from ._core._tempfile import TemporaryDirectory as TemporaryDirectory +from ._core._tempfile import TemporaryFile as TemporaryFile +from ._core._tempfile import gettempdir as gettempdir +from ._core._tempfile import gettempdirb as gettempdirb +from ._core._tempfile import mkdtemp as mkdtemp +from ._core._tempfile import mkstemp as mkstemp +from ._core._testing import TaskInfo as TaskInfo +from ._core._testing import get_current_task as get_current_task +from ._core._testing import get_running_tasks as get_running_tasks +from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked +from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider +from ._core._typedattr import TypedAttributeSet as TypedAttributeSet +from ._core._typedattr import typed_attribute as typed_attribute + +# Re-export imports so they look like they live directly in this package +for __value in list(locals().values()): + if getattr(__value, "__module__", "").startswith("anyio."): + __value.__module__ = __name__ + +del __value diff --git a/venv/Lib/site-packages/anyio/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/anyio/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..2103438e Binary files /dev/null and b/venv/Lib/site-packages/anyio/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/__pycache__/from_thread.cpython-312.pyc b/venv/Lib/site-packages/anyio/__pycache__/from_thread.cpython-312.pyc new file mode 100644 index 00000000..8bdccec6 Binary files /dev/null and b/venv/Lib/site-packages/anyio/__pycache__/from_thread.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/__pycache__/lowlevel.cpython-312.pyc b/venv/Lib/site-packages/anyio/__pycache__/lowlevel.cpython-312.pyc new file mode 100644 index 00000000..c1b79058 Binary files /dev/null and b/venv/Lib/site-packages/anyio/__pycache__/lowlevel.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/__pycache__/pytest_plugin.cpython-312.pyc b/venv/Lib/site-packages/anyio/__pycache__/pytest_plugin.cpython-312.pyc new file mode 100644 index 00000000..b345c66e Binary files /dev/null and b/venv/Lib/site-packages/anyio/__pycache__/pytest_plugin.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/__pycache__/to_interpreter.cpython-312.pyc b/venv/Lib/site-packages/anyio/__pycache__/to_interpreter.cpython-312.pyc new file mode 100644 index 00000000..38adbcff Binary files /dev/null and b/venv/Lib/site-packages/anyio/__pycache__/to_interpreter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/__pycache__/to_process.cpython-312.pyc 
b/venv/Lib/site-packages/anyio/__pycache__/to_process.cpython-312.pyc new file mode 100644 index 00000000..2046000b Binary files /dev/null and b/venv/Lib/site-packages/anyio/__pycache__/to_process.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/__pycache__/to_thread.cpython-312.pyc b/venv/Lib/site-packages/anyio/__pycache__/to_thread.cpython-312.pyc new file mode 100644 index 00000000..f47a854a Binary files /dev/null and b/venv/Lib/site-packages/anyio/__pycache__/to_thread.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_backends/__init__.py b/venv/Lib/site-packages/anyio/_backends/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/anyio/_backends/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/anyio/_backends/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..384b9ed9 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_backends/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-312.pyc b/venv/Lib/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-312.pyc new file mode 100644 index 00000000..58551c56 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_backends/__pycache__/_trio.cpython-312.pyc b/venv/Lib/site-packages/anyio/_backends/__pycache__/_trio.cpython-312.pyc new file mode 100644 index 00000000..68310abe Binary files /dev/null and b/venv/Lib/site-packages/anyio/_backends/__pycache__/_trio.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_backends/_asyncio.py b/venv/Lib/site-packages/anyio/_backends/_asyncio.py new file mode 100644 index 00000000..ed91f404 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_backends/_asyncio.py @@ -0,0 +1,2816 @@ +from __future__ import annotations + +import array +import asyncio +import concurrent.futures +import contextvars +import math +import os +import socket +import sys +import threading +import weakref +from asyncio import ( + AbstractEventLoop, + CancelledError, + all_tasks, + create_task, + current_task, + get_running_loop, + sleep, +) +from asyncio.base_events import _run_until_complete_cb # type: ignore[attr-defined] +from collections import OrderedDict, deque +from collections.abc import ( + AsyncGenerator, + AsyncIterator, + Awaitable, + Callable, + Collection, + Coroutine, + Iterable, + Sequence, +) +from concurrent.futures import Future +from contextlib import AbstractContextManager, suppress +from contextvars import Context, copy_context +from dataclasses import dataclass +from functools import partial, wraps +from inspect import ( + CORO_RUNNING, + CORO_SUSPENDED, + getcoroutinestate, + iscoroutine, +) +from io import IOBase +from os import PathLike +from queue import Queue +from signal import Signals +from socket import AddressFamily, SocketKind +from threading import Thread +from types import CodeType, TracebackType +from typing import ( + IO, + TYPE_CHECKING, + Any, + Optional, + TypeVar, + cast, +) +from weakref import WeakKeyDictionary + +import sniffio + +from .. 
import ( + CapacityLimiterStatistics, + EventStatistics, + LockStatistics, + TaskInfo, + abc, +) +from .._core._eventloop import claim_worker_thread, threadlocals +from .._core._exceptions import ( + BrokenResourceError, + BusyResourceError, + ClosedResourceError, + EndOfStream, + WouldBlock, + iterate_exceptions, +) +from .._core._sockets import convert_ipv6_sockaddr +from .._core._streams import create_memory_object_stream +from .._core._synchronization import ( + CapacityLimiter as BaseCapacityLimiter, +) +from .._core._synchronization import Event as BaseEvent +from .._core._synchronization import Lock as BaseLock +from .._core._synchronization import ( + ResourceGuard, + SemaphoreStatistics, +) +from .._core._synchronization import Semaphore as BaseSemaphore +from .._core._tasks import CancelScope as BaseCancelScope +from ..abc import ( + AsyncBackend, + IPSockAddrType, + SocketListener, + UDPPacketType, + UNIXDatagramPacketType, +) +from ..abc._eventloop import StrOrBytesPath +from ..lowlevel import RunVar +from ..streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream + +if TYPE_CHECKING: + from _typeshed import FileDescriptorLike +else: + FileDescriptorLike = object + +if sys.version_info >= (3, 10): + from typing import ParamSpec +else: + from typing_extensions import ParamSpec + +if sys.version_info >= (3, 11): + from asyncio import Runner + from typing import TypeVarTuple, Unpack +else: + import contextvars + import enum + import signal + from asyncio import coroutines, events, exceptions, tasks + + from exceptiongroup import BaseExceptionGroup + from typing_extensions import TypeVarTuple, Unpack + + class _State(enum.Enum): + CREATED = "created" + INITIALIZED = "initialized" + CLOSED = "closed" + + class Runner: + # Copied from CPython 3.11 + def __init__( + self, + *, + debug: bool | None = None, + loop_factory: Callable[[], AbstractEventLoop] | None = None, + ): + self._state = _State.CREATED + self._debug = debug + self._loop_factory = loop_factory + self._loop: AbstractEventLoop | None = None + self._context = None + self._interrupt_count = 0 + self._set_event_loop = False + + def __enter__(self) -> Runner: + self._lazy_init() + return self + + def __exit__( + self, + exc_type: type[BaseException], + exc_val: BaseException, + exc_tb: TracebackType, + ) -> None: + self.close() + + def close(self) -> None: + """Shutdown and close event loop.""" + if self._state is not _State.INITIALIZED: + return + try: + loop = self._loop + _cancel_all_tasks(loop) + loop.run_until_complete(loop.shutdown_asyncgens()) + if hasattr(loop, "shutdown_default_executor"): + loop.run_until_complete(loop.shutdown_default_executor()) + else: + loop.run_until_complete(_shutdown_default_executor(loop)) + finally: + if self._set_event_loop: + events.set_event_loop(None) + loop.close() + self._loop = None + self._state = _State.CLOSED + + def get_loop(self) -> AbstractEventLoop: + """Return embedded event loop.""" + self._lazy_init() + return self._loop + + def run(self, coro: Coroutine[T_Retval], *, context=None) -> T_Retval: + """Run a coroutine inside the embedded event loop.""" + if not coroutines.iscoroutine(coro): + raise ValueError(f"a coroutine was expected, got {coro!r}") + + if events._get_running_loop() is not None: + # fail fast with short traceback + raise RuntimeError( + "Runner.run() cannot be called from a running event loop" + ) + + self._lazy_init() + + if context is None: + context = self._context + task = context.run(self._loop.create_task, coro) + + if ( + 
threading.current_thread() is threading.main_thread() + and signal.getsignal(signal.SIGINT) is signal.default_int_handler + ): + sigint_handler = partial(self._on_sigint, main_task=task) + try: + signal.signal(signal.SIGINT, sigint_handler) + except ValueError: + # `signal.signal` may throw if `threading.main_thread` does + # not support signals (e.g. embedded interpreter with signals + # not registered - see gh-91880) + sigint_handler = None + else: + sigint_handler = None + + self._interrupt_count = 0 + try: + return self._loop.run_until_complete(task) + except exceptions.CancelledError: + if self._interrupt_count > 0: + uncancel = getattr(task, "uncancel", None) + if uncancel is not None and uncancel() == 0: + raise KeyboardInterrupt() + raise # CancelledError + finally: + if ( + sigint_handler is not None + and signal.getsignal(signal.SIGINT) is sigint_handler + ): + signal.signal(signal.SIGINT, signal.default_int_handler) + + def _lazy_init(self) -> None: + if self._state is _State.CLOSED: + raise RuntimeError("Runner is closed") + if self._state is _State.INITIALIZED: + return + if self._loop_factory is None: + self._loop = events.new_event_loop() + if not self._set_event_loop: + # Call set_event_loop only once to avoid calling + # attach_loop multiple times on child watchers + events.set_event_loop(self._loop) + self._set_event_loop = True + else: + self._loop = self._loop_factory() + if self._debug is not None: + self._loop.set_debug(self._debug) + self._context = contextvars.copy_context() + self._state = _State.INITIALIZED + + def _on_sigint(self, signum, frame, main_task: asyncio.Task) -> None: + self._interrupt_count += 1 + if self._interrupt_count == 1 and not main_task.done(): + main_task.cancel() + # wakeup loop if it is blocked by select() with long timeout + self._loop.call_soon_threadsafe(lambda: None) + return + raise KeyboardInterrupt() + + def _cancel_all_tasks(loop: AbstractEventLoop) -> None: + to_cancel = tasks.all_tasks(loop) + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + + loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True)) + + for task in to_cancel: + if task.cancelled(): + continue + if task.exception() is not None: + loop.call_exception_handler( + { + "message": "unhandled exception during asyncio.run() shutdown", + "exception": task.exception(), + "task": task, + } + ) + + async def _shutdown_default_executor(loop: AbstractEventLoop) -> None: + """Schedule the shutdown of the default executor.""" + + def _do_shutdown(future: asyncio.futures.Future) -> None: + try: + loop._default_executor.shutdown(wait=True) # type: ignore[attr-defined] + loop.call_soon_threadsafe(future.set_result, None) + except Exception as ex: + loop.call_soon_threadsafe(future.set_exception, ex) + + loop._executor_shutdown_called = True + if loop._default_executor is None: + return + future = loop.create_future() + thread = threading.Thread(target=_do_shutdown, args=(future,)) + thread.start() + try: + await future + finally: + thread.join() + + +T_Retval = TypeVar("T_Retval") +T_contra = TypeVar("T_contra", contravariant=True) +PosArgsT = TypeVarTuple("PosArgsT") +P = ParamSpec("P") + +_root_task: RunVar[asyncio.Task | None] = RunVar("_root_task") + + +def find_root_task() -> asyncio.Task: + root_task = _root_task.get(None) + if root_task is not None and not root_task.done(): + return root_task + + # Look for a task that has been started via run_until_complete() + for task in all_tasks(): + if task._callbacks and not task.done(): + 
callbacks = [cb for cb, context in task._callbacks] + for cb in callbacks: + if ( + cb is _run_until_complete_cb + or getattr(cb, "__module__", None) == "uvloop.loop" + ): + _root_task.set(task) + return task + + # Look up the topmost task in the AnyIO task tree, if possible + task = cast(asyncio.Task, current_task()) + state = _task_states.get(task) + if state: + cancel_scope = state.cancel_scope + while cancel_scope and cancel_scope._parent_scope is not None: + cancel_scope = cancel_scope._parent_scope + + if cancel_scope is not None: + return cast(asyncio.Task, cancel_scope._host_task) + + return task + + +def get_callable_name(func: Callable) -> str: + module = getattr(func, "__module__", None) + qualname = getattr(func, "__qualname__", None) + return ".".join([x for x in (module, qualname) if x]) + + +# +# Event loop +# + +_run_vars: WeakKeyDictionary[asyncio.AbstractEventLoop, Any] = WeakKeyDictionary() + + +def _task_started(task: asyncio.Task) -> bool: + """Return ``True`` if the task has been started and has not finished.""" + # The task coro should never be None here, as we never add finished tasks to the + # task list + coro = task.get_coro() + assert coro is not None + try: + return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED) + except AttributeError: + # task coro is async_genenerator_asend https://bugs.python.org/issue37771 + raise Exception(f"Cannot determine if task {task} has started or not") from None + + +# +# Timeouts and cancellation +# + + +def is_anyio_cancellation(exc: CancelledError) -> bool: + # Sometimes third party frameworks catch a CancelledError and raise a new one, so as + # a workaround we have to look at the previous ones in __context__ too for a + # matching cancel message + while True: + if ( + exc.args + and isinstance(exc.args[0], str) + and exc.args[0].startswith("Cancelled by cancel scope ") + ): + return True + + if isinstance(exc.__context__, CancelledError): + exc = exc.__context__ + continue + + return False + + +class CancelScope(BaseCancelScope): + def __new__( + cls, *, deadline: float = math.inf, shield: bool = False + ) -> CancelScope: + return object.__new__(cls) + + def __init__(self, deadline: float = math.inf, shield: bool = False): + self._deadline = deadline + self._shield = shield + self._parent_scope: CancelScope | None = None + self._child_scopes: set[CancelScope] = set() + self._cancel_called = False + self._cancelled_caught = False + self._active = False + self._timeout_handle: asyncio.TimerHandle | None = None + self._cancel_handle: asyncio.Handle | None = None + self._tasks: set[asyncio.Task] = set() + self._host_task: asyncio.Task | None = None + if sys.version_info >= (3, 11): + self._pending_uncancellations: int | None = 0 + else: + self._pending_uncancellations = None + + def __enter__(self) -> CancelScope: + if self._active: + raise RuntimeError( + "Each CancelScope may only be used for a single 'with' block" + ) + + self._host_task = host_task = cast(asyncio.Task, current_task()) + self._tasks.add(host_task) + try: + task_state = _task_states[host_task] + except KeyError: + task_state = TaskState(None, self) + _task_states[host_task] = task_state + else: + self._parent_scope = task_state.cancel_scope + task_state.cancel_scope = self + if self._parent_scope is not None: + # If using an eager task factory, the parent scope may not even contain + # the host task + self._parent_scope._child_scopes.add(self) + self._parent_scope._tasks.discard(host_task) + + self._timeout() + self._active = True + + # Start 
cancelling the host task if the scope was cancelled before entering + if self._cancel_called: + self._deliver_cancellation(self) + + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool: + del exc_tb + + if not self._active: + raise RuntimeError("This cancel scope is not active") + if current_task() is not self._host_task: + raise RuntimeError( + "Attempted to exit cancel scope in a different task than it was " + "entered in" + ) + + assert self._host_task is not None + host_task_state = _task_states.get(self._host_task) + if host_task_state is None or host_task_state.cancel_scope is not self: + raise RuntimeError( + "Attempted to exit a cancel scope that isn't the current tasks's " + "current cancel scope" + ) + + try: + self._active = False + if self._timeout_handle: + self._timeout_handle.cancel() + self._timeout_handle = None + + self._tasks.remove(self._host_task) + if self._parent_scope is not None: + self._parent_scope._child_scopes.remove(self) + self._parent_scope._tasks.add(self._host_task) + + host_task_state.cancel_scope = self._parent_scope + + # Restart the cancellation effort in the closest visible, cancelled parent + # scope if necessary + self._restart_cancellation_in_parent() + + # We only swallow the exception iff it was an AnyIO CancelledError, either + # directly as exc_val or inside an exception group and there are no cancelled + # parent cancel scopes visible to us here + if self._cancel_called and not self._parent_cancellation_is_visible_to_us: + # For each level-cancel() call made on the host task, call uncancel() + while self._pending_uncancellations: + self._host_task.uncancel() + self._pending_uncancellations -= 1 + + # Update cancelled_caught and check for exceptions we must not swallow + cannot_swallow_exc_val = False + if exc_val is not None: + for exc in iterate_exceptions(exc_val): + if isinstance(exc, CancelledError) and is_anyio_cancellation( + exc + ): + self._cancelled_caught = True + else: + cannot_swallow_exc_val = True + + return self._cancelled_caught and not cannot_swallow_exc_val + else: + if self._pending_uncancellations: + assert self._parent_scope is not None + assert self._parent_scope._pending_uncancellations is not None + self._parent_scope._pending_uncancellations += ( + self._pending_uncancellations + ) + self._pending_uncancellations = 0 + + return False + finally: + self._host_task = None + del exc_val + + @property + def _effectively_cancelled(self) -> bool: + cancel_scope: CancelScope | None = self + while cancel_scope is not None: + if cancel_scope._cancel_called: + return True + + if cancel_scope.shield: + return False + + cancel_scope = cancel_scope._parent_scope + + return False + + @property + def _parent_cancellation_is_visible_to_us(self) -> bool: + return ( + self._parent_scope is not None + and not self.shield + and self._parent_scope._effectively_cancelled + ) + + def _timeout(self) -> None: + if self._deadline != math.inf: + loop = get_running_loop() + if loop.time() >= self._deadline: + self.cancel() + else: + self._timeout_handle = loop.call_at(self._deadline, self._timeout) + + def _deliver_cancellation(self, origin: CancelScope) -> bool: + """ + Deliver cancellation to directly contained tasks and nested cancel scopes. + + Schedule another run at the end if we still have tasks eligible for + cancellation. 
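+
+        (Scheduling happens via ``call_soon`` in the ``origin is self`` branch
+        at the bottom of this method.)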
+ + :param origin: the cancel scope that originated the cancellation + :return: ``True`` if the delivery needs to be retried on the next cycle + + """ + should_retry = False + current = current_task() + for task in self._tasks: + should_retry = True + if task._must_cancel: # type: ignore[attr-defined] + continue + + # The task is eligible for cancellation if it has started + if task is not current and (task is self._host_task or _task_started(task)): + waiter = task._fut_waiter # type: ignore[attr-defined] + if not isinstance(waiter, asyncio.Future) or not waiter.done(): + task.cancel(f"Cancelled by cancel scope {id(origin):x}") + if ( + task is origin._host_task + and origin._pending_uncancellations is not None + ): + origin._pending_uncancellations += 1 + + # Deliver cancellation to child scopes that aren't shielded or running their own + # cancellation callbacks + for scope in self._child_scopes: + if not scope._shield and not scope.cancel_called: + should_retry = scope._deliver_cancellation(origin) or should_retry + + # Schedule another callback if there are still tasks left + if origin is self: + if should_retry: + self._cancel_handle = get_running_loop().call_soon( + self._deliver_cancellation, origin + ) + else: + self._cancel_handle = None + + return should_retry + + def _restart_cancellation_in_parent(self) -> None: + """ + Restart the cancellation effort in the closest directly cancelled parent scope. + + """ + scope = self._parent_scope + while scope is not None: + if scope._cancel_called: + if scope._cancel_handle is None: + scope._deliver_cancellation(scope) + + break + + # No point in looking beyond any shielded scope + if scope._shield: + break + + scope = scope._parent_scope + + def cancel(self) -> None: + if not self._cancel_called: + if self._timeout_handle: + self._timeout_handle.cancel() + self._timeout_handle = None + + self._cancel_called = True + if self._host_task is not None: + self._deliver_cancellation(self) + + @property + def deadline(self) -> float: + return self._deadline + + @deadline.setter + def deadline(self, value: float) -> None: + self._deadline = float(value) + if self._timeout_handle is not None: + self._timeout_handle.cancel() + self._timeout_handle = None + + if self._active and not self._cancel_called: + self._timeout() + + @property + def cancel_called(self) -> bool: + return self._cancel_called + + @property + def cancelled_caught(self) -> bool: + return self._cancelled_caught + + @property + def shield(self) -> bool: + return self._shield + + @shield.setter + def shield(self, value: bool) -> None: + if self._shield != value: + self._shield = value + if not value: + self._restart_cancellation_in_parent() + + +# +# Task states +# + + +class TaskState: + """ + Encapsulates auxiliary task information that cannot be added to the Task instance + itself because there are no guarantees about its implementation. 
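+
+    (Instances are associated with tasks via the module-level ``_task_states``
+    WeakKeyDictionary defined below.)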
+ """ + + __slots__ = "parent_id", "cancel_scope", "__weakref__" + + def __init__(self, parent_id: int | None, cancel_scope: CancelScope | None): + self.parent_id = parent_id + self.cancel_scope = cancel_scope + + +_task_states: WeakKeyDictionary[asyncio.Task, TaskState] = WeakKeyDictionary() + + +# +# Task groups +# + + +class _AsyncioTaskStatus(abc.TaskStatus): + def __init__(self, future: asyncio.Future, parent_id: int): + self._future = future + self._parent_id = parent_id + + def started(self, value: T_contra | None = None) -> None: + try: + self._future.set_result(value) + except asyncio.InvalidStateError: + if not self._future.cancelled(): + raise RuntimeError( + "called 'started' twice on the same task status" + ) from None + + task = cast(asyncio.Task, current_task()) + _task_states[task].parent_id = self._parent_id + + +if sys.version_info >= (3, 12): + _eager_task_factory_code: CodeType | None = asyncio.eager_task_factory.__code__ +else: + _eager_task_factory_code = None + + +class TaskGroup(abc.TaskGroup): + def __init__(self) -> None: + self.cancel_scope: CancelScope = CancelScope() + self._active = False + self._exceptions: list[BaseException] = [] + self._tasks: set[asyncio.Task] = set() + self._on_completed_fut: asyncio.Future[None] | None = None + + async def __aenter__(self) -> TaskGroup: + self.cancel_scope.__enter__() + self._active = True + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: + try: + if exc_val is not None: + self.cancel_scope.cancel() + if not isinstance(exc_val, CancelledError): + self._exceptions.append(exc_val) + + loop = get_running_loop() + try: + if self._tasks: + with CancelScope() as wait_scope: + while self._tasks: + self._on_completed_fut = loop.create_future() + + try: + await self._on_completed_fut + except CancelledError as exc: + # Shield the scope against further cancellation attempts, + # as they're not productive (#695) + wait_scope.shield = True + self.cancel_scope.cancel() + + # Set exc_val from the cancellation exception if it was + # previously unset. However, we should not replace a native + # cancellation exception with one raise by a cancel scope. + if exc_val is None or ( + isinstance(exc_val, CancelledError) + and not is_anyio_cancellation(exc) + ): + exc_val = exc + + self._on_completed_fut = None + else: + # If there are no child tasks to wait on, run at least one checkpoint + # anyway + await AsyncIOBackend.cancel_shielded_checkpoint() + + self._active = False + if self._exceptions: + # The exception that got us here should already have been + # added to self._exceptions so it's ok to break exception + # chaining and avoid adding a "During handling of above..." + # for each nesting level. 
+ raise BaseExceptionGroup( + "unhandled errors in a TaskGroup", self._exceptions + ) from None + elif exc_val: + raise exc_val + except BaseException as exc: + if self.cancel_scope.__exit__(type(exc), exc, exc.__traceback__): + return True + + raise + + return self.cancel_scope.__exit__(exc_type, exc_val, exc_tb) + finally: + del exc_val, exc_tb, self._exceptions + + def _spawn( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[Any]], + args: tuple[Unpack[PosArgsT]], + name: object, + task_status_future: asyncio.Future | None = None, + ) -> asyncio.Task: + def task_done(_task: asyncio.Task) -> None: + task_state = _task_states[_task] + assert task_state.cancel_scope is not None + assert _task in task_state.cancel_scope._tasks + task_state.cancel_scope._tasks.remove(_task) + self._tasks.remove(task) + del _task_states[_task] + + if self._on_completed_fut is not None and not self._tasks: + try: + self._on_completed_fut.set_result(None) + except asyncio.InvalidStateError: + pass + + try: + exc = _task.exception() + except CancelledError as e: + while isinstance(e.__context__, CancelledError): + e = e.__context__ + + exc = e + + if exc is not None: + # The future can only be in the cancelled state if the host task was + # cancelled, so return immediately instead of adding one more + # CancelledError to the exceptions list + if task_status_future is not None and task_status_future.cancelled(): + return + + if task_status_future is None or task_status_future.done(): + if not isinstance(exc, CancelledError): + self._exceptions.append(exc) + + if not self.cancel_scope._effectively_cancelled: + self.cancel_scope.cancel() + else: + task_status_future.set_exception(exc) + elif task_status_future is not None and not task_status_future.done(): + task_status_future.set_exception( + RuntimeError("Child exited without calling task_status.started()") + ) + + if not self._active: + raise RuntimeError( + "This task group is not active; no new tasks can be started." + ) + + kwargs = {} + if task_status_future: + parent_id = id(current_task()) + kwargs["task_status"] = _AsyncioTaskStatus( + task_status_future, id(self.cancel_scope._host_task) + ) + else: + parent_id = id(self.cancel_scope._host_task) + + coro = func(*args, **kwargs) + if not iscoroutine(coro): + prefix = f"{func.__module__}." 
if hasattr(func, "__module__") else "" + raise TypeError( + f"Expected {prefix}{func.__qualname__}() to return a coroutine, but " + f"the return value ({coro!r}) is not a coroutine object" + ) + + name = get_callable_name(func) if name is None else str(name) + loop = asyncio.get_running_loop() + if ( + (factory := loop.get_task_factory()) + and getattr(factory, "__code__", None) is _eager_task_factory_code + and (closure := getattr(factory, "__closure__", None)) + ): + custom_task_constructor = closure[0].cell_contents + task = custom_task_constructor(coro, loop=loop, name=name) + else: + task = create_task(coro, name=name) + + # Make the spawned task inherit the task group's cancel scope + _task_states[task] = TaskState( + parent_id=parent_id, cancel_scope=self.cancel_scope + ) + self.cancel_scope._tasks.add(task) + self._tasks.add(task) + task.add_done_callback(task_done) + return task + + def start_soon( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[Any]], + *args: Unpack[PosArgsT], + name: object = None, + ) -> None: + self._spawn(func, args, name) + + async def start( + self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None + ) -> Any: + future: asyncio.Future = asyncio.Future() + task = self._spawn(func, args, name, future) + + # If the task raises an exception after sending a start value without a switch + # point between, the task group is cancelled and this method never proceeds to + # process the completed future. That's why we have to have a shielded cancel + # scope here. + try: + return await future + except CancelledError: + # Cancel the task and wait for it to exit before returning + task.cancel() + with CancelScope(shield=True), suppress(CancelledError): + await task + + raise + + +# +# Threads +# + +_Retval_Queue_Type = tuple[Optional[T_Retval], Optional[BaseException]] + + +class WorkerThread(Thread): + MAX_IDLE_TIME = 10 # seconds + + def __init__( + self, + root_task: asyncio.Task, + workers: set[WorkerThread], + idle_workers: deque[WorkerThread], + ): + super().__init__(name="AnyIO worker thread") + self.root_task = root_task + self.workers = workers + self.idle_workers = idle_workers + self.loop = root_task._loop + self.queue: Queue[ + tuple[Context, Callable, tuple, asyncio.Future, CancelScope] | None + ] = Queue(2) + self.idle_since = AsyncIOBackend.current_time() + self.stopping = False + + def _report_result( + self, future: asyncio.Future, result: Any, exc: BaseException | None + ) -> None: + self.idle_since = AsyncIOBackend.current_time() + if not self.stopping: + self.idle_workers.append(self) + + if not future.cancelled(): + if exc is not None: + if isinstance(exc, StopIteration): + new_exc = RuntimeError("coroutine raised StopIteration") + new_exc.__cause__ = exc + exc = new_exc + + future.set_exception(exc) + else: + future.set_result(result) + + def run(self) -> None: + with claim_worker_thread(AsyncIOBackend, self.loop): + while True: + item = self.queue.get() + if item is None: + # Shutdown command received + return + + context, func, args, future, cancel_scope = item + if not future.cancelled(): + result = None + exception: BaseException | None = None + threadlocals.current_cancel_scope = cancel_scope + try: + result = context.run(func, *args) + except BaseException as exc: + exception = exc + finally: + del threadlocals.current_cancel_scope + + if not self.loop.is_closed(): + self.loop.call_soon_threadsafe( + self._report_result, future, result, exception + ) + + del result, exception + + self.queue.task_done() + del 
item, context, func, args, future, cancel_scope + + def stop(self, f: asyncio.Task | None = None) -> None: + self.stopping = True + self.queue.put_nowait(None) + self.workers.discard(self) + try: + self.idle_workers.remove(self) + except ValueError: + pass + + +_threadpool_idle_workers: RunVar[deque[WorkerThread]] = RunVar( + "_threadpool_idle_workers" +) +_threadpool_workers: RunVar[set[WorkerThread]] = RunVar("_threadpool_workers") + + +class BlockingPortal(abc.BlockingPortal): + def __new__(cls) -> BlockingPortal: + return object.__new__(cls) + + def __init__(self) -> None: + super().__init__() + self._loop = get_running_loop() + + def _spawn_task_from_thread( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval], + args: tuple[Unpack[PosArgsT]], + kwargs: dict[str, Any], + name: object, + future: Future[T_Retval], + ) -> None: + AsyncIOBackend.run_sync_from_thread( + partial(self._task_group.start_soon, name=name), + (self._call_func, func, args, kwargs, future), + self._loop, + ) + + +# +# Subprocesses +# + + +@dataclass(eq=False) +class StreamReaderWrapper(abc.ByteReceiveStream): + _stream: asyncio.StreamReader + + async def receive(self, max_bytes: int = 65536) -> bytes: + data = await self._stream.read(max_bytes) + if data: + return data + else: + raise EndOfStream + + async def aclose(self) -> None: + self._stream.set_exception(ClosedResourceError()) + await AsyncIOBackend.checkpoint() + + +@dataclass(eq=False) +class StreamWriterWrapper(abc.ByteSendStream): + _stream: asyncio.StreamWriter + + async def send(self, item: bytes) -> None: + self._stream.write(item) + await self._stream.drain() + + async def aclose(self) -> None: + self._stream.close() + await AsyncIOBackend.checkpoint() + + +@dataclass(eq=False) +class Process(abc.Process): + _process: asyncio.subprocess.Process + _stdin: StreamWriterWrapper | None + _stdout: StreamReaderWrapper | None + _stderr: StreamReaderWrapper | None + + async def aclose(self) -> None: + with CancelScope(shield=True) as scope: + if self._stdin: + await self._stdin.aclose() + if self._stdout: + await self._stdout.aclose() + if self._stderr: + await self._stderr.aclose() + + scope.shield = False + try: + await self.wait() + except BaseException: + scope.shield = True + self.kill() + await self.wait() + raise + + async def wait(self) -> int: + return await self._process.wait() + + def terminate(self) -> None: + self._process.terminate() + + def kill(self) -> None: + self._process.kill() + + def send_signal(self, signal: int) -> None: + self._process.send_signal(signal) + + @property + def pid(self) -> int: + return self._process.pid + + @property + def returncode(self) -> int | None: + return self._process.returncode + + @property + def stdin(self) -> abc.ByteSendStream | None: + return self._stdin + + @property + def stdout(self) -> abc.ByteReceiveStream | None: + return self._stdout + + @property + def stderr(self) -> abc.ByteReceiveStream | None: + return self._stderr + + +def _forcibly_shutdown_process_pool_on_exit( + workers: set[Process], _task: object +) -> None: + """ + Forcibly shuts down worker processes belonging to this event loop.""" + child_watcher: asyncio.AbstractChildWatcher | None = None + if sys.version_info < (3, 12): + try: + child_watcher = asyncio.get_event_loop_policy().get_child_watcher() + except NotImplementedError: + pass + + # Close as much as possible (w/o async/await) to avoid warnings + for process in workers: + if process.returncode is None: + continue + + 
process._stdin._stream._transport.close()  # type: ignore[union-attr]
+        process._stdout._stream._transport.close()  # type: ignore[union-attr]
+        process._stderr._stream._transport.close()  # type: ignore[union-attr]
+        process.kill()
+        if child_watcher:
+            child_watcher.remove_child_handler(process.pid)
+
+
+async def _shutdown_process_pool_on_exit(workers: set[abc.Process]) -> None:
+    """
+    Shuts down worker processes belonging to this event loop.
+
+    NOTE: this only works when the event loop was started using asyncio.run() or
+    anyio.run().
+
+    """
+    process: abc.Process
+    try:
+        await sleep(math.inf)
+    except asyncio.CancelledError:
+        for process in workers:
+            if process.returncode is None:
+                process.kill()
+
+        for process in workers:
+            await process.aclose()
+
+
+#
+# Sockets and networking
+#
+
+
+class StreamProtocol(asyncio.Protocol):
+    read_queue: deque[bytes]
+    read_event: asyncio.Event
+    write_event: asyncio.Event
+    exception: Exception | None = None
+    is_at_eof: bool = False
+
+    def connection_made(self, transport: asyncio.BaseTransport) -> None:
+        self.read_queue = deque()
+        self.read_event = asyncio.Event()
+        self.write_event = asyncio.Event()
+        self.write_event.set()
+        cast(asyncio.Transport, transport).set_write_buffer_limits(0)
+
+    def connection_lost(self, exc: Exception | None) -> None:
+        if exc:
+            self.exception = BrokenResourceError()
+            self.exception.__cause__ = exc
+
+        self.read_event.set()
+        self.write_event.set()
+
+    def data_received(self, data: bytes) -> None:
+        # ProactorEventLoop sometimes sends bytearray instead of bytes
+        self.read_queue.append(bytes(data))
+        self.read_event.set()
+
+    def eof_received(self) -> bool | None:
+        self.is_at_eof = True
+        self.read_event.set()
+        return True
+
+    def pause_writing(self) -> None:
+        self.write_event = asyncio.Event()
+
+    def resume_writing(self) -> None:
+        self.write_event.set()
+
+
+class DatagramProtocol(asyncio.DatagramProtocol):
+    read_queue: deque[tuple[bytes, IPSockAddrType]]
+    read_event: asyncio.Event
+    write_event: asyncio.Event
+    exception: Exception | None = None
+
+    def connection_made(self, transport: asyncio.BaseTransport) -> None:
+        self.read_queue = deque(maxlen=100)  # arbitrary value
+        self.read_event = asyncio.Event()
+        self.write_event = asyncio.Event()
+        self.write_event.set()
+
+    def connection_lost(self, exc: Exception | None) -> None:
+        self.read_event.set()
+        self.write_event.set()
+
+    def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
+        addr = convert_ipv6_sockaddr(addr)
+        self.read_queue.append((data, addr))
+        self.read_event.set()
+
+    def error_received(self, exc: Exception) -> None:
+        self.exception = exc
+
+    def pause_writing(self) -> None:
+        self.write_event.clear()
+
+    def resume_writing(self) -> None:
+        self.write_event.set()
+
+
+class SocketStream(abc.SocketStream):
+    def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
+        self._transport = transport
+        self._protocol = protocol
+        self._receive_guard = ResourceGuard("reading from")
+        self._send_guard = ResourceGuard("writing to")
+        self._closed = False
+
+    @property
+    def _raw_socket(self) -> socket.socket:
+        return self._transport.get_extra_info("socket")
+
+    async def receive(self, max_bytes: int = 65536) -> bytes:
+        with self._receive_guard:
+            if (
+                not self._protocol.read_event.is_set()
+                and not self._transport.is_closing()
+                and not self._protocol.is_at_eof
+            ):
+                self._transport.resume_reading()
+                await self._protocol.read_event.wait()
+                self._transport.pause_reading()
+            else:
+                await 
AsyncIOBackend.checkpoint() + + try: + chunk = self._protocol.read_queue.popleft() + except IndexError: + if self._closed: + raise ClosedResourceError from None + elif self._protocol.exception: + raise self._protocol.exception from None + else: + raise EndOfStream from None + + if len(chunk) > max_bytes: + # Split the oversized chunk + chunk, leftover = chunk[:max_bytes], chunk[max_bytes:] + self._protocol.read_queue.appendleft(leftover) + + # If the read queue is empty, clear the flag so that the next call will + # block until data is available + if not self._protocol.read_queue: + self._protocol.read_event.clear() + + return chunk + + async def send(self, item: bytes) -> None: + with self._send_guard: + await AsyncIOBackend.checkpoint() + + if self._closed: + raise ClosedResourceError + elif self._protocol.exception is not None: + raise self._protocol.exception + + try: + self._transport.write(item) + except RuntimeError as exc: + if self._transport.is_closing(): + raise BrokenResourceError from exc + else: + raise + + await self._protocol.write_event.wait() + + async def send_eof(self) -> None: + try: + self._transport.write_eof() + except OSError: + pass + + async def aclose(self) -> None: + if not self._transport.is_closing(): + self._closed = True + try: + self._transport.write_eof() + except OSError: + pass + + self._transport.close() + await sleep(0) + self._transport.abort() + + +class _RawSocketMixin: + _receive_future: asyncio.Future | None = None + _send_future: asyncio.Future | None = None + _closing = False + + def __init__(self, raw_socket: socket.socket): + self.__raw_socket = raw_socket + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + + @property + def _raw_socket(self) -> socket.socket: + return self.__raw_socket + + def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future: + def callback(f: object) -> None: + del self._receive_future + loop.remove_reader(self.__raw_socket) + + f = self._receive_future = asyncio.Future() + loop.add_reader(self.__raw_socket, f.set_result, None) + f.add_done_callback(callback) + return f + + def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future: + def callback(f: object) -> None: + del self._send_future + loop.remove_writer(self.__raw_socket) + + f = self._send_future = asyncio.Future() + loop.add_writer(self.__raw_socket, f.set_result, None) + f.add_done_callback(callback) + return f + + async def aclose(self) -> None: + if not self._closing: + self._closing = True + if self.__raw_socket.fileno() != -1: + self.__raw_socket.close() + + if self._receive_future: + self._receive_future.set_result(None) + if self._send_future: + self._send_future.set_result(None) + + +class UNIXSocketStream(_RawSocketMixin, abc.UNIXSocketStream): + async def send_eof(self) -> None: + with self._send_guard: + self._raw_socket.shutdown(socket.SHUT_WR) + + async def receive(self, max_bytes: int = 65536) -> bytes: + loop = get_running_loop() + await AsyncIOBackend.checkpoint() + with self._receive_guard: + while True: + try: + data = self._raw_socket.recv(max_bytes) + except BlockingIOError: + await self._wait_until_readable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + else: + if not data: + raise EndOfStream + + return data + + async def send(self, item: bytes) -> None: + loop = get_running_loop() + await AsyncIOBackend.checkpoint() + with self._send_guard: + view = 
memoryview(item) + while view: + try: + bytes_sent = self._raw_socket.send(view) + except BlockingIOError: + await self._wait_until_writable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + else: + view = view[bytes_sent:] + + async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]: + if not isinstance(msglen, int) or msglen < 0: + raise ValueError("msglen must be a non-negative integer") + if not isinstance(maxfds, int) or maxfds < 1: + raise ValueError("maxfds must be a positive integer") + + loop = get_running_loop() + fds = array.array("i") + await AsyncIOBackend.checkpoint() + with self._receive_guard: + while True: + try: + message, ancdata, flags, addr = self._raw_socket.recvmsg( + msglen, socket.CMSG_LEN(maxfds * fds.itemsize) + ) + except BlockingIOError: + await self._wait_until_readable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + else: + if not message and not ancdata: + raise EndOfStream + + break + + for cmsg_level, cmsg_type, cmsg_data in ancdata: + if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS: + raise RuntimeError( + f"Received unexpected ancillary data; message = {message!r}, " + f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}" + ) + + fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) + + return message, list(fds) + + async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None: + if not message: + raise ValueError("message must not be empty") + if not fds: + raise ValueError("fds must not be empty") + + loop = get_running_loop() + filenos: list[int] = [] + for fd in fds: + if isinstance(fd, int): + filenos.append(fd) + elif isinstance(fd, IOBase): + filenos.append(fd.fileno()) + + fdarray = array.array("i", filenos) + await AsyncIOBackend.checkpoint() + with self._send_guard: + while True: + try: + # The ignore can be removed after mypy picks up + # https://github.com/python/typeshed/pull/5545 + self._raw_socket.sendmsg( + [message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)] + ) + break + except BlockingIOError: + await self._wait_until_writable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + + +class TCPSocketListener(abc.SocketListener): + _accept_scope: CancelScope | None = None + _closed = False + + def __init__(self, raw_socket: socket.socket): + self.__raw_socket = raw_socket + self._loop = cast(asyncio.BaseEventLoop, get_running_loop()) + self._accept_guard = ResourceGuard("accepting connections from") + + @property + def _raw_socket(self) -> socket.socket: + return self.__raw_socket + + async def accept(self) -> abc.SocketStream: + if self._closed: + raise ClosedResourceError + + with self._accept_guard: + await AsyncIOBackend.checkpoint() + with CancelScope() as self._accept_scope: + try: + client_sock, _addr = await self._loop.sock_accept(self._raw_socket) + except asyncio.CancelledError: + # Workaround for https://bugs.python.org/issue41317 + try: + self._loop.remove_reader(self._raw_socket) + except (ValueError, NotImplementedError): + pass + + if self._closed: + raise ClosedResourceError from None + + raise + finally: + self._accept_scope = None + + client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + transport, protocol = await self._loop.connect_accepted_socket( + 
StreamProtocol, client_sock + ) + return SocketStream(transport, protocol) + + async def aclose(self) -> None: + if self._closed: + return + + self._closed = True + if self._accept_scope: + # Workaround for https://bugs.python.org/issue41317 + try: + self._loop.remove_reader(self._raw_socket) + except (ValueError, NotImplementedError): + pass + + self._accept_scope.cancel() + await sleep(0) + + self._raw_socket.close() + + +class UNIXSocketListener(abc.SocketListener): + def __init__(self, raw_socket: socket.socket): + self.__raw_socket = raw_socket + self._loop = get_running_loop() + self._accept_guard = ResourceGuard("accepting connections from") + self._closed = False + + async def accept(self) -> abc.SocketStream: + await AsyncIOBackend.checkpoint() + with self._accept_guard: + while True: + try: + client_sock, _ = self.__raw_socket.accept() + client_sock.setblocking(False) + return UNIXSocketStream(client_sock) + except BlockingIOError: + f: asyncio.Future = asyncio.Future() + self._loop.add_reader(self.__raw_socket, f.set_result, None) + f.add_done_callback( + lambda _: self._loop.remove_reader(self.__raw_socket) + ) + await f + except OSError as exc: + if self._closed: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + + async def aclose(self) -> None: + self._closed = True + self.__raw_socket.close() + + @property + def _raw_socket(self) -> socket.socket: + return self.__raw_socket + + +class UDPSocket(abc.UDPSocket): + def __init__( + self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol + ): + self._transport = transport + self._protocol = protocol + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + self._closed = False + + @property + def _raw_socket(self) -> socket.socket: + return self._transport.get_extra_info("socket") + + async def aclose(self) -> None: + if not self._transport.is_closing(): + self._closed = True + self._transport.close() + + async def receive(self) -> tuple[bytes, IPSockAddrType]: + with self._receive_guard: + await AsyncIOBackend.checkpoint() + + # If the buffer is empty, ask for more data + if not self._protocol.read_queue and not self._transport.is_closing(): + self._protocol.read_event.clear() + await self._protocol.read_event.wait() + + try: + return self._protocol.read_queue.popleft() + except IndexError: + if self._closed: + raise ClosedResourceError from None + else: + raise BrokenResourceError from None + + async def send(self, item: UDPPacketType) -> None: + with self._send_guard: + await AsyncIOBackend.checkpoint() + await self._protocol.write_event.wait() + if self._closed: + raise ClosedResourceError + elif self._transport.is_closing(): + raise BrokenResourceError + else: + self._transport.sendto(*item) + + +class ConnectedUDPSocket(abc.ConnectedUDPSocket): + def __init__( + self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol + ): + self._transport = transport + self._protocol = protocol + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + self._closed = False + + @property + def _raw_socket(self) -> socket.socket: + return self._transport.get_extra_info("socket") + + async def aclose(self) -> None: + if not self._transport.is_closing(): + self._closed = True + self._transport.close() + + async def receive(self) -> bytes: + with self._receive_guard: + await AsyncIOBackend.checkpoint() + + # If the buffer is empty, ask for more data + if not self._protocol.read_queue and 
not self._transport.is_closing(): + self._protocol.read_event.clear() + await self._protocol.read_event.wait() + + try: + packet = self._protocol.read_queue.popleft() + except IndexError: + if self._closed: + raise ClosedResourceError from None + else: + raise BrokenResourceError from None + + return packet[0] + + async def send(self, item: bytes) -> None: + with self._send_guard: + await AsyncIOBackend.checkpoint() + await self._protocol.write_event.wait() + if self._closed: + raise ClosedResourceError + elif self._transport.is_closing(): + raise BrokenResourceError + else: + self._transport.sendto(item) + + +class UNIXDatagramSocket(_RawSocketMixin, abc.UNIXDatagramSocket): + async def receive(self) -> UNIXDatagramPacketType: + loop = get_running_loop() + await AsyncIOBackend.checkpoint() + with self._receive_guard: + while True: + try: + data = self._raw_socket.recvfrom(65536) + except BlockingIOError: + await self._wait_until_readable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + else: + return data + + async def send(self, item: UNIXDatagramPacketType) -> None: + loop = get_running_loop() + await AsyncIOBackend.checkpoint() + with self._send_guard: + while True: + try: + self._raw_socket.sendto(*item) + except BlockingIOError: + await self._wait_until_writable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + else: + return + + +class ConnectedUNIXDatagramSocket(_RawSocketMixin, abc.ConnectedUNIXDatagramSocket): + async def receive(self) -> bytes: + loop = get_running_loop() + await AsyncIOBackend.checkpoint() + with self._receive_guard: + while True: + try: + data = self._raw_socket.recv(65536) + except BlockingIOError: + await self._wait_until_readable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + else: + return data + + async def send(self, item: bytes) -> None: + loop = get_running_loop() + await AsyncIOBackend.checkpoint() + with self._send_guard: + while True: + try: + self._raw_socket.send(item) + except BlockingIOError: + await self._wait_until_writable(loop) + except OSError as exc: + if self._closing: + raise ClosedResourceError from None + else: + raise BrokenResourceError from exc + else: + return + + +_read_events: RunVar[dict[int, asyncio.Event]] = RunVar("read_events") +_write_events: RunVar[dict[int, asyncio.Event]] = RunVar("write_events") + + +# +# Synchronization +# + + +class Event(BaseEvent): + def __new__(cls) -> Event: + return object.__new__(cls) + + def __init__(self) -> None: + self._event = asyncio.Event() + + def set(self) -> None: + self._event.set() + + def is_set(self) -> bool: + return self._event.is_set() + + async def wait(self) -> None: + if self.is_set(): + await AsyncIOBackend.checkpoint() + else: + await self._event.wait() + + def statistics(self) -> EventStatistics: + return EventStatistics(len(self._event._waiters)) + + +class Lock(BaseLock): + def __new__(cls, *, fast_acquire: bool = False) -> Lock: + return object.__new__(cls) + + def __init__(self, *, fast_acquire: bool = False) -> None: + self._fast_acquire = fast_acquire + self._owner_task: asyncio.Task | None = None + self._waiters: deque[tuple[asyncio.Task, asyncio.Future]] = deque() + + async def acquire(self) -> None: + task = cast(asyncio.Task, current_task()) + if self._owner_task is None and not self._waiters: + await 
AsyncIOBackend.checkpoint_if_cancelled() + self._owner_task = task + + # Unless on the "fast path", yield control of the event loop so that other + # tasks can run too + if not self._fast_acquire: + try: + await AsyncIOBackend.cancel_shielded_checkpoint() + except CancelledError: + self.release() + raise + + return + + if self._owner_task == task: + raise RuntimeError("Attempted to acquire an already held Lock") + + fut: asyncio.Future[None] = asyncio.Future() + item = task, fut + self._waiters.append(item) + try: + await fut + except CancelledError: + self._waiters.remove(item) + if self._owner_task is task: + self.release() + + raise + + self._waiters.remove(item) + + def acquire_nowait(self) -> None: + task = cast(asyncio.Task, current_task()) + if self._owner_task is None and not self._waiters: + self._owner_task = task + return + + if self._owner_task is task: + raise RuntimeError("Attempted to acquire an already held Lock") + + raise WouldBlock + + def locked(self) -> bool: + return self._owner_task is not None + + def release(self) -> None: + if self._owner_task != current_task(): + raise RuntimeError("The current task is not holding this lock") + + for task, fut in self._waiters: + if not fut.cancelled(): + self._owner_task = task + fut.set_result(None) + return + + self._owner_task = None + + def statistics(self) -> LockStatistics: + task_info = AsyncIOTaskInfo(self._owner_task) if self._owner_task else None + return LockStatistics(self.locked(), task_info, len(self._waiters)) + + +class Semaphore(BaseSemaphore): + def __new__( + cls, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ) -> Semaphore: + return object.__new__(cls) + + def __init__( + self, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ): + super().__init__(initial_value, max_value=max_value) + self._value = initial_value + self._max_value = max_value + self._fast_acquire = fast_acquire + self._waiters: deque[asyncio.Future[None]] = deque() + + async def acquire(self) -> None: + if self._value > 0 and not self._waiters: + await AsyncIOBackend.checkpoint_if_cancelled() + self._value -= 1 + + # Unless on the "fast path", yield control of the event loop so that other + # tasks can run too + if not self._fast_acquire: + try: + await AsyncIOBackend.cancel_shielded_checkpoint() + except CancelledError: + self.release() + raise + + return + + fut: asyncio.Future[None] = asyncio.Future() + self._waiters.append(fut) + try: + await fut + except CancelledError: + try: + self._waiters.remove(fut) + except ValueError: + self.release() + + raise + + def acquire_nowait(self) -> None: + if self._value == 0: + raise WouldBlock + + self._value -= 1 + + def release(self) -> None: + if self._max_value is not None and self._value == self._max_value: + raise ValueError("semaphore released too many times") + + for fut in self._waiters: + if not fut.cancelled(): + fut.set_result(None) + self._waiters.remove(fut) + return + + self._value += 1 + + @property + def value(self) -> int: + return self._value + + @property + def max_value(self) -> int | None: + return self._max_value + + def statistics(self) -> SemaphoreStatistics: + return SemaphoreStatistics(len(self._waiters)) + + +class CapacityLimiter(BaseCapacityLimiter): + _total_tokens: float = 0 + + def __new__(cls, total_tokens: float) -> CapacityLimiter: + return object.__new__(cls) + + def __init__(self, total_tokens: float): + self._borrowers: set[Any] = set() + self._wait_queue: OrderedDict[Any, 
asyncio.Event] = OrderedDict() + self.total_tokens = total_tokens + + async def __aenter__(self) -> None: + await self.acquire() + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.release() + + @property + def total_tokens(self) -> float: + return self._total_tokens + + @total_tokens.setter + def total_tokens(self, value: float) -> None: + if not isinstance(value, int) and not math.isinf(value): + raise TypeError("total_tokens must be an int or math.inf") + if value < 1: + raise ValueError("total_tokens must be >= 1") + + waiters_to_notify = max(value - self._total_tokens, 0) + self._total_tokens = value + + # Notify waiting tasks that they have acquired the limiter + while self._wait_queue and waiters_to_notify: + event = self._wait_queue.popitem(last=False)[1] + event.set() + waiters_to_notify -= 1 + + @property + def borrowed_tokens(self) -> int: + return len(self._borrowers) + + @property + def available_tokens(self) -> float: + return self._total_tokens - len(self._borrowers) + + def acquire_nowait(self) -> None: + self.acquire_on_behalf_of_nowait(current_task()) + + def acquire_on_behalf_of_nowait(self, borrower: object) -> None: + if borrower in self._borrowers: + raise RuntimeError( + "this borrower is already holding one of this CapacityLimiter's tokens" + ) + + if self._wait_queue or len(self._borrowers) >= self._total_tokens: + raise WouldBlock + + self._borrowers.add(borrower) + + async def acquire(self) -> None: + return await self.acquire_on_behalf_of(current_task()) + + async def acquire_on_behalf_of(self, borrower: object) -> None: + await AsyncIOBackend.checkpoint_if_cancelled() + try: + self.acquire_on_behalf_of_nowait(borrower) + except WouldBlock: + event = asyncio.Event() + self._wait_queue[borrower] = event + try: + await event.wait() + except BaseException: + self._wait_queue.pop(borrower, None) + raise + + self._borrowers.add(borrower) + else: + try: + await AsyncIOBackend.cancel_shielded_checkpoint() + except BaseException: + self.release() + raise + + def release(self) -> None: + self.release_on_behalf_of(current_task()) + + def release_on_behalf_of(self, borrower: object) -> None: + try: + self._borrowers.remove(borrower) + except KeyError: + raise RuntimeError( + "this borrower isn't holding any of this CapacityLimiter's tokens" + ) from None + + # Notify the next task in line if this limiter has free capacity now + if self._wait_queue and len(self._borrowers) < self._total_tokens: + event = self._wait_queue.popitem(last=False)[1] + event.set() + + def statistics(self) -> CapacityLimiterStatistics: + return CapacityLimiterStatistics( + self.borrowed_tokens, + self.total_tokens, + tuple(self._borrowers), + len(self._wait_queue), + ) + + +_default_thread_limiter: RunVar[CapacityLimiter] = RunVar("_default_thread_limiter") + + +# +# Operating system signals +# + + +class _SignalReceiver: + def __init__(self, signals: tuple[Signals, ...]): + self._signals = signals + self._loop = get_running_loop() + self._signal_queue: deque[Signals] = deque() + self._future: asyncio.Future = asyncio.Future() + self._handled_signals: set[Signals] = set() + + def _deliver(self, signum: Signals) -> None: + self._signal_queue.append(signum) + if not self._future.done(): + self._future.set_result(None) + + def __enter__(self) -> _SignalReceiver: + for sig in set(self._signals): + self._loop.add_signal_handler(sig, self._deliver, sig) + self._handled_signals.add(sig) + + return 
self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + for sig in self._handled_signals: + self._loop.remove_signal_handler(sig) + + def __aiter__(self) -> _SignalReceiver: + return self + + async def __anext__(self) -> Signals: + await AsyncIOBackend.checkpoint() + if not self._signal_queue: + self._future = asyncio.Future() + await self._future + + return self._signal_queue.popleft() + + +# +# Testing and debugging +# + + +class AsyncIOTaskInfo(TaskInfo): + def __init__(self, task: asyncio.Task): + task_state = _task_states.get(task) + if task_state is None: + parent_id = None + else: + parent_id = task_state.parent_id + + coro = task.get_coro() + assert coro is not None, "created TaskInfo from a completed Task" + super().__init__(id(task), parent_id, task.get_name(), coro) + self._task = weakref.ref(task) + + def has_pending_cancellation(self) -> bool: + if not (task := self._task()): + # If the task isn't around anymore, it won't have a pending cancellation + return False + + if task._must_cancel: # type: ignore[attr-defined] + return True + elif ( + isinstance(task._fut_waiter, asyncio.Future) # type: ignore[attr-defined] + and task._fut_waiter.cancelled() # type: ignore[attr-defined] + ): + return True + + if task_state := _task_states.get(task): + if cancel_scope := task_state.cancel_scope: + return cancel_scope._effectively_cancelled + + return False + + +class TestRunner(abc.TestRunner): + _send_stream: MemoryObjectSendStream[tuple[Awaitable[Any], asyncio.Future[Any]]] + + def __init__( + self, + *, + debug: bool | None = None, + use_uvloop: bool = False, + loop_factory: Callable[[], AbstractEventLoop] | None = None, + ) -> None: + if use_uvloop and loop_factory is None: + import uvloop + + loop_factory = uvloop.new_event_loop + + self._runner = Runner(debug=debug, loop_factory=loop_factory) + self._exceptions: list[BaseException] = [] + self._runner_task: asyncio.Task | None = None + + def __enter__(self) -> TestRunner: + self._runner.__enter__() + self.get_loop().set_exception_handler(self._exception_handler) + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self._runner.__exit__(exc_type, exc_val, exc_tb) + + def get_loop(self) -> AbstractEventLoop: + return self._runner.get_loop() + + def _exception_handler( + self, loop: asyncio.AbstractEventLoop, context: dict[str, Any] + ) -> None: + if isinstance(context.get("exception"), Exception): + self._exceptions.append(context["exception"]) + else: + loop.default_exception_handler(context) + + def _raise_async_exceptions(self) -> None: + # Re-raise any exceptions raised in asynchronous callbacks + if self._exceptions: + exceptions, self._exceptions = self._exceptions, [] + if len(exceptions) == 1: + raise exceptions[0] + elif exceptions: + raise BaseExceptionGroup( + "Multiple exceptions occurred in asynchronous callbacks", exceptions + ) + + async def _run_tests_and_fixtures( + self, + receive_stream: MemoryObjectReceiveStream[ + tuple[Awaitable[T_Retval], asyncio.Future[T_Retval]] + ], + ) -> None: + from _pytest.outcomes import OutcomeException + + with receive_stream, self._send_stream: + async for coro, future in receive_stream: + try: + retval = await coro + except CancelledError as exc: + if not future.cancelled(): + future.cancel(*exc.args) + + raise + except BaseException as exc: + if not future.cancelled(): + 
future.set_exception(exc) + + if not isinstance(exc, (Exception, OutcomeException)): + raise + else: + if not future.cancelled(): + future.set_result(retval) + + async def _call_in_runner_task( + self, + func: Callable[P, Awaitable[T_Retval]], + *args: P.args, + **kwargs: P.kwargs, + ) -> T_Retval: + if not self._runner_task: + self._send_stream, receive_stream = create_memory_object_stream[ + tuple[Awaitable[Any], asyncio.Future] + ](1) + self._runner_task = self.get_loop().create_task( + self._run_tests_and_fixtures(receive_stream) + ) + + coro = func(*args, **kwargs) + future: asyncio.Future[T_Retval] = self.get_loop().create_future() + self._send_stream.send_nowait((coro, future)) + return await future + + def run_asyncgen_fixture( + self, + fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]], + kwargs: dict[str, Any], + ) -> Iterable[T_Retval]: + asyncgen = fixture_func(**kwargs) + fixturevalue: T_Retval = self.get_loop().run_until_complete( + self._call_in_runner_task(asyncgen.asend, None) + ) + self._raise_async_exceptions() + + yield fixturevalue + + try: + self.get_loop().run_until_complete( + self._call_in_runner_task(asyncgen.asend, None) + ) + except StopAsyncIteration: + self._raise_async_exceptions() + else: + self.get_loop().run_until_complete(asyncgen.aclose()) + raise RuntimeError("Async generator fixture did not stop") + + def run_fixture( + self, + fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]], + kwargs: dict[str, Any], + ) -> T_Retval: + retval = self.get_loop().run_until_complete( + self._call_in_runner_task(fixture_func, **kwargs) + ) + self._raise_async_exceptions() + return retval + + def run_test( + self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any] + ) -> None: + try: + self.get_loop().run_until_complete( + self._call_in_runner_task(test_func, **kwargs) + ) + except Exception as exc: + self._exceptions.append(exc) + + self._raise_async_exceptions() + + +class AsyncIOBackend(AsyncBackend): + @classmethod + def run( + cls, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], + args: tuple[Unpack[PosArgsT]], + kwargs: dict[str, Any], + options: dict[str, Any], + ) -> T_Retval: + @wraps(func) + async def wrapper() -> T_Retval: + task = cast(asyncio.Task, current_task()) + task.set_name(get_callable_name(func)) + _task_states[task] = TaskState(None, None) + + try: + return await func(*args) + finally: + del _task_states[task] + + debug = options.get("debug", None) + loop_factory = options.get("loop_factory", None) + if loop_factory is None and options.get("use_uvloop", False): + import uvloop + + loop_factory = uvloop.new_event_loop + + with Runner(debug=debug, loop_factory=loop_factory) as runner: + return runner.run(wrapper()) + + @classmethod + def current_token(cls) -> object: + return get_running_loop() + + @classmethod + def current_time(cls) -> float: + return get_running_loop().time() + + @classmethod + def cancelled_exception_class(cls) -> type[BaseException]: + return CancelledError + + @classmethod + async def checkpoint(cls) -> None: + await sleep(0) + + @classmethod + async def checkpoint_if_cancelled(cls) -> None: + task = current_task() + if task is None: + return + + try: + cancel_scope = _task_states[task].cancel_scope + except KeyError: + return + + while cancel_scope: + if cancel_scope.cancel_called: + await sleep(0) + elif cancel_scope.shield: + break + else: + cancel_scope = cancel_scope._parent_scope + + @classmethod + async def cancel_shielded_checkpoint(cls) -> None: + with 
CancelScope(shield=True): + await sleep(0) + + @classmethod + async def sleep(cls, delay: float) -> None: + await sleep(delay) + + @classmethod + def create_cancel_scope( + cls, *, deadline: float = math.inf, shield: bool = False + ) -> CancelScope: + return CancelScope(deadline=deadline, shield=shield) + + @classmethod + def current_effective_deadline(cls) -> float: + if (task := current_task()) is None: + return math.inf + + try: + cancel_scope = _task_states[task].cancel_scope + except KeyError: + return math.inf + + deadline = math.inf + while cancel_scope: + deadline = min(deadline, cancel_scope.deadline) + if cancel_scope._cancel_called: + deadline = -math.inf + break + elif cancel_scope.shield: + break + else: + cancel_scope = cancel_scope._parent_scope + + return deadline + + @classmethod + def create_task_group(cls) -> abc.TaskGroup: + return TaskGroup() + + @classmethod + def create_event(cls) -> abc.Event: + return Event() + + @classmethod + def create_lock(cls, *, fast_acquire: bool) -> abc.Lock: + return Lock(fast_acquire=fast_acquire) + + @classmethod + def create_semaphore( + cls, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ) -> abc.Semaphore: + return Semaphore(initial_value, max_value=max_value, fast_acquire=fast_acquire) + + @classmethod + def create_capacity_limiter(cls, total_tokens: float) -> abc.CapacityLimiter: + return CapacityLimiter(total_tokens) + + @classmethod + async def run_sync_in_worker_thread( # type: ignore[return] + cls, + func: Callable[[Unpack[PosArgsT]], T_Retval], + args: tuple[Unpack[PosArgsT]], + abandon_on_cancel: bool = False, + limiter: abc.CapacityLimiter | None = None, + ) -> T_Retval: + await cls.checkpoint() + + # If this is the first run in this event loop thread, set up the necessary + # variables + try: + idle_workers = _threadpool_idle_workers.get() + workers = _threadpool_workers.get() + except LookupError: + idle_workers = deque() + workers = set() + _threadpool_idle_workers.set(idle_workers) + _threadpool_workers.set(workers) + + async with limiter or cls.current_default_thread_limiter(): + with CancelScope(shield=not abandon_on_cancel) as scope: + future = asyncio.Future[T_Retval]() + root_task = find_root_task() + if not idle_workers: + worker = WorkerThread(root_task, workers, idle_workers) + worker.start() + workers.add(worker) + root_task.add_done_callback( + worker.stop, context=contextvars.Context() + ) + else: + worker = idle_workers.pop() + + # Prune any other workers that have been idle for MAX_IDLE_TIME + # seconds or longer + now = cls.current_time() + while idle_workers: + if ( + now - idle_workers[0].idle_since + < WorkerThread.MAX_IDLE_TIME + ): + break + + expired_worker = idle_workers.popleft() + expired_worker.root_task.remove_done_callback( + expired_worker.stop + ) + expired_worker.stop() + + context = copy_context() + context.run(sniffio.current_async_library_cvar.set, None) + if abandon_on_cancel or scope._parent_scope is None: + worker_scope = scope + else: + worker_scope = scope._parent_scope + + worker.queue.put_nowait((context, func, args, future, worker_scope)) + return await future + + @classmethod + def check_cancelled(cls) -> None: + scope: CancelScope | None = threadlocals.current_cancel_scope + while scope is not None: + if scope.cancel_called: + raise CancelledError(f"Cancelled by cancel scope {id(scope):x}") + + if scope.shield: + return + + scope = scope._parent_scope + + @classmethod + def run_async_from_thread( + cls, + func: 
Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], + args: tuple[Unpack[PosArgsT]], + token: object, + ) -> T_Retval: + async def task_wrapper(scope: CancelScope) -> T_Retval: + __tracebackhide__ = True + task = cast(asyncio.Task, current_task()) + _task_states[task] = TaskState(None, scope) + scope._tasks.add(task) + try: + return await func(*args) + except CancelledError as exc: + raise concurrent.futures.CancelledError(str(exc)) from None + finally: + scope._tasks.discard(task) + + loop = cast(AbstractEventLoop, token) + context = copy_context() + context.run(sniffio.current_async_library_cvar.set, "asyncio") + wrapper = task_wrapper(threadlocals.current_cancel_scope) + f: concurrent.futures.Future[T_Retval] = context.run( + asyncio.run_coroutine_threadsafe, wrapper, loop + ) + return f.result() + + @classmethod + def run_sync_from_thread( + cls, + func: Callable[[Unpack[PosArgsT]], T_Retval], + args: tuple[Unpack[PosArgsT]], + token: object, + ) -> T_Retval: + @wraps(func) + def wrapper() -> None: + try: + sniffio.current_async_library_cvar.set("asyncio") + f.set_result(func(*args)) + except BaseException as exc: + f.set_exception(exc) + if not isinstance(exc, Exception): + raise + + f: concurrent.futures.Future[T_Retval] = Future() + loop = cast(AbstractEventLoop, token) + loop.call_soon_threadsafe(wrapper) + return f.result() + + @classmethod + def create_blocking_portal(cls) -> abc.BlockingPortal: + return BlockingPortal() + + @classmethod + async def open_process( + cls, + command: StrOrBytesPath | Sequence[StrOrBytesPath], + *, + stdin: int | IO[Any] | None, + stdout: int | IO[Any] | None, + stderr: int | IO[Any] | None, + **kwargs: Any, + ) -> Process: + await cls.checkpoint() + if isinstance(command, PathLike): + command = os.fspath(command) + + if isinstance(command, (str, bytes)): + process = await asyncio.create_subprocess_shell( + command, + stdin=stdin, + stdout=stdout, + stderr=stderr, + **kwargs, + ) + else: + process = await asyncio.create_subprocess_exec( + *command, + stdin=stdin, + stdout=stdout, + stderr=stderr, + **kwargs, + ) + + stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None + stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None + stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None + return Process(process, stdin_stream, stdout_stream, stderr_stream) + + @classmethod + def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None: + create_task( + _shutdown_process_pool_on_exit(workers), + name="AnyIO process pool shutdown task", + ) + find_root_task().add_done_callback( + partial(_forcibly_shutdown_process_pool_on_exit, workers) # type:ignore[arg-type] + ) + + @classmethod + async def connect_tcp( + cls, host: str, port: int, local_address: IPSockAddrType | None = None + ) -> abc.SocketStream: + transport, protocol = cast( + tuple[asyncio.Transport, StreamProtocol], + await get_running_loop().create_connection( + StreamProtocol, host, port, local_addr=local_address + ), + ) + transport.pause_reading() + return SocketStream(transport, protocol) + + @classmethod + async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream: + await cls.checkpoint() + loop = get_running_loop() + raw_socket = socket.socket(socket.AF_UNIX) + raw_socket.setblocking(False) + while True: + try: + raw_socket.connect(path) + except BlockingIOError: + f: asyncio.Future = asyncio.Future() + loop.add_writer(raw_socket, f.set_result, None) + f.add_done_callback(lambda _: 
loop.remove_writer(raw_socket)) + await f + except BaseException: + raw_socket.close() + raise + else: + return UNIXSocketStream(raw_socket) + + @classmethod + def create_tcp_listener(cls, sock: socket.socket) -> SocketListener: + return TCPSocketListener(sock) + + @classmethod + def create_unix_listener(cls, sock: socket.socket) -> SocketListener: + return UNIXSocketListener(sock) + + @classmethod + async def create_udp_socket( + cls, + family: AddressFamily, + local_address: IPSockAddrType | None, + remote_address: IPSockAddrType | None, + reuse_port: bool, + ) -> UDPSocket | ConnectedUDPSocket: + transport, protocol = await get_running_loop().create_datagram_endpoint( + DatagramProtocol, + local_addr=local_address, + remote_addr=remote_address, + family=family, + reuse_port=reuse_port, + ) + if protocol.exception: + transport.close() + raise protocol.exception + + if not remote_address: + return UDPSocket(transport, protocol) + else: + return ConnectedUDPSocket(transport, protocol) + + @classmethod + async def create_unix_datagram_socket( # type: ignore[override] + cls, raw_socket: socket.socket, remote_path: str | bytes | None + ) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket: + await cls.checkpoint() + loop = get_running_loop() + + if remote_path: + while True: + try: + raw_socket.connect(remote_path) + except BlockingIOError: + f: asyncio.Future = asyncio.Future() + loop.add_writer(raw_socket, f.set_result, None) + f.add_done_callback(lambda _: loop.remove_writer(raw_socket)) + await f + except BaseException: + raw_socket.close() + raise + else: + return ConnectedUNIXDatagramSocket(raw_socket) + else: + return UNIXDatagramSocket(raw_socket) + + @classmethod + async def getaddrinfo( + cls, + host: bytes | str | None, + port: str | int | None, + *, + family: int | AddressFamily = 0, + type: int | SocketKind = 0, + proto: int = 0, + flags: int = 0, + ) -> Sequence[ + tuple[ + AddressFamily, + SocketKind, + int, + str, + tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], + ] + ]: + return await get_running_loop().getaddrinfo( + host, port, family=family, type=type, proto=proto, flags=flags + ) + + @classmethod + async def getnameinfo( + cls, sockaddr: IPSockAddrType, flags: int = 0 + ) -> tuple[str, str]: + return await get_running_loop().getnameinfo(sockaddr, flags) + + @classmethod + async def wait_readable(cls, obj: FileDescriptorLike) -> None: + await cls.checkpoint() + try: + read_events = _read_events.get() + except LookupError: + read_events = {} + _read_events.set(read_events) + + if not isinstance(obj, int): + obj = obj.fileno() + + if read_events.get(obj): + raise BusyResourceError("reading from") + + loop = get_running_loop() + event = asyncio.Event() + try: + loop.add_reader(obj, event.set) + except NotImplementedError: + from anyio._core._asyncio_selector_thread import get_selector + + selector = get_selector() + selector.add_reader(obj, event.set) + remove_reader = selector.remove_reader + else: + remove_reader = loop.remove_reader + + read_events[obj] = event + try: + await event.wait() + finally: + remove_reader(obj) + del read_events[obj] + + @classmethod + async def wait_writable(cls, obj: FileDescriptorLike) -> None: + await cls.checkpoint() + try: + write_events = _write_events.get() + except LookupError: + write_events = {} + _write_events.set(write_events) + + if not isinstance(obj, int): + obj = obj.fileno() + + if write_events.get(obj): + raise BusyResourceError("writing to") + + loop = get_running_loop() + event = asyncio.Event() + 
try: + loop.add_writer(obj, event.set) + except NotImplementedError: + from anyio._core._asyncio_selector_thread import get_selector + + selector = get_selector() + selector.add_writer(obj, event.set) + remove_writer = selector.remove_writer + else: + remove_writer = loop.remove_writer + + write_events[obj] = event + try: + await event.wait() + finally: + del write_events[obj] + remove_writer(obj) + + @classmethod + def current_default_thread_limiter(cls) -> CapacityLimiter: + try: + return _default_thread_limiter.get() + except LookupError: + limiter = CapacityLimiter(40) + _default_thread_limiter.set(limiter) + return limiter + + @classmethod + def open_signal_receiver( + cls, *signals: Signals + ) -> AbstractContextManager[AsyncIterator[Signals]]: + return _SignalReceiver(signals) + + @classmethod + def get_current_task(cls) -> TaskInfo: + return AsyncIOTaskInfo(current_task()) # type: ignore[arg-type] + + @classmethod + def get_running_tasks(cls) -> Sequence[TaskInfo]: + return [AsyncIOTaskInfo(task) for task in all_tasks() if not task.done()] + + @classmethod + async def wait_all_tasks_blocked(cls) -> None: + await cls.checkpoint() + this_task = current_task() + while True: + for task in all_tasks(): + if task is this_task: + continue + + waiter = task._fut_waiter # type: ignore[attr-defined] + if waiter is None or waiter.done(): + await sleep(0.1) + break + else: + return + + @classmethod + def create_test_runner(cls, options: dict[str, Any]) -> TestRunner: + return TestRunner(**options) + + +backend_class = AsyncIOBackend diff --git a/venv/Lib/site-packages/anyio/_backends/_trio.py b/venv/Lib/site-packages/anyio/_backends/_trio.py new file mode 100644 index 00000000..b80cc04f --- /dev/null +++ b/venv/Lib/site-packages/anyio/_backends/_trio.py @@ -0,0 +1,1334 @@ +from __future__ import annotations + +import array +import math +import os +import socket +import sys +import types +import weakref +from collections.abc import ( + AsyncGenerator, + AsyncIterator, + Awaitable, + Callable, + Collection, + Coroutine, + Iterable, + Sequence, +) +from concurrent.futures import Future +from contextlib import AbstractContextManager +from dataclasses import dataclass +from functools import partial +from io import IOBase +from os import PathLike +from signal import Signals +from socket import AddressFamily, SocketKind +from types import TracebackType +from typing import ( + IO, + TYPE_CHECKING, + Any, + Generic, + NoReturn, + TypeVar, + cast, + overload, +) + +import trio.from_thread +import trio.lowlevel +from outcome import Error, Outcome, Value +from trio.lowlevel import ( + current_root_task, + current_task, + wait_readable, + wait_writable, +) +from trio.socket import SocketType as TrioSocketType +from trio.to_thread import run_sync + +from .. 
import ( + CapacityLimiterStatistics, + EventStatistics, + LockStatistics, + TaskInfo, + WouldBlock, + abc, +) +from .._core._eventloop import claim_worker_thread +from .._core._exceptions import ( + BrokenResourceError, + BusyResourceError, + ClosedResourceError, + EndOfStream, +) +from .._core._sockets import convert_ipv6_sockaddr +from .._core._streams import create_memory_object_stream +from .._core._synchronization import ( + CapacityLimiter as BaseCapacityLimiter, +) +from .._core._synchronization import Event as BaseEvent +from .._core._synchronization import Lock as BaseLock +from .._core._synchronization import ( + ResourceGuard, + SemaphoreStatistics, +) +from .._core._synchronization import Semaphore as BaseSemaphore +from .._core._tasks import CancelScope as BaseCancelScope +from ..abc import IPSockAddrType, UDPPacketType, UNIXDatagramPacketType +from ..abc._eventloop import AsyncBackend, StrOrBytesPath +from ..streams.memory import MemoryObjectSendStream + +if TYPE_CHECKING: + from _typeshed import HasFileno + +if sys.version_info >= (3, 10): + from typing import ParamSpec +else: + from typing_extensions import ParamSpec + +if sys.version_info >= (3, 11): + from typing import TypeVarTuple, Unpack +else: + from exceptiongroup import BaseExceptionGroup + from typing_extensions import TypeVarTuple, Unpack + +T = TypeVar("T") +T_Retval = TypeVar("T_Retval") +T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType) +PosArgsT = TypeVarTuple("PosArgsT") +P = ParamSpec("P") + + +# +# Event loop +# + +RunVar = trio.lowlevel.RunVar + + +# +# Timeouts and cancellation +# + + +class CancelScope(BaseCancelScope): + def __new__( + cls, original: trio.CancelScope | None = None, **kwargs: object + ) -> CancelScope: + return object.__new__(cls) + + def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None: + self.__original = original or trio.CancelScope(**kwargs) + + def __enter__(self) -> CancelScope: + self.__original.__enter__() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool: + return self.__original.__exit__(exc_type, exc_val, exc_tb) + + def cancel(self) -> None: + self.__original.cancel() + + @property + def deadline(self) -> float: + return self.__original.deadline + + @deadline.setter + def deadline(self, value: float) -> None: + self.__original.deadline = value + + @property + def cancel_called(self) -> bool: + return self.__original.cancel_called + + @property + def cancelled_caught(self) -> bool: + return self.__original.cancelled_caught + + @property + def shield(self) -> bool: + return self.__original.shield + + @shield.setter + def shield(self, value: bool) -> None: + self.__original.shield = value + + +# +# Task groups +# + + +class TaskGroup(abc.TaskGroup): + def __init__(self) -> None: + self._active = False + self._nursery_manager = trio.open_nursery(strict_exception_groups=True) + self.cancel_scope = None # type: ignore[assignment] + + async def __aenter__(self) -> TaskGroup: + self._active = True + self._nursery = await self._nursery_manager.__aenter__() + self.cancel_scope = CancelScope(self._nursery.cancel_scope) + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool: + try: + # trio.Nursery.__exit__ returns bool; .open_nursery has wrong type + return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb) # type: 
ignore[return-value] + except BaseExceptionGroup as exc: + if not exc.split(trio.Cancelled)[1]: + raise trio.Cancelled._create() from exc + + raise + finally: + del exc_val, exc_tb + self._active = False + + def start_soon( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[Any]], + *args: Unpack[PosArgsT], + name: object = None, + ) -> None: + if not self._active: + raise RuntimeError( + "This task group is not active; no new tasks can be started." + ) + + self._nursery.start_soon(func, *args, name=name) + + async def start( + self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None + ) -> Any: + if not self._active: + raise RuntimeError( + "This task group is not active; no new tasks can be started." + ) + + return await self._nursery.start(func, *args, name=name) + + +# +# Threads +# + + +class BlockingPortal(abc.BlockingPortal): + def __new__(cls) -> BlockingPortal: + return object.__new__(cls) + + def __init__(self) -> None: + super().__init__() + self._token = trio.lowlevel.current_trio_token() + + def _spawn_task_from_thread( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval], + args: tuple[Unpack[PosArgsT]], + kwargs: dict[str, Any], + name: object, + future: Future[T_Retval], + ) -> None: + trio.from_thread.run_sync( + partial(self._task_group.start_soon, name=name), + self._call_func, + func, + args, + kwargs, + future, + trio_token=self._token, + ) + + +# +# Subprocesses +# + + +@dataclass(eq=False) +class ReceiveStreamWrapper(abc.ByteReceiveStream): + _stream: trio.abc.ReceiveStream + + async def receive(self, max_bytes: int | None = None) -> bytes: + try: + data = await self._stream.receive_some(max_bytes) + except trio.ClosedResourceError as exc: + raise ClosedResourceError from exc.__cause__ + except trio.BrokenResourceError as exc: + raise BrokenResourceError from exc.__cause__ + + if data: + return data + else: + raise EndOfStream + + async def aclose(self) -> None: + await self._stream.aclose() + + +@dataclass(eq=False) +class SendStreamWrapper(abc.ByteSendStream): + _stream: trio.abc.SendStream + + async def send(self, item: bytes) -> None: + try: + await self._stream.send_all(item) + except trio.ClosedResourceError as exc: + raise ClosedResourceError from exc.__cause__ + except trio.BrokenResourceError as exc: + raise BrokenResourceError from exc.__cause__ + + async def aclose(self) -> None: + await self._stream.aclose() + + +@dataclass(eq=False) +class Process(abc.Process): + _process: trio.Process + _stdin: abc.ByteSendStream | None + _stdout: abc.ByteReceiveStream | None + _stderr: abc.ByteReceiveStream | None + + async def aclose(self) -> None: + with CancelScope(shield=True): + if self._stdin: + await self._stdin.aclose() + if self._stdout: + await self._stdout.aclose() + if self._stderr: + await self._stderr.aclose() + + try: + await self.wait() + except BaseException: + self.kill() + with CancelScope(shield=True): + await self.wait() + raise + + async def wait(self) -> int: + return await self._process.wait() + + def terminate(self) -> None: + self._process.terminate() + + def kill(self) -> None: + self._process.kill() + + def send_signal(self, signal: Signals) -> None: + self._process.send_signal(signal) + + @property + def pid(self) -> int: + return self._process.pid + + @property + def returncode(self) -> int | None: + return self._process.returncode + + @property + def stdin(self) -> abc.ByteSendStream | None: + return self._stdin + + @property + def stdout(self) -> abc.ByteReceiveStream | None: + return 
self._stdout + + @property + def stderr(self) -> abc.ByteReceiveStream | None: + return self._stderr + + +class _ProcessPoolShutdownInstrument(trio.abc.Instrument): + def after_run(self) -> None: + super().after_run() + + +current_default_worker_process_limiter: trio.lowlevel.RunVar = RunVar( + "current_default_worker_process_limiter" +) + + +async def _shutdown_process_pool(workers: set[abc.Process]) -> None: + try: + await trio.sleep(math.inf) + except trio.Cancelled: + for process in workers: + if process.returncode is None: + process.kill() + + with CancelScope(shield=True): + for process in workers: + await process.aclose() + + +# +# Sockets and networking +# + + +class _TrioSocketMixin(Generic[T_SockAddr]): + def __init__(self, trio_socket: TrioSocketType) -> None: + self._trio_socket = trio_socket + self._closed = False + + def _check_closed(self) -> None: + if self._closed: + raise ClosedResourceError + if self._trio_socket.fileno() < 0: + raise BrokenResourceError + + @property + def _raw_socket(self) -> socket.socket: + return self._trio_socket._sock # type: ignore[attr-defined] + + async def aclose(self) -> None: + if self._trio_socket.fileno() >= 0: + self._closed = True + self._trio_socket.close() + + def _convert_socket_error(self, exc: BaseException) -> NoReturn: + if isinstance(exc, trio.ClosedResourceError): + raise ClosedResourceError from exc + elif self._trio_socket.fileno() < 0 and self._closed: + raise ClosedResourceError from None + elif isinstance(exc, OSError): + raise BrokenResourceError from exc + else: + raise exc + + +class SocketStream(_TrioSocketMixin, abc.SocketStream): + def __init__(self, trio_socket: TrioSocketType) -> None: + super().__init__(trio_socket) + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + + async def receive(self, max_bytes: int = 65536) -> bytes: + with self._receive_guard: + try: + data = await self._trio_socket.recv(max_bytes) + except BaseException as exc: + self._convert_socket_error(exc) + + if data: + return data + else: + raise EndOfStream + + async def send(self, item: bytes) -> None: + with self._send_guard: + view = memoryview(item) + while view: + try: + bytes_sent = await self._trio_socket.send(view) + except BaseException as exc: + self._convert_socket_error(exc) + + view = view[bytes_sent:] + + async def send_eof(self) -> None: + self._trio_socket.shutdown(socket.SHUT_WR) + + +class UNIXSocketStream(SocketStream, abc.UNIXSocketStream): + async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]: + if not isinstance(msglen, int) or msglen < 0: + raise ValueError("msglen must be a non-negative integer") + if not isinstance(maxfds, int) or maxfds < 1: + raise ValueError("maxfds must be a positive integer") + + fds = array.array("i") + await trio.lowlevel.checkpoint() + with self._receive_guard: + while True: + try: + message, ancdata, flags, addr = await self._trio_socket.recvmsg( + msglen, socket.CMSG_LEN(maxfds * fds.itemsize) + ) + except BaseException as exc: + self._convert_socket_error(exc) + else: + if not message and not ancdata: + raise EndOfStream + + break + + for cmsg_level, cmsg_type, cmsg_data in ancdata: + if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS: + raise RuntimeError( + f"Received unexpected ancillary data; message = {message!r}, " + f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}" + ) + + fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) + + return message, list(fds) + + 
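+    # A usage sketch for this FD-passing pair (illustrative, not part of
+    # anyio itself): receive_fds() above and send_fds() below move open file
+    # descriptors across an AF_UNIX stream via SCM_RIGHTS ancillary data:
+    #
+    #     msg, fds = await stream.receive_fds(msglen=4096, maxfds=2)
+    #     for fd in fds:
+    #         os.close(fd)  # the receiver owns the duplicated descriptors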
async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None: + if not message: + raise ValueError("message must not be empty") + if not fds: + raise ValueError("fds must not be empty") + + filenos: list[int] = [] + for fd in fds: + if isinstance(fd, int): + filenos.append(fd) + elif isinstance(fd, IOBase): + filenos.append(fd.fileno()) + + fdarray = array.array("i", filenos) + await trio.lowlevel.checkpoint() + with self._send_guard: + while True: + try: + await self._trio_socket.sendmsg( + [message], + [ + ( + socket.SOL_SOCKET, + socket.SCM_RIGHTS, + fdarray, + ) + ], + ) + break + except BaseException as exc: + self._convert_socket_error(exc) + + +class TCPSocketListener(_TrioSocketMixin, abc.SocketListener): + def __init__(self, raw_socket: socket.socket): + super().__init__(trio.socket.from_stdlib_socket(raw_socket)) + self._accept_guard = ResourceGuard("accepting connections from") + + async def accept(self) -> SocketStream: + with self._accept_guard: + try: + trio_socket, _addr = await self._trio_socket.accept() + except BaseException as exc: + self._convert_socket_error(exc) + + trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + return SocketStream(trio_socket) + + +class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener): + def __init__(self, raw_socket: socket.socket): + super().__init__(trio.socket.from_stdlib_socket(raw_socket)) + self._accept_guard = ResourceGuard("accepting connections from") + + async def accept(self) -> UNIXSocketStream: + with self._accept_guard: + try: + trio_socket, _addr = await self._trio_socket.accept() + except BaseException as exc: + self._convert_socket_error(exc) + + return UNIXSocketStream(trio_socket) + + +class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket): + def __init__(self, trio_socket: TrioSocketType) -> None: + super().__init__(trio_socket) + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + + async def receive(self) -> tuple[bytes, IPSockAddrType]: + with self._receive_guard: + try: + data, addr = await self._trio_socket.recvfrom(65536) + return data, convert_ipv6_sockaddr(addr) + except BaseException as exc: + self._convert_socket_error(exc) + + async def send(self, item: UDPPacketType) -> None: + with self._send_guard: + try: + await self._trio_socket.sendto(*item) + except BaseException as exc: + self._convert_socket_error(exc) + + +class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket): + def __init__(self, trio_socket: TrioSocketType) -> None: + super().__init__(trio_socket) + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + + async def receive(self) -> bytes: + with self._receive_guard: + try: + return await self._trio_socket.recv(65536) + except BaseException as exc: + self._convert_socket_error(exc) + + async def send(self, item: bytes) -> None: + with self._send_guard: + try: + await self._trio_socket.send(item) + except BaseException as exc: + self._convert_socket_error(exc) + + +class UNIXDatagramSocket(_TrioSocketMixin[str], abc.UNIXDatagramSocket): + def __init__(self, trio_socket: TrioSocketType) -> None: + super().__init__(trio_socket) + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + + async def receive(self) -> UNIXDatagramPacketType: + with self._receive_guard: + try: + data, addr = await self._trio_socket.recvfrom(65536) + return data, addr + except BaseException as exc: + 
self._convert_socket_error(exc) + + async def send(self, item: UNIXDatagramPacketType) -> None: + with self._send_guard: + try: + await self._trio_socket.sendto(*item) + except BaseException as exc: + self._convert_socket_error(exc) + + +class ConnectedUNIXDatagramSocket( + _TrioSocketMixin[str], abc.ConnectedUNIXDatagramSocket +): + def __init__(self, trio_socket: TrioSocketType) -> None: + super().__init__(trio_socket) + self._receive_guard = ResourceGuard("reading from") + self._send_guard = ResourceGuard("writing to") + + async def receive(self) -> bytes: + with self._receive_guard: + try: + return await self._trio_socket.recv(65536) + except BaseException as exc: + self._convert_socket_error(exc) + + async def send(self, item: bytes) -> None: + with self._send_guard: + try: + await self._trio_socket.send(item) + except BaseException as exc: + self._convert_socket_error(exc) + + +# +# Synchronization +# + + +class Event(BaseEvent): + def __new__(cls) -> Event: + return object.__new__(cls) + + def __init__(self) -> None: + self.__original = trio.Event() + + def is_set(self) -> bool: + return self.__original.is_set() + + async def wait(self) -> None: + return await self.__original.wait() + + def statistics(self) -> EventStatistics: + orig_statistics = self.__original.statistics() + return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting) + + def set(self) -> None: + self.__original.set() + + +class Lock(BaseLock): + def __new__(cls, *, fast_acquire: bool = False) -> Lock: + return object.__new__(cls) + + def __init__(self, *, fast_acquire: bool = False) -> None: + self._fast_acquire = fast_acquire + self.__original = trio.Lock() + + @staticmethod + def _convert_runtime_error_msg(exc: RuntimeError) -> None: + if exc.args == ("attempt to re-acquire an already held Lock",): + exc.args = ("Attempted to acquire an already held Lock",) + + async def acquire(self) -> None: + if not self._fast_acquire: + try: + await self.__original.acquire() + except RuntimeError as exc: + self._convert_runtime_error_msg(exc) + raise + + return + + # This is the "fast path" where we don't let other tasks run + await trio.lowlevel.checkpoint_if_cancelled() + try: + self.__original.acquire_nowait() + except trio.WouldBlock: + await self.__original._lot.park() + except RuntimeError as exc: + self._convert_runtime_error_msg(exc) + raise + + def acquire_nowait(self) -> None: + try: + self.__original.acquire_nowait() + except trio.WouldBlock: + raise WouldBlock from None + except RuntimeError as exc: + self._convert_runtime_error_msg(exc) + raise + + def locked(self) -> bool: + return self.__original.locked() + + def release(self) -> None: + self.__original.release() + + def statistics(self) -> LockStatistics: + orig_statistics = self.__original.statistics() + owner = TrioTaskInfo(orig_statistics.owner) if orig_statistics.owner else None + return LockStatistics( + orig_statistics.locked, owner, orig_statistics.tasks_waiting + ) + + +class Semaphore(BaseSemaphore): + def __new__( + cls, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ) -> Semaphore: + return object.__new__(cls) + + def __init__( + self, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ) -> None: + super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire) + self.__original = trio.Semaphore(initial_value, max_value=max_value) + + async def acquire(self) -> None: + if not self._fast_acquire: + await self.__original.acquire() + return + + 
# This is the "fast path" where we don't let other tasks run + await trio.lowlevel.checkpoint_if_cancelled() + try: + self.__original.acquire_nowait() + except trio.WouldBlock: + await self.__original._lot.park() + + def acquire_nowait(self) -> None: + try: + self.__original.acquire_nowait() + except trio.WouldBlock: + raise WouldBlock from None + + @property + def max_value(self) -> int | None: + return self.__original.max_value + + @property + def value(self) -> int: + return self.__original.value + + def release(self) -> None: + self.__original.release() + + def statistics(self) -> SemaphoreStatistics: + orig_statistics = self.__original.statistics() + return SemaphoreStatistics(orig_statistics.tasks_waiting) + + +class CapacityLimiter(BaseCapacityLimiter): + def __new__( + cls, + total_tokens: float | None = None, + *, + original: trio.CapacityLimiter | None = None, + ) -> CapacityLimiter: + return object.__new__(cls) + + def __init__( + self, + total_tokens: float | None = None, + *, + original: trio.CapacityLimiter | None = None, + ) -> None: + if original is not None: + self.__original = original + else: + assert total_tokens is not None + self.__original = trio.CapacityLimiter(total_tokens) + + async def __aenter__(self) -> None: + return await self.__original.__aenter__() + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + await self.__original.__aexit__(exc_type, exc_val, exc_tb) + + @property + def total_tokens(self) -> float: + return self.__original.total_tokens + + @total_tokens.setter + def total_tokens(self, value: float) -> None: + self.__original.total_tokens = value + + @property + def borrowed_tokens(self) -> int: + return self.__original.borrowed_tokens + + @property + def available_tokens(self) -> float: + return self.__original.available_tokens + + def acquire_nowait(self) -> None: + self.__original.acquire_nowait() + + def acquire_on_behalf_of_nowait(self, borrower: object) -> None: + self.__original.acquire_on_behalf_of_nowait(borrower) + + async def acquire(self) -> None: + await self.__original.acquire() + + async def acquire_on_behalf_of(self, borrower: object) -> None: + await self.__original.acquire_on_behalf_of(borrower) + + def release(self) -> None: + return self.__original.release() + + def release_on_behalf_of(self, borrower: object) -> None: + return self.__original.release_on_behalf_of(borrower) + + def statistics(self) -> CapacityLimiterStatistics: + orig = self.__original.statistics() + return CapacityLimiterStatistics( + borrowed_tokens=orig.borrowed_tokens, + total_tokens=orig.total_tokens, + borrowers=tuple(orig.borrowers), + tasks_waiting=orig.tasks_waiting, + ) + + +_capacity_limiter_wrapper: trio.lowlevel.RunVar = RunVar("_capacity_limiter_wrapper") + + +# +# Signal handling +# + + +class _SignalReceiver: + _iterator: AsyncIterator[int] + + def __init__(self, signals: tuple[Signals, ...]): + self._signals = signals + + def __enter__(self) -> _SignalReceiver: + self._cm = trio.open_signal_receiver(*self._signals) + self._iterator = self._cm.__enter__() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: + return self._cm.__exit__(exc_type, exc_val, exc_tb) + + def __aiter__(self) -> _SignalReceiver: + return self + + async def __anext__(self) -> Signals: + signum = await self._iterator.__anext__() + return Signals(signum) + + +# +# 
Testing and debugging +# + + +class TestRunner(abc.TestRunner): + def __init__(self, **options: Any) -> None: + from queue import Queue + + self._call_queue: Queue[Callable[[], object]] = Queue() + self._send_stream: MemoryObjectSendStream | None = None + self._options = options + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: types.TracebackType | None, + ) -> None: + if self._send_stream: + self._send_stream.close() + while self._send_stream is not None: + self._call_queue.get()() + + async def _run_tests_and_fixtures(self) -> None: + self._send_stream, receive_stream = create_memory_object_stream(1) + with receive_stream: + async for coro, outcome_holder in receive_stream: + try: + retval = await coro + except BaseException as exc: + outcome_holder.append(Error(exc)) + else: + outcome_holder.append(Value(retval)) + + def _main_task_finished(self, outcome: object) -> None: + self._send_stream = None + + def _call_in_runner_task( + self, + func: Callable[P, Awaitable[T_Retval]], + *args: P.args, + **kwargs: P.kwargs, + ) -> T_Retval: + if self._send_stream is None: + trio.lowlevel.start_guest_run( + self._run_tests_and_fixtures, + run_sync_soon_threadsafe=self._call_queue.put, + done_callback=self._main_task_finished, + **self._options, + ) + while self._send_stream is None: + self._call_queue.get()() + + outcome_holder: list[Outcome] = [] + self._send_stream.send_nowait((func(*args, **kwargs), outcome_holder)) + while not outcome_holder: + self._call_queue.get()() + + return outcome_holder[0].unwrap() + + def run_asyncgen_fixture( + self, + fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]], + kwargs: dict[str, Any], + ) -> Iterable[T_Retval]: + asyncgen = fixture_func(**kwargs) + fixturevalue: T_Retval = self._call_in_runner_task(asyncgen.asend, None) + + yield fixturevalue + + try: + self._call_in_runner_task(asyncgen.asend, None) + except StopAsyncIteration: + pass + else: + self._call_in_runner_task(asyncgen.aclose) + raise RuntimeError("Async generator fixture did not stop") + + def run_fixture( + self, + fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]], + kwargs: dict[str, Any], + ) -> T_Retval: + return self._call_in_runner_task(fixture_func, **kwargs) + + def run_test( + self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any] + ) -> None: + self._call_in_runner_task(test_func, **kwargs) + + +class TrioTaskInfo(TaskInfo): + def __init__(self, task: trio.lowlevel.Task): + parent_id = None + if task.parent_nursery and task.parent_nursery.parent_task: + parent_id = id(task.parent_nursery.parent_task) + + super().__init__(id(task), parent_id, task.name, task.coro) + self._task = weakref.proxy(task) + + def has_pending_cancellation(self) -> bool: + try: + return self._task._cancel_status.effectively_cancelled + except ReferenceError: + # If the task is no longer around, it surely doesn't have a cancellation + # pending + return False + + +class TrioBackend(AsyncBackend): + @classmethod + def run( + cls, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], + args: tuple[Unpack[PosArgsT]], + kwargs: dict[str, Any], + options: dict[str, Any], + ) -> T_Retval: + return trio.run(func, *args) + + @classmethod + def current_token(cls) -> object: + return trio.lowlevel.current_trio_token() + + @classmethod + def current_time(cls) -> float: + return trio.current_time() + + @classmethod + def cancelled_exception_class(cls) -> type[BaseException]: + return trio.Cancelled + + @classmethod + 
async def checkpoint(cls) -> None: + await trio.lowlevel.checkpoint() + + @classmethod + async def checkpoint_if_cancelled(cls) -> None: + await trio.lowlevel.checkpoint_if_cancelled() + + @classmethod + async def cancel_shielded_checkpoint(cls) -> None: + await trio.lowlevel.cancel_shielded_checkpoint() + + @classmethod + async def sleep(cls, delay: float) -> None: + await trio.sleep(delay) + + @classmethod + def create_cancel_scope( + cls, *, deadline: float = math.inf, shield: bool = False + ) -> abc.CancelScope: + return CancelScope(deadline=deadline, shield=shield) + + @classmethod + def current_effective_deadline(cls) -> float: + return trio.current_effective_deadline() + + @classmethod + def create_task_group(cls) -> abc.TaskGroup: + return TaskGroup() + + @classmethod + def create_event(cls) -> abc.Event: + return Event() + + @classmethod + def create_lock(cls, *, fast_acquire: bool) -> Lock: + return Lock(fast_acquire=fast_acquire) + + @classmethod + def create_semaphore( + cls, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ) -> abc.Semaphore: + return Semaphore(initial_value, max_value=max_value, fast_acquire=fast_acquire) + + @classmethod + def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter: + return CapacityLimiter(total_tokens) + + @classmethod + async def run_sync_in_worker_thread( + cls, + func: Callable[[Unpack[PosArgsT]], T_Retval], + args: tuple[Unpack[PosArgsT]], + abandon_on_cancel: bool = False, + limiter: abc.CapacityLimiter | None = None, + ) -> T_Retval: + def wrapper() -> T_Retval: + with claim_worker_thread(TrioBackend, token): + return func(*args) + + token = TrioBackend.current_token() + return await run_sync( + wrapper, + abandon_on_cancel=abandon_on_cancel, + limiter=cast(trio.CapacityLimiter, limiter), + ) + + @classmethod + def check_cancelled(cls) -> None: + trio.from_thread.check_cancelled() + + @classmethod + def run_async_from_thread( + cls, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], + args: tuple[Unpack[PosArgsT]], + token: object, + ) -> T_Retval: + return trio.from_thread.run(func, *args) + + @classmethod + def run_sync_from_thread( + cls, + func: Callable[[Unpack[PosArgsT]], T_Retval], + args: tuple[Unpack[PosArgsT]], + token: object, + ) -> T_Retval: + return trio.from_thread.run_sync(func, *args) + + @classmethod + def create_blocking_portal(cls) -> abc.BlockingPortal: + return BlockingPortal() + + @classmethod + async def open_process( + cls, + command: StrOrBytesPath | Sequence[StrOrBytesPath], + *, + stdin: int | IO[Any] | None, + stdout: int | IO[Any] | None, + stderr: int | IO[Any] | None, + **kwargs: Any, + ) -> Process: + def convert_item(item: StrOrBytesPath) -> str: + str_or_bytes = os.fspath(item) + if isinstance(str_or_bytes, str): + return str_or_bytes + else: + return os.fsdecode(str_or_bytes) + + if isinstance(command, (str, bytes, PathLike)): + process = await trio.lowlevel.open_process( + convert_item(command), + stdin=stdin, + stdout=stdout, + stderr=stderr, + shell=True, + **kwargs, + ) + else: + process = await trio.lowlevel.open_process( + [convert_item(item) for item in command], + stdin=stdin, + stdout=stdout, + stderr=stderr, + shell=False, + **kwargs, + ) + + stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None + stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None + stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None + return Process(process, stdin_stream, 
stdout_stream, stderr_stream) + + @classmethod + def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None: + trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers) + + @classmethod + async def connect_tcp( + cls, host: str, port: int, local_address: IPSockAddrType | None = None + ) -> SocketStream: + family = socket.AF_INET6 if ":" in host else socket.AF_INET + trio_socket = trio.socket.socket(family) + trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + if local_address: + await trio_socket.bind(local_address) + + try: + await trio_socket.connect((host, port)) + except BaseException: + trio_socket.close() + raise + + return SocketStream(trio_socket) + + @classmethod + async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream: + trio_socket = trio.socket.socket(socket.AF_UNIX) + try: + await trio_socket.connect(path) + except BaseException: + trio_socket.close() + raise + + return UNIXSocketStream(trio_socket) + + @classmethod + def create_tcp_listener(cls, sock: socket.socket) -> abc.SocketListener: + return TCPSocketListener(sock) + + @classmethod + def create_unix_listener(cls, sock: socket.socket) -> abc.SocketListener: + return UNIXSocketListener(sock) + + @classmethod + async def create_udp_socket( + cls, + family: socket.AddressFamily, + local_address: IPSockAddrType | None, + remote_address: IPSockAddrType | None, + reuse_port: bool, + ) -> UDPSocket | ConnectedUDPSocket: + trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM) + + if reuse_port: + trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + + if local_address: + await trio_socket.bind(local_address) + + if remote_address: + await trio_socket.connect(remote_address) + return ConnectedUDPSocket(trio_socket) + else: + return UDPSocket(trio_socket) + + @classmethod + @overload + async def create_unix_datagram_socket( + cls, raw_socket: socket.socket, remote_path: None + ) -> abc.UNIXDatagramSocket: ... + + @classmethod + @overload + async def create_unix_datagram_socket( + cls, raw_socket: socket.socket, remote_path: str | bytes + ) -> abc.ConnectedUNIXDatagramSocket: ... 
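+    # The @overload stubs above exist only to narrow the return type for
+    # static checkers: remote_path=None maps to UNIXDatagramSocket, while a
+    # str/bytes path maps to ConnectedUNIXDatagramSocket. The single runtime
+    # implementation follows.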
+ + @classmethod + async def create_unix_datagram_socket( + cls, raw_socket: socket.socket, remote_path: str | bytes | None + ) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket: + trio_socket = trio.socket.from_stdlib_socket(raw_socket) + + if remote_path: + await trio_socket.connect(remote_path) + return ConnectedUNIXDatagramSocket(trio_socket) + else: + return UNIXDatagramSocket(trio_socket) + + @classmethod + async def getaddrinfo( + cls, + host: bytes | str | None, + port: str | int | None, + *, + family: int | AddressFamily = 0, + type: int | SocketKind = 0, + proto: int = 0, + flags: int = 0, + ) -> Sequence[ + tuple[ + AddressFamily, + SocketKind, + int, + str, + tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], + ] + ]: + return await trio.socket.getaddrinfo(host, port, family, type, proto, flags) + + @classmethod + async def getnameinfo( + cls, sockaddr: IPSockAddrType, flags: int = 0 + ) -> tuple[str, str]: + return await trio.socket.getnameinfo(sockaddr, flags) + + @classmethod + async def wait_readable(cls, obj: HasFileno | int) -> None: + try: + await wait_readable(obj) + except trio.ClosedResourceError as exc: + raise ClosedResourceError().with_traceback(exc.__traceback__) from None + except trio.BusyResourceError: + raise BusyResourceError("reading from") from None + + @classmethod + async def wait_writable(cls, obj: HasFileno | int) -> None: + try: + await wait_writable(obj) + except trio.ClosedResourceError as exc: + raise ClosedResourceError().with_traceback(exc.__traceback__) from None + except trio.BusyResourceError: + raise BusyResourceError("writing to") from None + + @classmethod + def current_default_thread_limiter(cls) -> CapacityLimiter: + try: + return _capacity_limiter_wrapper.get() + except LookupError: + limiter = CapacityLimiter( + original=trio.to_thread.current_default_thread_limiter() + ) + _capacity_limiter_wrapper.set(limiter) + return limiter + + @classmethod + def open_signal_receiver( + cls, *signals: Signals + ) -> AbstractContextManager[AsyncIterator[Signals]]: + return _SignalReceiver(signals) + + @classmethod + def get_current_task(cls) -> TaskInfo: + task = current_task() + return TrioTaskInfo(task) + + @classmethod + def get_running_tasks(cls) -> Sequence[TaskInfo]: + root_task = current_root_task() + assert root_task + task_infos = [TrioTaskInfo(root_task)] + nurseries = root_task.child_nurseries + while nurseries: + new_nurseries: list[trio.Nursery] = [] + for nursery in nurseries: + for task in nursery.child_tasks: + task_infos.append(TrioTaskInfo(task)) + new_nurseries.extend(task.child_nurseries) + + nurseries = new_nurseries + + return task_infos + + @classmethod + async def wait_all_tasks_blocked(cls) -> None: + from trio.testing import wait_all_tasks_blocked + + await wait_all_tasks_blocked() + + @classmethod + def create_test_runner(cls, options: dict[str, Any]) -> TestRunner: + return TestRunner(**options) + + +backend_class = TrioBackend diff --git a/venv/Lib/site-packages/anyio/_core/__init__.py b/venv/Lib/site-packages/anyio/_core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d0c3312d Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_asyncio_selector_thread.cpython-312.pyc 
b/venv/Lib/site-packages/anyio/_core/__pycache__/_asyncio_selector_thread.cpython-312.pyc new file mode 100644 index 00000000..c693e736 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_asyncio_selector_thread.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_eventloop.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_eventloop.cpython-312.pyc new file mode 100644 index 00000000..594dc7e4 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_eventloop.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_exceptions.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_exceptions.cpython-312.pyc new file mode 100644 index 00000000..6589cf65 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_exceptions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_fileio.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_fileio.cpython-312.pyc new file mode 100644 index 00000000..de855585 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_fileio.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_resources.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_resources.cpython-312.pyc new file mode 100644 index 00000000..6a957e6b Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_resources.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_signals.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_signals.cpython-312.pyc new file mode 100644 index 00000000..59101f09 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_signals.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_sockets.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_sockets.cpython-312.pyc new file mode 100644 index 00000000..2b1713ef Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_sockets.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_streams.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_streams.cpython-312.pyc new file mode 100644 index 00000000..52b112d7 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_streams.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-312.pyc new file mode 100644 index 00000000..abda6628 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_synchronization.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_synchronization.cpython-312.pyc new file mode 100644 index 00000000..232e53a8 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_synchronization.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_tasks.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_tasks.cpython-312.pyc new file mode 100644 index 00000000..0eb5e547 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_tasks.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_tempfile.cpython-312.pyc 
b/venv/Lib/site-packages/anyio/_core/__pycache__/_tempfile.cpython-312.pyc new file mode 100644 index 00000000..440bdd98 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_tempfile.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_testing.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_testing.cpython-312.pyc new file mode 100644 index 00000000..c2ec2c59 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_testing.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/__pycache__/_typedattr.cpython-312.pyc b/venv/Lib/site-packages/anyio/_core/__pycache__/_typedattr.cpython-312.pyc new file mode 100644 index 00000000..8ae61d04 Binary files /dev/null and b/venv/Lib/site-packages/anyio/_core/__pycache__/_typedattr.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/_core/_asyncio_selector_thread.py b/venv/Lib/site-packages/anyio/_core/_asyncio_selector_thread.py new file mode 100644 index 00000000..9f35bae5 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_asyncio_selector_thread.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import asyncio +import socket +import threading +from collections.abc import Callable +from selectors import EVENT_READ, EVENT_WRITE, DefaultSelector +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from _typeshed import FileDescriptorLike + +_selector_lock = threading.Lock() +_selector: Selector | None = None + + +class Selector: + def __init__(self) -> None: + self._thread = threading.Thread(target=self.run, name="AnyIO socket selector") + self._selector = DefaultSelector() + self._send, self._receive = socket.socketpair() + self._send.setblocking(False) + self._receive.setblocking(False) + # This somewhat reduces the amount of memory wasted queueing up data + # for wakeups. With these settings, maximum number of 1-byte sends + # before getting BlockingIOError: + # Linux 4.8: 6 + # macOS (darwin 15.5): 1 + # Windows 10: 525347 + # Windows you're weird. (And on Windows setting SNDBUF to 0 makes send + # blocking, even on non-blocking sockets, so don't do that.) + self._receive.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1) + self._send.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1) + # On Windows this is a TCP socket so this might matter. On other + # platforms this fails b/c AF_UNIX sockets aren't actually TCP. 
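+        # Taken together, _send/_receive implement the classic self-pipe
+        # wakeup trick: add_reader()/add_writer() call _notify_self(), which
+        # writes one byte so the selector thread's select() loop wakes up and
+        # picks up registrations made from the event loop thread.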
+ try: + self._send.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + except OSError: + pass + + self._selector.register(self._receive, EVENT_READ) + self._closed = False + + def start(self) -> None: + self._thread.start() + threading._register_atexit(self._stop) # type: ignore[attr-defined] + + def _stop(self) -> None: + global _selector + self._closed = True + self._notify_self() + self._send.close() + self._thread.join() + self._selector.unregister(self._receive) + self._receive.close() + self._selector.close() + _selector = None + assert not self._selector.get_map(), ( + "selector still has registered file descriptors after shutdown" + ) + + def _notify_self(self) -> None: + try: + self._send.send(b"\x00") + except BlockingIOError: + pass + + def add_reader(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None: + loop = asyncio.get_running_loop() + try: + key = self._selector.get_key(fd) + except KeyError: + self._selector.register(fd, EVENT_READ, {EVENT_READ: (loop, callback)}) + else: + if EVENT_READ in key.data: + raise ValueError( + "this file descriptor is already registered for reading" + ) + + key.data[EVENT_READ] = loop, callback + self._selector.modify(fd, key.events | EVENT_READ, key.data) + + self._notify_self() + + def add_writer(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None: + loop = asyncio.get_running_loop() + try: + key = self._selector.get_key(fd) + except KeyError: + self._selector.register(fd, EVENT_WRITE, {EVENT_WRITE: (loop, callback)}) + else: + if EVENT_WRITE in key.data: + raise ValueError( + "this file descriptor is already registered for writing" + ) + + key.data[EVENT_WRITE] = loop, callback + self._selector.modify(fd, key.events | EVENT_WRITE, key.data) + + self._notify_self() + + def remove_reader(self, fd: FileDescriptorLike) -> bool: + try: + key = self._selector.get_key(fd) + except KeyError: + return False + + if new_events := key.events ^ EVENT_READ: + del key.data[EVENT_READ] + self._selector.modify(fd, new_events, key.data) + else: + self._selector.unregister(fd) + + return True + + def remove_writer(self, fd: FileDescriptorLike) -> bool: + try: + key = self._selector.get_key(fd) + except KeyError: + return False + + if new_events := key.events ^ EVENT_WRITE: + del key.data[EVENT_WRITE] + self._selector.modify(fd, new_events, key.data) + else: + self._selector.unregister(fd) + + return True + + def run(self) -> None: + while not self._closed: + for key, events in self._selector.select(): + if key.fileobj is self._receive: + try: + while self._receive.recv(4096): + pass + except BlockingIOError: + pass + + continue + + if events & EVENT_READ: + loop, callback = key.data[EVENT_READ] + self.remove_reader(key.fd) + try: + loop.call_soon_threadsafe(callback) + except RuntimeError: + pass # the loop was already closed + + if events & EVENT_WRITE: + loop, callback = key.data[EVENT_WRITE] + self.remove_writer(key.fd) + try: + loop.call_soon_threadsafe(callback) + except RuntimeError: + pass # the loop was already closed + + +def get_selector() -> Selector: + global _selector + + with _selector_lock: + if _selector is None: + _selector = Selector() + _selector.start() + + return _selector diff --git a/venv/Lib/site-packages/anyio/_core/_eventloop.py b/venv/Lib/site-packages/anyio/_core/_eventloop.py new file mode 100644 index 00000000..6dcb4589 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_eventloop.py @@ -0,0 +1,166 @@ +from __future__ import annotations + +import math +import sys +import threading +from 
collections.abc import Awaitable, Callable, Generator +from contextlib import contextmanager +from importlib import import_module +from typing import TYPE_CHECKING, Any, TypeVar + +import sniffio + +if sys.version_info >= (3, 11): + from typing import TypeVarTuple, Unpack +else: + from typing_extensions import TypeVarTuple, Unpack + +if TYPE_CHECKING: + from ..abc import AsyncBackend + +# This must be updated when new backends are introduced +BACKENDS = "asyncio", "trio" + +T_Retval = TypeVar("T_Retval") +PosArgsT = TypeVarTuple("PosArgsT") + +threadlocals = threading.local() +loaded_backends: dict[str, type[AsyncBackend]] = {} + + +def run( + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], + *args: Unpack[PosArgsT], + backend: str = "asyncio", + backend_options: dict[str, Any] | None = None, +) -> T_Retval: + """ + Run the given coroutine function in an asynchronous event loop. + + The current thread must not be already running an event loop. + + :param func: a coroutine function + :param args: positional arguments to ``func`` + :param backend: name of the asynchronous event loop implementation – currently + either ``asyncio`` or ``trio`` + :param backend_options: keyword arguments to call the backend ``run()`` + implementation with (documented :ref:`here `) + :return: the return value of the coroutine function + :raises RuntimeError: if an asynchronous event loop is already running in this + thread + :raises LookupError: if the named backend is not found + + """ + try: + asynclib_name = sniffio.current_async_library() + except sniffio.AsyncLibraryNotFoundError: + pass + else: + raise RuntimeError(f"Already running {asynclib_name} in this thread") + + try: + async_backend = get_async_backend(backend) + except ImportError as exc: + raise LookupError(f"No such backend: {backend}") from exc + + token = None + if sniffio.current_async_library_cvar.get(None) is None: + # Since we're in control of the event loop, we can cache the name of the async + # library + token = sniffio.current_async_library_cvar.set(backend) + + try: + backend_options = backend_options or {} + return async_backend.run(func, args, {}, backend_options) + finally: + if token: + sniffio.current_async_library_cvar.reset(token) + + +async def sleep(delay: float) -> None: + """ + Pause the current task for the specified duration. + + :param delay: the duration, in seconds + + """ + return await get_async_backend().sleep(delay) + + +async def sleep_forever() -> None: + """ + Pause the current task until it's cancelled. + + This is a shortcut for ``sleep(math.inf)``. + + .. versionadded:: 3.1 + + """ + await sleep(math.inf) + + +async def sleep_until(deadline: float) -> None: + """ + Pause the current task until the given time. + + :param deadline: the absolute time to wake up at (according to the internal + monotonic clock of the event loop) + + .. versionadded:: 3.1 + + """ + now = current_time() + await sleep(max(deadline - now, 0)) + + +def current_time() -> float: + """ + Return the current value of the event loop's internal clock. 
+ + :return: the clock value (seconds) + + """ + return get_async_backend().current_time() + + +def get_all_backends() -> tuple[str, ...]: + """Return a tuple of the names of all built-in backends.""" + return BACKENDS + + +def get_cancelled_exc_class() -> type[BaseException]: + """Return the current async library's cancellation exception class.""" + return get_async_backend().cancelled_exception_class() + + +# +# Private API +# + + +@contextmanager +def claim_worker_thread( + backend_class: type[AsyncBackend], token: object +) -> Generator[Any, None, None]: + threadlocals.current_async_backend = backend_class + threadlocals.current_token = token + try: + yield + finally: + del threadlocals.current_async_backend + del threadlocals.current_token + + +def get_async_backend(asynclib_name: str | None = None) -> type[AsyncBackend]: + if asynclib_name is None: + asynclib_name = sniffio.current_async_library() + + # We use our own dict instead of sys.modules to get the already imported back-end + # class because the appropriate modules in sys.modules could potentially be only + # partially initialized + try: + return loaded_backends[asynclib_name] + except KeyError: + module = import_module(f"anyio._backends._{asynclib_name}") + loaded_backends[asynclib_name] = module.backend_class + return module.backend_class diff --git a/venv/Lib/site-packages/anyio/_core/_exceptions.py b/venv/Lib/site-packages/anyio/_core/_exceptions.py new file mode 100644 index 00000000..16b94482 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_exceptions.py @@ -0,0 +1,126 @@ +from __future__ import annotations + +import sys +from collections.abc import Generator +from textwrap import dedent +from typing import Any + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + + +class BrokenResourceError(Exception): + """ + Raised when trying to use a resource that has been rendered unusable due to external + causes (e.g. a send stream whose peer has disconnected). + """ + + +class BrokenWorkerProcess(Exception): + """ + Raised by :meth:`~anyio.to_process.run_sync` if the worker process terminates abruptly or + otherwise misbehaves. + """ + + +class BrokenWorkerIntepreter(Exception): + """ + Raised by :meth:`~anyio.to_interpreter.run_sync` if an unexpected exception is + raised in the subinterpreter. + """ + + def __init__(self, excinfo: Any): + # This was adapted from concurrent.futures.interpreter.ExecutionFailed + msg = excinfo.formatted + if not msg: + if excinfo.type and excinfo.msg: + msg = f"{excinfo.type.__name__}: {excinfo.msg}" + else: + msg = excinfo.type.__name__ or excinfo.msg + + super().__init__(msg) + self.excinfo = excinfo + + def __str__(self) -> str: + try: + formatted = self.excinfo.errdisplay + except Exception: + return super().__str__() + else: + return dedent( + f""" + {super().__str__()} + + Uncaught in the interpreter: + + {formatted} + """.strip() + ) + + +class BusyResourceError(Exception): + """ + Raised when two tasks are trying to read from or write to the same resource + concurrently. + """ + + def __init__(self, action: str): + super().__init__(f"Another task is already {action} this resource") + + +class ClosedResourceError(Exception): + """Raised when trying to use a resource that has been closed.""" + + +class DelimiterNotFound(Exception): + """ + Raised during + :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the + maximum number of bytes has been read without the delimiter being found. 
+ """ + + def __init__(self, max_bytes: int) -> None: + super().__init__( + f"The delimiter was not found among the first {max_bytes} bytes" + ) + + +class EndOfStream(Exception): + """ + Raised when trying to read from a stream that has been closed from the other end. + """ + + +class IncompleteRead(Exception): + """ + Raised during + :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or + :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the + connection is closed before the requested amount of bytes has been read. + """ + + def __init__(self) -> None: + super().__init__( + "The stream was closed before the read operation could be completed" + ) + + +class TypedAttributeLookupError(LookupError): + """ + Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute + is not found and no default value has been given. + """ + + +class WouldBlock(Exception): + """Raised by ``X_nowait`` functions if ``X()`` would block.""" + + +def iterate_exceptions( + exception: BaseException, +) -> Generator[BaseException, None, None]: + if isinstance(exception, BaseExceptionGroup): + for exc in exception.exceptions: + yield from iterate_exceptions(exc) + else: + yield exception diff --git a/venv/Lib/site-packages/anyio/_core/_fileio.py b/venv/Lib/site-packages/anyio/_core/_fileio.py new file mode 100644 index 00000000..a0d61984 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_fileio.py @@ -0,0 +1,742 @@ +from __future__ import annotations + +import os +import pathlib +import sys +from collections.abc import ( + AsyncIterator, + Callable, + Iterable, + Iterator, + Sequence, +) +from dataclasses import dataclass +from functools import partial +from os import PathLike +from typing import ( + IO, + TYPE_CHECKING, + Any, + AnyStr, + ClassVar, + Final, + Generic, + overload, +) + +from .. import to_thread +from ..abc import AsyncResource + +if TYPE_CHECKING: + from types import ModuleType + + from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer +else: + ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object + + +class AsyncFile(AsyncResource, Generic[AnyStr]): + """ + An asynchronous file object. + + This class wraps a standard file object and provides async friendly versions of the + following blocking methods (where available on the original file object): + + * read + * read1 + * readline + * readlines + * readinto + * readinto1 + * write + * writelines + * truncate + * seek + * tell + * flush + + All other methods are directly passed through. + + This class supports the asynchronous context manager protocol which closes the + underlying file at the end of the context block. + + This class also supports asynchronous iteration:: + + async with await open_file(...) 
as f: + async for line in f: + print(line) + """ + + def __init__(self, fp: IO[AnyStr]) -> None: + self._fp: Any = fp + + def __getattr__(self, name: str) -> object: + return getattr(self._fp, name) + + @property + def wrapped(self) -> IO[AnyStr]: + """The wrapped file object.""" + return self._fp + + async def __aiter__(self) -> AsyncIterator[AnyStr]: + while True: + line = await self.readline() + if line: + yield line + else: + break + + async def aclose(self) -> None: + return await to_thread.run_sync(self._fp.close) + + async def read(self, size: int = -1) -> AnyStr: + return await to_thread.run_sync(self._fp.read, size) + + async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes: + return await to_thread.run_sync(self._fp.read1, size) + + async def readline(self) -> AnyStr: + return await to_thread.run_sync(self._fp.readline) + + async def readlines(self) -> list[AnyStr]: + return await to_thread.run_sync(self._fp.readlines) + + async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> int: + return await to_thread.run_sync(self._fp.readinto, b) + + async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> int: + return await to_thread.run_sync(self._fp.readinto1, b) + + @overload + async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int: ... + + @overload + async def write(self: AsyncFile[str], b: str) -> int: ... + + async def write(self, b: ReadableBuffer | str) -> int: + return await to_thread.run_sync(self._fp.write, b) + + @overload + async def writelines( + self: AsyncFile[bytes], lines: Iterable[ReadableBuffer] + ) -> None: ... + + @overload + async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None: ... + + async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None: + return await to_thread.run_sync(self._fp.writelines, lines) + + async def truncate(self, size: int | None = None) -> int: + return await to_thread.run_sync(self._fp.truncate, size) + + async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int: + return await to_thread.run_sync(self._fp.seek, offset, whence) + + async def tell(self) -> int: + return await to_thread.run_sync(self._fp.tell) + + async def flush(self) -> None: + return await to_thread.run_sync(self._fp.flush) + + +@overload +async def open_file( + file: str | PathLike[str] | int, + mode: OpenBinaryMode, + buffering: int = ..., + encoding: str | None = ..., + errors: str | None = ..., + newline: str | None = ..., + closefd: bool = ..., + opener: Callable[[str, int], int] | None = ..., +) -> AsyncFile[bytes]: ... + + +@overload +async def open_file( + file: str | PathLike[str] | int, + mode: OpenTextMode = ..., + buffering: int = ..., + encoding: str | None = ..., + errors: str | None = ..., + newline: str | None = ..., + closefd: bool = ..., + opener: Callable[[str, int], int] | None = ..., +) -> AsyncFile[str]: ... + + +async def open_file( + file: str | PathLike[str] | int, + mode: str = "r", + buffering: int = -1, + encoding: str | None = None, + errors: str | None = None, + newline: str | None = None, + closefd: bool = True, + opener: Callable[[str, int], int] | None = None, +) -> AsyncFile[Any]: + """ + Open a file asynchronously. + + The arguments are exactly the same as for the builtin :func:`open`. 
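+
+    For example (a usage sketch, not taken from the upstream docstring)::
+
+        async with await open_file("data.txt") as f:
+            contents = await f.read()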
+ + :return: an asynchronous file object + + """ + fp = await to_thread.run_sync( + open, file, mode, buffering, encoding, errors, newline, closefd, opener + ) + return AsyncFile(fp) + + +def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]: + """ + Wrap an existing file as an asynchronous file. + + :param file: an existing file-like object + :return: an asynchronous file object + + """ + return AsyncFile(file) + + +@dataclass(eq=False) +class _PathIterator(AsyncIterator["Path"]): + iterator: Iterator[PathLike[str]] + + async def __anext__(self) -> Path: + nextval = await to_thread.run_sync( + next, self.iterator, None, abandon_on_cancel=True + ) + if nextval is None: + raise StopAsyncIteration from None + + return Path(nextval) + + +class Path: + """ + An asynchronous version of :class:`pathlib.Path`. + + This class cannot be substituted for :class:`pathlib.Path` or + :class:`pathlib.PurePath`, but it is compatible with the :class:`os.PathLike` + interface. + + It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for + the deprecated :meth:`~pathlib.Path.link_to` method. + + Some methods may be unavailable or have limited functionality, based on the Python + version: + + * :meth:`~pathlib.Path.copy` (available on Python 3.14 or later) + * :meth:`~pathlib.Path.copy_into` (available on Python 3.14 or later) + * :meth:`~pathlib.Path.from_uri` (available on Python 3.13 or later) + * :meth:`~pathlib.PurePath.full_match` (available on Python 3.13 or later) + * :attr:`~pathlib.Path.info` (available on Python 3.14 or later) + * :meth:`~pathlib.Path.is_junction` (available on Python 3.12 or later) + * :meth:`~pathlib.PurePath.match` (the ``case_sensitive`` parameter is only + available on Python 3.13 or later) + * :meth:`~pathlib.Path.move` (available on Python 3.14 or later) + * :meth:`~pathlib.Path.move_into` (available on Python 3.14 or later) + * :meth:`~pathlib.PurePath.relative_to` (the ``walk_up`` parameter is only available + on Python 3.12 or later) + * :meth:`~pathlib.Path.walk` (available on Python 3.12 or later) + + Any methods that do disk I/O need to be awaited on. 
These methods are: + + * :meth:`~pathlib.Path.absolute` + * :meth:`~pathlib.Path.chmod` + * :meth:`~pathlib.Path.cwd` + * :meth:`~pathlib.Path.exists` + * :meth:`~pathlib.Path.expanduser` + * :meth:`~pathlib.Path.group` + * :meth:`~pathlib.Path.hardlink_to` + * :meth:`~pathlib.Path.home` + * :meth:`~pathlib.Path.is_block_device` + * :meth:`~pathlib.Path.is_char_device` + * :meth:`~pathlib.Path.is_dir` + * :meth:`~pathlib.Path.is_fifo` + * :meth:`~pathlib.Path.is_file` + * :meth:`~pathlib.Path.is_junction` + * :meth:`~pathlib.Path.is_mount` + * :meth:`~pathlib.Path.is_socket` + * :meth:`~pathlib.Path.is_symlink` + * :meth:`~pathlib.Path.lchmod` + * :meth:`~pathlib.Path.lstat` + * :meth:`~pathlib.Path.mkdir` + * :meth:`~pathlib.Path.open` + * :meth:`~pathlib.Path.owner` + * :meth:`~pathlib.Path.read_bytes` + * :meth:`~pathlib.Path.read_text` + * :meth:`~pathlib.Path.readlink` + * :meth:`~pathlib.Path.rename` + * :meth:`~pathlib.Path.replace` + * :meth:`~pathlib.Path.resolve` + * :meth:`~pathlib.Path.rmdir` + * :meth:`~pathlib.Path.samefile` + * :meth:`~pathlib.Path.stat` + * :meth:`~pathlib.Path.symlink_to` + * :meth:`~pathlib.Path.touch` + * :meth:`~pathlib.Path.unlink` + * :meth:`~pathlib.Path.walk` + * :meth:`~pathlib.Path.write_bytes` + * :meth:`~pathlib.Path.write_text` + + Additionally, the following methods return an async iterator yielding + :class:`~.Path` objects: + + * :meth:`~pathlib.Path.glob` + * :meth:`~pathlib.Path.iterdir` + * :meth:`~pathlib.Path.rglob` + """ + + __slots__ = "_path", "__weakref__" + + __weakref__: Any + + def __init__(self, *args: str | PathLike[str]) -> None: + self._path: Final[pathlib.Path] = pathlib.Path(*args) + + def __fspath__(self) -> str: + return self._path.__fspath__() + + def __str__(self) -> str: + return self._path.__str__() + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.as_posix()!r})" + + def __bytes__(self) -> bytes: + return self._path.__bytes__() + + def __hash__(self) -> int: + return self._path.__hash__() + + def __eq__(self, other: object) -> bool: + target = other._path if isinstance(other, Path) else other + return self._path.__eq__(target) + + def __lt__(self, other: pathlib.PurePath | Path) -> bool: + target = other._path if isinstance(other, Path) else other + return self._path.__lt__(target) + + def __le__(self, other: pathlib.PurePath | Path) -> bool: + target = other._path if isinstance(other, Path) else other + return self._path.__le__(target) + + def __gt__(self, other: pathlib.PurePath | Path) -> bool: + target = other._path if isinstance(other, Path) else other + return self._path.__gt__(target) + + def __ge__(self, other: pathlib.PurePath | Path) -> bool: + target = other._path if isinstance(other, Path) else other + return self._path.__ge__(target) + + def __truediv__(self, other: str | PathLike[str]) -> Path: + return Path(self._path / other) + + def __rtruediv__(self, other: str | PathLike[str]) -> Path: + return Path(other) / self + + @property + def parts(self) -> tuple[str, ...]: + return self._path.parts + + @property + def drive(self) -> str: + return self._path.drive + + @property + def root(self) -> str: + return self._path.root + + @property + def anchor(self) -> str: + return self._path.anchor + + @property + def parents(self) -> Sequence[Path]: + return tuple(Path(p) for p in self._path.parents) + + @property + def parent(self) -> Path: + return Path(self._path.parent) + + @property + def name(self) -> str: + return self._path.name + + @property + def suffix(self) -> str: + return 
self._path.suffix + + @property + def suffixes(self) -> list[str]: + return self._path.suffixes + + @property + def stem(self) -> str: + return self._path.stem + + async def absolute(self) -> Path: + path = await to_thread.run_sync(self._path.absolute) + return Path(path) + + def as_posix(self) -> str: + return self._path.as_posix() + + def as_uri(self) -> str: + return self._path.as_uri() + + if sys.version_info >= (3, 13): + parser: ClassVar[ModuleType] = pathlib.Path.parser + + @classmethod + def from_uri(cls, uri: str) -> Path: + return Path(pathlib.Path.from_uri(uri)) + + def full_match( + self, path_pattern: str, *, case_sensitive: bool | None = None + ) -> bool: + return self._path.full_match(path_pattern, case_sensitive=case_sensitive) + + def match( + self, path_pattern: str, *, case_sensitive: bool | None = None + ) -> bool: + return self._path.match(path_pattern, case_sensitive=case_sensitive) + else: + + def match(self, path_pattern: str) -> bool: + return self._path.match(path_pattern) + + if sys.version_info >= (3, 14): + + @property + def info(self) -> Any: # TODO: add return type annotation when Typeshed gets it + return self._path.info + + async def copy( + self, + target: str | os.PathLike[str], + *, + follow_symlinks: bool = True, + dirs_exist_ok: bool = False, + preserve_metadata: bool = False, + ) -> Path: + func = partial( + self._path.copy, + follow_symlinks=follow_symlinks, + dirs_exist_ok=dirs_exist_ok, + preserve_metadata=preserve_metadata, + ) + return Path(await to_thread.run_sync(func, target)) + + async def copy_into( + self, + target_dir: str | os.PathLike[str], + *, + follow_symlinks: bool = True, + dirs_exist_ok: bool = False, + preserve_metadata: bool = False, + ) -> Path: + func = partial( + self._path.copy_into, + follow_symlinks=follow_symlinks, + dirs_exist_ok=dirs_exist_ok, + preserve_metadata=preserve_metadata, + ) + return Path(await to_thread.run_sync(func, target_dir)) + + async def move(self, target: str | os.PathLike[str]) -> Path: + # Upstream does not handle anyio.Path properly as a PathLike + target = pathlib.Path(target) + return Path(await to_thread.run_sync(self._path.move, target)) + + async def move_into( + self, + target_dir: str | os.PathLike[str], + ) -> Path: + return Path(await to_thread.run_sync(self._path.move_into, target_dir)) + + def is_relative_to(self, other: str | PathLike[str]) -> bool: + try: + self.relative_to(other) + return True + except ValueError: + return False + + async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None: + func = partial(os.chmod, follow_symlinks=follow_symlinks) + return await to_thread.run_sync(func, self._path, mode) + + @classmethod + async def cwd(cls) -> Path: + path = await to_thread.run_sync(pathlib.Path.cwd) + return cls(path) + + async def exists(self) -> bool: + return await to_thread.run_sync(self._path.exists, abandon_on_cancel=True) + + async def expanduser(self) -> Path: + return Path( + await to_thread.run_sync(self._path.expanduser, abandon_on_cancel=True) + ) + + def glob(self, pattern: str) -> AsyncIterator[Path]: + gen = self._path.glob(pattern) + return _PathIterator(gen) + + async def group(self) -> str: + return await to_thread.run_sync(self._path.group, abandon_on_cancel=True) + + async def hardlink_to( + self, target: str | bytes | PathLike[str] | PathLike[bytes] + ) -> None: + if isinstance(target, Path): + target = target._path + + await to_thread.run_sync(os.link, target, self) + + @classmethod + async def home(cls) -> Path: + home_path = await 
to_thread.run_sync(pathlib.Path.home) + return cls(home_path) + + def is_absolute(self) -> bool: + return self._path.is_absolute() + + async def is_block_device(self) -> bool: + return await to_thread.run_sync( + self._path.is_block_device, abandon_on_cancel=True + ) + + async def is_char_device(self) -> bool: + return await to_thread.run_sync( + self._path.is_char_device, abandon_on_cancel=True + ) + + async def is_dir(self) -> bool: + return await to_thread.run_sync(self._path.is_dir, abandon_on_cancel=True) + + async def is_fifo(self) -> bool: + return await to_thread.run_sync(self._path.is_fifo, abandon_on_cancel=True) + + async def is_file(self) -> bool: + return await to_thread.run_sync(self._path.is_file, abandon_on_cancel=True) + + if sys.version_info >= (3, 12): + + async def is_junction(self) -> bool: + return await to_thread.run_sync(self._path.is_junction) + + async def is_mount(self) -> bool: + return await to_thread.run_sync( + os.path.ismount, self._path, abandon_on_cancel=True + ) + + def is_reserved(self) -> bool: + return self._path.is_reserved() + + async def is_socket(self) -> bool: + return await to_thread.run_sync(self._path.is_socket, abandon_on_cancel=True) + + async def is_symlink(self) -> bool: + return await to_thread.run_sync(self._path.is_symlink, abandon_on_cancel=True) + + async def iterdir(self) -> AsyncIterator[Path]: + gen = ( + self._path.iterdir() + if sys.version_info < (3, 13) + else await to_thread.run_sync(self._path.iterdir, abandon_on_cancel=True) + ) + async for path in _PathIterator(gen): + yield path + + def joinpath(self, *args: str | PathLike[str]) -> Path: + return Path(self._path.joinpath(*args)) + + async def lchmod(self, mode: int) -> None: + await to_thread.run_sync(self._path.lchmod, mode) + + async def lstat(self) -> os.stat_result: + return await to_thread.run_sync(self._path.lstat, abandon_on_cancel=True) + + async def mkdir( + self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False + ) -> None: + await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok) + + @overload + async def open( + self, + mode: OpenBinaryMode, + buffering: int = ..., + encoding: str | None = ..., + errors: str | None = ..., + newline: str | None = ..., + ) -> AsyncFile[bytes]: ... + + @overload + async def open( + self, + mode: OpenTextMode = ..., + buffering: int = ..., + encoding: str | None = ..., + errors: str | None = ..., + newline: str | None = ..., + ) -> AsyncFile[str]: ... 
+ + async def open( + self, + mode: str = "r", + buffering: int = -1, + encoding: str | None = None, + errors: str | None = None, + newline: str | None = None, + ) -> AsyncFile[Any]: + fp = await to_thread.run_sync( + self._path.open, mode, buffering, encoding, errors, newline + ) + return AsyncFile(fp) + + async def owner(self) -> str: + return await to_thread.run_sync(self._path.owner, abandon_on_cancel=True) + + async def read_bytes(self) -> bytes: + return await to_thread.run_sync(self._path.read_bytes) + + async def read_text( + self, encoding: str | None = None, errors: str | None = None + ) -> str: + return await to_thread.run_sync(self._path.read_text, encoding, errors) + + if sys.version_info >= (3, 12): + + def relative_to( + self, *other: str | PathLike[str], walk_up: bool = False + ) -> Path: + return Path(self._path.relative_to(*other, walk_up=walk_up)) + + else: + + def relative_to(self, *other: str | PathLike[str]) -> Path: + return Path(self._path.relative_to(*other)) + + async def readlink(self) -> Path: + target = await to_thread.run_sync(os.readlink, self._path) + return Path(target) + + async def rename(self, target: str | pathlib.PurePath | Path) -> Path: + if isinstance(target, Path): + target = target._path + + await to_thread.run_sync(self._path.rename, target) + return Path(target) + + async def replace(self, target: str | pathlib.PurePath | Path) -> Path: + if isinstance(target, Path): + target = target._path + + await to_thread.run_sync(self._path.replace, target) + return Path(target) + + async def resolve(self, strict: bool = False) -> Path: + func = partial(self._path.resolve, strict=strict) + return Path(await to_thread.run_sync(func, abandon_on_cancel=True)) + + def rglob(self, pattern: str) -> AsyncIterator[Path]: + gen = self._path.rglob(pattern) + return _PathIterator(gen) + + async def rmdir(self) -> None: + await to_thread.run_sync(self._path.rmdir) + + async def samefile(self, other_path: str | PathLike[str]) -> bool: + if isinstance(other_path, Path): + other_path = other_path._path + + return await to_thread.run_sync( + self._path.samefile, other_path, abandon_on_cancel=True + ) + + async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result: + func = partial(os.stat, follow_symlinks=follow_symlinks) + return await to_thread.run_sync(func, self._path, abandon_on_cancel=True) + + async def symlink_to( + self, + target: str | bytes | PathLike[str] | PathLike[bytes], + target_is_directory: bool = False, + ) -> None: + if isinstance(target, Path): + target = target._path + + await to_thread.run_sync(self._path.symlink_to, target, target_is_directory) + + async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None: + await to_thread.run_sync(self._path.touch, mode, exist_ok) + + async def unlink(self, missing_ok: bool = False) -> None: + try: + await to_thread.run_sync(self._path.unlink) + except FileNotFoundError: + if not missing_ok: + raise + + if sys.version_info >= (3, 12): + + async def walk( + self, + top_down: bool = True, + on_error: Callable[[OSError], object] | None = None, + follow_symlinks: bool = False, + ) -> AsyncIterator[tuple[Path, list[str], list[str]]]: + def get_next_value() -> tuple[pathlib.Path, list[str], list[str]] | None: + try: + return next(gen) + except StopIteration: + return None + + gen = self._path.walk(top_down, on_error, follow_symlinks) + while True: + value = await to_thread.run_sync(get_next_value) + if value is None: + return + + root, dirs, paths = value + yield Path(root), dirs, paths + + 
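The wrapping pattern above is uniform: each blocking `pathlib.Path` call is handed off to a worker thread via `to_thread.run_sync()`, and methods that return iterators are funneled through `_PathIterator`. A minimal usage sketch (the `/tmp/demo` directory is a placeholder):

```python
import anyio

async def main() -> None:
    root = anyio.Path("/tmp/demo")  # placeholder directory
    await root.mkdir(parents=True, exist_ok=True)
    await (root / "hello.txt").write_text("hi")  # runs in a worker thread
    print(await (root / "hello.txt").read_text())
    # glob() returns an async iterator yielding anyio.Path objects
    async for found in root.glob("*.txt"):
        print(found.name)

anyio.run(main)
```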
def with_name(self, name: str) -> Path: + return Path(self._path.with_name(name)) + + def with_stem(self, stem: str) -> Path: + return Path(self._path.with_name(stem + self._path.suffix)) + + def with_suffix(self, suffix: str) -> Path: + return Path(self._path.with_suffix(suffix)) + + def with_segments(self, *pathsegments: str | PathLike[str]) -> Path: + return Path(*pathsegments) + + async def write_bytes(self, data: bytes) -> int: + return await to_thread.run_sync(self._path.write_bytes, data) + + async def write_text( + self, + data: str, + encoding: str | None = None, + errors: str | None = None, + newline: str | None = None, + ) -> int: + # Path.write_text() does not support the "newline" parameter before Python 3.10 + def sync_write_text() -> int: + with self._path.open( + "w", encoding=encoding, errors=errors, newline=newline + ) as fp: + return fp.write(data) + + return await to_thread.run_sync(sync_write_text) + + +PathLike.register(Path) diff --git a/venv/Lib/site-packages/anyio/_core/_resources.py b/venv/Lib/site-packages/anyio/_core/_resources.py new file mode 100644 index 00000000..b9a5344a --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_resources.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +from ..abc import AsyncResource +from ._tasks import CancelScope + + +async def aclose_forcefully(resource: AsyncResource) -> None: + """ + Close an asynchronous resource in a cancelled scope. + + Doing this closes the resource without waiting on anything. + + :param resource: the resource to close + + """ + with CancelScope() as scope: + scope.cancel() + await resource.aclose() diff --git a/venv/Lib/site-packages/anyio/_core/_signals.py b/venv/Lib/site-packages/anyio/_core/_signals.py new file mode 100644 index 00000000..f3451d30 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_signals.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +from collections.abc import AsyncIterator +from contextlib import AbstractContextManager +from signal import Signals + +from ._eventloop import get_async_backend + + +def open_signal_receiver( + *signals: Signals, +) -> AbstractContextManager[AsyncIterator[Signals]]: + """ + Start receiving operating system signals. + + :param signals: signals to receive (e.g. ``signal.SIGINT``) + :return: an asynchronous context manager for an asynchronous iterator which yields + signal numbers + + .. warning:: Windows does not support signals natively so it is best to avoid + relying on this in cross-platform applications. + + .. warning:: On asyncio, this permanently replaces any previous signal handler for + the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`. + + """ + return get_async_backend().open_signal_receiver(*signals) diff --git a/venv/Lib/site-packages/anyio/_core/_sockets.py b/venv/Lib/site-packages/anyio/_core/_sockets.py new file mode 100644 index 00000000..054bcdda --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_sockets.py @@ -0,0 +1,792 @@ +from __future__ import annotations + +import errno +import os +import socket +import ssl +import stat +import sys +from collections.abc import Awaitable +from ipaddress import IPv6Address, ip_address +from os import PathLike, chmod +from socket import AddressFamily, SocketKind +from typing import TYPE_CHECKING, Any, Literal, cast, overload + +from .. 
import to_thread
+from ..abc import (
+    ConnectedUDPSocket,
+    ConnectedUNIXDatagramSocket,
+    IPAddressType,
+    IPSockAddrType,
+    SocketListener,
+    SocketStream,
+    UDPSocket,
+    UNIXDatagramSocket,
+    UNIXSocketStream,
+)
+from ..streams.stapled import MultiListener
+from ..streams.tls import TLSStream
+from ._eventloop import get_async_backend
+from ._resources import aclose_forcefully
+from ._synchronization import Event
+from ._tasks import create_task_group, move_on_after
+
+if TYPE_CHECKING:
+    from _typeshed import FileDescriptorLike
+else:
+    FileDescriptorLike = object
+
+if sys.version_info < (3, 11):
+    from exceptiongroup import ExceptionGroup
+
+if sys.version_info < (3, 13):
+    from typing_extensions import deprecated
+else:
+    from warnings import deprecated
+
+IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41)  # https://bugs.python.org/issue29515
+
+AnyIPAddressFamily = Literal[
+    AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
+]
+IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]
+
+
+# tls_hostname given
+@overload
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = ...,
+    ssl_context: ssl.SSLContext | None = ...,
+    tls_standard_compatible: bool = ...,
+    tls_hostname: str,
+    happy_eyeballs_delay: float = ...,
+) -> TLSStream: ...
+
+
+# ssl_context given
+@overload
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = ...,
+    ssl_context: ssl.SSLContext,
+    tls_standard_compatible: bool = ...,
+    tls_hostname: str | None = ...,
+    happy_eyeballs_delay: float = ...,
+) -> TLSStream: ...
+
+
+# tls=True
+@overload
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = ...,
+    tls: Literal[True],
+    ssl_context: ssl.SSLContext | None = ...,
+    tls_standard_compatible: bool = ...,
+    tls_hostname: str | None = ...,
+    happy_eyeballs_delay: float = ...,
+) -> TLSStream: ...
+
+
+# tls=False
+@overload
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = ...,
+    tls: Literal[False],
+    ssl_context: ssl.SSLContext | None = ...,
+    tls_standard_compatible: bool = ...,
+    tls_hostname: str | None = ...,
+    happy_eyeballs_delay: float = ...,
+) -> SocketStream: ...
+
+
+# No TLS arguments
+@overload
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = ...,
+    happy_eyeballs_delay: float = ...,
+) -> SocketStream: ...
+
+
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = None,
+    tls: bool = False,
+    ssl_context: ssl.SSLContext | None = None,
+    tls_standard_compatible: bool = True,
+    tls_hostname: str | None = None,
+    happy_eyeballs_delay: float = 0.25,
+) -> SocketStream | TLSStream:
+    """
+    Connect to a host using the TCP protocol.
+
+    This function implements the stateless version of the Happy Eyeballs algorithm (RFC
+    6555). If ``remote_host`` is a host name that resolves to multiple IP addresses,
+    each one is tried until one connection attempt succeeds. If the first attempt does
+    not connect within 250 milliseconds, a second attempt is started using the next
+    address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if
+    available) is tried first.
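To make the happy eyeballs flow concrete, here is a minimal client sketch; `example.com:80` is a placeholder endpoint:

```python
import anyio

async def main() -> None:
    # Resolves the name, then races IPv6/IPv4 attempts 250 ms apart
    stream = await anyio.connect_tcp("example.com", 80)
    async with stream:
        await stream.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        print(await stream.receive())

anyio.run(main)
```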
+
+    When the connection has been established, a TLS handshake will be done if either
+    ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.
+
+    :param remote_host: the IP address or host name to connect to
+    :param remote_port: port on the target host to connect to
+    :param local_host: the interface address or name to bind the socket to before
+        connecting
+    :param tls: ``True`` to do a TLS handshake with the connected stream and return a
+        :class:`~anyio.streams.tls.TLSStream` instead
+    :param ssl_context: the SSL context object to use (if omitted, a default context is
+        created)
+    :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake
+        before closing the stream and requires that the server does this as well.
+        Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
+        Some protocols, such as HTTP, require this option to be ``False``.
+        See :meth:`~ssl.SSLContext.wrap_socket` for details.
+    :param tls_hostname: host name to check the server certificate against (defaults to
+        the value of ``remote_host``)
+    :param happy_eyeballs_delay: delay (in seconds) before starting the next connection
+        attempt
+    :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
+    :raises OSError: if the connection attempt fails
+
+    """
+    # Placed here due to https://github.com/python/mypy/issues/7057
+    connected_stream: SocketStream | None = None
+
+    async def try_connect(remote_host: str, event: Event) -> None:
+        nonlocal connected_stream
+        try:
+            stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
+        except OSError as exc:
+            oserrors.append(exc)
+            return
+        else:
+            if connected_stream is None:
+                connected_stream = stream
+                tg.cancel_scope.cancel()
+            else:
+                await stream.aclose()
+        finally:
+            event.set()
+
+    asynclib = get_async_backend()
+    local_address: IPSockAddrType | None = None
+    family = socket.AF_UNSPEC
+    if local_host:
+        gai_res = await getaddrinfo(str(local_host), None)
+        family, *_, local_address = gai_res[0]
+
+    target_host = str(remote_host)
+    try:
+        addr_obj = ip_address(remote_host)
+    except ValueError:
+        addr_obj = None
+
+    if addr_obj is not None:
+        if isinstance(addr_obj, IPv6Address):
+            target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
+        else:
+            target_addrs = [(socket.AF_INET, addr_obj.compressed)]
+    else:
+        # getaddrinfo() will raise an exception if name resolution fails
+        gai_res = await getaddrinfo(
+            target_host, remote_port, family=family, type=socket.SOCK_STREAM
+        )
+
+        # Organize the list so that the first address is an IPv6 address (if available)
+        # and the second one is an IPv4 address. The rest can be in whatever order.
+ v6_found = v4_found = False + target_addrs = [] + for af, *rest, sa in gai_res: + if af == socket.AF_INET6 and not v6_found: + v6_found = True + target_addrs.insert(0, (af, sa[0])) + elif af == socket.AF_INET and not v4_found and v6_found: + v4_found = True + target_addrs.insert(1, (af, sa[0])) + else: + target_addrs.append((af, sa[0])) + + oserrors: list[OSError] = [] + try: + async with create_task_group() as tg: + for i, (af, addr) in enumerate(target_addrs): + event = Event() + tg.start_soon(try_connect, addr, event) + with move_on_after(happy_eyeballs_delay): + await event.wait() + + if connected_stream is None: + cause = ( + oserrors[0] + if len(oserrors) == 1 + else ExceptionGroup("multiple connection attempts failed", oserrors) + ) + raise OSError("All connection attempts failed") from cause + finally: + oserrors.clear() + + if tls or tls_hostname or ssl_context: + try: + return await TLSStream.wrap( + connected_stream, + server_side=False, + hostname=tls_hostname or str(remote_host), + ssl_context=ssl_context, + standard_compatible=tls_standard_compatible, + ) + except BaseException: + await aclose_forcefully(connected_stream) + raise + + return connected_stream + + +async def connect_unix(path: str | bytes | PathLike[Any]) -> UNIXSocketStream: + """ + Connect to the given UNIX socket. + + Not available on Windows. + + :param path: path to the socket + :return: a socket stream object + + """ + path = os.fspath(path) + return await get_async_backend().connect_unix(path) + + +async def create_tcp_listener( + *, + local_host: IPAddressType | None = None, + local_port: int = 0, + family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC, + backlog: int = 65536, + reuse_port: bool = False, +) -> MultiListener[SocketStream]: + """ + Create a TCP socket listener. + + :param local_port: port number to listen on + :param local_host: IP address of the interface to listen on. If omitted, listen on + all IPv4 and IPv6 interfaces. To listen on all interfaces on a specific address + family, use ``0.0.0.0`` for IPv4 or ``::`` for IPv6. + :param family: address family (used if ``local_host`` was omitted) + :param backlog: maximum number of queued incoming connections (up to a maximum of + 2**16, or 65536) + :param reuse_port: ``True`` to allow multiple sockets to bind to the same + address/port (not supported on Windows) + :return: a list of listener objects + + """ + asynclib = get_async_backend() + backlog = min(backlog, 65536) + local_host = str(local_host) if local_host is not None else None + gai_res = await getaddrinfo( + local_host, + local_port, + family=family, + type=socket.SocketKind.SOCK_STREAM if sys.platform == "win32" else 0, + flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG, + ) + listeners: list[SocketListener] = [] + try: + # The set() is here to work around a glibc bug: + # https://sourceware.org/bugzilla/show_bug.cgi?id=14969 + sockaddr: tuple[str, int] | tuple[str, int, int, int] + for fam, kind, *_, sockaddr in sorted(set(gai_res)): + # Workaround for an uvloop bug where we don't get the correct scope ID for + # IPv6 link-local addresses when passing type=socket.SOCK_STREAM to + # getaddrinfo(): https://github.com/MagicStack/uvloop/issues/539 + if sys.platform != "win32" and kind is not SocketKind.SOCK_STREAM: + continue + + raw_socket = socket.socket(fam) + raw_socket.setblocking(False) + + # For Windows, enable exclusive address use. For others, enable address + # reuse. 
+            if sys.platform == "win32":
+                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
+            else:
+                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+            if reuse_port:
+                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+
+            # If only IPv6 was requested, disable dual stack operation
+            if fam == socket.AF_INET6:
+                raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
+
+            # Workaround for #554
+            if "%" in sockaddr[0]:
+                addr, scope_id = sockaddr[0].split("%", 1)
+                sockaddr = (addr, sockaddr[1], 0, int(scope_id))
+
+            raw_socket.bind(sockaddr)
+            raw_socket.listen(backlog)
+            listener = asynclib.create_tcp_listener(raw_socket)
+            listeners.append(listener)
+    except BaseException:
+        for listener in listeners:
+            await listener.aclose()
+
+        raise
+
+    return MultiListener(listeners)
+
+
+async def create_unix_listener(
+    path: str | bytes | PathLike[Any],
+    *,
+    mode: int | None = None,
+    backlog: int = 65536,
+) -> SocketListener:
+    """
+    Create a UNIX socket listener.
+
+    Not available on Windows.
+
+    :param path: path of the socket
+    :param mode: permissions to set on the socket
+    :param backlog: maximum number of queued incoming connections (up to a maximum of
+        2**16, or 65536)
+    :return: a listener object
+
+    .. versionchanged:: 3.0
+        If a socket already exists on the file system in the given path, it will be
+        removed first.
+
+    """
+    backlog = min(backlog, 65536)
+    raw_socket = await setup_unix_local_socket(path, mode, socket.SOCK_STREAM)
+    try:
+        raw_socket.listen(backlog)
+        return get_async_backend().create_unix_listener(raw_socket)
+    except BaseException:
+        raw_socket.close()
+        raise
+
+
+async def create_udp_socket(
+    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
+    *,
+    local_host: IPAddressType | None = None,
+    local_port: int = 0,
+    reuse_port: bool = False,
+) -> UDPSocket:
+    """
+    Create a UDP socket.
+
+    If ``local_port`` has been given, the socket will be bound to this port on the local
+    machine, making this socket suitable for providing UDP based services.
+
+    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
+        determined from ``local_host`` if omitted
+    :param local_host: IP address or host name of the local interface to bind to
+    :param local_port: local port to bind to
+    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
+        address/port (not supported on Windows)
+    :return: a UDP socket
+
+    """
+    if family is AddressFamily.AF_UNSPEC and not local_host:
+        raise ValueError('Either "family" or "local_host" must be given')
+
+    if local_host:
+        gai_res = await getaddrinfo(
+            str(local_host),
+            local_port,
+            family=family,
+            type=socket.SOCK_DGRAM,
+            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
+        )
+        family = cast(AnyIPAddressFamily, gai_res[0][0])
+        local_address = gai_res[0][-1]
+    elif family is AddressFamily.AF_INET6:
+        local_address = ("::", 0)
+    else:
+        local_address = ("0.0.0.0", 0)
+
+    sock = await get_async_backend().create_udp_socket(
+        family, local_address, None, reuse_port
+    )
+    return cast(UDPSocket, sock)
+
+
+async def create_connected_udp_socket(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
+    local_host: IPAddressType | None = None,
+    local_port: int = 0,
+    reuse_port: bool = False,
+) -> ConnectedUDPSocket:
+    """
+    Create a connected UDP socket.
+
+    Connected UDP sockets can only communicate with the specified remote host/port, and
+    any packets sent from other sources are dropped.
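A short sketch of the connected-UDP pattern described above; `127.0.0.1:9999` is a placeholder peer that must be listening for `receive()` to ever complete:

```python
import anyio

async def main() -> None:
    async with await anyio.create_connected_udp_socket(
        remote_host="127.0.0.1", remote_port=9999  # placeholder peer
    ) as udp:
        await udp.send(b"ping")     # no address argument: the socket is connected
        print(await udp.receive())  # only datagrams from that peer are delivered

anyio.run(main)
```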
+ + :param remote_host: remote host to set as the default target + :param remote_port: port on the remote host to set as the default target + :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically + determined from ``local_host`` or ``remote_host`` if omitted + :param local_host: IP address or host name of the local interface to bind to + :param local_port: local port to bind to + :param reuse_port: ``True`` to allow multiple sockets to bind to the same + address/port (not supported on Windows) + :return: a connected UDP socket + + """ + local_address = None + if local_host: + gai_res = await getaddrinfo( + str(local_host), + local_port, + family=family, + type=socket.SOCK_DGRAM, + flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG, + ) + family = cast(AnyIPAddressFamily, gai_res[0][0]) + local_address = gai_res[0][-1] + + gai_res = await getaddrinfo( + str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM + ) + family = cast(AnyIPAddressFamily, gai_res[0][0]) + remote_address = gai_res[0][-1] + + sock = await get_async_backend().create_udp_socket( + family, local_address, remote_address, reuse_port + ) + return cast(ConnectedUDPSocket, sock) + + +async def create_unix_datagram_socket( + *, + local_path: None | str | bytes | PathLike[Any] = None, + local_mode: int | None = None, +) -> UNIXDatagramSocket: + """ + Create a UNIX datagram socket. + + Not available on Windows. + + If ``local_path`` has been given, the socket will be bound to this path, making this + socket suitable for receiving datagrams from other processes. Other processes can + send datagrams to this socket only if ``local_path`` is set. + + If a socket already exists on the file system in the ``local_path``, it will be + removed first. + + :param local_path: the path on which to bind to + :param local_mode: permissions to set on the local socket + :return: a UNIX datagram socket + + """ + raw_socket = await setup_unix_local_socket( + local_path, local_mode, socket.SOCK_DGRAM + ) + return await get_async_backend().create_unix_datagram_socket(raw_socket, None) + + +async def create_connected_unix_datagram_socket( + remote_path: str | bytes | PathLike[Any], + *, + local_path: None | str | bytes | PathLike[Any] = None, + local_mode: int | None = None, +) -> ConnectedUNIXDatagramSocket: + """ + Create a connected UNIX datagram socket. + + Connected datagram sockets can only communicate with the specified remote path. + + If ``local_path`` has been given, the socket will be bound to this path, making + this socket suitable for receiving datagrams from other processes. Other processes + can send datagrams to this socket only if ``local_path`` is set. + + If a socket already exists on the file system in the ``local_path``, it will be + removed first. 
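A minimal sketch of the connected UNIX datagram variant (POSIX only); both socket paths below are placeholders:

```python
import anyio

async def main() -> None:
    async with await anyio.create_connected_unix_datagram_socket(
        "/tmp/peer.sock",           # placeholder: the peer must be bound here
        local_path="/tmp/me.sock",  # placeholder: binding lets the peer reply
    ) as sock:
        await sock.send(b"hello")
        print(await sock.receive())

anyio.run(main)
```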
+
+    :param remote_path: the path to set as the default target
+    :param local_path: the path on which to bind to
+    :param local_mode: permissions to set on the local socket
+    :return: a connected UNIX datagram socket
+
+    """
+    remote_path = os.fspath(remote_path)
+    raw_socket = await setup_unix_local_socket(
+        local_path, local_mode, socket.SOCK_DGRAM
+    )
+    return await get_async_backend().create_unix_datagram_socket(
+        raw_socket, remote_path
+    )
+
+
+async def getaddrinfo(
+    host: bytes | str | None,
+    port: str | int | None,
+    *,
+    family: int | AddressFamily = 0,
+    type: int | SocketKind = 0,
+    proto: int = 0,
+    flags: int = 0,
+) -> list[tuple[AddressFamily, SocketKind, int, str, tuple[str, int]]]:
+    """
+    Look up a numeric IP address given a host name.
+
+    Internationalized domain names are translated according to the (non-transitional)
+    IDNA 2008 standard.
+
+    .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of
+        (host, port), unlike what :func:`socket.getaddrinfo` does.
+
+    :param host: host name
+    :param port: port number
+    :param family: socket family (``AF_INET``, ...)
+    :param type: socket type (``SOCK_STREAM``, ...)
+    :param proto: protocol number
+    :param flags: flags to pass to upstream ``getaddrinfo()``
+    :return: list of tuples containing (family, type, proto, canonname, sockaddr)
+
+    .. seealso:: :func:`socket.getaddrinfo`
+
+    """
+    # Handle unicode hostnames
+    if isinstance(host, str):
+        try:
+            encoded_host: bytes | None = host.encode("ascii")
+        except UnicodeEncodeError:
+            import idna
+
+            encoded_host = idna.encode(host, uts46=True)
+    else:
+        encoded_host = host
+
+    gai_res = await get_async_backend().getaddrinfo(
+        encoded_host, port, family=family, type=type, proto=proto, flags=flags
+    )
+    return [
+        (family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr))
+        for family, type, proto, canonname, sockaddr in gai_res
+        # filter out IPv6 results when IPv6 is disabled
+        if not isinstance(sockaddr[0], int)
+    ]
+
+
+def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]:
+    """
+    Look up the host name of an IP address.
+
+    :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4)
+    :param flags: flags to pass to upstream ``getnameinfo()``
+    :return: a tuple of (host name, service name)
+
+    .. seealso:: :func:`socket.getnameinfo`
+
+    """
+    return get_async_backend().getnameinfo(sockaddr, flags)
+
+
+@deprecated("This function is deprecated; use `wait_readable` instead")
+def wait_socket_readable(sock: socket.socket) -> Awaitable[None]:
+    """
+    .. deprecated:: 4.7.0
+        Use :func:`wait_readable` instead.
+
+    Wait until the given socket has data to be read.
+
+    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
+        level constructs like socket streams!
+
+    :param sock: a socket object
+    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
+        socket to become readable
+    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
+        to become readable
+
+    """
+    return get_async_backend().wait_readable(sock.fileno())
+
+
+@deprecated("This function is deprecated; use `wait_writable` instead")
+def wait_socket_writable(sock: socket.socket) -> Awaitable[None]:
+    """
+    .. deprecated:: 4.7.0
+        Use :func:`wait_writable` instead.
+
+    Wait until the given socket can be written to.
+
+    This does **NOT** work on Windows when using the asyncio backend with a proactor
+    event loop (default on py3.8+).
+
+    ..
warning:: Only use this on raw sockets that have not been wrapped by any higher + level constructs like socket streams! + + :param sock: a socket object + :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the + socket to become writable + :raises ~anyio.BusyResourceError: if another task is already waiting for the socket + to become writable + + """ + return get_async_backend().wait_writable(sock.fileno()) + + +def wait_readable(obj: FileDescriptorLike) -> Awaitable[None]: + """ + Wait until the given object has data to be read. + + On Unix systems, ``obj`` must either be an integer file descriptor, or else an + object with a ``.fileno()`` method which returns an integer file descriptor. Any + kind of file descriptor can be passed, though the exact semantics will depend on + your kernel. For example, this probably won't do anything useful for on-disk files. + + On Windows systems, ``obj`` must either be an integer ``SOCKET`` handle, or else an + object with a ``.fileno()`` method which returns an integer ``SOCKET`` handle. File + descriptors aren't supported, and neither are handles that refer to anything besides + a ``SOCKET``. + + On backends where this functionality is not natively provided (asyncio + ``ProactorEventLoop`` on Windows), it is provided using a separate selector thread + which is set to shut down when the interpreter shuts down. + + .. warning:: Don't use this on raw sockets that have been wrapped by any higher + level constructs like socket streams! + + :param obj: an object with a ``.fileno()`` method or an integer handle + :raises ~anyio.ClosedResourceError: if the object was closed while waiting for the + object to become readable + :raises ~anyio.BusyResourceError: if another task is already waiting for the object + to become readable + + """ + return get_async_backend().wait_readable(obj) + + +def wait_writable(obj: FileDescriptorLike) -> Awaitable[None]: + """ + Wait until the given object can be written to. + + :param obj: an object with a ``.fileno()`` method or an integer handle + :raises ~anyio.ClosedResourceError: if the object was closed while waiting for the + object to become writable + :raises ~anyio.BusyResourceError: if another task is already waiting for the object + to become writable + + .. seealso:: See the documentation of :func:`wait_readable` for the definition of + ``obj`` and notes on backend compatibility. + + .. warning:: Don't use this on raw sockets that have been wrapped by any higher + level constructs like socket streams! + + """ + return get_async_backend().wait_writable(obj) + + +# +# Private API +# + + +def convert_ipv6_sockaddr( + sockaddr: tuple[str, int, int, int] | tuple[str, int], +) -> tuple[str, int]: + """ + Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format. + + If the scope ID is nonzero, it is added to the address, separated with ``%``. + Otherwise the flow id and scope id are simply cut off from the tuple. + Any other kinds of socket addresses are returned as-is. 
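The conversion rules are easiest to see with concrete values. This helper lives in the private `anyio._core._sockets` module, so the direct import below is purely illustrative:

```python
from anyio._core._sockets import convert_ipv6_sockaddr  # private helper

# Nonzero scope ID: folded into the address with '%'
assert convert_ipv6_sockaddr(("fe80::1", 8080, 0, 3)) == ("fe80::1%3", 8080)
# Zero scope ID: flow info and scope ID are simply dropped
assert convert_ipv6_sockaddr(("::1", 8080, 0, 0)) == ("::1", 8080)
# Anything that is not a 4-tuple passes through unchanged
assert convert_ipv6_sockaddr(("127.0.0.1", 80)) == ("127.0.0.1", 80)
```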
+ + :param sockaddr: the result of :meth:`~socket.socket.getsockname` + :return: the converted socket address + + """ + # This is more complicated than it should be because of MyPy + if isinstance(sockaddr, tuple) and len(sockaddr) == 4: + host, port, flowinfo, scope_id = sockaddr + if scope_id: + # PyPy (as of v7.3.11) leaves the interface name in the result, so + # we discard it and only get the scope ID from the end + # (https://foss.heptapod.net/pypy/pypy/-/issues/3938) + host = host.split("%")[0] + + # Add scope_id to the address + return f"{host}%{scope_id}", port + else: + return host, port + else: + return sockaddr + + +async def setup_unix_local_socket( + path: None | str | bytes | PathLike[Any], + mode: int | None, + socktype: int, +) -> socket.socket: + """ + Create a UNIX local socket object, deleting the socket at the given path if it + exists. + + Not available on Windows. + + :param path: path of the socket + :param mode: permissions to set on the socket + :param socktype: socket.SOCK_STREAM or socket.SOCK_DGRAM + + """ + path_str: str | None + if path is not None: + path_str = os.fsdecode(path) + + # Linux abstract namespace sockets aren't backed by a concrete file so skip stat call + if not path_str.startswith("\0"): + # Copied from pathlib... + try: + stat_result = os.stat(path) + except OSError as e: + if e.errno not in ( + errno.ENOENT, + errno.ENOTDIR, + errno.EBADF, + errno.ELOOP, + ): + raise + else: + if stat.S_ISSOCK(stat_result.st_mode): + os.unlink(path) + else: + path_str = None + + raw_socket = socket.socket(socket.AF_UNIX, socktype) + raw_socket.setblocking(False) + + if path_str is not None: + try: + await to_thread.run_sync(raw_socket.bind, path_str, abandon_on_cancel=True) + if mode is not None: + await to_thread.run_sync(chmod, path_str, mode, abandon_on_cancel=True) + except BaseException: + raw_socket.close() + raise + + return raw_socket diff --git a/venv/Lib/site-packages/anyio/_core/_streams.py b/venv/Lib/site-packages/anyio/_core/_streams.py new file mode 100644 index 00000000..6a9814e5 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_streams.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +import math +from typing import TypeVar +from warnings import warn + +from ..streams.memory import ( + MemoryObjectReceiveStream, + MemoryObjectSendStream, + MemoryObjectStreamState, +) + +T_Item = TypeVar("T_Item") + + +class create_memory_object_stream( + tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]], +): + """ + Create a memory object stream. + + The stream's item type can be annotated like + :func:`create_memory_object_stream[T_Item]`. + + :param max_buffer_size: number of items held in the buffer until ``send()`` starts + blocking + :param item_type: old way of marking the streams with the right generic type for + static typing (does nothing on AnyIO 4) + + .. deprecated:: 4.0 + Use ``create_memory_object_stream[YourItemType](...)`` instead. + :return: a tuple of (send stream, receive stream) + + """ + + def __new__( # type: ignore[misc] + cls, max_buffer_size: float = 0, item_type: object = None + ) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]: + if max_buffer_size != math.inf and not isinstance(max_buffer_size, int): + raise ValueError("max_buffer_size must be either an integer or math.inf") + if max_buffer_size < 0: + raise ValueError("max_buffer_size cannot be negative") + if item_type is not None: + warn( + "The item_type argument has been deprecated in AnyIO 4.0. 
" + "Use create_memory_object_stream[YourItemType](...) instead.", + DeprecationWarning, + stacklevel=2, + ) + + state = MemoryObjectStreamState[T_Item](max_buffer_size) + return (MemoryObjectSendStream(state), MemoryObjectReceiveStream(state)) diff --git a/venv/Lib/site-packages/anyio/_core/_subprocesses.py b/venv/Lib/site-packages/anyio/_core/_subprocesses.py new file mode 100644 index 00000000..36d9b306 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_subprocesses.py @@ -0,0 +1,202 @@ +from __future__ import annotations + +import sys +from collections.abc import AsyncIterable, Iterable, Mapping, Sequence +from io import BytesIO +from os import PathLike +from subprocess import PIPE, CalledProcessError, CompletedProcess +from typing import IO, Any, Union, cast + +from ..abc import Process +from ._eventloop import get_async_backend +from ._tasks import create_task_group + +if sys.version_info >= (3, 10): + from typing import TypeAlias +else: + from typing_extensions import TypeAlias + +StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"] + + +async def run_process( + command: StrOrBytesPath | Sequence[StrOrBytesPath], + *, + input: bytes | None = None, + stdin: int | IO[Any] | None = None, + stdout: int | IO[Any] | None = PIPE, + stderr: int | IO[Any] | None = PIPE, + check: bool = True, + cwd: StrOrBytesPath | None = None, + env: Mapping[str, str] | None = None, + startupinfo: Any = None, + creationflags: int = 0, + start_new_session: bool = False, + pass_fds: Sequence[int] = (), + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, +) -> CompletedProcess[bytes]: + """ + Run an external command in a subprocess and wait until it completes. + + .. seealso:: :func:`subprocess.run` + + :param command: either a string to pass to the shell, or an iterable of strings + containing the executable name or path and its arguments + :param input: bytes passed to the standard input of the subprocess + :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, + a file-like object, or `None`; ``input`` overrides this + :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, + a file-like object, or `None` + :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, + :data:`subprocess.STDOUT`, a file-like object, or `None` + :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the + process terminates with a return code other than 0 + :param cwd: If not ``None``, change the working directory to this before running the + command + :param env: if not ``None``, this mapping replaces the inherited environment + variables from the parent process + :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used + to specify process startup parameters (Windows only) + :param creationflags: flags that can be used to control the creation of the + subprocess (see :class:`subprocess.Popen` for the specifics) + :param start_new_session: if ``true`` the setsid() system call will be made in the + child process prior to the execution of the subprocess. (POSIX only) + :param pass_fds: sequence of file descriptors to keep open between the parent and + child processes. 
(POSIX only) + :param user: effective user to run the process as (Python >= 3.9, POSIX only) + :param group: effective group to run the process as (Python >= 3.9, POSIX only) + :param extra_groups: supplementary groups to set in the subprocess (Python >= 3.9, + POSIX only) + :param umask: if not negative, this umask is applied in the child process before + running the given command (Python >= 3.9, POSIX only) + :return: an object representing the completed process + :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process + exits with a nonzero return code + + """ + + async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None: + buffer = BytesIO() + async for chunk in stream: + buffer.write(chunk) + + stream_contents[index] = buffer.getvalue() + + if stdin is not None and input is not None: + raise ValueError("only one of stdin and input is allowed") + + async with await open_process( + command, + stdin=PIPE if input else stdin, + stdout=stdout, + stderr=stderr, + cwd=cwd, + env=env, + startupinfo=startupinfo, + creationflags=creationflags, + start_new_session=start_new_session, + pass_fds=pass_fds, + user=user, + group=group, + extra_groups=extra_groups, + umask=umask, + ) as process: + stream_contents: list[bytes | None] = [None, None] + async with create_task_group() as tg: + if process.stdout: + tg.start_soon(drain_stream, process.stdout, 0) + + if process.stderr: + tg.start_soon(drain_stream, process.stderr, 1) + + if process.stdin and input: + await process.stdin.send(input) + await process.stdin.aclose() + + await process.wait() + + output, errors = stream_contents + if check and process.returncode != 0: + raise CalledProcessError(cast(int, process.returncode), command, output, errors) + + return CompletedProcess(command, cast(int, process.returncode), output, errors) + + +async def open_process( + command: StrOrBytesPath | Sequence[StrOrBytesPath], + *, + stdin: int | IO[Any] | None = PIPE, + stdout: int | IO[Any] | None = PIPE, + stderr: int | IO[Any] | None = PIPE, + cwd: StrOrBytesPath | None = None, + env: Mapping[str, str] | None = None, + startupinfo: Any = None, + creationflags: int = 0, + start_new_session: bool = False, + pass_fds: Sequence[int] = (), + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, +) -> Process: + """ + Start an external command in a subprocess. + + .. 
seealso:: :class:`subprocess.Popen`
+
+    :param command: either a string to pass to the shell, or an iterable of strings
+        containing the executable name or path and its arguments
+    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
+        file-like object, or ``None``
+    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+        a file-like object, or ``None``
+    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+        :data:`subprocess.STDOUT`, a file-like object, or ``None``
+    :param cwd: If not ``None``, the working directory is changed before executing
+    :param env: If env is not ``None``, it must be a mapping that defines the
+        environment variables for the new process
+    :param creationflags: flags that can be used to control the creation of the
+        subprocess (see :class:`subprocess.Popen` for the specifics)
+    :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
+        to specify process startup parameters (Windows only)
+    :param start_new_session: if ``true`` the setsid() system call will be made in the
+        child process prior to the execution of the subprocess. (POSIX only)
+    :param pass_fds: sequence of file descriptors to keep open between the parent and
+        child processes. (POSIX only)
+    :param user: effective user to run the process as (POSIX only)
+    :param group: effective group to run the process as (POSIX only)
+    :param extra_groups: supplementary groups to set in the subprocess (POSIX only)
+    :param umask: if not negative, this umask is applied in the child process before
+        running the given command (POSIX only)
+    :return: an asynchronous process object
+
+    """
+    kwargs: dict[str, Any] = {}
+    if user is not None:
+        kwargs["user"] = user
+
+    if group is not None:
+        kwargs["group"] = group
+
+    if extra_groups is not None:
+        kwargs["extra_groups"] = extra_groups
+
+    if umask >= 0:
+        kwargs["umask"] = umask
+
+    return await get_async_backend().open_process(
+        command,
+        stdin=stdin,
+        stdout=stdout,
+        stderr=stderr,
+        cwd=cwd,
+        env=env,
+        startupinfo=startupinfo,
+        creationflags=creationflags,
+        start_new_session=start_new_session,
+        pass_fds=pass_fds,
+        **kwargs,
+    )
diff --git a/venv/Lib/site-packages/anyio/_core/_synchronization.py b/venv/Lib/site-packages/anyio/_core/_synchronization.py
new file mode 100644
index 00000000..a6331328
--- /dev/null
+++ b/venv/Lib/site-packages/anyio/_core/_synchronization.py
@@ -0,0 +1,732 @@
+from __future__ import annotations
+
+import math
+from collections import deque
+from dataclasses import dataclass
+from types import TracebackType
+
+from sniffio import AsyncLibraryNotFoundError
+
+from ..lowlevel import checkpoint
+from ._eventloop import get_async_backend
+from ._exceptions import BusyResourceError
+from ._tasks import CancelScope
+from ._testing import TaskInfo, get_current_task
+
+
+@dataclass(frozen=True)
+class EventStatistics:
+    """
+    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
+    """
+
+    tasks_waiting: int
+
+
+@dataclass(frozen=True)
+class CapacityLimiterStatistics:
+    """
+    :ivar int borrowed_tokens: number of tokens currently borrowed by tasks
+    :ivar float total_tokens: total number of available tokens
+    :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from
+        this limiter
+    :ivar int tasks_waiting: number of tasks waiting on
+        :meth:`~.CapacityLimiter.acquire` or
+        :meth:`~.CapacityLimiter.acquire_on_behalf_of`
+    """
+
+    borrowed_tokens: int
+    total_tokens: float
+    borrowers: tuple[object, ...]
+ tasks_waiting: int + + +@dataclass(frozen=True) +class LockStatistics: + """ + :ivar bool locked: flag indicating if this lock is locked or not + :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the + lock is not held by any task) + :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire` + """ + + locked: bool + owner: TaskInfo | None + tasks_waiting: int + + +@dataclass(frozen=True) +class ConditionStatistics: + """ + :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait` + :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying + :class:`~.Lock` + """ + + tasks_waiting: int + lock_statistics: LockStatistics + + +@dataclass(frozen=True) +class SemaphoreStatistics: + """ + :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire` + + """ + + tasks_waiting: int + + +class Event: + def __new__(cls) -> Event: + try: + return get_async_backend().create_event() + except AsyncLibraryNotFoundError: + return EventAdapter() + + def set(self) -> None: + """Set the flag, notifying all listeners.""" + raise NotImplementedError + + def is_set(self) -> bool: + """Return ``True`` if the flag is set, ``False`` if not.""" + raise NotImplementedError + + async def wait(self) -> None: + """ + Wait until the flag has been set. + + If the flag has already been set when this method is called, it returns + immediately. + + """ + raise NotImplementedError + + def statistics(self) -> EventStatistics: + """Return statistics about the current state of this event.""" + raise NotImplementedError + + +class EventAdapter(Event): + _internal_event: Event | None = None + _is_set: bool = False + + def __new__(cls) -> EventAdapter: + return object.__new__(cls) + + @property + def _event(self) -> Event: + if self._internal_event is None: + self._internal_event = get_async_backend().create_event() + if self._is_set: + self._internal_event.set() + + return self._internal_event + + def set(self) -> None: + if self._internal_event is None: + self._is_set = True + else: + self._event.set() + + def is_set(self) -> bool: + if self._internal_event is None: + return self._is_set + + return self._internal_event.is_set() + + async def wait(self) -> None: + await self._event.wait() + + def statistics(self) -> EventStatistics: + if self._internal_event is None: + return EventStatistics(tasks_waiting=0) + + return self._internal_event.statistics() + + +class Lock: + def __new__(cls, *, fast_acquire: bool = False) -> Lock: + try: + return get_async_backend().create_lock(fast_acquire=fast_acquire) + except AsyncLibraryNotFoundError: + return LockAdapter(fast_acquire=fast_acquire) + + async def __aenter__(self) -> None: + await self.acquire() + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.release() + + async def acquire(self) -> None: + """Acquire the lock.""" + raise NotImplementedError + + def acquire_nowait(self) -> None: + """ + Acquire the lock, without blocking. + + :raises ~anyio.WouldBlock: if the operation would block + + """ + raise NotImplementedError + + def release(self) -> None: + """Release the lock.""" + raise NotImplementedError + + def locked(self) -> bool: + """Return True if the lock is currently held.""" + raise NotImplementedError + + def statistics(self) -> LockStatistics: + """ + Return statistics about the current state of this lock. + + .. 
versionadded:: 3.0 + """ + raise NotImplementedError + + +class LockAdapter(Lock): + _internal_lock: Lock | None = None + + def __new__(cls, *, fast_acquire: bool = False) -> LockAdapter: + return object.__new__(cls) + + def __init__(self, *, fast_acquire: bool = False): + self._fast_acquire = fast_acquire + + @property + def _lock(self) -> Lock: + if self._internal_lock is None: + self._internal_lock = get_async_backend().create_lock( + fast_acquire=self._fast_acquire + ) + + return self._internal_lock + + async def __aenter__(self) -> None: + await self._lock.acquire() + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + if self._internal_lock is not None: + self._internal_lock.release() + + async def acquire(self) -> None: + """Acquire the lock.""" + await self._lock.acquire() + + def acquire_nowait(self) -> None: + """ + Acquire the lock, without blocking. + + :raises ~anyio.WouldBlock: if the operation would block + + """ + self._lock.acquire_nowait() + + def release(self) -> None: + """Release the lock.""" + self._lock.release() + + def locked(self) -> bool: + """Return True if the lock is currently held.""" + return self._lock.locked() + + def statistics(self) -> LockStatistics: + """ + Return statistics about the current state of this lock. + + .. versionadded:: 3.0 + + """ + if self._internal_lock is None: + return LockStatistics(False, None, 0) + + return self._internal_lock.statistics() + + +class Condition: + _owner_task: TaskInfo | None = None + + def __init__(self, lock: Lock | None = None): + self._lock = lock or Lock() + self._waiters: deque[Event] = deque() + + async def __aenter__(self) -> None: + await self.acquire() + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.release() + + def _check_acquired(self) -> None: + if self._owner_task != get_current_task(): + raise RuntimeError("The current task is not holding the underlying lock") + + async def acquire(self) -> None: + """Acquire the underlying lock.""" + await self._lock.acquire() + self._owner_task = get_current_task() + + def acquire_nowait(self) -> None: + """ + Acquire the underlying lock, without blocking. + + :raises ~anyio.WouldBlock: if the operation would block + + """ + self._lock.acquire_nowait() + self._owner_task = get_current_task() + + def release(self) -> None: + """Release the underlying lock.""" + self._lock.release() + + def locked(self) -> bool: + """Return True if the lock is set.""" + return self._lock.locked() + + def notify(self, n: int = 1) -> None: + """Notify exactly n listeners.""" + self._check_acquired() + for _ in range(n): + try: + event = self._waiters.popleft() + except IndexError: + break + + event.set() + + def notify_all(self) -> None: + """Notify all the listeners.""" + self._check_acquired() + for event in self._waiters: + event.set() + + self._waiters.clear() + + async def wait(self) -> None: + """Wait for a notification.""" + await checkpoint() + event = Event() + self._waiters.append(event) + self.release() + try: + await event.wait() + except BaseException: + if not event.is_set(): + self._waiters.remove(event) + + raise + finally: + with CancelScope(shield=True): + await self.acquire() + + def statistics(self) -> ConditionStatistics: + """ + Return statistics about the current state of this condition. + + .. 
versionadded:: 3.0 + """ + return ConditionStatistics(len(self._waiters), self._lock.statistics()) + + +class Semaphore: + def __new__( + cls, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ) -> Semaphore: + try: + return get_async_backend().create_semaphore( + initial_value, max_value=max_value, fast_acquire=fast_acquire + ) + except AsyncLibraryNotFoundError: + return SemaphoreAdapter(initial_value, max_value=max_value) + + def __init__( + self, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ): + if not isinstance(initial_value, int): + raise TypeError("initial_value must be an integer") + if initial_value < 0: + raise ValueError("initial_value must be >= 0") + if max_value is not None: + if not isinstance(max_value, int): + raise TypeError("max_value must be an integer or None") + if max_value < initial_value: + raise ValueError( + "max_value must be equal to or higher than initial_value" + ) + + self._fast_acquire = fast_acquire + + async def __aenter__(self) -> Semaphore: + await self.acquire() + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.release() + + async def acquire(self) -> None: + """Decrement the semaphore value, blocking if necessary.""" + raise NotImplementedError + + def acquire_nowait(self) -> None: + """ + Acquire the underlying lock, without blocking. + + :raises ~anyio.WouldBlock: if the operation would block + + """ + raise NotImplementedError + + def release(self) -> None: + """Increment the semaphore value.""" + raise NotImplementedError + + @property + def value(self) -> int: + """The current value of the semaphore.""" + raise NotImplementedError + + @property + def max_value(self) -> int | None: + """The maximum value of the semaphore.""" + raise NotImplementedError + + def statistics(self) -> SemaphoreStatistics: + """ + Return statistics about the current state of this semaphore. + + .. 
versionadded:: 3.0 + """ + raise NotImplementedError + + +class SemaphoreAdapter(Semaphore): + _internal_semaphore: Semaphore | None = None + + def __new__( + cls, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ) -> SemaphoreAdapter: + return object.__new__(cls) + + def __init__( + self, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ) -> None: + super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire) + self._initial_value = initial_value + self._max_value = max_value + + @property + def _semaphore(self) -> Semaphore: + if self._internal_semaphore is None: + self._internal_semaphore = get_async_backend().create_semaphore( + self._initial_value, max_value=self._max_value + ) + + return self._internal_semaphore + + async def acquire(self) -> None: + await self._semaphore.acquire() + + def acquire_nowait(self) -> None: + self._semaphore.acquire_nowait() + + def release(self) -> None: + self._semaphore.release() + + @property + def value(self) -> int: + if self._internal_semaphore is None: + return self._initial_value + + return self._semaphore.value + + @property + def max_value(self) -> int | None: + return self._max_value + + def statistics(self) -> SemaphoreStatistics: + if self._internal_semaphore is None: + return SemaphoreStatistics(tasks_waiting=0) + + return self._semaphore.statistics() + + +class CapacityLimiter: + def __new__(cls, total_tokens: float) -> CapacityLimiter: + try: + return get_async_backend().create_capacity_limiter(total_tokens) + except AsyncLibraryNotFoundError: + return CapacityLimiterAdapter(total_tokens) + + async def __aenter__(self) -> None: + raise NotImplementedError + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: + raise NotImplementedError + + @property + def total_tokens(self) -> float: + """ + The total number of tokens available for borrowing. + + This is a read-write property. If the total number of tokens is increased, the + proportionate number of tasks waiting on this limiter will be granted their + tokens. + + .. versionchanged:: 3.0 + The property is now writable. + + """ + raise NotImplementedError + + @total_tokens.setter + def total_tokens(self, value: float) -> None: + raise NotImplementedError + + @property + def borrowed_tokens(self) -> int: + """The number of tokens that have currently been borrowed.""" + raise NotImplementedError + + @property + def available_tokens(self) -> float: + """The number of tokens currently available to be borrowed""" + raise NotImplementedError + + def acquire_nowait(self) -> None: + """ + Acquire a token for the current task without waiting for one to become + available. + + :raises ~anyio.WouldBlock: if there are no tokens available for borrowing + + """ + raise NotImplementedError + + def acquire_on_behalf_of_nowait(self, borrower: object) -> None: + """ + Acquire a token without waiting for one to become available. + + :param borrower: the entity borrowing a token + :raises ~anyio.WouldBlock: if there are no tokens available for borrowing + + """ + raise NotImplementedError + + async def acquire(self) -> None: + """ + Acquire a token for the current task, waiting if necessary for one to become + available. + + """ + raise NotImplementedError + + async def acquire_on_behalf_of(self, borrower: object) -> None: + """ + Acquire a token, waiting if necessary for one to become available. 
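To make the token-borrowing behaviour described above concrete, here is a minimal usage sketch (illustrative only, not part of the vendored source; it assumes the public `anyio` API that these classes back):

```python
import anyio

async def worker(limiter: anyio.CapacityLimiter, n: int) -> None:
    # __aenter__ borrows a token (waiting if none are free) and
    # __aexit__ releases it, so at most total_tokens workers run at once
    async with limiter:
        await anyio.sleep(0.1)

async def main() -> None:
    limiter = anyio.CapacityLimiter(2)  # two tokens available for borrowing
    async with anyio.create_task_group() as tg:
        for n in range(5):
            tg.start_soon(worker, limiter, n)

anyio.run(main)
```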
+ + :param borrower: the entity borrowing a token + + """ + raise NotImplementedError + + def release(self) -> None: + """ + Release the token held by the current task. + + :raises RuntimeError: if the current task has not borrowed a token from this + limiter. + + """ + raise NotImplementedError + + def release_on_behalf_of(self, borrower: object) -> None: + """ + Release the token held by the given borrower. + + :raises RuntimeError: if the borrower has not borrowed a token from this + limiter. + + """ + raise NotImplementedError + + def statistics(self) -> CapacityLimiterStatistics: + """ + Return statistics about the current state of this limiter. + + .. versionadded:: 3.0 + + """ + raise NotImplementedError + + +class CapacityLimiterAdapter(CapacityLimiter): + _internal_limiter: CapacityLimiter | None = None + + def __new__(cls, total_tokens: float) -> CapacityLimiterAdapter: + return object.__new__(cls) + + def __init__(self, total_tokens: float) -> None: + self.total_tokens = total_tokens + + @property + def _limiter(self) -> CapacityLimiter: + if self._internal_limiter is None: + self._internal_limiter = get_async_backend().create_capacity_limiter( + self._total_tokens + ) + + return self._internal_limiter + + async def __aenter__(self) -> None: + await self._limiter.__aenter__() + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: + return await self._limiter.__aexit__(exc_type, exc_val, exc_tb) + + @property + def total_tokens(self) -> float: + if self._internal_limiter is None: + return self._total_tokens + + return self._internal_limiter.total_tokens + + @total_tokens.setter + def total_tokens(self, value: float) -> None: + if not isinstance(value, int) and value is not math.inf: + raise TypeError("total_tokens must be an int or math.inf") + elif value < 1: + raise ValueError("total_tokens must be >= 1") + + if self._internal_limiter is None: + self._total_tokens = value + return + + self._limiter.total_tokens = value + + @property + def borrowed_tokens(self) -> int: + if self._internal_limiter is None: + return 0 + + return self._internal_limiter.borrowed_tokens + + @property + def available_tokens(self) -> float: + if self._internal_limiter is None: + return self._total_tokens + + return self._internal_limiter.available_tokens + + def acquire_nowait(self) -> None: + self._limiter.acquire_nowait() + + def acquire_on_behalf_of_nowait(self, borrower: object) -> None: + self._limiter.acquire_on_behalf_of_nowait(borrower) + + async def acquire(self) -> None: + await self._limiter.acquire() + + async def acquire_on_behalf_of(self, borrower: object) -> None: + await self._limiter.acquire_on_behalf_of(borrower) + + def release(self) -> None: + self._limiter.release() + + def release_on_behalf_of(self, borrower: object) -> None: + self._limiter.release_on_behalf_of(borrower) + + def statistics(self) -> CapacityLimiterStatistics: + if self._internal_limiter is None: + return CapacityLimiterStatistics( + borrowed_tokens=0, + total_tokens=self.total_tokens, + borrowers=(), + tasks_waiting=0, + ) + + return self._internal_limiter.statistics() + + +class ResourceGuard: + """ + A context manager for ensuring that a resource is only used by a single task at a + time. + + Entering this context manager while the previous has not exited it yet will trigger + :exc:`BusyResourceError`. + + :param action: the action to guard against (visible in the :exc:`BusyResourceError` + when triggered, e.g. 
"Another task is already {action} this resource") + + .. versionadded:: 4.1 + """ + + __slots__ = "action", "_guarded" + + def __init__(self, action: str = "using"): + self.action: str = action + self._guarded = False + + def __enter__(self) -> None: + if self._guarded: + raise BusyResourceError(self.action) + + self._guarded = True + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self._guarded = False diff --git a/venv/Lib/site-packages/anyio/_core/_tasks.py b/venv/Lib/site-packages/anyio/_core/_tasks.py new file mode 100644 index 00000000..fe490151 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_tasks.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +import math +from collections.abc import Generator +from contextlib import contextmanager +from types import TracebackType + +from ..abc._tasks import TaskGroup, TaskStatus +from ._eventloop import get_async_backend + + +class _IgnoredTaskStatus(TaskStatus[object]): + def started(self, value: object = None) -> None: + pass + + +TASK_STATUS_IGNORED = _IgnoredTaskStatus() + + +class CancelScope: + """ + Wraps a unit of work that can be made separately cancellable. + + :param deadline: The time (clock value) when this scope is cancelled automatically + :param shield: ``True`` to shield the cancel scope from external cancellation + """ + + def __new__( + cls, *, deadline: float = math.inf, shield: bool = False + ) -> CancelScope: + return get_async_backend().create_cancel_scope(shield=shield, deadline=deadline) + + def cancel(self) -> None: + """Cancel this scope immediately.""" + raise NotImplementedError + + @property + def deadline(self) -> float: + """ + The time (clock value) when this scope is cancelled automatically. + + Will be ``float('inf')`` if no timeout has been set. + + """ + raise NotImplementedError + + @deadline.setter + def deadline(self, value: float) -> None: + raise NotImplementedError + + @property + def cancel_called(self) -> bool: + """``True`` if :meth:`cancel` has been called.""" + raise NotImplementedError + + @property + def cancelled_caught(self) -> bool: + """ + ``True`` if this scope suppressed a cancellation exception it itself raised. + + This is typically used to check if any work was interrupted, or to see if the + scope was cancelled due to its deadline being reached. The value will, however, + only be ``True`` if the cancellation was triggered by the scope itself (and not + an outer scope). + + """ + raise NotImplementedError + + @property + def shield(self) -> bool: + """ + ``True`` if this scope is shielded from external cancellation. + + While a scope is shielded, it will not receive cancellations from outside. + + """ + raise NotImplementedError + + @shield.setter + def shield(self, value: bool) -> None: + raise NotImplementedError + + def __enter__(self) -> CancelScope: + raise NotImplementedError + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool: + raise NotImplementedError + + +@contextmanager +def fail_after( + delay: float | None, shield: bool = False +) -> Generator[CancelScope, None, None]: + """ + Create a context manager which raises a :class:`TimeoutError` if does not finish in + time. 
+ + :param delay: maximum allowed time (in seconds) before raising the exception, or + ``None`` to disable the timeout + :param shield: ``True`` to shield the cancel scope from external cancellation + :return: a context manager that yields a cancel scope + :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\] + + """ + current_time = get_async_backend().current_time + deadline = (current_time() + delay) if delay is not None else math.inf + with get_async_backend().create_cancel_scope( + deadline=deadline, shield=shield + ) as cancel_scope: + yield cancel_scope + + if cancel_scope.cancelled_caught and current_time() >= cancel_scope.deadline: + raise TimeoutError + + +def move_on_after(delay: float | None, shield: bool = False) -> CancelScope: + """ + Create a cancel scope with a deadline that expires after the given delay. + + :param delay: maximum allowed time (in seconds) before exiting the context block, or + ``None`` to disable the timeout + :param shield: ``True`` to shield the cancel scope from external cancellation + :return: a cancel scope + + """ + deadline = ( + (get_async_backend().current_time() + delay) if delay is not None else math.inf + ) + return get_async_backend().create_cancel_scope(deadline=deadline, shield=shield) + + +def current_effective_deadline() -> float: + """ + Return the nearest deadline among all the cancel scopes effective for the current + task. + + :return: a clock value from the event loop's internal clock (or ``float('inf')`` if + there is no deadline in effect, or ``float('-inf')`` if the current scope has + been cancelled) + :rtype: float + + """ + return get_async_backend().current_effective_deadline() + + +def create_task_group() -> TaskGroup: + """ + Create a task group. + + :return: a task group + + """ + return get_async_backend().create_task_group() diff --git a/venv/Lib/site-packages/anyio/_core/_tempfile.py b/venv/Lib/site-packages/anyio/_core/_tempfile.py new file mode 100644 index 00000000..26d70eca --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_tempfile.py @@ -0,0 +1,616 @@ +from __future__ import annotations + +import os +import sys +import tempfile +from collections.abc import Iterable +from io import BytesIO, TextIOWrapper +from types import TracebackType +from typing import ( + TYPE_CHECKING, + Any, + AnyStr, + Generic, + overload, +) + +from .. import to_thread +from .._core._fileio import AsyncFile +from ..lowlevel import checkpoint_if_cancelled + +if TYPE_CHECKING: + from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer + + +class TemporaryFile(Generic[AnyStr]): + """ + An asynchronous temporary file that is automatically created and cleaned up. + + This class provides an asynchronous context manager interface to a temporary file. + The file is created using Python's standard `tempfile.TemporaryFile` function in a + background thread, and is wrapped as an asynchronous file using `AsyncFile`. + + :param mode: The mode in which the file is opened. Defaults to "w+b". + :param buffering: The buffering policy (-1 means the default buffering). + :param encoding: The encoding used to decode or encode the file. Only applicable in + text mode. + :param newline: Controls how universal newlines mode works (only applicable in text + mode). + :param suffix: The suffix for the temporary file name. + :param prefix: The prefix for the temporary file name. + :param dir: The directory in which the temporary file is created. 
+ :param errors: The error handling scheme used for encoding/decoding errors. + """ + + _async_file: AsyncFile[AnyStr] + + @overload + def __init__( + self: TemporaryFile[bytes], + mode: OpenBinaryMode = ..., + buffering: int = ..., + encoding: str | None = ..., + newline: str | None = ..., + suffix: str | None = ..., + prefix: str | None = ..., + dir: str | None = ..., + *, + errors: str | None = ..., + ): ... + @overload + def __init__( + self: TemporaryFile[str], + mode: OpenTextMode, + buffering: int = ..., + encoding: str | None = ..., + newline: str | None = ..., + suffix: str | None = ..., + prefix: str | None = ..., + dir: str | None = ..., + *, + errors: str | None = ..., + ): ... + + def __init__( + self, + mode: OpenTextMode | OpenBinaryMode = "w+b", + buffering: int = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: str | None = None, + prefix: str | None = None, + dir: str | None = None, + *, + errors: str | None = None, + ) -> None: + self.mode = mode + self.buffering = buffering + self.encoding = encoding + self.newline = newline + self.suffix: str | None = suffix + self.prefix: str | None = prefix + self.dir: str | None = dir + self.errors = errors + + async def __aenter__(self) -> AsyncFile[AnyStr]: + fp = await to_thread.run_sync( + lambda: tempfile.TemporaryFile( + self.mode, + self.buffering, + self.encoding, + self.newline, + self.suffix, + self.prefix, + self.dir, + errors=self.errors, + ) + ) + self._async_file = AsyncFile(fp) + return self._async_file + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + await self._async_file.aclose() + + +class NamedTemporaryFile(Generic[AnyStr]): + """ + An asynchronous named temporary file that is automatically created and cleaned up. + + This class provides an asynchronous context manager for a temporary file with a + visible name in the file system. It uses Python's standard + :func:`~tempfile.NamedTemporaryFile` function and wraps the file object with + :class:`AsyncFile` for asynchronous operations. + + :param mode: The mode in which the file is opened. Defaults to "w+b". + :param buffering: The buffering policy (-1 means the default buffering). + :param encoding: The encoding used to decode or encode the file. Only applicable in + text mode. + :param newline: Controls how universal newlines mode works (only applicable in text + mode). + :param suffix: The suffix for the temporary file name. + :param prefix: The prefix for the temporary file name. + :param dir: The directory in which the temporary file is created. + :param delete: Whether to delete the file when it is closed. + :param errors: The error handling scheme used for encoding/decoding errors. + :param delete_on_close: (Python 3.12+) Whether to delete the file on close. + """ + + _async_file: AsyncFile[AnyStr] + + @overload + def __init__( + self: NamedTemporaryFile[bytes], + mode: OpenBinaryMode = ..., + buffering: int = ..., + encoding: str | None = ..., + newline: str | None = ..., + suffix: str | None = ..., + prefix: str | None = ..., + dir: str | None = ..., + delete: bool = ..., + *, + errors: str | None = ..., + delete_on_close: bool = ..., + ): ... 
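A usage sketch for the two wrappers (illustrative; it assumes these classes are re-exported at the `anyio` package root, as in the release this diff vendors):

```python
import anyio
from anyio import NamedTemporaryFile, TemporaryFile

async def main() -> None:
    # Anonymous temporary file; creation happens in a worker thread
    async with TemporaryFile() as f:
        await f.write(b"scratch data")
        await f.seek(0)
        assert await f.read() == b"scratch data"

    # Named variant: the file exists on disk while the context is open
    async with NamedTemporaryFile("w+", suffix=".txt") as f:
        await f.write("hello")

anyio.run(main)
```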
+ @overload + def __init__( + self: NamedTemporaryFile[str], + mode: OpenTextMode, + buffering: int = ..., + encoding: str | None = ..., + newline: str | None = ..., + suffix: str | None = ..., + prefix: str | None = ..., + dir: str | None = ..., + delete: bool = ..., + *, + errors: str | None = ..., + delete_on_close: bool = ..., + ): ... + + def __init__( + self, + mode: OpenBinaryMode | OpenTextMode = "w+b", + buffering: int = -1, + encoding: str | None = None, + newline: str | None = None, + suffix: str | None = None, + prefix: str | None = None, + dir: str | None = None, + delete: bool = True, + *, + errors: str | None = None, + delete_on_close: bool = True, + ) -> None: + self._params: dict[str, Any] = { + "mode": mode, + "buffering": buffering, + "encoding": encoding, + "newline": newline, + "suffix": suffix, + "prefix": prefix, + "dir": dir, + "delete": delete, + "errors": errors, + } + if sys.version_info >= (3, 12): + self._params["delete_on_close"] = delete_on_close + + async def __aenter__(self) -> AsyncFile[AnyStr]: + fp = await to_thread.run_sync( + lambda: tempfile.NamedTemporaryFile(**self._params) + ) + self._async_file = AsyncFile(fp) + return self._async_file + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + await self._async_file.aclose() + + +class SpooledTemporaryFile(AsyncFile[AnyStr]): + """ + An asynchronous spooled temporary file that starts in memory and is spooled to disk. + + This class provides an asynchronous interface to a spooled temporary file, much like + Python's standard :class:`~tempfile.SpooledTemporaryFile`. It supports asynchronous + write operations and provides a method to force a rollover to disk. + + :param max_size: Maximum size in bytes before the file is rolled over to disk. + :param mode: The mode in which the file is opened. Defaults to "w+b". + :param buffering: The buffering policy (-1 means the default buffering). + :param encoding: The encoding used to decode or encode the file (text mode only). + :param newline: Controls how universal newlines mode works (text mode only). + :param suffix: The suffix for the temporary file name. + :param prefix: The prefix for the temporary file name. + :param dir: The directory in which the temporary file is created. + :param errors: The error handling scheme used for encoding/decoding errors. + """ + + _rolled: bool = False + + @overload + def __init__( + self: SpooledTemporaryFile[bytes], + max_size: int = ..., + mode: OpenBinaryMode = ..., + buffering: int = ..., + encoding: str | None = ..., + newline: str | None = ..., + suffix: str | None = ..., + prefix: str | None = ..., + dir: str | None = ..., + *, + errors: str | None = ..., + ): ... + @overload + def __init__( + self: SpooledTemporaryFile[str], + max_size: int = ..., + mode: OpenTextMode = ..., + buffering: int = ..., + encoding: str | None = ..., + newline: str | None = ..., + suffix: str | None = ..., + prefix: str | None = ..., + dir: str | None = ..., + *, + errors: str | None = ..., + ): ... 
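A sketch of the spooling behaviour (illustrative; the rollover threshold semantics are those implemented below):

```python
import anyio
from anyio import SpooledTemporaryFile

async def main() -> None:
    # Writes stay in an in-memory buffer until max_size is exceeded,
    # at which point the data rolls over to a real file on disk.
    async with SpooledTemporaryFile(max_size=1024) as f:
        await f.write(b"x" * 2048)  # exceeds max_size, triggering rollover

anyio.run(main)
```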
+
+    def __init__(
+        self,
+        max_size: int = 0,
+        mode: OpenBinaryMode | OpenTextMode = "w+b",
+        buffering: int = -1,
+        encoding: str | None = None,
+        newline: str | None = None,
+        suffix: str | None = None,
+        prefix: str | None = None,
+        dir: str | None = None,
+        *,
+        errors: str | None = None,
+    ) -> None:
+        self._tempfile_params: dict[str, Any] = {
+            "mode": mode,
+            "buffering": buffering,
+            "encoding": encoding,
+            "newline": newline,
+            "suffix": suffix,
+            "prefix": prefix,
+            "dir": dir,
+            "errors": errors,
+        }
+        self._max_size = max_size
+        if "b" in mode:
+            super().__init__(BytesIO())  # type: ignore[arg-type]
+        else:
+            super().__init__(
+                TextIOWrapper(  # type: ignore[arg-type]
+                    BytesIO(),
+                    encoding=encoding,
+                    errors=errors,
+                    newline=newline,
+                    write_through=True,
+                )
+            )
+
+    async def aclose(self) -> None:
+        if not self._rolled:
+            self._fp.close()
+            return
+
+        await super().aclose()
+
+    async def _check(self) -> None:
+        if self._rolled or self._fp.tell() < self._max_size:
+            return
+
+        await self.rollover()
+
+    async def rollover(self) -> None:
+        if self._rolled:
+            return
+
+        self._rolled = True
+        buffer = self._fp
+        buffer.seek(0)
+        self._fp = await to_thread.run_sync(
+            lambda: tempfile.TemporaryFile(**self._tempfile_params)
+        )
+        await self.write(buffer.read())
+        buffer.close()
+
+    @property
+    def closed(self) -> bool:
+        return self._fp.closed
+
+    async def read(self, size: int = -1) -> AnyStr:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.read(size)
+
+        return await super().read(size)  # type: ignore[return-value]
+
+    async def read1(self: SpooledTemporaryFile[bytes], size: int = -1) -> bytes:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.read1(size)
+
+        return await super().read1(size)
+
+    async def readline(self) -> AnyStr:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.readline()
+
+        return await super().readline()  # type: ignore[return-value]
+
+    async def readlines(self) -> list[AnyStr]:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.readlines()
+
+        return await super().readlines()  # type: ignore[return-value]
+
+    async def readinto(self: SpooledTemporaryFile[bytes], b: WriteableBuffer) -> int:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            # Read into the caller's buffer and return the byte count directly;
+            # falling through here would read from the buffer a second time.
+            return self._fp.readinto(b)
+
+        return await super().readinto(b)
+
+    async def readinto1(self: SpooledTemporaryFile[bytes], b: WriteableBuffer) -> int:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            # Use read1 semantics on the in-memory buffer and return the count
+            return self._fp.readinto1(b)
+
+        return await super().readinto1(b)
+
+    async def seek(self, offset: int, whence: int = os.SEEK_SET) -> int:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.seek(offset, whence)
+
+        return await super().seek(offset, whence)
+
+    async def tell(self) -> int:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.tell()
+
+        return await super().tell()
+
+    async def truncate(self, size: int | None = None) -> int:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.truncate(size)
+
+        return await super().truncate(size)
+
+    @overload
+    async def write(self: SpooledTemporaryFile[bytes], b: ReadableBuffer) -> int: ...
+    @overload
+    async def write(self: SpooledTemporaryFile[str], b: str) -> int: ...
+
+    async def write(self, b: ReadableBuffer | str) -> int:
+        """
+        Asynchronously write data to the spooled temporary file.
+
+        If the file has not yet been rolled over, the data is written synchronously,
+        and a rollover is triggered if the size exceeds the maximum size.
+
+        :param b: The data to write.
+        :return: The number of bytes written.
+        :raises RuntimeError: If the underlying file is not initialized.
+
+        """
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            result = self._fp.write(b)
+            await self._check()
+            return result
+
+        return await super().write(b)  # type: ignore[misc]
+
+    @overload
+    async def writelines(
+        self: SpooledTemporaryFile[bytes], lines: Iterable[ReadableBuffer]
+    ) -> None: ...
+    @overload
+    async def writelines(
+        self: SpooledTemporaryFile[str], lines: Iterable[str]
+    ) -> None: ...
+
+    async def writelines(self, lines: Iterable[str] | Iterable[ReadableBuffer]) -> None:
+        """
+        Asynchronously write a list of lines to the spooled temporary file.
+
+        If the file has not yet been rolled over, the lines are written synchronously,
+        and a rollover is triggered if the size exceeds the maximum size.
+
+        :param lines: An iterable of lines to write.
+        :raises RuntimeError: If the underlying file is not initialized.
+
+        """
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            # file.writelines() returns None; just run the rollover check
+            self._fp.writelines(lines)
+            await self._check()
+            return
+
+        return await super().writelines(lines)  # type: ignore[misc]
+
+
+class TemporaryDirectory(Generic[AnyStr]):
+    """
+    An asynchronous temporary directory that is created and cleaned up automatically.
+
+    This class provides an asynchronous context manager for creating a temporary
+    directory. It wraps Python's standard :class:`~tempfile.TemporaryDirectory` to
+    perform directory creation and cleanup operations in a background thread.
+
+    :param suffix: Suffix to be added to the temporary directory name.
+    :param prefix: Prefix to be added to the temporary directory name.
+    :param dir: The parent directory where the temporary directory is created.
+    :param ignore_cleanup_errors: Whether to ignore errors during cleanup
+        (Python 3.10+).
+    :param delete: Whether to delete the directory upon closing (Python 3.12+).
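A minimal sketch of the directory wrapper (illustrative only; assumes the root-level re-exports of this release):

```python
import anyio
from anyio import Path, TemporaryDirectory

async def main() -> None:
    async with TemporaryDirectory() as tmpdir:
        # tmpdir is the directory path; creation and cleanup both run
        # in worker threads via to_thread.run_sync()
        await Path(tmpdir, "notes.txt").write_text("hello")

anyio.run(main)
```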
+ """ + + def __init__( + self, + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: AnyStr | None = None, + *, + ignore_cleanup_errors: bool = False, + delete: bool = True, + ) -> None: + self.suffix: AnyStr | None = suffix + self.prefix: AnyStr | None = prefix + self.dir: AnyStr | None = dir + self.ignore_cleanup_errors = ignore_cleanup_errors + self.delete = delete + + self._tempdir: tempfile.TemporaryDirectory | None = None + + async def __aenter__(self) -> str: + params: dict[str, Any] = { + "suffix": self.suffix, + "prefix": self.prefix, + "dir": self.dir, + } + if sys.version_info >= (3, 10): + params["ignore_cleanup_errors"] = self.ignore_cleanup_errors + + if sys.version_info >= (3, 12): + params["delete"] = self.delete + + self._tempdir = await to_thread.run_sync( + lambda: tempfile.TemporaryDirectory(**params) + ) + return await to_thread.run_sync(self._tempdir.__enter__) + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + if self._tempdir is not None: + await to_thread.run_sync( + self._tempdir.__exit__, exc_type, exc_value, traceback + ) + + async def cleanup(self) -> None: + if self._tempdir is not None: + await to_thread.run_sync(self._tempdir.cleanup) + + +@overload +async def mkstemp( + suffix: str | None = None, + prefix: str | None = None, + dir: str | None = None, + text: bool = False, +) -> tuple[int, str]: ... + + +@overload +async def mkstemp( + suffix: bytes | None = None, + prefix: bytes | None = None, + dir: bytes | None = None, + text: bool = False, +) -> tuple[int, bytes]: ... + + +async def mkstemp( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: AnyStr | None = None, + text: bool = False, +) -> tuple[int, str | bytes]: + """ + Asynchronously create a temporary file and return an OS-level handle and the file + name. + + This function wraps `tempfile.mkstemp` and executes it in a background thread. + + :param suffix: Suffix to be added to the file name. + :param prefix: Prefix to be added to the file name. + :param dir: Directory in which the temporary file is created. + :param text: Whether the file is opened in text mode. + :return: A tuple containing the file descriptor and the file name. + + """ + return await to_thread.run_sync(tempfile.mkstemp, suffix, prefix, dir, text) + + +@overload +async def mkdtemp( + suffix: str | None = None, + prefix: str | None = None, + dir: str | None = None, +) -> str: ... + + +@overload +async def mkdtemp( + suffix: bytes | None = None, + prefix: bytes | None = None, + dir: bytes | None = None, +) -> bytes: ... + + +async def mkdtemp( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + dir: AnyStr | None = None, +) -> str | bytes: + """ + Asynchronously create a temporary directory and return its path. + + This function wraps `tempfile.mkdtemp` and executes it in a background thread. + + :param suffix: Suffix to be added to the directory name. + :param prefix: Prefix to be added to the directory name. + :param dir: Parent directory where the temporary directory is created. + :return: The path of the created temporary directory. + + """ + return await to_thread.run_sync(tempfile.mkdtemp, suffix, prefix, dir) + + +async def gettempdir() -> str: + """ + Asynchronously return the name of the directory used for temporary files. + + This function wraps `tempfile.gettempdir` and executes it in a background thread. + + :return: The path of the temporary directory as a string. 
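The module-level helpers wrap their `tempfile` counterparts in worker threads; a brief sketch (illustrative; assumes they are importable from the package root):

```python
import os
import anyio
from anyio import mkdtemp, mkstemp

async def main() -> None:
    fd, path = await mkstemp(suffix=".log")  # OS-level handle plus file name
    try:
        os.write(fd, b"event")
    finally:
        os.close(fd)
        os.remove(path)

    workdir = await mkdtemp(prefix="job-")  # caller is responsible for cleanup
    print(workdir)

anyio.run(main)
```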
+ + """ + return await to_thread.run_sync(tempfile.gettempdir) + + +async def gettempdirb() -> bytes: + """ + Asynchronously return the name of the directory used for temporary files in bytes. + + This function wraps `tempfile.gettempdirb` and executes it in a background thread. + + :return: The path of the temporary directory as bytes. + + """ + return await to_thread.run_sync(tempfile.gettempdirb) diff --git a/venv/Lib/site-packages/anyio/_core/_testing.py b/venv/Lib/site-packages/anyio/_core/_testing.py new file mode 100644 index 00000000..9e28b227 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_testing.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +from collections.abc import Awaitable, Generator +from typing import Any, cast + +from ._eventloop import get_async_backend + + +class TaskInfo: + """ + Represents an asynchronous task. + + :ivar int id: the unique identifier of the task + :ivar parent_id: the identifier of the parent task, if any + :vartype parent_id: Optional[int] + :ivar str name: the description of the task (if any) + :ivar ~collections.abc.Coroutine coro: the coroutine object of the task + """ + + __slots__ = "_name", "id", "parent_id", "name", "coro" + + def __init__( + self, + id: int, + parent_id: int | None, + name: str | None, + coro: Generator[Any, Any, Any] | Awaitable[Any], + ): + func = get_current_task + self._name = f"{func.__module__}.{func.__qualname__}" + self.id: int = id + self.parent_id: int | None = parent_id + self.name: str | None = name + self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro + + def __eq__(self, other: object) -> bool: + if isinstance(other, TaskInfo): + return self.id == other.id + + return NotImplemented + + def __hash__(self) -> int: + return hash(self.id) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})" + + def has_pending_cancellation(self) -> bool: + """ + Return ``True`` if the task has a cancellation pending, ``False`` otherwise. + + """ + return False + + +def get_current_task() -> TaskInfo: + """ + Return the current task. + + :return: a representation of the current task + + """ + return get_async_backend().get_current_task() + + +def get_running_tasks() -> list[TaskInfo]: + """ + Return a list of running tasks in the current event loop. + + :return: a list of task info objects + + """ + return cast("list[TaskInfo]", get_async_backend().get_running_tasks()) + + +async def wait_all_tasks_blocked() -> None: + """Wait until all other tasks are waiting for something.""" + await get_async_backend().wait_all_tasks_blocked() diff --git a/venv/Lib/site-packages/anyio/_core/_typedattr.py b/venv/Lib/site-packages/anyio/_core/_typedattr.py new file mode 100644 index 00000000..f358a448 --- /dev/null +++ b/venv/Lib/site-packages/anyio/_core/_typedattr.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +from collections.abc import Callable, Mapping +from typing import Any, TypeVar, final, overload + +from ._exceptions import TypedAttributeLookupError + +T_Attr = TypeVar("T_Attr") +T_Default = TypeVar("T_Default") +undefined = object() + + +def typed_attribute() -> Any: + """Return a unique object, used to mark typed attributes.""" + return object() + + +class TypedAttributeSet: + """ + Superclass for typed attribute collections. + + Checks that every public attribute of every subclass has a type annotation. 
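A small sketch of how the typed-attribute machinery is meant to be used (illustrative; the attribute set and provider here are hypothetical, not from the diff):

```python
from anyio import TypedAttributeProvider, TypedAttributeSet, typed_attribute

class HttpAttribute(TypedAttributeSet):
    # every public attribute must carry a type annotation; the
    # __init_subclass__ hook below enforces this
    http_version: str = typed_attribute()

class FakeHttpStream(TypedAttributeProvider):
    @property
    def extra_attributes(self):
        return {HttpAttribute.http_version: lambda: "1.1"}

stream = FakeHttpStream()
print(stream.extra(HttpAttribute.http_version))       # "1.1"
print(stream.extra(HttpAttribute.http_version, "2"))  # found, so default unused
```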
+ """ + + def __init_subclass__(cls) -> None: + annotations: dict[str, Any] = getattr(cls, "__annotations__", {}) + for attrname in dir(cls): + if not attrname.startswith("_") and attrname not in annotations: + raise TypeError( + f"Attribute {attrname!r} is missing its type annotation" + ) + + super().__init_subclass__() + + +class TypedAttributeProvider: + """Base class for classes that wish to provide typed extra attributes.""" + + @property + def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]: + """ + A mapping of the extra attributes to callables that return the corresponding + values. + + If the provider wraps another provider, the attributes from that wrapper should + also be included in the returned mapping (but the wrapper may override the + callables from the wrapped instance). + + """ + return {} + + @overload + def extra(self, attribute: T_Attr) -> T_Attr: ... + + @overload + def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default: ... + + @final + def extra(self, attribute: Any, default: object = undefined) -> object: + """ + extra(attribute, default=undefined) + + Return the value of the given typed extra attribute. + + :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to + look for + :param default: the value that should be returned if no value is found for the + attribute + :raises ~anyio.TypedAttributeLookupError: if the search failed and no default + value was given + + """ + try: + getter = self.extra_attributes[attribute] + except KeyError: + if default is undefined: + raise TypedAttributeLookupError("Attribute not found") from None + else: + return default + + return getter() diff --git a/venv/Lib/site-packages/anyio/abc/__init__.py b/venv/Lib/site-packages/anyio/abc/__init__.py new file mode 100644 index 00000000..3d3b61cc --- /dev/null +++ b/venv/Lib/site-packages/anyio/abc/__init__.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from ._eventloop import AsyncBackend as AsyncBackend +from ._resources import AsyncResource as AsyncResource +from ._sockets import ConnectedUDPSocket as ConnectedUDPSocket +from ._sockets import ConnectedUNIXDatagramSocket as ConnectedUNIXDatagramSocket +from ._sockets import IPAddressType as IPAddressType +from ._sockets import IPSockAddrType as IPSockAddrType +from ._sockets import SocketAttribute as SocketAttribute +from ._sockets import SocketListener as SocketListener +from ._sockets import SocketStream as SocketStream +from ._sockets import UDPPacketType as UDPPacketType +from ._sockets import UDPSocket as UDPSocket +from ._sockets import UNIXDatagramPacketType as UNIXDatagramPacketType +from ._sockets import UNIXDatagramSocket as UNIXDatagramSocket +from ._sockets import UNIXSocketStream as UNIXSocketStream +from ._streams import AnyByteReceiveStream as AnyByteReceiveStream +from ._streams import AnyByteSendStream as AnyByteSendStream +from ._streams import AnyByteStream as AnyByteStream +from ._streams import AnyUnreliableByteReceiveStream as AnyUnreliableByteReceiveStream +from ._streams import AnyUnreliableByteSendStream as AnyUnreliableByteSendStream +from ._streams import AnyUnreliableByteStream as AnyUnreliableByteStream +from ._streams import ByteReceiveStream as ByteReceiveStream +from ._streams import ByteSendStream as ByteSendStream +from ._streams import ByteStream as ByteStream +from ._streams import Listener as Listener +from ._streams import ObjectReceiveStream as ObjectReceiveStream +from ._streams import ObjectSendStream as ObjectSendStream +from 
._streams import ObjectStream as ObjectStream +from ._streams import UnreliableObjectReceiveStream as UnreliableObjectReceiveStream +from ._streams import UnreliableObjectSendStream as UnreliableObjectSendStream +from ._streams import UnreliableObjectStream as UnreliableObjectStream +from ._subprocesses import Process as Process +from ._tasks import TaskGroup as TaskGroup +from ._tasks import TaskStatus as TaskStatus +from ._testing import TestRunner as TestRunner + +# Re-exported here, for backwards compatibility +# isort: off +from .._core._synchronization import ( + CapacityLimiter as CapacityLimiter, + Condition as Condition, + Event as Event, + Lock as Lock, + Semaphore as Semaphore, +) +from .._core._tasks import CancelScope as CancelScope +from ..from_thread import BlockingPortal as BlockingPortal + +# Re-export imports so they look like they live directly in this package +for __value in list(locals().values()): + if getattr(__value, "__module__", "").startswith("anyio.abc."): + __value.__module__ = __name__ + +del __value diff --git a/venv/Lib/site-packages/anyio/abc/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/anyio/abc/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..f7f55a50 Binary files /dev/null and b/venv/Lib/site-packages/anyio/abc/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/abc/__pycache__/_eventloop.cpython-312.pyc b/venv/Lib/site-packages/anyio/abc/__pycache__/_eventloop.cpython-312.pyc new file mode 100644 index 00000000..aeb29f43 Binary files /dev/null and b/venv/Lib/site-packages/anyio/abc/__pycache__/_eventloop.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/abc/__pycache__/_resources.cpython-312.pyc b/venv/Lib/site-packages/anyio/abc/__pycache__/_resources.cpython-312.pyc new file mode 100644 index 00000000..98912368 Binary files /dev/null and b/venv/Lib/site-packages/anyio/abc/__pycache__/_resources.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/abc/__pycache__/_sockets.cpython-312.pyc b/venv/Lib/site-packages/anyio/abc/__pycache__/_sockets.cpython-312.pyc new file mode 100644 index 00000000..f176f9cc Binary files /dev/null and b/venv/Lib/site-packages/anyio/abc/__pycache__/_sockets.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/abc/__pycache__/_streams.cpython-312.pyc b/venv/Lib/site-packages/anyio/abc/__pycache__/_streams.cpython-312.pyc new file mode 100644 index 00000000..52b43ce9 Binary files /dev/null and b/venv/Lib/site-packages/anyio/abc/__pycache__/_streams.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-312.pyc b/venv/Lib/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-312.pyc new file mode 100644 index 00000000..102ec215 Binary files /dev/null and b/venv/Lib/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/abc/__pycache__/_tasks.cpython-312.pyc b/venv/Lib/site-packages/anyio/abc/__pycache__/_tasks.cpython-312.pyc new file mode 100644 index 00000000..161561e3 Binary files /dev/null and b/venv/Lib/site-packages/anyio/abc/__pycache__/_tasks.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/abc/__pycache__/_testing.cpython-312.pyc b/venv/Lib/site-packages/anyio/abc/__pycache__/_testing.cpython-312.pyc new file mode 100644 index 00000000..53fa0dde Binary files /dev/null and b/venv/Lib/site-packages/anyio/abc/__pycache__/_testing.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/anyio/abc/_eventloop.py b/venv/Lib/site-packages/anyio/abc/_eventloop.py new file mode 100644 index 00000000..4cfce836 --- /dev/null +++ b/venv/Lib/site-packages/anyio/abc/_eventloop.py @@ -0,0 +1,376 @@ +from __future__ import annotations + +import math +import sys +from abc import ABCMeta, abstractmethod +from collections.abc import AsyncIterator, Awaitable, Callable, Sequence +from contextlib import AbstractContextManager +from os import PathLike +from signal import Signals +from socket import AddressFamily, SocketKind, socket +from typing import ( + IO, + TYPE_CHECKING, + Any, + TypeVar, + Union, + overload, +) + +if sys.version_info >= (3, 11): + from typing import TypeVarTuple, Unpack +else: + from typing_extensions import TypeVarTuple, Unpack + +if sys.version_info >= (3, 10): + from typing import TypeAlias +else: + from typing_extensions import TypeAlias + +if TYPE_CHECKING: + from _typeshed import HasFileno + + from .._core._synchronization import CapacityLimiter, Event, Lock, Semaphore + from .._core._tasks import CancelScope + from .._core._testing import TaskInfo + from ..from_thread import BlockingPortal + from ._sockets import ( + ConnectedUDPSocket, + ConnectedUNIXDatagramSocket, + IPSockAddrType, + SocketListener, + SocketStream, + UDPSocket, + UNIXDatagramSocket, + UNIXSocketStream, + ) + from ._subprocesses import Process + from ._tasks import TaskGroup + from ._testing import TestRunner + +T_Retval = TypeVar("T_Retval") +PosArgsT = TypeVarTuple("PosArgsT") +StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"] + + +class AsyncBackend(metaclass=ABCMeta): + @classmethod + @abstractmethod + def run( + cls, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], + args: tuple[Unpack[PosArgsT]], + kwargs: dict[str, Any], + options: dict[str, Any], + ) -> T_Retval: + """ + Run the given coroutine function in an asynchronous event loop. + + The current thread must not be already running an event loop. + + :param func: a coroutine function + :param args: positional arguments to ``func`` + :param kwargs: positional arguments to ``func`` + :param options: keyword arguments to call the backend ``run()`` implementation + with + :return: the return value of the coroutine function + """ + + @classmethod + @abstractmethod + def current_token(cls) -> object: + """ + + :return: + """ + + @classmethod + @abstractmethod + def current_time(cls) -> float: + """ + Return the current value of the event loop's internal clock. + + :return: the clock value (seconds) + """ + + @classmethod + @abstractmethod + def cancelled_exception_class(cls) -> type[BaseException]: + """Return the exception class that is raised in a task if it's cancelled.""" + + @classmethod + @abstractmethod + async def checkpoint(cls) -> None: + """ + Check if the task has been cancelled, and allow rescheduling of other tasks. + + This is effectively the same as running :meth:`checkpoint_if_cancelled` and then + :meth:`cancel_shielded_checkpoint`. + """ + + @classmethod + async def checkpoint_if_cancelled(cls) -> None: + """ + Check if the current task group has been cancelled. + + This will check if the task has been cancelled, but will not allow other tasks + to be scheduled if not. + + """ + if cls.current_effective_deadline() == -math.inf: + await cls.checkpoint() + + @classmethod + async def cancel_shielded_checkpoint(cls) -> None: + """ + Allow the rescheduling of other tasks. 
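The checkpoint methods are the backend hooks behind `anyio.lowlevel`; a sketch of the caller-facing idiom (illustrative, not from the diff):

```python
import anyio
from anyio.lowlevel import checkpoint

async def crunch(items: list[int]) -> int:
    total = 0
    for item in items:
        total += item
        # yield to the scheduler and honour any pending cancellation;
        # backends implement this via the methods declared here
        await checkpoint()
    return total

print(anyio.run(crunch, list(range(10))))
```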
+ + This will give other tasks the opportunity to run, but without checking if the + current task group has been cancelled, unlike with :meth:`checkpoint`. + + """ + with cls.create_cancel_scope(shield=True): + await cls.sleep(0) + + @classmethod + @abstractmethod + async def sleep(cls, delay: float) -> None: + """ + Pause the current task for the specified duration. + + :param delay: the duration, in seconds + """ + + @classmethod + @abstractmethod + def create_cancel_scope( + cls, *, deadline: float = math.inf, shield: bool = False + ) -> CancelScope: + pass + + @classmethod + @abstractmethod + def current_effective_deadline(cls) -> float: + """ + Return the nearest deadline among all the cancel scopes effective for the + current task. + + :return: + - a clock value from the event loop's internal clock + - ``inf`` if there is no deadline in effect + - ``-inf`` if the current scope has been cancelled + :rtype: float + """ + + @classmethod + @abstractmethod + def create_task_group(cls) -> TaskGroup: + pass + + @classmethod + @abstractmethod + def create_event(cls) -> Event: + pass + + @classmethod + @abstractmethod + def create_lock(cls, *, fast_acquire: bool) -> Lock: + pass + + @classmethod + @abstractmethod + def create_semaphore( + cls, + initial_value: int, + *, + max_value: int | None = None, + fast_acquire: bool = False, + ) -> Semaphore: + pass + + @classmethod + @abstractmethod + def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter: + pass + + @classmethod + @abstractmethod + async def run_sync_in_worker_thread( + cls, + func: Callable[[Unpack[PosArgsT]], T_Retval], + args: tuple[Unpack[PosArgsT]], + abandon_on_cancel: bool = False, + limiter: CapacityLimiter | None = None, + ) -> T_Retval: + pass + + @classmethod + @abstractmethod + def check_cancelled(cls) -> None: + pass + + @classmethod + @abstractmethod + def run_async_from_thread( + cls, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], + args: tuple[Unpack[PosArgsT]], + token: object, + ) -> T_Retval: + pass + + @classmethod + @abstractmethod + def run_sync_from_thread( + cls, + func: Callable[[Unpack[PosArgsT]], T_Retval], + args: tuple[Unpack[PosArgsT]], + token: object, + ) -> T_Retval: + pass + + @classmethod + @abstractmethod + def create_blocking_portal(cls) -> BlockingPortal: + pass + + @classmethod + @abstractmethod + async def open_process( + cls, + command: StrOrBytesPath | Sequence[StrOrBytesPath], + *, + stdin: int | IO[Any] | None, + stdout: int | IO[Any] | None, + stderr: int | IO[Any] | None, + **kwargs: Any, + ) -> Process: + pass + + @classmethod + @abstractmethod + def setup_process_pool_exit_at_shutdown(cls, workers: set[Process]) -> None: + pass + + @classmethod + @abstractmethod + async def connect_tcp( + cls, host: str, port: int, local_address: IPSockAddrType | None = None + ) -> SocketStream: + pass + + @classmethod + @abstractmethod + async def connect_unix(cls, path: str | bytes) -> UNIXSocketStream: + pass + + @classmethod + @abstractmethod + def create_tcp_listener(cls, sock: socket) -> SocketListener: + pass + + @classmethod + @abstractmethod + def create_unix_listener(cls, sock: socket) -> SocketListener: + pass + + @classmethod + @abstractmethod + async def create_udp_socket( + cls, + family: AddressFamily, + local_address: IPSockAddrType | None, + remote_address: IPSockAddrType | None, + reuse_port: bool, + ) -> UDPSocket | ConnectedUDPSocket: + pass + + @classmethod + @overload + async def create_unix_datagram_socket( + cls, raw_socket: socket, remote_path: None 
+ ) -> UNIXDatagramSocket: ... + + @classmethod + @overload + async def create_unix_datagram_socket( + cls, raw_socket: socket, remote_path: str | bytes + ) -> ConnectedUNIXDatagramSocket: ... + + @classmethod + @abstractmethod + async def create_unix_datagram_socket( + cls, raw_socket: socket, remote_path: str | bytes | None + ) -> UNIXDatagramSocket | ConnectedUNIXDatagramSocket: + pass + + @classmethod + @abstractmethod + async def getaddrinfo( + cls, + host: bytes | str | None, + port: str | int | None, + *, + family: int | AddressFamily = 0, + type: int | SocketKind = 0, + proto: int = 0, + flags: int = 0, + ) -> Sequence[ + tuple[ + AddressFamily, + SocketKind, + int, + str, + tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], + ] + ]: + pass + + @classmethod + @abstractmethod + async def getnameinfo( + cls, sockaddr: IPSockAddrType, flags: int = 0 + ) -> tuple[str, str]: + pass + + @classmethod + @abstractmethod + async def wait_readable(cls, obj: HasFileno | int) -> None: + pass + + @classmethod + @abstractmethod + async def wait_writable(cls, obj: HasFileno | int) -> None: + pass + + @classmethod + @abstractmethod + def current_default_thread_limiter(cls) -> CapacityLimiter: + pass + + @classmethod + @abstractmethod + def open_signal_receiver( + cls, *signals: Signals + ) -> AbstractContextManager[AsyncIterator[Signals]]: + pass + + @classmethod + @abstractmethod + def get_current_task(cls) -> TaskInfo: + pass + + @classmethod + @abstractmethod + def get_running_tasks(cls) -> Sequence[TaskInfo]: + pass + + @classmethod + @abstractmethod + async def wait_all_tasks_blocked(cls) -> None: + pass + + @classmethod + @abstractmethod + def create_test_runner(cls, options: dict[str, Any]) -> TestRunner: + pass diff --git a/venv/Lib/site-packages/anyio/abc/_resources.py b/venv/Lib/site-packages/anyio/abc/_resources.py new file mode 100644 index 00000000..10df115a --- /dev/null +++ b/venv/Lib/site-packages/anyio/abc/_resources.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from abc import ABCMeta, abstractmethod +from types import TracebackType +from typing import TypeVar + +T = TypeVar("T") + + +class AsyncResource(metaclass=ABCMeta): + """ + Abstract base class for all closeable asynchronous resources. + + Works as an asynchronous context manager which returns the instance itself on enter, + and calls :meth:`aclose` on exit. 
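A sketch of a concrete resource built on this base class (illustrative; `Connection` is a made-up example):

```python
import anyio
from anyio.abc import AsyncResource

class Connection(AsyncResource):
    async def aclose(self) -> None:
        # release whatever the resource holds; __aexit__ calls this
        print("closed")

async def main() -> None:
    async with Connection() as conn:  # __aenter__ returns the instance
        ...

anyio.run(main)
```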
+ """ + + __slots__ = () + + async def __aenter__(self: T) -> T: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + await self.aclose() + + @abstractmethod + async def aclose(self) -> None: + """Close the resource.""" diff --git a/venv/Lib/site-packages/anyio/abc/_sockets.py b/venv/Lib/site-packages/anyio/abc/_sockets.py new file mode 100644 index 00000000..1c6a450c --- /dev/null +++ b/venv/Lib/site-packages/anyio/abc/_sockets.py @@ -0,0 +1,194 @@ +from __future__ import annotations + +import socket +from abc import abstractmethod +from collections.abc import Callable, Collection, Mapping +from contextlib import AsyncExitStack +from io import IOBase +from ipaddress import IPv4Address, IPv6Address +from socket import AddressFamily +from types import TracebackType +from typing import Any, TypeVar, Union + +from .._core._typedattr import ( + TypedAttributeProvider, + TypedAttributeSet, + typed_attribute, +) +from ._streams import ByteStream, Listener, UnreliableObjectStream +from ._tasks import TaskGroup + +IPAddressType = Union[str, IPv4Address, IPv6Address] +IPSockAddrType = tuple[str, int] +SockAddrType = Union[IPSockAddrType, str] +UDPPacketType = tuple[bytes, IPSockAddrType] +UNIXDatagramPacketType = tuple[bytes, str] +T_Retval = TypeVar("T_Retval") + + +class _NullAsyncContextManager: + async def __aenter__(self) -> None: + pass + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: + return None + + +class SocketAttribute(TypedAttributeSet): + #: the address family of the underlying socket + family: AddressFamily = typed_attribute() + #: the local socket address of the underlying socket + local_address: SockAddrType = typed_attribute() + #: for IP addresses, the local port the underlying socket is bound to + local_port: int = typed_attribute() + #: the underlying stdlib socket object + raw_socket: socket.socket = typed_attribute() + #: the remote address the underlying socket is connected to + remote_address: SockAddrType = typed_attribute() + #: for IP addresses, the remote port the underlying socket is connected to + remote_port: int = typed_attribute() + + +class _SocketProvider(TypedAttributeProvider): + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + from .._core._sockets import convert_ipv6_sockaddr as convert + + attributes: dict[Any, Callable[[], Any]] = { + SocketAttribute.family: lambda: self._raw_socket.family, + SocketAttribute.local_address: lambda: convert( + self._raw_socket.getsockname() + ), + SocketAttribute.raw_socket: lambda: self._raw_socket, + } + try: + peername: tuple[str, int] | None = convert(self._raw_socket.getpeername()) + except OSError: + peername = None + + # Provide the remote address for connected sockets + if peername is not None: + attributes[SocketAttribute.remote_address] = lambda: peername + + # Provide local and remote ports for IP based sockets + if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6): + attributes[SocketAttribute.local_port] = ( + lambda: self._raw_socket.getsockname()[1] + ) + if peername is not None: + remote_port = peername[1] + attributes[SocketAttribute.remote_port] = lambda: remote_port + + return attributes + + @property + @abstractmethod + def _raw_socket(self) -> socket.socket: + pass + + +class SocketStream(ByteStream, _SocketProvider): + """ + 
Transports bytes over a socket.
+
+    Supports all relevant extra attributes from :class:`~SocketAttribute`.
+    """
+
+
+class UNIXSocketStream(SocketStream):
+    @abstractmethod
+    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
+        """
+        Send file descriptors along with a message to the peer.
+
+        :param message: a non-empty bytestring
+        :param fds: a collection of files (either numeric file descriptors or open file
+            or socket objects)
+        """
+
+    @abstractmethod
+    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
+        """
+        Receive file descriptors along with a message from the peer.
+
+        :param msglen: length of the message to expect from the peer
+        :param maxfds: maximum number of file descriptors to expect from the peer
+        :return: a tuple of (message, file descriptors)
+        """
+
+
+class SocketListener(Listener[SocketStream], _SocketProvider):
+    """
+    Listens to incoming socket connections.
+
+    Supports all relevant extra attributes from :class:`~SocketAttribute`.
+    """
+
+    @abstractmethod
+    async def accept(self) -> SocketStream:
+        """Accept an incoming connection."""
+
+    async def serve(
+        self,
+        handler: Callable[[SocketStream], Any],
+        task_group: TaskGroup | None = None,
+    ) -> None:
+        """
+        Accept incoming connections as they come in and start tasks to handle them.
+
+        :param handler: a callable that will be used to handle each accepted connection
+        :param task_group: the task group that will be used to start tasks for handling
+            each accepted connection (if omitted, an ad-hoc task group will be created)
+        """
+        from .. import create_task_group
+
+        async with AsyncExitStack() as stack:
+            if task_group is None:
+                task_group = await stack.enter_async_context(create_task_group())
+
+            while True:
+                stream = await self.accept()
+                task_group.start_soon(handler, stream)
+
+
+class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
+    """
+    Represents an unconnected UDP socket.
+
+    Supports all relevant extra attributes from :class:`~SocketAttribute`.
+    """
+
+    async def sendto(self, data: bytes, host: str, port: int) -> None:
+        """
+        Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port))).
+
+        """
+        return await self.send((data, (host, port)))
+
+
+class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
+    """
+    Represents a connected UDP socket.
+
+    Supports all relevant extra attributes from :class:`~SocketAttribute`.
+    """
+
+
+class UNIXDatagramSocket(
+    UnreliableObjectStream[UNIXDatagramPacketType], _SocketProvider
+):
+    """
+    Represents an unconnected Unix datagram socket.
+
+    Supports all relevant extra attributes from :class:`~SocketAttribute`.
+    """
+
+    async def sendto(self, data: bytes, path: str) -> None:
+        """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, path))."""
+        return await self.send((data, path))
+
+
+class ConnectedUNIXDatagramSocket(UnreliableObjectStream[bytes], _SocketProvider):
+    """
+    Represents a connected Unix datagram socket.
+
+    Supports all relevant extra attributes from :class:`~SocketAttribute`.
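A sketch of the datagram API in use (illustrative; the address and port are placeholders):

```python
import anyio

async def main() -> None:
    # create_udp_socket() returns the unconnected UDPSocket described above
    async with await anyio.create_udp_socket(local_host="127.0.0.1") as udp:
        await udp.sendto(b"ping", "127.0.0.1", 9999)

anyio.run(main)
```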
+ """ diff --git a/venv/Lib/site-packages/anyio/abc/_streams.py b/venv/Lib/site-packages/anyio/abc/_streams.py new file mode 100644 index 00000000..f11d97b5 --- /dev/null +++ b/venv/Lib/site-packages/anyio/abc/_streams.py @@ -0,0 +1,203 @@ +from __future__ import annotations + +from abc import abstractmethod +from collections.abc import Callable +from typing import Any, Generic, TypeVar, Union + +from .._core._exceptions import EndOfStream +from .._core._typedattr import TypedAttributeProvider +from ._resources import AsyncResource +from ._tasks import TaskGroup + +T_Item = TypeVar("T_Item") +T_co = TypeVar("T_co", covariant=True) +T_contra = TypeVar("T_contra", contravariant=True) + + +class UnreliableObjectReceiveStream( + Generic[T_co], AsyncResource, TypedAttributeProvider +): + """ + An interface for receiving objects. + + This interface makes no guarantees that the received messages arrive in the order in + which they were sent, or that no messages are missed. + + Asynchronously iterating over objects of this type will yield objects matching the + given type parameter. + """ + + def __aiter__(self) -> UnreliableObjectReceiveStream[T_co]: + return self + + async def __anext__(self) -> T_co: + try: + return await self.receive() + except EndOfStream: + raise StopAsyncIteration + + @abstractmethod + async def receive(self) -> T_co: + """ + Receive the next item. + + :raises ~anyio.ClosedResourceError: if the receive stream has been explicitly + closed + :raises ~anyio.EndOfStream: if this stream has been closed from the other end + :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable + due to external causes + """ + + +class UnreliableObjectSendStream( + Generic[T_contra], AsyncResource, TypedAttributeProvider +): + """ + An interface for sending objects. + + This interface makes no guarantees that the messages sent will reach the + recipient(s) in the same order in which they were sent, or at all. + """ + + @abstractmethod + async def send(self, item: T_contra) -> None: + """ + Send an item to the peer(s). + + :param item: the item to send + :raises ~anyio.ClosedResourceError: if the send stream has been explicitly + closed + :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable + due to external causes + """ + + +class UnreliableObjectStream( + UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item] +): + """ + A bidirectional message stream which does not guarantee the order or reliability of + message delivery. + """ + + +class ObjectReceiveStream(UnreliableObjectReceiveStream[T_co]): + """ + A receive message stream which guarantees that messages are received in the same + order in which they were sent, and that no messages are missed. + """ + + +class ObjectSendStream(UnreliableObjectSendStream[T_contra]): + """ + A send message stream which guarantees that messages are delivered in the same order + in which they were sent, without missing any messages in the middle. + """ + + +class ObjectStream( + ObjectReceiveStream[T_Item], + ObjectSendStream[T_Item], + UnreliableObjectStream[T_Item], +): + """ + A bidirectional message stream which guarantees the order and reliability of message + delivery. + """ + + @abstractmethod + async def send_eof(self) -> None: + """ + Send an end-of-file indication to the peer. + + You should not try to send any further data to this stream after calling this + method. This method is idempotent (does nothing on successive calls). 
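Memory object streams are the in-process implementation of these object-stream interfaces; a sketch (illustrative, not from the diff):

```python
import anyio

async def main() -> None:
    send, receive = anyio.create_memory_object_stream[int](max_buffer_size=3)
    async with send, receive:
        await send.send(42)             # ObjectSendStream.send()
        print(await receive.receive())  # ObjectReceiveStream.receive() -> 42

anyio.run(main)
```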
+ """ + + +class ByteReceiveStream(AsyncResource, TypedAttributeProvider): + """ + An interface for receiving bytes from a single peer. + + Iterating this byte stream will yield a byte string of arbitrary length, but no more + than 65536 bytes. + """ + + def __aiter__(self) -> ByteReceiveStream: + return self + + async def __anext__(self) -> bytes: + try: + return await self.receive() + except EndOfStream: + raise StopAsyncIteration + + @abstractmethod + async def receive(self, max_bytes: int = 65536) -> bytes: + """ + Receive at most ``max_bytes`` bytes from the peer. + + .. note:: Implementers of this interface should not return an empty + :class:`bytes` object, and users should ignore them. + + :param max_bytes: maximum number of bytes to receive + :return: the received bytes + :raises ~anyio.EndOfStream: if this stream has been closed from the other end + """ + + +class ByteSendStream(AsyncResource, TypedAttributeProvider): + """An interface for sending bytes to a single peer.""" + + @abstractmethod + async def send(self, item: bytes) -> None: + """ + Send the given bytes to the peer. + + :param item: the bytes to send + """ + + +class ByteStream(ByteReceiveStream, ByteSendStream): + """A bidirectional byte stream.""" + + @abstractmethod + async def send_eof(self) -> None: + """ + Send an end-of-file indication to the peer. + + You should not try to send any further data to this stream after calling this + method. This method is idempotent (does nothing on successive calls). + """ + + +#: Type alias for all unreliable bytes-oriented receive streams. +AnyUnreliableByteReceiveStream = Union[ + UnreliableObjectReceiveStream[bytes], ByteReceiveStream +] +#: Type alias for all unreliable bytes-oriented send streams. +AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream] +#: Type alias for all unreliable bytes-oriented streams. +AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream] +#: Type alias for all bytes-oriented receive streams. +AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream] +#: Type alias for all bytes-oriented send streams. +AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream] +#: Type alias for all bytes-oriented streams. +AnyByteStream = Union[ObjectStream[bytes], ByteStream] + + +class Listener(Generic[T_co], AsyncResource, TypedAttributeProvider): + """An interface for objects that let you accept incoming connections.""" + + @abstractmethod + async def serve( + self, handler: Callable[[T_co], Any], task_group: TaskGroup | None = None + ) -> None: + """ + Accept incoming connections as they come in and start tasks to handle them. 
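+
+        For example, a minimal echo server (an illustrative sketch)::
+
+            import anyio
+
+            async def echo(stream) -> None:
+                async with stream:
+                    async for chunk in stream:
+                        await stream.send(chunk)
+
+            async def main() -> None:
+                listener = await anyio.create_tcp_listener(local_port=9000)
+                await listener.serve(echo)
+
+            anyio.run(main)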
+ + :param handler: a callable that will be used to handle each accepted connection + :param task_group: the task group that will be used to start tasks for handling + each accepted connection (if omitted, an ad-hoc task group will be created) + """ diff --git a/venv/Lib/site-packages/anyio/abc/_subprocesses.py b/venv/Lib/site-packages/anyio/abc/_subprocesses.py new file mode 100644 index 00000000..ce0564ce --- /dev/null +++ b/venv/Lib/site-packages/anyio/abc/_subprocesses.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +from abc import abstractmethod +from signal import Signals + +from ._resources import AsyncResource +from ._streams import ByteReceiveStream, ByteSendStream + + +class Process(AsyncResource): + """An asynchronous version of :class:`subprocess.Popen`.""" + + @abstractmethod + async def wait(self) -> int: + """ + Wait until the process exits. + + :return: the exit code of the process + """ + + @abstractmethod + def terminate(self) -> None: + """ + Terminates the process, gracefully if possible. + + On Windows, this calls ``TerminateProcess()``. + On POSIX systems, this sends ``SIGTERM`` to the process. + + .. seealso:: :meth:`subprocess.Popen.terminate` + """ + + @abstractmethod + def kill(self) -> None: + """ + Kills the process. + + On Windows, this calls ``TerminateProcess()``. + On POSIX systems, this sends ``SIGKILL`` to the process. + + .. seealso:: :meth:`subprocess.Popen.kill` + """ + + @abstractmethod + def send_signal(self, signal: Signals) -> None: + """ + Send a signal to the subprocess. + + .. seealso:: :meth:`subprocess.Popen.send_signal` + + :param signal: the signal number (e.g. :data:`signal.SIGHUP`) + """ + + @property + @abstractmethod + def pid(self) -> int: + """The process ID of the process.""" + + @property + @abstractmethod + def returncode(self) -> int | None: + """ + The return code of the process. If the process has not yet terminated, this will + be ``None``. + """ + + @property + @abstractmethod + def stdin(self) -> ByteSendStream | None: + """The stream for the standard input of the process.""" + + @property + @abstractmethod + def stdout(self) -> ByteReceiveStream | None: + """The stream for the standard output of the process.""" + + @property + @abstractmethod + def stderr(self) -> ByteReceiveStream | None: + """The stream for the standard error output of the process.""" diff --git a/venv/Lib/site-packages/anyio/abc/_tasks.py b/venv/Lib/site-packages/anyio/abc/_tasks.py new file mode 100644 index 00000000..f6e5c40c --- /dev/null +++ b/venv/Lib/site-packages/anyio/abc/_tasks.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +import sys +from abc import ABCMeta, abstractmethod +from collections.abc import Awaitable, Callable +from types import TracebackType +from typing import TYPE_CHECKING, Any, Protocol, TypeVar, overload + +if sys.version_info >= (3, 11): + from typing import TypeVarTuple, Unpack +else: + from typing_extensions import TypeVarTuple, Unpack + +if TYPE_CHECKING: + from .._core._tasks import CancelScope + +T_Retval = TypeVar("T_Retval") +T_contra = TypeVar("T_contra", contravariant=True) +PosArgsT = TypeVarTuple("PosArgsT") + + +class TaskStatus(Protocol[T_contra]): + @overload + def started(self: TaskStatus[None]) -> None: ... + + @overload + def started(self, value: T_contra) -> None: ... + + def started(self, value: T_contra | None = None) -> None: + """ + Signal that the task has started. 
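+
+        For example, together with :meth:`TaskGroup.start` (an illustrative
+        sketch)::
+
+            import anyio
+
+            async def worker(*, task_status) -> None:
+                port = 9000  # e.g. bind a listener here first
+                task_status.started(port)  # unblocks the tg.start() call below
+                await anyio.sleep_forever()
+
+            async def main() -> None:
+                async with anyio.create_task_group() as tg:
+                    port = await tg.start(worker)  # -> 9000
+                    tg.cancel_scope.cancel()  # end the example
+
+            anyio.run(main)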
+ + :param value: object passed back to the starter of the task + """ + + +class TaskGroup(metaclass=ABCMeta): + """ + Groups several asynchronous tasks together. + + :ivar cancel_scope: the cancel scope inherited by all child tasks + :vartype cancel_scope: CancelScope + + .. note:: On asyncio, support for eager task factories is considered to be + **experimental**. In particular, they don't follow the usual semantics of new + tasks being scheduled on the next iteration of the event loop, and may thus + cause unexpected behavior in code that wasn't written with such semantics in + mind. + """ + + cancel_scope: CancelScope + + @abstractmethod + def start_soon( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[Any]], + *args: Unpack[PosArgsT], + name: object = None, + ) -> None: + """ + Start a new task in this task group. + + :param func: a coroutine function + :param args: positional arguments to call the function with + :param name: name of the task, for the purposes of introspection and debugging + + .. versionadded:: 3.0 + """ + + @abstractmethod + async def start( + self, + func: Callable[..., Awaitable[Any]], + *args: object, + name: object = None, + ) -> Any: + """ + Start a new task and wait until it signals for readiness. + + :param func: a coroutine function + :param args: positional arguments to call the function with + :param name: name of the task, for the purposes of introspection and debugging + :return: the value passed to ``task_status.started()`` + :raises RuntimeError: if the task finishes without calling + ``task_status.started()`` + + .. versionadded:: 3.0 + """ + + @abstractmethod + async def __aenter__(self) -> TaskGroup: + """Enter the task group context and allow starting new tasks.""" + + @abstractmethod + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: + """Exit the task group context waiting for all tasks to finish.""" diff --git a/venv/Lib/site-packages/anyio/abc/_testing.py b/venv/Lib/site-packages/anyio/abc/_testing.py new file mode 100644 index 00000000..7c50ed76 --- /dev/null +++ b/venv/Lib/site-packages/anyio/abc/_testing.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import types +from abc import ABCMeta, abstractmethod +from collections.abc import AsyncGenerator, Callable, Coroutine, Iterable +from typing import Any, TypeVar + +_T = TypeVar("_T") + + +class TestRunner(metaclass=ABCMeta): + """ + Encapsulates a running event loop. Every call made through this object will use the + same event loop. + """ + + def __enter__(self) -> TestRunner: + return self + + @abstractmethod + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: types.TracebackType | None, + ) -> bool | None: ... + + @abstractmethod + def run_asyncgen_fixture( + self, + fixture_func: Callable[..., AsyncGenerator[_T, Any]], + kwargs: dict[str, Any], + ) -> Iterable[_T]: + """ + Run an async generator fixture. + + :param fixture_func: the fixture function + :param kwargs: keyword arguments to call the fixture function with + :return: an iterator yielding the value yielded from the async generator + """ + + @abstractmethod + def run_fixture( + self, + fixture_func: Callable[..., Coroutine[Any, Any, _T]], + kwargs: dict[str, Any], + ) -> _T: + """ + Run an async fixture. 
+ + :param fixture_func: the fixture function + :param kwargs: keyword arguments to call the fixture function with + :return: the return value of the fixture function + """ + + @abstractmethod + def run_test( + self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any] + ) -> None: + """ + Run an async test function. + + :param test_func: the test function + :param kwargs: keyword arguments to call the test function with + """ diff --git a/venv/Lib/site-packages/anyio/from_thread.py b/venv/Lib/site-packages/anyio/from_thread.py new file mode 100644 index 00000000..61790973 --- /dev/null +++ b/venv/Lib/site-packages/anyio/from_thread.py @@ -0,0 +1,527 @@ +from __future__ import annotations + +import sys +from collections.abc import Awaitable, Callable, Generator +from concurrent.futures import Future +from contextlib import ( + AbstractAsyncContextManager, + AbstractContextManager, + contextmanager, +) +from dataclasses import dataclass, field +from inspect import isawaitable +from threading import Lock, Thread, get_ident +from types import TracebackType +from typing import ( + Any, + Generic, + TypeVar, + cast, + overload, +) + +from ._core import _eventloop +from ._core._eventloop import get_async_backend, get_cancelled_exc_class, threadlocals +from ._core._synchronization import Event +from ._core._tasks import CancelScope, create_task_group +from .abc import AsyncBackend +from .abc._tasks import TaskStatus + +if sys.version_info >= (3, 11): + from typing import TypeVarTuple, Unpack +else: + from typing_extensions import TypeVarTuple, Unpack + +T_Retval = TypeVar("T_Retval") +T_co = TypeVar("T_co", covariant=True) +PosArgsT = TypeVarTuple("PosArgsT") + + +def run( + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], *args: Unpack[PosArgsT] +) -> T_Retval: + """ + Call a coroutine function from a worker thread. + + :param func: a coroutine function + :param args: positional arguments for the callable + :return: the return value of the coroutine function + + """ + try: + async_backend = threadlocals.current_async_backend + token = threadlocals.current_token + except AttributeError: + raise RuntimeError( + "This function can only be run from an AnyIO worker thread" + ) from None + + return async_backend.run_async_from_thread(func, args, token=token) + + +def run_sync( + func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT] +) -> T_Retval: + """ + Call a function in the event loop thread from a worker thread. 
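+
+    For example, from a worker thread spawned with :func:`.to_thread.run_sync`
+    (an illustrative sketch)::
+
+        import anyio
+        from anyio import from_thread, to_thread
+
+        def blocking_work() -> str:
+            # Runs in a worker thread; hop back to the event loop thread:
+            from_thread.run_sync(print, "hello from the event loop thread")
+            return "done"
+
+        async def main() -> None:
+            result = await to_thread.run_sync(blocking_work)
+
+        anyio.run(main)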
+ + :param func: a callable + :param args: positional arguments for the callable + :return: the return value of the callable + + """ + try: + async_backend = threadlocals.current_async_backend + token = threadlocals.current_token + except AttributeError: + raise RuntimeError( + "This function can only be run from an AnyIO worker thread" + ) from None + + return async_backend.run_sync_from_thread(func, args, token=token) + + +class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager): + _enter_future: Future[T_co] + _exit_future: Future[bool | None] + _exit_event: Event + _exit_exc_info: tuple[ + type[BaseException] | None, BaseException | None, TracebackType | None + ] = (None, None, None) + + def __init__( + self, async_cm: AbstractAsyncContextManager[T_co], portal: BlockingPortal + ): + self._async_cm = async_cm + self._portal = portal + + async def run_async_cm(self) -> bool | None: + try: + self._exit_event = Event() + value = await self._async_cm.__aenter__() + except BaseException as exc: + self._enter_future.set_exception(exc) + raise + else: + self._enter_future.set_result(value) + + try: + # Wait for the sync context manager to exit. + # This next statement can raise `get_cancelled_exc_class()` if + # something went wrong in a task group in this async context + # manager. + await self._exit_event.wait() + finally: + # In case of cancellation, it could be that we end up here before + # `_BlockingAsyncContextManager.__exit__` is called, and an + # `_exit_exc_info` has been set. + result = await self._async_cm.__aexit__(*self._exit_exc_info) + return result + + def __enter__(self) -> T_co: + self._enter_future = Future() + self._exit_future = self._portal.start_task_soon(self.run_async_cm) + return self._enter_future.result() + + def __exit__( + self, + __exc_type: type[BaseException] | None, + __exc_value: BaseException | None, + __traceback: TracebackType | None, + ) -> bool | None: + self._exit_exc_info = __exc_type, __exc_value, __traceback + self._portal.call(self._exit_event.set) + return self._exit_future.result() + + +class _BlockingPortalTaskStatus(TaskStatus): + def __init__(self, future: Future): + self._future = future + + def started(self, value: object = None) -> None: + self._future.set_result(value) + + +class BlockingPortal: + """An object that lets external threads run code in an asynchronous event loop.""" + + def __new__(cls) -> BlockingPortal: + return get_async_backend().create_blocking_portal() + + def __init__(self) -> None: + self._event_loop_thread_id: int | None = get_ident() + self._stop_event = Event() + self._task_group = create_task_group() + self._cancelled_exc_class = get_cancelled_exc_class() + + async def __aenter__(self) -> BlockingPortal: + await self._task_group.__aenter__() + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: + await self.stop() + return await self._task_group.__aexit__(exc_type, exc_val, exc_tb) + + def _check_running(self) -> None: + if self._event_loop_thread_id is None: + raise RuntimeError("This portal is not running") + if self._event_loop_thread_id == get_ident(): + raise RuntimeError( + "This method cannot be called from the event loop thread" + ) + + async def sleep_until_stopped(self) -> None: + """Sleep until :meth:`stop` is called.""" + await self._stop_event.wait() + + async def stop(self, cancel_remaining: bool = False) -> None: + """ + Signal the portal to shut down. 
+ + This marks the portal as no longer accepting new calls and exits from + :meth:`sleep_until_stopped`. + + :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False`` + to let them finish before returning + + """ + self._event_loop_thread_id = None + self._stop_event.set() + if cancel_remaining: + self._task_group.cancel_scope.cancel() + + async def _call_func( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval], + args: tuple[Unpack[PosArgsT]], + kwargs: dict[str, Any], + future: Future[T_Retval], + ) -> None: + def callback(f: Future[T_Retval]) -> None: + if f.cancelled() and self._event_loop_thread_id not in ( + None, + get_ident(), + ): + self.call(scope.cancel) + + try: + retval_or_awaitable = func(*args, **kwargs) + if isawaitable(retval_or_awaitable): + with CancelScope() as scope: + if future.cancelled(): + scope.cancel() + else: + future.add_done_callback(callback) + + retval = await retval_or_awaitable + else: + retval = retval_or_awaitable + except self._cancelled_exc_class: + future.cancel() + future.set_running_or_notify_cancel() + except BaseException as exc: + if not future.cancelled(): + future.set_exception(exc) + + # Let base exceptions fall through + if not isinstance(exc, Exception): + raise + else: + if not future.cancelled(): + future.set_result(retval) + finally: + scope = None # type: ignore[assignment] + + def _spawn_task_from_thread( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval], + args: tuple[Unpack[PosArgsT]], + kwargs: dict[str, Any], + name: object, + future: Future[T_Retval], + ) -> None: + """ + Spawn a new task using the given callable. + + Implementers must ensure that the future is resolved when the task finishes. + + :param func: a callable + :param args: positional arguments to be passed to the callable + :param kwargs: keyword arguments to be passed to the callable + :param name: name of the task (will be coerced to a string if not ``None``) + :param future: a future that will resolve to the return value of the callable, + or the exception raised during its execution + + """ + raise NotImplementedError + + @overload + def call( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], + *args: Unpack[PosArgsT], + ) -> T_Retval: ... + + @overload + def call( + self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT] + ) -> T_Retval: ... + + def call( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval], + *args: Unpack[PosArgsT], + ) -> T_Retval: + """ + Call the given function in the event loop thread. + + If the callable returns a coroutine object, it is awaited on. + + :param func: any callable + :raises RuntimeError: if the portal is not running or if this method is called + from within the event loop thread + + """ + return cast(T_Retval, self.start_task_soon(func, *args).result()) + + @overload + def start_task_soon( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], + *args: Unpack[PosArgsT], + name: object = None, + ) -> Future[T_Retval]: ... + + @overload + def start_task_soon( + self, + func: Callable[[Unpack[PosArgsT]], T_Retval], + *args: Unpack[PosArgsT], + name: object = None, + ) -> Future[T_Retval]: ... + + def start_task_soon( + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval], + *args: Unpack[PosArgsT], + name: object = None, + ) -> Future[T_Retval]: + """ + Start a task in the portal's task group. 
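+
+        For example, from a plain (non-async) thread holding a portal (an
+        illustrative sketch)::
+
+            import anyio
+            from anyio.from_thread import start_blocking_portal
+
+            with start_blocking_portal() as portal:
+                future = portal.start_task_soon(anyio.sleep, 1)
+                future.result()  # block this thread until the task finishes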
+ + The task will be run inside a cancel scope which can be cancelled by cancelling + the returned future. + + :param func: the target function + :param args: positional arguments passed to ``func`` + :param name: name of the task (will be coerced to a string if not ``None``) + :return: a future that resolves with the return value of the callable if the + task completes successfully, or with the exception raised in the task + :raises RuntimeError: if the portal is not running or if this method is called + from within the event loop thread + :rtype: concurrent.futures.Future[T_Retval] + + .. versionadded:: 3.0 + + """ + self._check_running() + f: Future[T_Retval] = Future() + self._spawn_task_from_thread(func, args, {}, name, f) + return f + + def start_task( + self, + func: Callable[..., Awaitable[T_Retval]], + *args: object, + name: object = None, + ) -> tuple[Future[T_Retval], Any]: + """ + Start a task in the portal's task group and wait until it signals for readiness. + + This method works the same way as :meth:`.abc.TaskGroup.start`. + + :param func: the target function + :param args: positional arguments passed to ``func`` + :param name: name of the task (will be coerced to a string if not ``None``) + :return: a tuple of (future, task_status_value) where the ``task_status_value`` + is the value passed to ``task_status.started()`` from within the target + function + :rtype: tuple[concurrent.futures.Future[T_Retval], Any] + + .. versionadded:: 3.0 + + """ + + def task_done(future: Future[T_Retval]) -> None: + if not task_status_future.done(): + if future.cancelled(): + task_status_future.cancel() + elif future.exception(): + task_status_future.set_exception(future.exception()) + else: + exc = RuntimeError( + "Task exited without calling task_status.started()" + ) + task_status_future.set_exception(exc) + + self._check_running() + task_status_future: Future = Future() + task_status = _BlockingPortalTaskStatus(task_status_future) + f: Future = Future() + f.add_done_callback(task_done) + self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f) + return f, task_status_future.result() + + def wrap_async_context_manager( + self, cm: AbstractAsyncContextManager[T_co] + ) -> AbstractContextManager[T_co]: + """ + Wrap an async context manager as a synchronous context manager via this portal. + + Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping + in the middle until the synchronous context manager exits. + + :param cm: an asynchronous context manager + :return: a synchronous context manager + + .. versionadded:: 2.1 + + """ + return _BlockingAsyncContextManager(cm, self) + + +@dataclass +class BlockingPortalProvider: + """ + A manager for a blocking portal. Used as a context manager. The first thread to + enter this context manager causes a blocking portal to be started with the specific + parameters, and the last thread to exit causes the portal to be shut down. Thus, + there will be exactly one blocking portal running in this context as long as at + least one thread has entered this context manager. + + The parameters are the same as for :func:`~anyio.run`. + + :param backend: name of the backend + :param backend_options: backend options + + .. 
versionadded:: 4.4 + """ + + backend: str = "asyncio" + backend_options: dict[str, Any] | None = None + _lock: Lock = field(init=False, default_factory=Lock) + _leases: int = field(init=False, default=0) + _portal: BlockingPortal = field(init=False) + _portal_cm: AbstractContextManager[BlockingPortal] | None = field( + init=False, default=None + ) + + def __enter__(self) -> BlockingPortal: + with self._lock: + if self._portal_cm is None: + self._portal_cm = start_blocking_portal( + self.backend, self.backend_options + ) + self._portal = self._portal_cm.__enter__() + + self._leases += 1 + return self._portal + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + portal_cm: AbstractContextManager[BlockingPortal] | None = None + with self._lock: + assert self._portal_cm + assert self._leases > 0 + self._leases -= 1 + if not self._leases: + portal_cm = self._portal_cm + self._portal_cm = None + del self._portal + + if portal_cm: + portal_cm.__exit__(None, None, None) + + +@contextmanager +def start_blocking_portal( + backend: str = "asyncio", backend_options: dict[str, Any] | None = None +) -> Generator[BlockingPortal, Any, None]: + """ + Start a new event loop in a new thread and run a blocking portal in its main task. + + The parameters are the same as for :func:`~anyio.run`. + + :param backend: name of the backend + :param backend_options: backend options + :return: a context manager that yields a blocking portal + + .. versionchanged:: 3.0 + Usage as a context manager is now required. + + """ + + async def run_portal() -> None: + async with BlockingPortal() as portal_: + future.set_result(portal_) + await portal_.sleep_until_stopped() + + def run_blocking_portal() -> None: + if future.set_running_or_notify_cancel(): + try: + _eventloop.run( + run_portal, backend=backend, backend_options=backend_options + ) + except BaseException as exc: + if not future.done(): + future.set_exception(exc) + + future: Future[BlockingPortal] = Future() + thread = Thread(target=run_blocking_portal, daemon=True) + thread.start() + try: + cancel_remaining_tasks = False + portal = future.result() + try: + yield portal + except BaseException: + cancel_remaining_tasks = True + raise + finally: + try: + portal.call(portal.stop, cancel_remaining_tasks) + except RuntimeError: + pass + finally: + thread.join() + + +def check_cancelled() -> None: + """ + Check if the cancel scope of the host task's running the current worker thread has + been cancelled. + + If the host task's current cancel scope has indeed been cancelled, the + backend-specific cancellation exception will be raised. 
+ + :raises RuntimeError: if the current thread was not spawned by + :func:`.to_thread.run_sync` + + """ + try: + async_backend: AsyncBackend = threadlocals.current_async_backend + except AttributeError: + raise RuntimeError( + "This function can only be run from an AnyIO worker thread" + ) from None + + async_backend.check_cancelled() diff --git a/venv/Lib/site-packages/anyio/lowlevel.py b/venv/Lib/site-packages/anyio/lowlevel.py new file mode 100644 index 00000000..14c7668c --- /dev/null +++ b/venv/Lib/site-packages/anyio/lowlevel.py @@ -0,0 +1,161 @@ +from __future__ import annotations + +import enum +from dataclasses import dataclass +from typing import Any, Generic, Literal, TypeVar, overload +from weakref import WeakKeyDictionary + +from ._core._eventloop import get_async_backend + +T = TypeVar("T") +D = TypeVar("D") + + +async def checkpoint() -> None: + """ + Check for cancellation and allow the scheduler to switch to another task. + + Equivalent to (but more efficient than):: + + await checkpoint_if_cancelled() + await cancel_shielded_checkpoint() + + + .. versionadded:: 3.0 + + """ + await get_async_backend().checkpoint() + + +async def checkpoint_if_cancelled() -> None: + """ + Enter a checkpoint if the enclosing cancel scope has been cancelled. + + This does not allow the scheduler to switch to a different task. + + .. versionadded:: 3.0 + + """ + await get_async_backend().checkpoint_if_cancelled() + + +async def cancel_shielded_checkpoint() -> None: + """ + Allow the scheduler to switch to another task but without checking for cancellation. + + Equivalent to (but potentially more efficient than):: + + with CancelScope(shield=True): + await checkpoint() + + + .. versionadded:: 3.0 + + """ + await get_async_backend().cancel_shielded_checkpoint() + + +def current_token() -> object: + """ + Return a backend specific token object that can be used to get back to the event + loop. + + """ + return get_async_backend().current_token() + + +_run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary() +_token_wrappers: dict[Any, _TokenWrapper] = {} + + +@dataclass(frozen=True) +class _TokenWrapper: + __slots__ = "_token", "__weakref__" + _token: object + + +class _NoValueSet(enum.Enum): + NO_VALUE_SET = enum.auto() + + +class RunvarToken(Generic[T]): + __slots__ = "_var", "_value", "_redeemed" + + def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]): + self._var = var + self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value + self._redeemed = False + + +class RunVar(Generic[T]): + """ + Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop. + """ + + __slots__ = "_name", "_default" + + NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET + + _token_wrappers: set[_TokenWrapper] = set() + + def __init__( + self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET + ): + self._name = name + self._default = default + + @property + def _current_vars(self) -> dict[str, T]: + token = current_token() + try: + return _run_vars[token] + except KeyError: + run_vars = _run_vars[token] = {} + return run_vars + + @overload + def get(self, default: D) -> T | D: ... + + @overload + def get(self) -> T: ... 
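+    # Illustrative usage sketch (requires a running event loop; the variable
+    # name is made up):
+    #
+    #     limit = RunVar[int]("limit", default=10)
+    #     limit.set(42)   # visible only within the current event loop
+    #     limit.get()     # -> 42
+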
+
+    def get(
+        self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
+    ) -> T | D:
+        try:
+            return self._current_vars[self._name]
+        except KeyError:
+            if default is not RunVar.NO_VALUE_SET:
+                return default
+            elif self._default is not RunVar.NO_VALUE_SET:
+                return self._default
+
+        raise LookupError(
+            f'Run variable "{self._name}" has no value and no default set'
+        )
+
+    def set(self, value: T) -> RunvarToken[T]:
+        current_vars = self._current_vars
+        token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
+        current_vars[self._name] = value
+        return token
+
+    def reset(self, token: RunvarToken[T]) -> None:
+        if token._var is not self:
+            raise ValueError("This token does not belong to this RunVar")
+
+        if token._redeemed:
+            raise ValueError("This token has already been used")
+
+        if token._value is _NoValueSet.NO_VALUE_SET:
+            try:
+                del self._current_vars[self._name]
+            except KeyError:
+                pass
+        else:
+            self._current_vars[self._name] = token._value
+
+        token._redeemed = True
+
+    def __repr__(self) -> str:
+        return f"<RunVar name={self._name!r}>"
diff --git a/venv/Lib/site-packages/anyio/py.typed b/venv/Lib/site-packages/anyio/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/venv/Lib/site-packages/anyio/pytest_plugin.py b/venv/Lib/site-packages/anyio/pytest_plugin.py
new file mode 100644
index 00000000..21e4ab22
--- /dev/null
+++ b/venv/Lib/site-packages/anyio/pytest_plugin.py
@@ -0,0 +1,272 @@
+from __future__ import annotations
+
+import socket
+import sys
+from collections.abc import Callable, Generator, Iterator
+from contextlib import ExitStack, contextmanager
+from inspect import isasyncgenfunction, iscoroutinefunction, ismethod
+from typing import Any, cast
+
+import pytest
+import sniffio
+from _pytest.fixtures import SubRequest
+from _pytest.outcomes import Exit
+
+from ._core._eventloop import get_all_backends, get_async_backend
+from ._core._exceptions import iterate_exceptions
+from .abc import TestRunner
+
+if sys.version_info < (3, 11):
+    from exceptiongroup import ExceptionGroup
+
+_current_runner: TestRunner | None = None
+_runner_stack: ExitStack | None = None
+_runner_leases = 0
+
+
+def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]:
+    if isinstance(backend, str):
+        return backend, {}
+    elif isinstance(backend, tuple) and len(backend) == 2:
+        if isinstance(backend[0], str) and isinstance(backend[1], dict):
+            return cast(tuple[str, dict[str, Any]], backend)
+
+    raise TypeError("anyio_backend must be either a string or tuple of (string, dict)")
+
+
+@contextmanager
+def get_runner(
+    backend_name: str, backend_options: dict[str, Any]
+) -> Iterator[TestRunner]:
+    global _current_runner, _runner_leases, _runner_stack
+    if _current_runner is None:
+        asynclib = get_async_backend(backend_name)
+        _runner_stack = ExitStack()
+        if sniffio.current_async_library_cvar.get(None) is None:
+            # Since we're in control of the event loop, we can cache the name of the
+            # async library
+            token = sniffio.current_async_library_cvar.set(backend_name)
+            _runner_stack.callback(sniffio.current_async_library_cvar.reset, token)
+
+        backend_options = backend_options or {}
+        _current_runner = _runner_stack.enter_context(
+            asynclib.create_test_runner(backend_options)
+        )
+
+    _runner_leases += 1
+    try:
+        yield _current_runner
+    finally:
+        _runner_leases -= 1
+        if not _runner_leases:
+            assert _runner_stack is not None
+            _runner_stack.close()
+            _runner_stack = _current_runner = None
+
+
+def pytest_configure(config: Any) -> None:
config.addinivalue_line( + "markers", + "anyio: mark the (coroutine function) test to be run asynchronously via anyio.", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_fixture_setup(fixturedef: Any, request: Any) -> Generator[Any]: + def wrapper( + *args: Any, anyio_backend: Any, request: SubRequest, **kwargs: Any + ) -> Any: + # Rebind any fixture methods to the request instance + if ( + request.instance + and ismethod(func) + and type(func.__self__) is type(request.instance) + ): + local_func = func.__func__.__get__(request.instance) + else: + local_func = func + + backend_name, backend_options = extract_backend_and_options(anyio_backend) + if has_backend_arg: + kwargs["anyio_backend"] = anyio_backend + + if has_request_arg: + kwargs["request"] = request + + with get_runner(backend_name, backend_options) as runner: + if isasyncgenfunction(local_func): + yield from runner.run_asyncgen_fixture(local_func, kwargs) + else: + yield runner.run_fixture(local_func, kwargs) + + # Only apply this to coroutine functions and async generator functions in requests + # that involve the anyio_backend fixture + func = fixturedef.func + if isasyncgenfunction(func) or iscoroutinefunction(func): + if "anyio_backend" in request.fixturenames: + fixturedef.func = wrapper + original_argname = fixturedef.argnames + + if not (has_backend_arg := "anyio_backend" in fixturedef.argnames): + fixturedef.argnames += ("anyio_backend",) + + if not (has_request_arg := "request" in fixturedef.argnames): + fixturedef.argnames += ("request",) + + try: + return (yield) + finally: + fixturedef.func = func + fixturedef.argnames = original_argname + + return (yield) + + +@pytest.hookimpl(tryfirst=True) +def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None: + if collector.istestfunction(obj, name): + inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj + if iscoroutinefunction(inner_func): + marker = collector.get_closest_marker("anyio") + own_markers = getattr(obj, "pytestmark", ()) + if marker or any(marker.name == "anyio" for marker in own_markers): + pytest.mark.usefixtures("anyio_backend")(obj) + + +@pytest.hookimpl(tryfirst=True) +def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None: + def run_with_hypothesis(**kwargs: Any) -> None: + with get_runner(backend_name, backend_options) as runner: + runner.run_test(original_func, kwargs) + + backend = pyfuncitem.funcargs.get("anyio_backend") + if backend: + backend_name, backend_options = extract_backend_and_options(backend) + + if hasattr(pyfuncitem.obj, "hypothesis"): + # Wrap the inner test function unless it's already wrapped + original_func = pyfuncitem.obj.hypothesis.inner_test + if original_func.__qualname__ != run_with_hypothesis.__qualname__: + if iscoroutinefunction(original_func): + pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis + + return None + + if iscoroutinefunction(pyfuncitem.obj): + funcargs = pyfuncitem.funcargs + testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} + with get_runner(backend_name, backend_options) as runner: + try: + runner.run_test(pyfuncitem.obj, testargs) + except ExceptionGroup as excgrp: + for exc in iterate_exceptions(excgrp): + if isinstance(exc, (Exit, KeyboardInterrupt, SystemExit)): + raise exc from excgrp + + raise + + return True + + return None + + +@pytest.fixture(scope="module", params=get_all_backends()) +def anyio_backend(request: Any) -> Any: + return request.param + + +@pytest.fixture +def anyio_backend_name(anyio_backend: Any) -> str: 
+ if isinstance(anyio_backend, str): + return anyio_backend + else: + return anyio_backend[0] + + +@pytest.fixture +def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]: + if isinstance(anyio_backend, str): + return {} + else: + return anyio_backend[1] + + +class FreePortFactory: + """ + Manages port generation based on specified socket kind, ensuring no duplicate + ports are generated. + + This class provides functionality for generating available free ports on the + system. It is initialized with a specific socket kind and can generate ports + for given address families while avoiding reuse of previously generated ports. + + Users should not instantiate this class directly, but use the + ``free_tcp_port_factory`` and ``free_udp_port_factory`` fixtures instead. For simple + uses cases, ``free_tcp_port`` and ``free_udp_port`` can be used instead. + """ + + def __init__(self, kind: socket.SocketKind) -> None: + self._kind = kind + self._generated = set[int]() + + @property + def kind(self) -> socket.SocketKind: + """ + The type of socket connection (e.g., :data:`~socket.SOCK_STREAM` or + :data:`~socket.SOCK_DGRAM`) used to bind for checking port availability + + """ + return self._kind + + def __call__(self, family: socket.AddressFamily | None = None) -> int: + """ + Return an unbound port for the given address family. + + :param family: if omitted, both IPv4 and IPv6 addresses will be tried + :return: a port number + + """ + if family is not None: + families = [family] + else: + families = [socket.AF_INET] + if socket.has_ipv6: + families.append(socket.AF_INET6) + + while True: + port = 0 + with ExitStack() as stack: + for family in families: + sock = stack.enter_context(socket.socket(family, self._kind)) + addr = "::1" if family == socket.AF_INET6 else "127.0.0.1" + try: + sock.bind((addr, port)) + except OSError: + break + + if not port: + port = sock.getsockname()[1] + else: + if port not in self._generated: + self._generated.add(port) + return port + + +@pytest.fixture(scope="session") +def free_tcp_port_factory() -> FreePortFactory: + return FreePortFactory(socket.SOCK_STREAM) + + +@pytest.fixture(scope="session") +def free_udp_port_factory() -> FreePortFactory: + return FreePortFactory(socket.SOCK_DGRAM) + + +@pytest.fixture +def free_tcp_port(free_tcp_port_factory: Callable[[], int]) -> int: + return free_tcp_port_factory() + + +@pytest.fixture +def free_udp_port(free_udp_port_factory: Callable[[], int]) -> int: + return free_udp_port_factory() diff --git a/venv/Lib/site-packages/anyio/streams/__init__.py b/venv/Lib/site-packages/anyio/streams/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/anyio/streams/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/anyio/streams/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9d05cb24 Binary files /dev/null and b/venv/Lib/site-packages/anyio/streams/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/streams/__pycache__/buffered.cpython-312.pyc b/venv/Lib/site-packages/anyio/streams/__pycache__/buffered.cpython-312.pyc new file mode 100644 index 00000000..60df34e3 Binary files /dev/null and b/venv/Lib/site-packages/anyio/streams/__pycache__/buffered.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/streams/__pycache__/file.cpython-312.pyc b/venv/Lib/site-packages/anyio/streams/__pycache__/file.cpython-312.pyc new file mode 100644 index 00000000..de78647b Binary files /dev/null and 
b/venv/Lib/site-packages/anyio/streams/__pycache__/file.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/streams/__pycache__/memory.cpython-312.pyc b/venv/Lib/site-packages/anyio/streams/__pycache__/memory.cpython-312.pyc new file mode 100644 index 00000000..3f335c24 Binary files /dev/null and b/venv/Lib/site-packages/anyio/streams/__pycache__/memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/streams/__pycache__/stapled.cpython-312.pyc b/venv/Lib/site-packages/anyio/streams/__pycache__/stapled.cpython-312.pyc new file mode 100644 index 00000000..33b7e32c Binary files /dev/null and b/venv/Lib/site-packages/anyio/streams/__pycache__/stapled.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/streams/__pycache__/text.cpython-312.pyc b/venv/Lib/site-packages/anyio/streams/__pycache__/text.cpython-312.pyc new file mode 100644 index 00000000..a5e30843 Binary files /dev/null and b/venv/Lib/site-packages/anyio/streams/__pycache__/text.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/streams/__pycache__/tls.cpython-312.pyc b/venv/Lib/site-packages/anyio/streams/__pycache__/tls.cpython-312.pyc new file mode 100644 index 00000000..35d98fcb Binary files /dev/null and b/venv/Lib/site-packages/anyio/streams/__pycache__/tls.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/anyio/streams/buffered.py b/venv/Lib/site-packages/anyio/streams/buffered.py new file mode 100644 index 00000000..f5d5e836 --- /dev/null +++ b/venv/Lib/site-packages/anyio/streams/buffered.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +from collections.abc import Callable, Mapping +from dataclasses import dataclass, field +from typing import Any + +from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead +from ..abc import AnyByteReceiveStream, ByteReceiveStream + + +@dataclass(eq=False) +class BufferedByteReceiveStream(ByteReceiveStream): + """ + Wraps any bytes-based receive stream and uses a buffer to provide sophisticated + receiving capabilities in the form of a byte stream. + """ + + receive_stream: AnyByteReceiveStream + _buffer: bytearray = field(init=False, default_factory=bytearray) + _closed: bool = field(init=False, default=False) + + async def aclose(self) -> None: + await self.receive_stream.aclose() + self._closed = True + + @property + def buffer(self) -> bytes: + """The bytes currently in the buffer.""" + return bytes(self._buffer) + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return self.receive_stream.extra_attributes + + async def receive(self, max_bytes: int = 65536) -> bytes: + if self._closed: + raise ClosedResourceError + + if self._buffer: + chunk = bytes(self._buffer[:max_bytes]) + del self._buffer[:max_bytes] + return chunk + elif isinstance(self.receive_stream, ByteReceiveStream): + return await self.receive_stream.receive(max_bytes) + else: + # With a bytes-oriented object stream, we need to handle any surplus bytes + # we get from the receive() call + chunk = await self.receive_stream.receive() + if len(chunk) > max_bytes: + # Save the surplus bytes in the buffer + self._buffer.extend(chunk[max_bytes:]) + return chunk[:max_bytes] + else: + return chunk + + async def receive_exactly(self, nbytes: int) -> bytes: + """ + Read exactly the given amount of bytes from the stream. 
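+
+        For example, reading a length-prefixed frame (an illustrative sketch;
+        ``byte_stream`` stands in for any bytes-oriented receive stream)::
+
+            from anyio.streams.buffered import BufferedByteReceiveStream
+
+            buffered = BufferedByteReceiveStream(byte_stream)
+            header = await buffered.receive_exactly(4)
+            body = await buffered.receive_exactly(int.from_bytes(header, "big"))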
+ + :param nbytes: the number of bytes to read + :return: the bytes read + :raises ~anyio.IncompleteRead: if the stream was closed before the requested + amount of bytes could be read from the stream + + """ + while True: + remaining = nbytes - len(self._buffer) + if remaining <= 0: + retval = self._buffer[:nbytes] + del self._buffer[:nbytes] + return bytes(retval) + + try: + if isinstance(self.receive_stream, ByteReceiveStream): + chunk = await self.receive_stream.receive(remaining) + else: + chunk = await self.receive_stream.receive() + except EndOfStream as exc: + raise IncompleteRead from exc + + self._buffer.extend(chunk) + + async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes: + """ + Read from the stream until the delimiter is found or max_bytes have been read. + + :param delimiter: the marker to look for in the stream + :param max_bytes: maximum number of bytes that will be read before raising + :exc:`~anyio.DelimiterNotFound` + :return: the bytes read (not including the delimiter) + :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter + was found + :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the + bytes read up to the maximum allowed + + """ + delimiter_size = len(delimiter) + offset = 0 + while True: + # Check if the delimiter can be found in the current buffer + index = self._buffer.find(delimiter, offset) + if index >= 0: + found = self._buffer[:index] + del self._buffer[: index + len(delimiter) :] + return bytes(found) + + # Check if the buffer is already at or over the limit + if len(self._buffer) >= max_bytes: + raise DelimiterNotFound(max_bytes) + + # Read more data into the buffer from the socket + try: + data = await self.receive_stream.receive() + except EndOfStream as exc: + raise IncompleteRead from exc + + # Move the offset forward and add the new data to the buffer + offset = max(len(self._buffer) - delimiter_size + 1, 0) + self._buffer.extend(data) diff --git a/venv/Lib/site-packages/anyio/streams/file.py b/venv/Lib/site-packages/anyio/streams/file.py new file mode 100644 index 00000000..f4924642 --- /dev/null +++ b/venv/Lib/site-packages/anyio/streams/file.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +from collections.abc import Callable, Mapping +from io import SEEK_SET, UnsupportedOperation +from os import PathLike +from pathlib import Path +from typing import Any, BinaryIO, cast + +from .. 
import ( + BrokenResourceError, + ClosedResourceError, + EndOfStream, + TypedAttributeSet, + to_thread, + typed_attribute, +) +from ..abc import ByteReceiveStream, ByteSendStream + + +class FileStreamAttribute(TypedAttributeSet): + #: the open file descriptor + file: BinaryIO = typed_attribute() + #: the path of the file on the file system, if available (file must be a real file) + path: Path = typed_attribute() + #: the file number, if available (file must be a real file or a TTY) + fileno: int = typed_attribute() + + +class _BaseFileStream: + def __init__(self, file: BinaryIO): + self._file = file + + async def aclose(self) -> None: + await to_thread.run_sync(self._file.close) + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + attributes: dict[Any, Callable[[], Any]] = { + FileStreamAttribute.file: lambda: self._file, + } + + if hasattr(self._file, "name"): + attributes[FileStreamAttribute.path] = lambda: Path(self._file.name) + + try: + self._file.fileno() + except UnsupportedOperation: + pass + else: + attributes[FileStreamAttribute.fileno] = lambda: self._file.fileno() + + return attributes + + +class FileReadStream(_BaseFileStream, ByteReceiveStream): + """ + A byte stream that reads from a file in the file system. + + :param file: a file that has been opened for reading in binary mode + + .. versionadded:: 3.0 + """ + + @classmethod + async def from_path(cls, path: str | PathLike[str]) -> FileReadStream: + """ + Create a file read stream by opening the given file. + + :param path: path of the file to read from + + """ + file = await to_thread.run_sync(Path(path).open, "rb") + return cls(cast(BinaryIO, file)) + + async def receive(self, max_bytes: int = 65536) -> bytes: + try: + data = await to_thread.run_sync(self._file.read, max_bytes) + except ValueError: + raise ClosedResourceError from None + except OSError as exc: + raise BrokenResourceError from exc + + if data: + return data + else: + raise EndOfStream + + async def seek(self, position: int, whence: int = SEEK_SET) -> int: + """ + Seek the file to the given position. + + .. seealso:: :meth:`io.IOBase.seek` + + .. note:: Not all file descriptors are seekable. + + :param position: position to seek the file to + :param whence: controls how ``position`` is interpreted + :return: the new absolute position + :raises OSError: if the file is not seekable + + """ + return await to_thread.run_sync(self._file.seek, position, whence) + + async def tell(self) -> int: + """ + Return the current stream position. + + .. note:: Not all file descriptors are seekable. + + :return: the current absolute position + :raises OSError: if the file is not seekable + + """ + return await to_thread.run_sync(self._file.tell) + + +class FileWriteStream(_BaseFileStream, ByteSendStream): + """ + A byte stream that writes to a file in the file system. + + :param file: a file that has been opened for writing in binary mode + + .. versionadded:: 3.0 + """ + + @classmethod + async def from_path( + cls, path: str | PathLike[str], append: bool = False + ) -> FileWriteStream: + """ + Create a file write stream by opening the given file for writing. 
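+
+        For example (an illustrative sketch; the path is made up)::
+
+            from anyio.streams.file import FileWriteStream
+
+            async with await FileWriteStream.from_path("/tmp/out.bin") as stream:
+                await stream.send(b"hello")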
+ + :param path: path of the file to write to + :param append: if ``True``, open the file for appending; if ``False``, any + existing file at the given path will be truncated + + """ + mode = "ab" if append else "wb" + file = await to_thread.run_sync(Path(path).open, mode) + return cls(cast(BinaryIO, file)) + + async def send(self, item: bytes) -> None: + try: + await to_thread.run_sync(self._file.write, item) + except ValueError: + raise ClosedResourceError from None + except OSError as exc: + raise BrokenResourceError from exc diff --git a/venv/Lib/site-packages/anyio/streams/memory.py b/venv/Lib/site-packages/anyio/streams/memory.py new file mode 100644 index 00000000..83bf1d97 --- /dev/null +++ b/venv/Lib/site-packages/anyio/streams/memory.py @@ -0,0 +1,317 @@ +from __future__ import annotations + +import warnings +from collections import OrderedDict, deque +from dataclasses import dataclass, field +from types import TracebackType +from typing import Generic, NamedTuple, TypeVar + +from .. import ( + BrokenResourceError, + ClosedResourceError, + EndOfStream, + WouldBlock, +) +from .._core._testing import TaskInfo, get_current_task +from ..abc import Event, ObjectReceiveStream, ObjectSendStream +from ..lowlevel import checkpoint + +T_Item = TypeVar("T_Item") +T_co = TypeVar("T_co", covariant=True) +T_contra = TypeVar("T_contra", contravariant=True) + + +class MemoryObjectStreamStatistics(NamedTuple): + current_buffer_used: int #: number of items stored in the buffer + #: maximum number of items that can be stored on this stream (or :data:`math.inf`) + max_buffer_size: float + open_send_streams: int #: number of unclosed clones of the send stream + open_receive_streams: int #: number of unclosed clones of the receive stream + #: number of tasks blocked on :meth:`MemoryObjectSendStream.send` + tasks_waiting_send: int + #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive` + tasks_waiting_receive: int + + +@dataclass(eq=False) +class MemoryObjectItemReceiver(Generic[T_Item]): + task_info: TaskInfo = field(init=False, default_factory=get_current_task) + item: T_Item = field(init=False) + + def __repr__(self) -> str: + # When item is not defined, we get following error with default __repr__: + # AttributeError: 'MemoryObjectItemReceiver' object has no attribute 'item' + item = getattr(self, "item", None) + return f"{self.__class__.__name__}(task_info={self.task_info}, item={item!r})" + + +@dataclass(eq=False) +class MemoryObjectStreamState(Generic[T_Item]): + max_buffer_size: float = field() + buffer: deque[T_Item] = field(init=False, default_factory=deque) + open_send_channels: int = field(init=False, default=0) + open_receive_channels: int = field(init=False, default=0) + waiting_receivers: OrderedDict[Event, MemoryObjectItemReceiver[T_Item]] = field( + init=False, default_factory=OrderedDict + ) + waiting_senders: OrderedDict[Event, T_Item] = field( + init=False, default_factory=OrderedDict + ) + + def statistics(self) -> MemoryObjectStreamStatistics: + return MemoryObjectStreamStatistics( + len(self.buffer), + self.max_buffer_size, + self.open_send_channels, + self.open_receive_channels, + len(self.waiting_senders), + len(self.waiting_receivers), + ) + + +@dataclass(eq=False) +class MemoryObjectReceiveStream(Generic[T_co], ObjectReceiveStream[T_co]): + _state: MemoryObjectStreamState[T_co] + _closed: bool = field(init=False, default=False) + + def __post_init__(self) -> None: + self._state.open_receive_channels += 1 + + def receive_nowait(self) -> T_co: + """ + Receive 
the next item if it can be done without waiting. + + :return: the received item + :raises ~anyio.ClosedResourceError: if this send stream has been closed + :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been + closed from the sending end + :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks + waiting to send + + """ + if self._closed: + raise ClosedResourceError + + if self._state.waiting_senders: + # Get the item from the next sender + send_event, item = self._state.waiting_senders.popitem(last=False) + self._state.buffer.append(item) + send_event.set() + + if self._state.buffer: + return self._state.buffer.popleft() + elif not self._state.open_send_channels: + raise EndOfStream + + raise WouldBlock + + async def receive(self) -> T_co: + await checkpoint() + try: + return self.receive_nowait() + except WouldBlock: + # Add ourselves in the queue + receive_event = Event() + receiver = MemoryObjectItemReceiver[T_co]() + self._state.waiting_receivers[receive_event] = receiver + + try: + await receive_event.wait() + finally: + self._state.waiting_receivers.pop(receive_event, None) + + try: + return receiver.item + except AttributeError: + raise EndOfStream from None + + def clone(self) -> MemoryObjectReceiveStream[T_co]: + """ + Create a clone of this receive stream. + + Each clone can be closed separately. Only when all clones have been closed will + the receiving end of the memory stream be considered closed by the sending ends. + + :return: the cloned stream + + """ + if self._closed: + raise ClosedResourceError + + return MemoryObjectReceiveStream(_state=self._state) + + def close(self) -> None: + """ + Close the stream. + + This works the exact same way as :meth:`aclose`, but is provided as a special + case for the benefit of synchronous callbacks. + + """ + if not self._closed: + self._closed = True + self._state.open_receive_channels -= 1 + if self._state.open_receive_channels == 0: + send_events = list(self._state.waiting_senders.keys()) + for event in send_events: + event.set() + + async def aclose(self) -> None: + self.close() + + def statistics(self) -> MemoryObjectStreamStatistics: + """ + Return statistics about the current state of this stream. + + .. versionadded:: 3.0 + """ + return self._state.statistics() + + def __enter__(self) -> MemoryObjectReceiveStream[T_co]: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.close() + + def __del__(self) -> None: + if not self._closed: + warnings.warn( + f"Unclosed <{self.__class__.__name__} at {id(self):x}>", + ResourceWarning, + source=self, + ) + + +@dataclass(eq=False) +class MemoryObjectSendStream(Generic[T_contra], ObjectSendStream[T_contra]): + _state: MemoryObjectStreamState[T_contra] + _closed: bool = field(init=False, default=False) + + def __post_init__(self) -> None: + self._state.open_send_channels += 1 + + def send_nowait(self, item: T_contra) -> None: + """ + Send an item immediately if it can be done without waiting. 
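+
+        For example (an illustrative sketch)::
+
+            import anyio
+
+            send, receive = anyio.create_memory_object_stream(max_buffer_size=1)
+            send.send_nowait("first")    # fits in the buffer
+            # a second send_nowait() here would raise WouldBlock
+            receive.receive_nowait()     # -> "first"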
+ + :param item: the item to send + :raises ~anyio.ClosedResourceError: if this send stream has been closed + :raises ~anyio.BrokenResourceError: if the stream has been closed from the + receiving end + :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting + to receive + + """ + if self._closed: + raise ClosedResourceError + if not self._state.open_receive_channels: + raise BrokenResourceError + + while self._state.waiting_receivers: + receive_event, receiver = self._state.waiting_receivers.popitem(last=False) + if not receiver.task_info.has_pending_cancellation(): + receiver.item = item + receive_event.set() + return + + if len(self._state.buffer) < self._state.max_buffer_size: + self._state.buffer.append(item) + else: + raise WouldBlock + + async def send(self, item: T_contra) -> None: + """ + Send an item to the stream. + + If the buffer is full, this method blocks until there is again room in the + buffer or the item can be sent directly to a receiver. + + :param item: the item to send + :raises ~anyio.ClosedResourceError: if this send stream has been closed + :raises ~anyio.BrokenResourceError: if the stream has been closed from the + receiving end + + """ + await checkpoint() + try: + self.send_nowait(item) + except WouldBlock: + # Wait until there's someone on the receiving end + send_event = Event() + self._state.waiting_senders[send_event] = item + try: + await send_event.wait() + except BaseException: + self._state.waiting_senders.pop(send_event, None) + raise + + if send_event in self._state.waiting_senders: + del self._state.waiting_senders[send_event] + raise BrokenResourceError from None + + def clone(self) -> MemoryObjectSendStream[T_contra]: + """ + Create a clone of this send stream. + + Each clone can be closed separately. Only when all clones have been closed will + the sending end of the memory stream be considered closed by the receiving ends. + + :return: the cloned stream + + """ + if self._closed: + raise ClosedResourceError + + return MemoryObjectSendStream(_state=self._state) + + def close(self) -> None: + """ + Close the stream. + + This works the exact same way as :meth:`aclose`, but is provided as a special + case for the benefit of synchronous callbacks. + + """ + if not self._closed: + self._closed = True + self._state.open_send_channels -= 1 + if self._state.open_send_channels == 0: + receive_events = list(self._state.waiting_receivers.keys()) + self._state.waiting_receivers.clear() + for event in receive_events: + event.set() + + async def aclose(self) -> None: + self.close() + + def statistics(self) -> MemoryObjectStreamStatistics: + """ + Return statistics about the current state of this stream. + + .. 
versionadded:: 3.0 + """ + return self._state.statistics() + + def __enter__(self) -> MemoryObjectSendStream[T_contra]: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.close() + + def __del__(self) -> None: + if not self._closed: + warnings.warn( + f"Unclosed <{self.__class__.__name__} at {id(self):x}>", + ResourceWarning, + source=self, + ) diff --git a/venv/Lib/site-packages/anyio/streams/stapled.py b/venv/Lib/site-packages/anyio/streams/stapled.py new file mode 100644 index 00000000..80f64a2e --- /dev/null +++ b/venv/Lib/site-packages/anyio/streams/stapled.py @@ -0,0 +1,141 @@ +from __future__ import annotations + +from collections.abc import Callable, Mapping, Sequence +from dataclasses import dataclass +from typing import Any, Generic, TypeVar + +from ..abc import ( + ByteReceiveStream, + ByteSendStream, + ByteStream, + Listener, + ObjectReceiveStream, + ObjectSendStream, + ObjectStream, + TaskGroup, +) + +T_Item = TypeVar("T_Item") +T_Stream = TypeVar("T_Stream") + + +@dataclass(eq=False) +class StapledByteStream(ByteStream): + """ + Combines two byte streams into a single, bidirectional byte stream. + + Extra attributes will be provided from both streams, with the receive stream + providing the values in case of a conflict. + + :param ByteSendStream send_stream: the sending byte stream + :param ByteReceiveStream receive_stream: the receiving byte stream + """ + + send_stream: ByteSendStream + receive_stream: ByteReceiveStream + + async def receive(self, max_bytes: int = 65536) -> bytes: + return await self.receive_stream.receive(max_bytes) + + async def send(self, item: bytes) -> None: + await self.send_stream.send(item) + + async def send_eof(self) -> None: + await self.send_stream.aclose() + + async def aclose(self) -> None: + await self.send_stream.aclose() + await self.receive_stream.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return { + **self.send_stream.extra_attributes, + **self.receive_stream.extra_attributes, + } + + +@dataclass(eq=False) +class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]): + """ + Combines two object streams into a single, bidirectional object stream. + + Extra attributes will be provided from both streams, with the receive stream + providing the values in case of a conflict. + + :param ObjectSendStream send_stream: the sending object stream + :param ObjectReceiveStream receive_stream: the receiving object stream + """ + + send_stream: ObjectSendStream[T_Item] + receive_stream: ObjectReceiveStream[T_Item] + + async def receive(self) -> T_Item: + return await self.receive_stream.receive() + + async def send(self, item: T_Item) -> None: + await self.send_stream.send(item) + + async def send_eof(self) -> None: + await self.send_stream.aclose() + + async def aclose(self) -> None: + await self.send_stream.aclose() + await self.receive_stream.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return { + **self.send_stream.extra_attributes, + **self.receive_stream.extra_attributes, + } + + +@dataclass(eq=False) +class MultiListener(Generic[T_Stream], Listener[T_Stream]): + """ + Combines multiple listeners into one, serving connections from all of them at once. + + Any MultiListeners in the given collection of listeners will have their listeners + moved into this one. 
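+
+    A sketch of serving a single handler across several listeners (the two
+    listeners and the ``handle_connection`` callback are assumed to exist)::
+
+        multi = MultiListener([listener_a, listener_b])
+        await multi.serve(handle_connection)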
+ + Extra attributes are provided from each listener, with each successive listener + overriding any conflicting attributes from the previous one. + + :param listeners: listeners to serve + :type listeners: Sequence[Listener[T_Stream]] + """ + + listeners: Sequence[Listener[T_Stream]] + + def __post_init__(self) -> None: + listeners: list[Listener[T_Stream]] = [] + for listener in self.listeners: + if isinstance(listener, MultiListener): + listeners.extend(listener.listeners) + del listener.listeners[:] # type: ignore[attr-defined] + else: + listeners.append(listener) + + self.listeners = listeners + + async def serve( + self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None + ) -> None: + from .. import create_task_group + + async with create_task_group() as tg: + for listener in self.listeners: + tg.start_soon(listener.serve, handler, task_group) + + async def aclose(self) -> None: + for listener in self.listeners: + await listener.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + attributes: dict = {} + for listener in self.listeners: + attributes.update(listener.extra_attributes) + + return attributes diff --git a/venv/Lib/site-packages/anyio/streams/text.py b/venv/Lib/site-packages/anyio/streams/text.py new file mode 100644 index 00000000..f1a11278 --- /dev/null +++ b/venv/Lib/site-packages/anyio/streams/text.py @@ -0,0 +1,147 @@ +from __future__ import annotations + +import codecs +from collections.abc import Callable, Mapping +from dataclasses import InitVar, dataclass, field +from typing import Any + +from ..abc import ( + AnyByteReceiveStream, + AnyByteSendStream, + AnyByteStream, + ObjectReceiveStream, + ObjectSendStream, + ObjectStream, +) + + +@dataclass(eq=False) +class TextReceiveStream(ObjectReceiveStream[str]): + """ + Stream wrapper that decodes bytes to strings using the given encoding. + + Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any + completely received unicode characters as soon as they come in. + + :param transport_stream: any bytes-based receive stream + :param encoding: character encoding to use for decoding bytes to strings (defaults + to ``utf-8``) + :param errors: handling scheme for decoding errors (defaults to ``strict``; see the + `codecs module documentation`_ for a comprehensive list of options) + + .. _codecs module documentation: + https://docs.python.org/3/library/codecs.html#codec-objects + """ + + transport_stream: AnyByteReceiveStream + encoding: InitVar[str] = "utf-8" + errors: InitVar[str] = "strict" + _decoder: codecs.IncrementalDecoder = field(init=False) + + def __post_init__(self, encoding: str, errors: str) -> None: + decoder_class = codecs.getincrementaldecoder(encoding) + self._decoder = decoder_class(errors=errors) + + async def receive(self) -> str: + while True: + chunk = await self.transport_stream.receive() + decoded = self._decoder.decode(chunk) + if decoded: + return decoded + + async def aclose(self) -> None: + await self.transport_stream.aclose() + self._decoder.reset() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return self.transport_stream.extra_attributes + + +@dataclass(eq=False) +class TextSendStream(ObjectSendStream[str]): + """ + Sends strings to the wrapped stream as bytes using the given encoding. 
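+
+    For example (a sketch; ``byte_stream`` stands in for any existing
+    bytes-based send stream)::
+
+        text_stream = TextSendStream(byte_stream, encoding="utf-8")
+        await text_stream.send("hello")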
+ + :param AnyByteSendStream transport_stream: any bytes-based send stream + :param str encoding: character encoding to use for encoding strings to bytes + (defaults to ``utf-8``) + :param str errors: handling scheme for encoding errors (defaults to ``strict``; see + the `codecs module documentation`_ for a comprehensive list of options) + + .. _codecs module documentation: + https://docs.python.org/3/library/codecs.html#codec-objects + """ + + transport_stream: AnyByteSendStream + encoding: InitVar[str] = "utf-8" + errors: str = "strict" + _encoder: Callable[..., tuple[bytes, int]] = field(init=False) + + def __post_init__(self, encoding: str) -> None: + self._encoder = codecs.getencoder(encoding) + + async def send(self, item: str) -> None: + encoded = self._encoder(item, self.errors)[0] + await self.transport_stream.send(encoded) + + async def aclose(self) -> None: + await self.transport_stream.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return self.transport_stream.extra_attributes + + +@dataclass(eq=False) +class TextStream(ObjectStream[str]): + """ + A bidirectional stream that decodes bytes to strings on receive and encodes strings + to bytes on send. + + Extra attributes will be provided from both streams, with the receive stream + providing the values in case of a conflict. + + :param AnyByteStream transport_stream: any bytes-based stream + :param str encoding: character encoding to use for encoding/decoding strings to/from + bytes (defaults to ``utf-8``) + :param str errors: handling scheme for encoding errors (defaults to ``strict``; see + the `codecs module documentation`_ for a comprehensive list of options) + + .. _codecs module documentation: + https://docs.python.org/3/library/codecs.html#codec-objects + """ + + transport_stream: AnyByteStream + encoding: InitVar[str] = "utf-8" + errors: InitVar[str] = "strict" + _receive_stream: TextReceiveStream = field(init=False) + _send_stream: TextSendStream = field(init=False) + + def __post_init__(self, encoding: str, errors: str) -> None: + self._receive_stream = TextReceiveStream( + self.transport_stream, encoding=encoding, errors=errors + ) + self._send_stream = TextSendStream( + self.transport_stream, encoding=encoding, errors=errors + ) + + async def receive(self) -> str: + return await self._receive_stream.receive() + + async def send(self, item: str) -> None: + await self._send_stream.send(item) + + async def send_eof(self) -> None: + await self.transport_stream.send_eof() + + async def aclose(self) -> None: + await self._send_stream.aclose() + await self._receive_stream.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return { + **self._send_stream.extra_attributes, + **self._receive_stream.extra_attributes, + } diff --git a/venv/Lib/site-packages/anyio/streams/tls.py b/venv/Lib/site-packages/anyio/streams/tls.py new file mode 100644 index 00000000..70a41cc7 --- /dev/null +++ b/venv/Lib/site-packages/anyio/streams/tls.py @@ -0,0 +1,352 @@ +from __future__ import annotations + +import logging +import re +import ssl +import sys +from collections.abc import Callable, Mapping +from dataclasses import dataclass +from functools import wraps +from typing import Any, TypeVar + +from .. 
import (
+    BrokenResourceError,
+    EndOfStream,
+    aclose_forcefully,
+    get_cancelled_exc_class,
+    to_thread,
+)
+from .._core._typedattr import TypedAttributeSet, typed_attribute
+from ..abc import AnyByteStream, ByteStream, Listener, TaskGroup
+
+if sys.version_info >= (3, 11):
+    from typing import TypeVarTuple, Unpack
+else:
+    from typing_extensions import TypeVarTuple, Unpack
+
+T_Retval = TypeVar("T_Retval")
+PosArgsT = TypeVarTuple("PosArgsT")
+_PCTRTT = tuple[tuple[str, str], ...]
+_PCTRTTT = tuple[_PCTRTT, ...]
+
+
+class TLSAttribute(TypedAttributeSet):
+    """Contains Transport Layer Security related attributes."""
+
+    #: the selected ALPN protocol
+    alpn_protocol: str | None = typed_attribute()
+    #: the channel binding for type ``tls-unique``
+    channel_binding_tls_unique: bytes = typed_attribute()
+    #: the selected cipher
+    cipher: tuple[str, str, int] = typed_attribute()
+    #: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert`
+    #: for more information)
+    peer_certificate: None | (dict[str, str | _PCTRTTT | _PCTRTT]) = typed_attribute()
+    #: the peer certificate in binary form
+    peer_certificate_binary: bytes | None = typed_attribute()
+    #: ``True`` if this is the server side of the connection
+    server_side: bool = typed_attribute()
+    #: ciphers shared by the client during the TLS handshake (``None`` if this is the
+    #: client side)
+    shared_ciphers: list[tuple[str, str, int]] | None = typed_attribute()
+    #: the :class:`~ssl.SSLObject` used for encryption
+    ssl_object: ssl.SSLObject = typed_attribute()
+    #: ``True`` if this stream does (and expects) a closing TLS handshake when the
+    #: stream is being closed
+    standard_compatible: bool = typed_attribute()
+    #: the TLS protocol version (e.g. ``TLSv1.2``)
+    tls_version: str = typed_attribute()
+
+
+@dataclass(eq=False)
+class TLSStream(ByteStream):
+    """
+    A stream wrapper that encrypts all sent data and decrypts received data.
+
+    This class has no public initializer; use :meth:`wrap` instead.
+    All extra attributes from :class:`~TLSAttribute` are supported.
+
+    :var AnyByteStream transport_stream: the wrapped stream
+
+    """
+
+    transport_stream: AnyByteStream
+    standard_compatible: bool
+    _ssl_object: ssl.SSLObject
+    _read_bio: ssl.MemoryBIO
+    _write_bio: ssl.MemoryBIO
+
+    @classmethod
+    async def wrap(
+        cls,
+        transport_stream: AnyByteStream,
+        *,
+        server_side: bool | None = None,
+        hostname: str | None = None,
+        ssl_context: ssl.SSLContext | None = None,
+        standard_compatible: bool = True,
+    ) -> TLSStream:
+        """
+        Wrap an existing stream with Transport Layer Security.
+
+        This performs a TLS handshake with the peer.
+
+        :param transport_stream: a bytes-transporting stream to wrap
+        :param server_side: ``True`` if this is the server side of the connection,
+            ``False`` if this is the client side (if omitted, will be set to ``False``
+            if ``hostname`` has been provided, ``True`` otherwise). Used only to create
+            a default context when an explicit context has not been provided.
+ :param hostname: host name of the peer (if host name checking is desired) + :param ssl_context: the SSLContext object to use (if not provided, a secure + default will be created) + :param standard_compatible: if ``False``, skip the closing handshake when + closing the connection, and don't raise an exception if the peer does the + same + :raises ~ssl.SSLError: if the TLS handshake fails + + """ + if server_side is None: + server_side = not hostname + + if not ssl_context: + purpose = ( + ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH + ) + ssl_context = ssl.create_default_context(purpose) + + # Re-enable detection of unexpected EOFs if it was disabled by Python + if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"): + ssl_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF + + bio_in = ssl.MemoryBIO() + bio_out = ssl.MemoryBIO() + + # External SSLContext implementations may do blocking I/O in wrap_bio(), + # but the standard library implementation won't + if type(ssl_context) is ssl.SSLContext: + ssl_object = ssl_context.wrap_bio( + bio_in, bio_out, server_side=server_side, server_hostname=hostname + ) + else: + ssl_object = await to_thread.run_sync( + ssl_context.wrap_bio, + bio_in, + bio_out, + server_side, + hostname, + None, + ) + + wrapper = cls( + transport_stream=transport_stream, + standard_compatible=standard_compatible, + _ssl_object=ssl_object, + _read_bio=bio_in, + _write_bio=bio_out, + ) + await wrapper._call_sslobject_method(ssl_object.do_handshake) + return wrapper + + async def _call_sslobject_method( + self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT] + ) -> T_Retval: + while True: + try: + result = func(*args) + except ssl.SSLWantReadError: + try: + # Flush any pending writes first + if self._write_bio.pending: + await self.transport_stream.send(self._write_bio.read()) + + data = await self.transport_stream.receive() + except EndOfStream: + self._read_bio.write_eof() + except OSError as exc: + self._read_bio.write_eof() + self._write_bio.write_eof() + raise BrokenResourceError from exc + else: + self._read_bio.write(data) + except ssl.SSLWantWriteError: + await self.transport_stream.send(self._write_bio.read()) + except ssl.SSLSyscallError as exc: + self._read_bio.write_eof() + self._write_bio.write_eof() + raise BrokenResourceError from exc + except ssl.SSLError as exc: + self._read_bio.write_eof() + self._write_bio.write_eof() + if isinstance(exc, ssl.SSLEOFError) or ( + exc.strerror and "UNEXPECTED_EOF_WHILE_READING" in exc.strerror + ): + if self.standard_compatible: + raise BrokenResourceError from exc + else: + raise EndOfStream from None + + raise + else: + # Flush any pending writes first + if self._write_bio.pending: + await self.transport_stream.send(self._write_bio.read()) + + return result + + async def unwrap(self) -> tuple[AnyByteStream, bytes]: + """ + Does the TLS closing handshake. 
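+
+        A sketch of downgrading back to the plaintext transport (``tls_stream``
+        standing in for an instance of this class)::
+
+            transport, leftover = await tls_stream.unwrap()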
+ + :return: a tuple of (wrapped byte stream, bytes left in the read buffer) + + """ + await self._call_sslobject_method(self._ssl_object.unwrap) + self._read_bio.write_eof() + self._write_bio.write_eof() + return self.transport_stream, self._read_bio.read() + + async def aclose(self) -> None: + if self.standard_compatible: + try: + await self.unwrap() + except BaseException: + await aclose_forcefully(self.transport_stream) + raise + + await self.transport_stream.aclose() + + async def receive(self, max_bytes: int = 65536) -> bytes: + data = await self._call_sslobject_method(self._ssl_object.read, max_bytes) + if not data: + raise EndOfStream + + return data + + async def send(self, item: bytes) -> None: + await self._call_sslobject_method(self._ssl_object.write, item) + + async def send_eof(self) -> None: + tls_version = self.extra(TLSAttribute.tls_version) + match = re.match(r"TLSv(\d+)(?:\.(\d+))?", tls_version) + if match: + major, minor = int(match.group(1)), int(match.group(2) or 0) + if (major, minor) < (1, 3): + raise NotImplementedError( + f"send_eof() requires at least TLSv1.3; current " + f"session uses {tls_version}" + ) + + raise NotImplementedError( + "send_eof() has not yet been implemented for TLS streams" + ) + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return { + **self.transport_stream.extra_attributes, + TLSAttribute.alpn_protocol: self._ssl_object.selected_alpn_protocol, + TLSAttribute.channel_binding_tls_unique: ( + self._ssl_object.get_channel_binding + ), + TLSAttribute.cipher: self._ssl_object.cipher, + TLSAttribute.peer_certificate: lambda: self._ssl_object.getpeercert(False), + TLSAttribute.peer_certificate_binary: lambda: self._ssl_object.getpeercert( + True + ), + TLSAttribute.server_side: lambda: self._ssl_object.server_side, + TLSAttribute.shared_ciphers: lambda: self._ssl_object.shared_ciphers() + if self._ssl_object.server_side + else None, + TLSAttribute.standard_compatible: lambda: self.standard_compatible, + TLSAttribute.ssl_object: lambda: self._ssl_object, + TLSAttribute.tls_version: self._ssl_object.version, + } + + +@dataclass(eq=False) +class TLSListener(Listener[TLSStream]): + """ + A convenience listener that wraps another listener and auto-negotiates a TLS session + on every accepted connection. + + If the TLS handshake times out or raises an exception, + :meth:`handle_handshake_error` is called to do whatever post-mortem processing is + deemed necessary. + + Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute. + + :param Listener listener: the listener to wrap + :param ssl_context: the SSL context object + :param standard_compatible: a flag passed through to :meth:`TLSStream.wrap` + :param handshake_timeout: time limit for the TLS handshake + (passed to :func:`~anyio.fail_after`) + """ + + listener: Listener[Any] + ssl_context: ssl.SSLContext + standard_compatible: bool = True + handshake_timeout: float = 30 + + @staticmethod + async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None: + """ + Handle an exception raised during the TLS handshake. + + This method does 3 things: + + #. Forcefully closes the original stream + #. Logs the exception (unless it was a cancellation exception) using the + ``anyio.streams.tls`` logger + #. 
Reraises the exception if it was a base exception or a cancellation exception + + :param exc: the exception + :param stream: the original stream + + """ + await aclose_forcefully(stream) + + # Log all except cancellation exceptions + if not isinstance(exc, get_cancelled_exc_class()): + # CPython (as of 3.11.5) returns incorrect `sys.exc_info()` here when using + # any asyncio implementation, so we explicitly pass the exception to log + # (https://github.com/python/cpython/issues/108668). Trio does not have this + # issue because it works around the CPython bug. + logging.getLogger(__name__).exception( + "Error during TLS handshake", exc_info=exc + ) + + # Only reraise base exceptions and cancellation exceptions + if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()): + raise + + async def serve( + self, + handler: Callable[[TLSStream], Any], + task_group: TaskGroup | None = None, + ) -> None: + @wraps(handler) + async def handler_wrapper(stream: AnyByteStream) -> None: + from .. import fail_after + + try: + with fail_after(self.handshake_timeout): + wrapped_stream = await TLSStream.wrap( + stream, + ssl_context=self.ssl_context, + standard_compatible=self.standard_compatible, + ) + except BaseException as exc: + await self.handle_handshake_error(exc, stream) + else: + await handler(wrapped_stream) + + await self.listener.serve(handler_wrapper, task_group) + + async def aclose(self) -> None: + await self.listener.aclose() + + @property + def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: + return { + TLSAttribute.standard_compatible: lambda: self.standard_compatible, + } diff --git a/venv/Lib/site-packages/anyio/to_interpreter.py b/venv/Lib/site-packages/anyio/to_interpreter.py new file mode 100644 index 00000000..8a2e993a --- /dev/null +++ b/venv/Lib/site-packages/anyio/to_interpreter.py @@ -0,0 +1,218 @@ +from __future__ import annotations + +import atexit +import os +import pickle +import sys +from collections import deque +from collections.abc import Callable +from textwrap import dedent +from typing import Any, Final, TypeVar + +from . 
import current_time, to_thread +from ._core._exceptions import BrokenWorkerIntepreter +from ._core._synchronization import CapacityLimiter +from .lowlevel import RunVar + +if sys.version_info >= (3, 11): + from typing import TypeVarTuple, Unpack +else: + from typing_extensions import TypeVarTuple, Unpack + +UNBOUND: Final = 2 # I have no clue how this works, but it was used in the stdlib +FMT_UNPICKLED: Final = 0 +FMT_PICKLED: Final = 1 +DEFAULT_CPU_COUNT: Final = 8 # this is just an arbitrarily selected value +MAX_WORKER_IDLE_TIME = ( + 30 # seconds a subinterpreter can be idle before becoming eligible for pruning +) + +T_Retval = TypeVar("T_Retval") +PosArgsT = TypeVarTuple("PosArgsT") + +_idle_workers = RunVar[deque["Worker"]]("_available_workers") +_default_interpreter_limiter = RunVar[CapacityLimiter]("_default_interpreter_limiter") + + +class Worker: + _run_func = compile( + dedent(""" + import _interpqueues as queues + import _interpreters as interpreters + from pickle import loads, dumps, HIGHEST_PROTOCOL + + item = queues.get(queue_id)[0] + try: + func, args = loads(item) + retval = func(*args) + except BaseException as exc: + is_exception = True + retval = exc + else: + is_exception = False + + try: + queues.put(queue_id, (retval, is_exception), FMT_UNPICKLED, UNBOUND) + except interpreters.NotShareableError: + retval = dumps(retval, HIGHEST_PROTOCOL) + queues.put(queue_id, (retval, is_exception), FMT_PICKLED, UNBOUND) + """), + "", + "exec", + ) + + last_used: float = 0 + + _initialized: bool = False + _interpreter_id: int + _queue_id: int + + def initialize(self) -> None: + import _interpqueues as queues + import _interpreters as interpreters + + self._interpreter_id = interpreters.create() + self._queue_id = queues.create(2, FMT_UNPICKLED, UNBOUND) + self._initialized = True + interpreters.set___main___attrs( + self._interpreter_id, + { + "queue_id": self._queue_id, + "FMT_PICKLED": FMT_PICKLED, + "FMT_UNPICKLED": FMT_UNPICKLED, + "UNBOUND": UNBOUND, + }, + ) + + def destroy(self) -> None: + import _interpqueues as queues + import _interpreters as interpreters + + if self._initialized: + interpreters.destroy(self._interpreter_id) + queues.destroy(self._queue_id) + + def _call( + self, + func: Callable[..., T_Retval], + args: tuple[Any], + ) -> tuple[Any, bool]: + import _interpqueues as queues + import _interpreters as interpreters + + if not self._initialized: + self.initialize() + + payload = pickle.dumps((func, args), pickle.HIGHEST_PROTOCOL) + queues.put(self._queue_id, payload, FMT_PICKLED, UNBOUND) + + res: Any + is_exception: bool + if exc_info := interpreters.exec(self._interpreter_id, self._run_func): + raise BrokenWorkerIntepreter(exc_info) + + (res, is_exception), fmt = queues.get(self._queue_id)[:2] + if fmt == FMT_PICKLED: + res = pickle.loads(res) + + return res, is_exception + + async def call( + self, + func: Callable[..., T_Retval], + args: tuple[Any], + limiter: CapacityLimiter, + ) -> T_Retval: + result, is_exception = await to_thread.run_sync( + self._call, + func, + args, + limiter=limiter, + ) + if is_exception: + raise result + + return result + + +def _stop_workers(workers: deque[Worker]) -> None: + for worker in workers: + worker.destroy() + + workers.clear() + + +async def run_sync( + func: Callable[[Unpack[PosArgsT]], T_Retval], + *args: Unpack[PosArgsT], + limiter: CapacityLimiter | None = None, +) -> T_Retval: + """ + Call the given function with the given arguments in a subinterpreter. 
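+
+    A sketch of offloading a CPU-bound, picklable call (``math.factorial`` is
+    just an illustration)::
+
+        import math
+
+        from anyio import to_interpreter
+
+        result = await to_interpreter.run_sync(math.factorial, 1000)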
+
+    If the task waiting for its completion is cancelled, the call will still run its
+    course, but its return value (or any raised exception) will be ignored.
+
+    .. warning:: This feature is **experimental**. The upstream interpreter API has not
+        yet been finalized or thoroughly tested, so don't rely on this for anything
+        mission critical.
+
+    :param func: a callable
+    :param args: positional arguments for the callable
+    :param limiter: capacity limiter to use to limit the total amount of subinterpreters
+        running (if omitted, the default limiter is used)
+    :return: the result of the call
+    :raises BrokenWorkerIntepreter: if there's an internal error in a subinterpreter
+
+    """
+    if sys.version_info <= (3, 13):
+        raise RuntimeError("subinterpreters require at least Python 3.13")
+
+    if limiter is None:
+        limiter = current_default_interpreter_limiter()
+
+    try:
+        idle_workers = _idle_workers.get()
+    except LookupError:
+        idle_workers = deque()
+        _idle_workers.set(idle_workers)
+        atexit.register(_stop_workers, idle_workers)
+
+    async with limiter:
+        try:
+            worker = idle_workers.pop()
+        except IndexError:
+            worker = Worker()
+
+        try:
+            return await worker.call(func, args, limiter)
+        finally:
+            # Prune workers that have been idle for too long
+            now = current_time()
+            while idle_workers:
+                if now - idle_workers[0].last_used <= MAX_WORKER_IDLE_TIME:
+                    break
+
+                await to_thread.run_sync(idle_workers.popleft().destroy, limiter=limiter)
+
+            worker.last_used = current_time()
+            idle_workers.append(worker)
+
+
+def current_default_interpreter_limiter() -> CapacityLimiter:
+    """
+    Return the capacity limiter that is used by default to limit the number of
+    concurrently running subinterpreters.
+
+    Defaults to the number of CPU cores.
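+
+    The default can be adjusted by mutating the returned limiter, for example
+    (a sketch)::
+
+        current_default_interpreter_limiter().total_tokens = 4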
+ + :return: a capacity limiter object + + """ + try: + return _default_interpreter_limiter.get() + except LookupError: + limiter = CapacityLimiter(os.cpu_count() or DEFAULT_CPU_COUNT) + _default_interpreter_limiter.set(limiter) + return limiter diff --git a/venv/Lib/site-packages/anyio/to_process.py b/venv/Lib/site-packages/anyio/to_process.py new file mode 100644 index 00000000..495de2ae --- /dev/null +++ b/venv/Lib/site-packages/anyio/to_process.py @@ -0,0 +1,258 @@ +from __future__ import annotations + +import os +import pickle +import subprocess +import sys +from collections import deque +from collections.abc import Callable +from importlib.util import module_from_spec, spec_from_file_location +from typing import TypeVar, cast + +from ._core._eventloop import current_time, get_async_backend, get_cancelled_exc_class +from ._core._exceptions import BrokenWorkerProcess +from ._core._subprocesses import open_process +from ._core._synchronization import CapacityLimiter +from ._core._tasks import CancelScope, fail_after +from .abc import ByteReceiveStream, ByteSendStream, Process +from .lowlevel import RunVar, checkpoint_if_cancelled +from .streams.buffered import BufferedByteReceiveStream + +if sys.version_info >= (3, 11): + from typing import TypeVarTuple, Unpack +else: + from typing_extensions import TypeVarTuple, Unpack + +WORKER_MAX_IDLE_TIME = 300 # 5 minutes + +T_Retval = TypeVar("T_Retval") +PosArgsT = TypeVarTuple("PosArgsT") + +_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers") +_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar( + "_process_pool_idle_workers" +) +_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter") + + +async def run_sync( # type: ignore[return] + func: Callable[[Unpack[PosArgsT]], T_Retval], + *args: Unpack[PosArgsT], + cancellable: bool = False, + limiter: CapacityLimiter | None = None, +) -> T_Retval: + """ + Call the given function with the given arguments in a worker process. + + If the ``cancellable`` option is enabled and the task waiting for its completion is + cancelled, the worker process running it will be abruptly terminated using SIGKILL + (or ``terminateProcess()`` on Windows). + + :param func: a callable + :param args: positional arguments for the callable + :param cancellable: ``True`` to allow cancellation of the operation while it's + running + :param limiter: capacity limiter to use to limit the total amount of processes + running (if omitted, the default limiter is used) + :return: an awaitable that yields the return value of the function. 
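+
+    For example (a sketch; ``cpu_bound_work`` stands in for a picklable,
+    module-level function of your own)::
+
+        from anyio import to_process
+
+        result = await to_process.run_sync(cpu_bound_work, 42, cancellable=True)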
+ + """ + + async def send_raw_command(pickled_cmd: bytes) -> object: + try: + await stdin.send(pickled_cmd) + response = await buffered.receive_until(b"\n", 50) + status, length = response.split(b" ") + if status not in (b"RETURN", b"EXCEPTION"): + raise RuntimeError( + f"Worker process returned unexpected response: {response!r}" + ) + + pickled_response = await buffered.receive_exactly(int(length)) + except BaseException as exc: + workers.discard(process) + try: + process.kill() + with CancelScope(shield=True): + await process.aclose() + except ProcessLookupError: + pass + + if isinstance(exc, get_cancelled_exc_class()): + raise + else: + raise BrokenWorkerProcess from exc + + retval = pickle.loads(pickled_response) + if status == b"EXCEPTION": + assert isinstance(retval, BaseException) + raise retval + else: + return retval + + # First pickle the request before trying to reserve a worker process + await checkpoint_if_cancelled() + request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL) + + # If this is the first run in this event loop thread, set up the necessary variables + try: + workers = _process_pool_workers.get() + idle_workers = _process_pool_idle_workers.get() + except LookupError: + workers = set() + idle_workers = deque() + _process_pool_workers.set(workers) + _process_pool_idle_workers.set(idle_workers) + get_async_backend().setup_process_pool_exit_at_shutdown(workers) + + async with limiter or current_default_process_limiter(): + # Pop processes from the pool (starting from the most recently used) until we + # find one that hasn't exited yet + process: Process + while idle_workers: + process, idle_since = idle_workers.pop() + if process.returncode is None: + stdin = cast(ByteSendStream, process.stdin) + buffered = BufferedByteReceiveStream( + cast(ByteReceiveStream, process.stdout) + ) + + # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME + # seconds or longer + now = current_time() + killed_processes: list[Process] = [] + while idle_workers: + if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME: + break + + process_to_kill, idle_since = idle_workers.popleft() + process_to_kill.kill() + workers.remove(process_to_kill) + killed_processes.append(process_to_kill) + + with CancelScope(shield=True): + for killed_process in killed_processes: + await killed_process.aclose() + + break + + workers.remove(process) + else: + command = [sys.executable, "-u", "-m", __name__] + process = await open_process( + command, stdin=subprocess.PIPE, stdout=subprocess.PIPE + ) + try: + stdin = cast(ByteSendStream, process.stdin) + buffered = BufferedByteReceiveStream( + cast(ByteReceiveStream, process.stdout) + ) + with fail_after(20): + message = await buffered.receive(6) + + if message != b"READY\n": + raise BrokenWorkerProcess( + f"Worker process returned unexpected response: {message!r}" + ) + + main_module_path = getattr(sys.modules["__main__"], "__file__", None) + pickled = pickle.dumps( + ("init", sys.path, main_module_path), + protocol=pickle.HIGHEST_PROTOCOL, + ) + await send_raw_command(pickled) + except (BrokenWorkerProcess, get_cancelled_exc_class()): + raise + except BaseException as exc: + process.kill() + raise BrokenWorkerProcess( + "Error during worker process initialization" + ) from exc + + workers.add(process) + + with CancelScope(shield=not cancellable): + try: + return cast(T_Retval, await send_raw_command(request)) + finally: + if process in workers: + idle_workers.append((process, current_time())) + + +def 
current_default_process_limiter() -> CapacityLimiter: + """ + Return the capacity limiter that is used by default to limit the number of worker + processes. + + :return: a capacity limiter object + + """ + try: + return _default_process_limiter.get() + except LookupError: + limiter = CapacityLimiter(os.cpu_count() or 2) + _default_process_limiter.set(limiter) + return limiter + + +def process_worker() -> None: + # Redirect standard streams to os.devnull so that user code won't interfere with the + # parent-worker communication + stdin = sys.stdin + stdout = sys.stdout + sys.stdin = open(os.devnull) + sys.stdout = open(os.devnull, "w") + + stdout.buffer.write(b"READY\n") + while True: + retval = exception = None + try: + command, *args = pickle.load(stdin.buffer) + except EOFError: + return + except BaseException as exc: + exception = exc + else: + if command == "run": + func, args = args + try: + retval = func(*args) + except BaseException as exc: + exception = exc + elif command == "init": + main_module_path: str | None + sys.path, main_module_path = args + del sys.modules["__main__"] + if main_module_path and os.path.isfile(main_module_path): + # Load the parent's main module but as __mp_main__ instead of + # __main__ (like multiprocessing does) to avoid infinite recursion + try: + spec = spec_from_file_location("__mp_main__", main_module_path) + if spec and spec.loader: + main = module_from_spec(spec) + spec.loader.exec_module(main) + sys.modules["__main__"] = main + except BaseException as exc: + exception = exc + try: + if exception is not None: + status = b"EXCEPTION" + pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL) + else: + status = b"RETURN" + pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL) + except BaseException as exc: + exception = exc + status = b"EXCEPTION" + pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL) + + stdout.buffer.write(b"%s %d\n" % (status, len(pickled))) + stdout.buffer.write(pickled) + + # Respect SIGTERM + if isinstance(exception, SystemExit): + raise exception + + +if __name__ == "__main__": + process_worker() diff --git a/venv/Lib/site-packages/anyio/to_thread.py b/venv/Lib/site-packages/anyio/to_thread.py new file mode 100644 index 00000000..5070516e --- /dev/null +++ b/venv/Lib/site-packages/anyio/to_thread.py @@ -0,0 +1,69 @@ +from __future__ import annotations + +import sys +from collections.abc import Callable +from typing import TypeVar +from warnings import warn + +from ._core._eventloop import get_async_backend +from .abc import CapacityLimiter + +if sys.version_info >= (3, 11): + from typing import TypeVarTuple, Unpack +else: + from typing_extensions import TypeVarTuple, Unpack + +T_Retval = TypeVar("T_Retval") +PosArgsT = TypeVarTuple("PosArgsT") + + +async def run_sync( + func: Callable[[Unpack[PosArgsT]], T_Retval], + *args: Unpack[PosArgsT], + abandon_on_cancel: bool = False, + cancellable: bool | None = None, + limiter: CapacityLimiter | None = None, +) -> T_Retval: + """ + Call the given function with the given arguments in a worker thread. + + If the ``cancellable`` option is enabled and the task waiting for its completion is + cancelled, the thread will still run its course but its return value (or any raised + exception) will be ignored. 
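+
+    For example, running a blocking call off the event loop thread (a sketch)::
+
+        import time
+
+        from anyio import to_thread
+
+        await to_thread.run_sync(time.sleep, 1)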
+
+    :param func: a callable
+    :param args: positional arguments for the callable
+    :param abandon_on_cancel: ``True`` to abandon the thread (leaving it to run
+        unchecked on its own) if the host task is cancelled, ``False`` to ignore
+        cancellations in the host task until the operation has completed in the worker
+        thread
+    :param cancellable: deprecated alias of ``abandon_on_cancel``; will override
+        ``abandon_on_cancel`` if both parameters are passed
+    :param limiter: capacity limiter to use to limit the total amount of threads running
+        (if omitted, the default limiter is used)
+    :return: an awaitable that yields the return value of the function.
+
+    """
+    if cancellable is not None:
+        abandon_on_cancel = cancellable
+        warn(
+            "The `cancellable=` keyword argument to `anyio.to_thread.run_sync` is "
+            "deprecated since AnyIO 4.1.0; use `abandon_on_cancel=` instead",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+
+    return await get_async_backend().run_sync_in_worker_thread(
+        func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter
+    )
+
+
+def current_default_thread_limiter() -> CapacityLimiter:
+    """
+    Return the capacity limiter that is used by default to limit the number of
+    concurrent threads.
+
+    :return: a capacity limiter object
+
+    """
+    return get_async_backend().current_default_thread_limiter()
diff --git a/venv/Lib/site-packages/certifi-2025.4.26.dist-info/INSTALLER b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/certifi-2025.4.26.dist-info/METADATA b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/METADATA
new file mode 100644
index 00000000..bba2b699
--- /dev/null
+++ b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/METADATA
@@ -0,0 +1,78 @@
+Metadata-Version: 2.4
+Name: certifi
+Version: 2025.4.26
+Summary: Python package for providing Mozilla's CA Bundle.
+Home-page: https://github.com/certifi/python-certifi
+Author: Kenneth Reitz
+Author-email: me@kennethreitz.com
+License: MPL-2.0
+Project-URL: Source, https://github.com/certifi/python-certifi
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Python: >=3.6
+License-File: LICENSE
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: home-page
+Dynamic: license
+Dynamic: license-file
+Dynamic: project-url
+Dynamic: requires-python
+Dynamic: summary
+
+Certifi: Python SSL Certificates
+================================
+
+Certifi provides Mozilla's carefully curated collection of Root Certificates for
+validating the trustworthiness of SSL certificates while verifying the identity
+of TLS hosts. It has been extracted from the `Requests`_ project.
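+
+For example, the bundle can be handed to the standard library's ``ssl`` module
+when building a TLS context (a sketch)::
+
+    import ssl
+
+    import certifi
+
+    context = ssl.create_default_context(cafile=certifi.where())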
+ +Installation +------------ + +``certifi`` is available on PyPI. Simply install it with ``pip``:: + + $ pip install certifi + +Usage +----- + +To reference the installed certificate authority (CA) bundle, you can use the +built-in function:: + + >>> import certifi + + >>> certifi.where() + '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem' + +Or from the command line:: + + $ python -m certifi + /usr/local/lib/python3.7/site-packages/certifi/cacert.pem + +Enjoy! + +.. _`Requests`: https://requests.readthedocs.io/en/master/ + +Addition/Removal of Certificates +-------------------------------- + +Certifi does not support any addition/removal or other modification of the +CA trust store content. This project is intended to provide a reliable and +highly portable root of trust to python deployments. Look to upstream projects +for methods to use alternate trust. diff --git a/venv/Lib/site-packages/certifi-2025.4.26.dist-info/RECORD b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/RECORD new file mode 100644 index 00000000..715a9b5a --- /dev/null +++ b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/RECORD @@ -0,0 +1,14 @@ +certifi-2025.4.26.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +certifi-2025.4.26.dist-info/METADATA,sha256=Q1SDFkY5LOQAJmDltZz2wU3VTv1Kh5X-rjGI4KiPHNM,2473 +certifi-2025.4.26.dist-info/RECORD,, +certifi-2025.4.26.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91 +certifi-2025.4.26.dist-info/licenses/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 +certifi-2025.4.26.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8 +certifi/__init__.py,sha256=9pyWUGr6sbAlksfOHo0BTV0Gxljjh4IK1kXAjHgjL4I,94 +certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243 +certifi/__pycache__/__init__.cpython-312.pyc,, +certifi/__pycache__/__main__.cpython-312.pyc,, +certifi/__pycache__/core.cpython-312.pyc,, +certifi/cacert.pem,sha256=K3sQJvGKKX4hSBicoMvn0-f578NvcjHMwoIKQE_rVZY,283771 +certifi/core.py,sha256=qRDDFyXVJwTB_EmoGppaXU_R9qCZvhl-EzxPMuV3nTA,4426 +certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/Lib/site-packages/certifi-2025.4.26.dist-info/WHEEL b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/WHEEL new file mode 100644 index 00000000..8acb9559 --- /dev/null +++ b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (79.0.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/certifi-2025.4.26.dist-info/licenses/LICENSE b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/licenses/LICENSE new file mode 100644 index 00000000..62b076cd --- /dev/null +++ b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/licenses/LICENSE @@ -0,0 +1,20 @@ +This package contains a modified version of ca-bundle.crt: + +ca-bundle.crt -- Bundle of CA Root Certificates + +This is a bundle of X.509 certificates of public Certificate Authorities +(CA). These were automatically extracted from Mozilla's root certificates +file (certdata.txt). This file can be found in the mozilla source tree: +https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt +It contains the certificates in PEM format and therefore +can be directly used with curl / libcurl / php_curl, or with +an Apache+mod_ssl webserver for SSL client authentication. 
+Just configure this file as the SSLCACertificateFile.# + +***** BEGIN LICENSE BLOCK ***** +This Source Code Form is subject to the terms of the Mozilla Public License, +v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain +one at http://mozilla.org/MPL/2.0/. + +***** END LICENSE BLOCK ***** +@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $ diff --git a/venv/Lib/site-packages/certifi-2025.4.26.dist-info/top_level.txt b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/top_level.txt new file mode 100644 index 00000000..963eac53 --- /dev/null +++ b/venv/Lib/site-packages/certifi-2025.4.26.dist-info/top_level.txt @@ -0,0 +1 @@ +certifi diff --git a/venv/Lib/site-packages/certifi/__init__.py b/venv/Lib/site-packages/certifi/__init__.py new file mode 100644 index 00000000..bf83fa93 --- /dev/null +++ b/venv/Lib/site-packages/certifi/__init__.py @@ -0,0 +1,4 @@ +from .core import contents, where + +__all__ = ["contents", "where"] +__version__ = "2025.04.26" diff --git a/venv/Lib/site-packages/certifi/__main__.py b/venv/Lib/site-packages/certifi/__main__.py new file mode 100644 index 00000000..8945b5da --- /dev/null +++ b/venv/Lib/site-packages/certifi/__main__.py @@ -0,0 +1,12 @@ +import argparse + +from certifi import contents, where + +parser = argparse.ArgumentParser() +parser.add_argument("-c", "--contents", action="store_true") +args = parser.parse_args() + +if args.contents: + print(contents()) +else: + print(where()) diff --git a/venv/Lib/site-packages/certifi/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/certifi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..55b6e497 Binary files /dev/null and b/venv/Lib/site-packages/certifi/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/certifi/__pycache__/__main__.cpython-312.pyc b/venv/Lib/site-packages/certifi/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 00000000..86ff43c4 Binary files /dev/null and b/venv/Lib/site-packages/certifi/__pycache__/__main__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/certifi/__pycache__/core.cpython-312.pyc b/venv/Lib/site-packages/certifi/__pycache__/core.cpython-312.pyc new file mode 100644 index 00000000..53e4a873 Binary files /dev/null and b/venv/Lib/site-packages/certifi/__pycache__/core.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/certifi/cacert.pem b/venv/Lib/site-packages/certifi/cacert.pem new file mode 100644 index 00000000..b1d0cfd8 --- /dev/null +++ b/venv/Lib/site-packages/certifi/cacert.pem @@ -0,0 +1,4676 @@ + +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. 
+# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2" +# Serial: 1289 +# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b +# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7 +# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud 
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3" +# Serial: 1478 +# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf +# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 +# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s 
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR 
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Label: "SwissSign Gold CA - G2" +# Serial: 13492815561806991280 +# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 +# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 +# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd 
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
+MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
+MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
+FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
+aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
+gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
+qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
+lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
+8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
+45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
+O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
+77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
+hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
+Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
+ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
+Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
+# Subject: CN=SecureTrust CA O=SecureTrust Corporation
+# Label: "SecureTrust CA"
+# Serial: 17199774589125277788362757014266862032
+# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
+# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
+# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Secure Global CA O=SecureTrust Corporation
+# Subject: CN=Secure Global CA O=SecureTrust Corporation
+# Label: "Secure Global CA"
+# Serial: 9751836167731051554232119481456978597
+# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
+# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
+# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
+MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
+Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
+iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
+/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
+jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
+HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
+sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
+gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
+KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
+AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
+URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
+H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
+I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
+# Label: "COMODO Certification Authority"
+# Serial: 104350513648249232941998508985834464573
+# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
+# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
+# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Label: "COMODO ECC Certification Authority"
+# Serial: 41578283867086692638256921589707938090
+# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
+# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
+# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna O=Dhimyotis
+# Subject: CN=Certigna O=Dhimyotis
+# Label: "Certigna"
+# Serial: 18364802974209362175
+# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
+# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
+# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
+DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
+BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
+zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
+130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
+JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
+ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
+AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
+9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
+bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
+fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
+HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
+t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
+-----END CERTIFICATE-----
+
+# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Label: "ePKI Root Certification Authority"
+# Serial: 28956088682735189655030529057352760477
+# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
+# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
+# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
+IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
+SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
+SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
+DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
+TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
+fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
+sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
+WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
+nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
+dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
+NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
+AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
+MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
+uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
+PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
+JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
+j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
+5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
+o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
+/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny" +# Serial: 80544274841616 +# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88 +# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91 +# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98 +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. 
+# Label: "Microsec e-Szigno Root CA 2009" +# Serial: 14014712776195784473 +# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 +# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e +# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + 
+# Issuer: CN=Izenpe.com O=IZENPE S.A. +# Subject: CN=Izenpe.com O=IZENPE S.A. +# Label: "Izenpe.com" +# Serial: 917563065490389241595536686991402621 +# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 +# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 +# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
+# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
+# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
+# Subject: CN=AffirmTrust Commercial O=AffirmTrust
+# Label: "AffirmTrust Commercial"
+# Serial: 8608355977964138876
+# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
+# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
+# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Networking O=AffirmTrust
+# Subject: CN=AffirmTrust Networking O=AffirmTrust
+# Label: "AffirmTrust Networking"
+# Serial: 8957382827206547757
+# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
+# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
+# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium O=AffirmTrust
+# Subject: CN=AffirmTrust Premium O=AffirmTrust
+# Label: "AffirmTrust Premium"
+# Serial: 7893706540734352110
+# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
+# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
+# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Label: "AffirmTrust Premium ECC"
+# Serial: 8401224907861490260
+# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
+# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
+# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA"
+# Serial: 279744
+# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
+# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
+# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Root Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
+# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
+# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
+MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
+V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
+WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
+AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
+K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
+RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
+rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
+3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
+hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
+MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
+XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
+aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
+YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Label: "Security Communication RootCA2"
+# Serial: 0
+# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
+# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
+# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
+DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
+dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
+OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
+zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
+VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
+hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
+ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
+awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
+OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
+coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
+okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
+t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
+1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
+SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+
+# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Label: "Actalis Authentication Root CA"
+# Serial: 6271844772424770508
+# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
+# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
+# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
+SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
+ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
+UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
+4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
+KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
+gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
+rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
+51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
+be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
+KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
+v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
+fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
+ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
+e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- 
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ 
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw 
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
+HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
+Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
+c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
+RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
+dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
+Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
+3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
+nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
+xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
+KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA Disig Root R2 O=Disig a.s.
+# Subject: CN=CA Disig Root R2 O=Disig a.s.
+# Label: "CA Disig Root R2"
+# Serial: 10572350602393338211
+# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03
+# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71
+# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
+MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
+NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
+PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
+QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
+yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
+QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
+H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
+QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
+i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
+nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
+rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
+hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
+tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
+GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
+lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
+TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
+nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
+gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
+G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
+L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
+-----END CERTIFICATE-----
+
+# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Label: "ACCVRAIZ1"
+# Serial: 6828503384748696800
+# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02
+# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17
+# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13
+-----BEGIN CERTIFICATE-----
+MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
+AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
+CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
+BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
+VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
+qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
+HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
+G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
+lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
+IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
+0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
+k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
+4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
+m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
+cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
+uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
+KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
+ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
+AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
+VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
+VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
+CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
+cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
+QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
+7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
+cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
+QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
+czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
+aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
+aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
+DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
+BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
+D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
+JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
+AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
+vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
+tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
+7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
+I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
+h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
+d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
+pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Global Root CA"
+# Serial: 3262
+# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96
+# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65
+# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
+EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx 
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ 
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
+cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
+eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
+A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
+DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
+vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
+DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
+maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
+lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
+KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 1 G3"
+# Serial: 687049649626669250736271037606554624078720034195
+# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab
+# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67
+# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
+MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
+rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
+68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
+4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
+UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
+abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
+3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
+KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
+hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
+Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
+zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
+ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
+MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
+cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
+qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
+YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
+b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
+8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
+NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
+ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
+q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
+nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2 G3"
+# Serial: 390156079458959257446133169266079962026824725800
+# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06
+# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36
+# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3 G3"
+# Serial: 268090761170461462463995952157327242137089239581
+# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7
+# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d
+# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
+MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
+/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
+FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
+U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
+ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
+FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
+A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
+eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
+sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
+VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg 
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO 
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G 
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT 
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc 
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN 
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
+MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
+ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
+RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
+bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
+/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
+3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
+9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
+WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G2"
+# Serial: 1246989352
+# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
+# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
+# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - EC1"
+# Serial: 51543124481930649114116133369
+# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
+# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
+# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Label: "CFCA EV ROOT"
+# Serial: 407555286
+# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
+# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
+# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. 
+# Label: "SZAFIR ROOT CA2" +# Serial: 357043034767186914217277344587386743377558296292 +# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 +# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de +# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA 2" +# Serial: 44979900017204383099463764357512596969 +# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 +# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 +# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce +# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 +# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 +AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef +# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 +# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +# Issuer: CN=ISRG Root X1 O=Internet Security Research Group +# Subject: CN=ISRG Root X1 O=Internet Security Research Group +# Label: "ISRG Root X1" +# Serial: 172886928669790476064670243504169061120 +# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e +# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8 +# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6 +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc 
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Label: "AC RAIZ FNMT-RCM" +# Serial: 485876308206448804701554682760554759 +# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d +# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 +# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx +CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp +1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 1 O=Amazon +# Subject: CN=Amazon Root CA 1 O=Amazon +# Label: "Amazon Root CA 1" +# Serial: 143266978916655856878034712317230054538369994 +# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 +# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 +# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj 
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 2 O=Amazon +# Subject: CN=Amazon Root CA 2 O=Amazon +# Label: "Amazon Root CA 2" +# Serial: 143266982885963551818349160658925006970653239 +# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66 +# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a +# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4 +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ +fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE +76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 3 O=Amazon +# Subject: CN=Amazon Root CA 3 O=Amazon +# Label: "Amazon Root CA 3" +# Serial: 143266986699090766294700635381230934788665930 +# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87 +# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e +# SHA256 Fingerprint: 
18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4 +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 4 O=Amazon +# Subject: CN=Amazon Root CA 4 O=Amazon +# Label: "Amazon Root CA 4" +# Serial: 143266989758080763974105200630763877849284878 +# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd +# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be +# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92 +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1" +# Serial: 1 +# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49 +# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca +# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16 +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx +GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp +bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w +KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 +BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy +dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG +EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll +IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU +QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT +TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg +LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 +a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr +LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr 
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X +YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ +iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f +AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH +V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf +IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 +lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c +8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf +lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: 
b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm ++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud 
+EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq +M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC 
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Label: "GlobalSign Root CA - R6" +# Serial: 1417766617973444989252670301619537 +# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae +# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 +# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg +MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx +MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET +MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI +xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k +ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD +aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw +LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw +1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX +k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 +SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h +bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n +WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY +rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce +MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu +bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt +Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 +55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj +vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf +cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz +oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp +nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs +pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v +JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R +8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 +5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE 
WISeKey Global Root GC CA" +# Serial: 44084345621038548146064804565436152554 +# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23 +# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31 +# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d +-----BEGIN CERTIFICATE----- +MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw +CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91 +bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg +Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ +BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu +ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS +b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni +eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W +p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T +rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV +57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg +Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 +-----END CERTIFICATE----- + +# Issuer: CN=UCA Global G2 Root O=UniTrust +# Subject: CN=UCA Global G2 Root O=UniTrust +# Label: "UCA Global G2 Root" +# Serial: 124779693093741543919145257850076631279 +# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8 +# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a +# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9 +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH +bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x +CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds +b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr +b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9 +kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm +VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R +VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc +C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj +tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY +D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv +j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl +NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6 +iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP +O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV +ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj +L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 +1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl +1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU +b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV +PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj +y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb +EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg +DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI ++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy 
+YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX +UB+K+wb1whnw0A== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Extended Validation Root O=UniTrust +# Subject: CN=UCA Extended Validation Root O=UniTrust +# Label: "UCA Extended Validation Root" +# Serial: 106100277556486529736699587978573607008 +# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2 +# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a +# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF +eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx +MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV +BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog +D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS +sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop +O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk +sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi +c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj +VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz +KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/ +TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G +sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs +1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD +fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN +l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ +VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5 +c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp +4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s +t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj +2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO +vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C +xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx +cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM +fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax +-----END CERTIFICATE----- + +# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Label: "Certigna Root CA" +# Serial: 269714418870597844693661054334862075617 +# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77 +# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43 +# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68 +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw +WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw +MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x +MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD +VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX +BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO 
+ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M +CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu +I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm +TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh +C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf +ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz +IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT +Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k +JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5 +hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB +GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov +L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo +dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr +aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq +hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L +6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG +HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6 +0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB +lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi +o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1 +gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v +faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63 +Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh +jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw +3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign Root CA - G1" +# Serial: 235931866688319308814040 +# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac +# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c +# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67 +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD +VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU +ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH +MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO +MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv +Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz +f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO +8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq +d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM +tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt +Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB +o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x +PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM +wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d +GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH +6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby 
+RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx +iN66zB+Afko= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign ECC Root CA - G3" +# Serial: 287880440101571086945156 +# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40 +# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1 +# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b +-----BEGIN CERTIFICATE----- +MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG +EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo +bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g +RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ +TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s +b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0 +WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS +fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB +zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq +hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB +CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD ++JbNR6iC8hZVdyR+EhCVBCyj +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Label: "emSign Root CA - C1" +# Serial: 825510296613316004955058 +# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68 +# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01 +# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG +A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg +SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v +dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ +BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ +HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH +3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH +GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c +xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1 +aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq +TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87 +/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4 +kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG +YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT ++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo +WXzhriKi4gp6D/piq1JM4fHfyr6DDUI= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Label: "emSign ECC Root CA - C3" +# Serial: 582948710642506000014504 +# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5 +# SHA1 Fingerprint: 
b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66 +# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3 +-----BEGIN CERTIFICATE----- +MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG +EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx +IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND +IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci +MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti +sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O +BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c +3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J +0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Label: "Hongkong Post Root CA 3" +# Serial: 46170865288971385588281144162979347873371282084 +# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0 +# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02 +# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6 +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ +SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n +a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5 +NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT +CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u +Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO +dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI +VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV +9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY +2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY +vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt +bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb +x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+ +l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK +TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj +Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e +i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw +DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG +7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk +MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr +gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk +GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS +3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm +Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+ +l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c +JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP +L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa 
+LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG +mpv0 +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft ECC Root Certificate Authority 2017" +# Serial: 136839042543790627607696632466672567020 +# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67 +# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5 +# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02 +-----BEGIN CERTIFICATE----- +MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD +VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw +MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV +UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy +b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR +ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb +hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3 +FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV +L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB +iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M= +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft RSA Root Certificate Authority 2017" +# Serial: 40975477897264996090493496164228220339 +# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47 +# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74 +# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0 +-----BEGIN CERTIFICATE----- +MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl +MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw +NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG +EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N +aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ +Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0 +ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1 +HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm +gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ +jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc +aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG +YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6 +W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K +UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH ++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q +W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC +LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC 
+gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6 +tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh +SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2 +TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3 +pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR +xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp +GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9 +dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN +AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB +RA+GsCyRxj3qrg+E +-----END CERTIFICATE----- + +# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd. +# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd. +# Label: "e-Szigno Root CA 2017" +# Serial: 411379200276854331539784714 +# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98 +# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1 +# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99 +-----BEGIN CERTIFICATE----- +MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV +BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk +LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv +b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ +BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg +THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v +IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv +xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H +Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB +eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo +jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ ++efcMQ== +-----END CERTIFICATE----- + +# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Label: "certSIGN Root CA G2" +# Serial: 313609486401300475190 +# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7 +# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32 +# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV +BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g +Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ +BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ +R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF +dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw +vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ +uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp +n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs +cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW +xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P +rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF +DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx +DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy +LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C +eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ 
+d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq +kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC +b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl +qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0 +OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c +NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk +ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO +pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj +03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk +PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE +1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX +QRBdJ3NghVdJIgc= +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global Certification Authority" +# Serial: 1846098327275375458322922162 +# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e +# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5 +# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8 +-----BEGIN CERTIFICATE----- +MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw +CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x +ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1 +c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx +OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI +SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn +swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu +7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8 +1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW +80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP +JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l +RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw +hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10 +coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc +BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n +twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud +DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W +0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe +uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q +lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB +aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE +sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT +MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe +qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh +VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8 +h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9 +EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK +yeC2nOnOcXHebD8WpHk= +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. 
+# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global ECC P256 Certification Authority" +# Serial: 4151900041497450638097112925 +# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54 +# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf +# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4 +-----BEGIN CERTIFICATE----- +MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG +SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN +FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w +DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw +CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh +DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7 +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global ECC P384 Certification Authority" +# Serial: 2704997926503831671788816187 +# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6 +# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2 +# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97 +-----BEGIN CERTIFICATE----- +MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB +BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ +j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF +1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G +A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3 +AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC +MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu +Sw== +-----END CERTIFICATE----- + +# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. +# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. 
+# Label: "NAVER Global Root Certification Authority" +# Serial: 9013692873798656336226253319739695165984492813 +# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b +# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1 +# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65 +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM +BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG +T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx +CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD +b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA +iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH +38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE +HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz +kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP +szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq +vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf +nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG +YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo +0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a +CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K +AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I +36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB +Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN +qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj +cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm ++LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL +hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe +lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7 +p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8 +piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR +LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX +5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO +dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul +9XXeifdy +-----END CERTIFICATE----- + +# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS" +# Serial: 131542671362353147877283741781055151509 +# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb +# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a +# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb +-----BEGIN CERTIFICATE----- +MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw +CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw +FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S +Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5 +MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL +DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS +QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH 
+sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK +Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu +SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC +MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy +v+c= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Label: "GlobalSign Root R46" +# Serial: 1552617688466950547958867513931858518042577 +# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef +# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90 +# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA +MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD +VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy +MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt +c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ +OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG +vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud +316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo +0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE +y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF +zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE ++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN +I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs +x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa +ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC +4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4 +7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg +JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti +2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk +pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF +FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt +rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk +ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5 +u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP +4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6 +N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3 +vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6 +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Label: "GlobalSign Root E46" +# Serial: 1552617690338932563915843282459653771421763 +# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f +# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84 +# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58 +-----BEGIN CERTIFICATE----- +MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx +CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD +ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw +MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex 
+HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq +R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd +yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ +7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8 ++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A= +-----END CERTIFICATE----- + +# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Label: "GLOBALTRUST 2020" +# Serial: 109160994242082918454945253 +# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8 +# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2 +# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a +-----BEGIN CERTIFICATE----- +MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG +A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw +FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx +MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u +aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq +hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b +RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z +YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3 +QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw +yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+ +BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ +SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH +r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0 +4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me +dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw +q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2 +nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu +H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA +VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC +XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd +6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf ++I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi +kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7 +wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB +TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C +MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn +4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I +aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy +qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg== +-----END CERTIFICATE----- + +# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Label: "ANF Secure Server Root CA" +# Serial: 996390341000653745 +# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96 +# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74 +# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99 +-----BEGIN CERTIFICATE----- 
+MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV +BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk +YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV +BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN +MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF +UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD +VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v +dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj +cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q +yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH +2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX +H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL +zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR +p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz +W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/ +SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn +LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3 +n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B +u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj +o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC +AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L +9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej +rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK +pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0 +vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq +OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ +/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9 +2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI ++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2 +MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo +tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw= +-----END CERTIFICATE----- + +# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. 
OU=Certum Certification Authority +# Label: "Certum EC-384 CA" +# Serial: 160250656287871593594747141429395092468 +# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1 +# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed +# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6 +-----BEGIN CERTIFICATE----- +MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw +CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw +JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT +EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0 +WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT +LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX +BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE +KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm +Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8 +EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J +UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn +nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k= +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Root CA" +# Serial: 40870380103424195783807378461123655149 +# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29 +# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5 +# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6 +MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu +MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV +BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw +MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg +U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ +n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q +p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq +NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF +8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3 +HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa +mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi +7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF +ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P +qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ +v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6 +Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1 +vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD +ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4 +WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo +zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR +5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ +GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf 
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq +0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D +P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM +qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP +0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf +E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb +-----END CERTIFICATE----- + +# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Label: "TunTrust Root CA" +# Serial: 108534058042236574382096126452369648152337120275 +# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4 +# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb +# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41 +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL +BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg +Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv +b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG +EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u +IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ +n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd +2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF +VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ +GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF +li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU +r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2 +eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb +MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg +jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB +7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW +5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE +ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0 +90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z +xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu +QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4 +FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH +22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP +xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn +dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5 +Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b +nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ +CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH +u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj +d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS RSA Root CA 2021" +# Serial: 76817823531813593706434026085292783742 +# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91 +# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d +# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d +-----BEGIN 
CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv +b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l +mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE +4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv +a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M +pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw +Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b +LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY +AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB +AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq +E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr +W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ +CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU +X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3 +f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja +H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP +JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P +zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt +jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0 +/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT +BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79 +aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW +xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU +63ZTGI0RmLo= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS ECC Root CA 2021" +# Serial: 137515985548005187474074462014555733966 +# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0 +# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48 +# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01 +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg +Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7 +KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y +STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw +SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN +nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- + +# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: 
CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 1977337328857672817 +# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3 +# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe +# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1 +MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc +tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd +IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j +b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC +AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw +ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m +iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF +Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ +hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P +Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE +EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV +1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t +CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR +5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw +f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9 +ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK +GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV +-----END CERTIFICATE----- + +# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. 
+# Label: "vTrus ECC Root CA" +# Serial: 630369271402956006249506845124680065938238527194 +# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85 +# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1 +# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3 +-----BEGIN CERTIFICATE----- +MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw +RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY +BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz +MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u +LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0 +v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd +e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw +V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA +AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG +GJTO +-----END CERTIFICATE----- + +# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Label: "vTrus Root CA" +# Serial: 387574501246983434957692974888460947164905180485 +# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc +# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7 +# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87 +-----BEGIN CERTIFICATE----- +MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL +BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x +FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx +MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s +THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc +IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU +AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+ +GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9 +8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH +flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt +J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim +0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN +pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ +UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW +OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB +AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet +8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd +nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j +bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM +Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv +TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS +S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr +I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9 +b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB +UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P +Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven +sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s= +-----END CERTIFICATE----- + +# 
Issuer: CN=ISRG Root X2 O=Internet Security Research Group +# Subject: CN=ISRG Root X2 O=Internet Security Research Group +# Label: "ISRG Root X2" +# Serial: 87493402998870891108772069816698636114 +# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5 +# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af +# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70 +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw +CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg +R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00 +MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT +ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW ++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9 +ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI +zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW +tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1 +/q4AaOeMSQ+2b1tbFfLn +-----END CERTIFICATE----- + +# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Label: "HiPKI Root CA - G1" +# Serial: 60966262342023497858655262305426234976 +# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3 +# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60 +# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc +-----BEGIN CERTIFICATE----- +MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa +Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3 +YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw +qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv +Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6 +lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz +Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ +KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK +FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj +HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr +y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ +/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM +a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6 +fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG +SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi +7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc +SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza +ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc +XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg +iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho +L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF +Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr 
+kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+ +vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU +YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 159662223612894884239637590694 +# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc +# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28 +# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2 +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD +VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw +MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g +UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT +BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx +uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV +HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/ ++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147 +bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 159662320309726417404178440727 +# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40 +# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a +# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo +27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w +Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw +TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl +qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH +szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8 +Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk +MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92 +wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p +aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN +VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb +C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy +h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4 +7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J +ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef +MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/ 
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT +6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ +0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm +2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb +bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 159662449406622349769042896298 +# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc +# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94 +# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8 +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt +nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY +6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu +MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k +RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg +f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV ++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo +dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW +Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa +G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq +gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H +vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC +B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u +NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg +yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev +HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6 +xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR +TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg +JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV +7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl +6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 159662495401136852707857743206 +# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73 +# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46 +# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48 +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi 
+AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G +jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2 +4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7 +VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm +ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 159662532700760215368942768210 +# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8 +# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47 +# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi +QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR +HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D +9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8 +p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj +# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj +# Label: "Telia Root CA v2" +# Serial: 7288924052977061235122729490515358 +# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48 +# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd +# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx +CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE +AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1 +NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ +MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq +AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9 +vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9 +lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD +n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT +7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o +6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC +TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6 +WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R +DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI +pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj +YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy +rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ +8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi +0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM 
+A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS +SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K +TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF +6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er +3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt +Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT +VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW +ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA +rBPuUBQemMc= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST BR Root CA 1 2020" +# Serial: 165870826978392376648679885835942448534 +# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed +# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67 +# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44 +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5 +NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS +zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0 +QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/ +VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW +wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV +dWNbFJWcHwHP2NVypw87 +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST EV Root CA 1 2020" +# Serial: 126288379621884218666039612629459926992 +# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e +# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07 +# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5 +NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC +/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD +wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3 +OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA 
+y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb +gfM0agPnIjhQW+0ZT0MW +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS ECC P384 Root G5" +# Serial: 13129116028163249804115411775095713523 +# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed +# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee +# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05 +-----BEGIN CERTIFICATE----- +MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp +Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2 +MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ +bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS +7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp +0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS +B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49 +BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ +LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4 +DXZDjC5Ty3zfDBeWUA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS RSA4096 Root G5" +# Serial: 11930366277458970227240571539258396554 +# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1 +# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35 +# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75 +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN +MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT +HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN +NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs +IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+ +ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0 +2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp +wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM +pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD +nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po +sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx +Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd +Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX +KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe +XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL +tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv +TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN +AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw +GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H +PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF +O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ +REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik +AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv 
+/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+ +p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw +MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF +qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK +ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+ +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root R1 O=Certainly +# Subject: CN=Certainly Root R1 O=Certainly +# Label: "Certainly Root R1" +# Serial: 188833316161142517227353805653483829216 +# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12 +# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af +# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw +PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy +dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9 +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0 +YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2 +1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT +vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed +aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0 +1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5 +r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5 +cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ +wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ +6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA +2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH +Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR +eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB +/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u +d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr +PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d +8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi +1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd +rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di +taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7 +lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj +yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn +Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy +yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n +wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6 +OV+KmalBWQewLK8= +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root E1 O=Certainly +# Subject: CN=Certainly Root E1 O=Certainly +# Label: "Certainly Root E1" +# Serial: 8168531406727139161245376702891150584 +# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9 +# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b +# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2 +-----BEGIN CERTIFICATE----- +MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw +CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu +bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ +BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s +eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK 
++IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2 +QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4 +hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm +ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG +BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR +-----END CERTIFICATE----- + +# Issuer: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD. +# Subject: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD. +# Label: "Security Communication ECC RootCA1" +# Serial: 15446673492073852651 +# MD5 Fingerprint: 7e:43:b0:92:68:ec:05:43:4c:98:ab:5d:35:2e:7e:86 +# SHA1 Fingerprint: b8:0e:26:a9:bf:d2:b2:3b:c0:ef:46:c9:ba:c7:bb:f6:1d:0d:41:41 +# SHA256 Fingerprint: e7:4f:bd:a5:5b:d5:64:c4:73:a3:6b:44:1a:a7:99:c8:a6:8e:07:74:40:e8:28:8b:9f:a1:e5:0e:4b:ba:ca:11 +-----BEGIN CERTIFICATE----- +MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYT +AkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYD +VQQDEyJTZWN1cml0eSBDb21tdW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYx +NjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTELMAkGA1UEBhMCSlAxJTAjBgNVBAoT +HFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNVBAMTIlNlY3VyaXR5 +IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+Cnnfdl +dB9sELLo5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpK +ULGjQjBAMB0GA1UdDgQWBBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu +9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3LsnNdo4gIxwwCMQDAqy0O +be0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70eN9k= +-----END CERTIFICATE----- + +# Issuer: CN=BJCA Global Root CA1 O=BEIJING CERTIFICATE AUTHORITY +# Subject: CN=BJCA Global Root CA1 O=BEIJING CERTIFICATE AUTHORITY +# Label: "BJCA Global Root CA1" +# Serial: 113562791157148395269083148143378328608 +# MD5 Fingerprint: 42:32:99:76:43:33:36:24:35:07:82:9b:28:f9:d0:90 +# SHA1 Fingerprint: d5:ec:8d:7b:4c:ba:79:f4:e7:e8:cb:9d:6b:ae:77:83:10:03:21:6a +# SHA256 Fingerprint: f3:89:6f:88:fe:7c:0a:88:27:66:a7:fa:6a:d2:74:9f:b5:7a:7f:3e:98:fb:76:9c:1f:a7:b0:9c:2c:44:d5:ae +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIQVW9l47TZkGobCdFsPsBsIDANBgkqhkiG9w0BAQsFADBU +MQswCQYDVQQGEwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRI +T1JJVFkxHTAbBgNVBAMMFEJKQ0EgR2xvYmFsIFJvb3QgQ0ExMB4XDTE5MTIxOTAz +MTYxN1oXDTQ0MTIxMjAzMTYxN1owVDELMAkGA1UEBhMCQ04xJjAkBgNVBAoMHUJF +SUpJTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRCSkNBIEdsb2Jh +bCBSb290IENBMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAPFmCL3Z +xRVhy4QEQaVpN3cdwbB7+sN3SJATcmTRuHyQNZ0YeYjjlwE8R4HyDqKYDZ4/N+AZ +spDyRhySsTphzvq3Rp4Dhtczbu33RYx2N95ulpH3134rhxfVizXuhJFyV9xgw8O5 +58dnJCNPYwpj9mZ9S1WnP3hkSWkSl+BMDdMJoDIwOvqfwPKcxRIqLhy1BDPapDgR +at7GGPZHOiJBhyL8xIkoVNiMpTAK+BcWyqw3/XmnkRd4OJmtWO2y3syJfQOcs4ll +5+M7sSKGjwZteAf9kRJ/sGsciQ35uMt0WwfCyPQ10WRjeulumijWML3mG90Vr4Tq +nMfK9Q7q8l0ph49pczm+LiRvRSGsxdRpJQaDrXpIhRMsDQa4bHlW/KNnMoH1V6XK +V0Jp6VwkYe/iMBhORJhVb3rCk9gZtt58R4oRTklH2yiUAguUSiz5EtBP6DF+bHq/ +pj+bOT0CFqMYs2esWz8sgytnOYFcuX6U1WTdno9uruh8W7TXakdI136z1C2OVnZO +z2nxbkRs1CTqjSShGL+9V/6pmTW12xB3uD1IutbB5/EjPtffhZ0nPNRAvQoMvfXn +jSXWgXSHRtQpdaJCbPdzied9v3pKH9MiyRVVz99vfFXQpIsHETdfg6YmV6YBW37+ +WGgHqel62bno/1Afq8K0wM7o6v0PvY1NuLxxAgMBAAGjQjBAMB0GA1UdDgQWBBTF +7+3M2I0hxkjk49cULqcWk+WYATAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAUoKsITQfI/Ki2Pm4rzc2IInRNwPWaZ+4 
+YRC6ojGYWUfo0Q0lHhVBDOAqVdVXUsv45Mdpox1NcQJeXyFFYEhcCY5JEMEE3Kli +awLwQ8hOnThJdMkycFRtwUf8jrQ2ntScvd0g1lPJGKm1Vrl2i5VnZu69mP6u775u ++2D2/VnGKhs/I0qUJDAnyIm860Qkmss9vk/Ves6OF8tiwdneHg56/0OGNFK8YT88 +X7vZdrRTvJez/opMEi4r89fO4aL/3Xtw+zuhTaRjAv04l5U/BXCga99igUOLtFkN +SoxUnMW7gZ/NfaXvCyUeOiDbHPwfmGcCCtRzRBPbUYQaVQNW4AB+dAb/OMRyHdOo +P2gxXdMJxy6MW2Pg6Nwe0uxhHvLe5e/2mXZgLR6UcnHGCyoyx5JO1UbXHfmpGQrI ++pXObSOYqgs4rZpWDW+N8TEAiMEXnM0ZNjX+VVOg4DwzX5Ze4jLp3zO7Bkqp2IRz +znfSxqxx4VyjHQy7Ct9f4qNx2No3WqB4K/TUfet27fJhcKVlmtOJNBir+3I+17Q9 +eVzYH6Eze9mCUAyTF6ps3MKCuwJXNq+YJyo5UOGwifUll35HaBC07HPKs5fRJNz2 +YqAo07WjuGS3iGJCz51TzZm+ZGiPTx4SSPfSKcOYKMryMguTjClPPGAyzQWWYezy +r/6zcCwupvI= +-----END CERTIFICATE----- + +# Issuer: CN=BJCA Global Root CA2 O=BEIJING CERTIFICATE AUTHORITY +# Subject: CN=BJCA Global Root CA2 O=BEIJING CERTIFICATE AUTHORITY +# Label: "BJCA Global Root CA2" +# Serial: 58605626836079930195615843123109055211 +# MD5 Fingerprint: 5e:0a:f6:47:5f:a6:14:e8:11:01:95:3f:4d:01:eb:3c +# SHA1 Fingerprint: f4:27:86:eb:6e:b8:6d:88:31:67:02:fb:ba:66:a4:53:00:aa:7a:a6 +# SHA256 Fingerprint: 57:4d:f6:93:1e:27:80:39:66:7b:72:0a:fd:c1:60:0f:c2:7e:b6:6d:d3:09:29:79:fb:73:85:64:87:21:28:82 +-----BEGIN CERTIFICATE----- +MIICJTCCAaugAwIBAgIQLBcIfWQqwP6FGFkGz7RK6zAKBggqhkjOPQQDAzBUMQsw +CQYDVQQGEwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRIT1JJ +VFkxHTAbBgNVBAMMFEJKQ0EgR2xvYmFsIFJvb3QgQ0EyMB4XDTE5MTIxOTAzMTgy +MVoXDTQ0MTIxMjAzMTgyMVowVDELMAkGA1UEBhMCQ04xJjAkBgNVBAoMHUJFSUpJ +TkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRCSkNBIEdsb2JhbCBS +b290IENBMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABJ3LgJGNU2e1uVCxA/jlSR9B +IgmwUVJY1is0j8USRhTFiy8shP8sbqjV8QnjAyEUxEM9fMEsxEtqSs3ph+B99iK+ ++kpRuDCK/eHeGBIK9ke35xe/J4rUQUyWPGCWwf0VHKNCMEAwHQYDVR0OBBYEFNJK +sVF/BvDRgh9Obl+rg/xI1LCRMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMAoGCCqGSM49BAMDA2gAMGUCMBq8W9f+qdJUDkpd0m2xQNz0Q9XSSpkZElaA +94M04TVOSG0ED1cxMDAtsaqdAzjbBgIxAMvMh1PLet8gUXOQwKhbYdDFUDn9hf7B +43j4ptZLvZuHjw/l1lOWqzzIQNph91Oj9w== +-----END CERTIFICATE----- + +# Issuer: CN=Sectigo Public Server Authentication Root E46 O=Sectigo Limited +# Subject: CN=Sectigo Public Server Authentication Root E46 O=Sectigo Limited +# Label: "Sectigo Public Server Authentication Root E46" +# Serial: 88989738453351742415770396670917916916 +# MD5 Fingerprint: 28:23:f8:b2:98:5c:37:16:3b:3e:46:13:4e:b0:b3:01 +# SHA1 Fingerprint: ec:8a:39:6c:40:f0:2e:bc:42:75:d4:9f:ab:1c:1a:5b:67:be:d2:9a +# SHA256 Fingerprint: c9:0f:26:f0:fb:1b:40:18:b2:22:27:51:9b:5c:a2:b5:3e:2c:a5:b3:be:5c:f1:8e:fe:1b:ef:47:38:0c:53:83 +-----BEGIN CERTIFICATE----- +MIICOjCCAcGgAwIBAgIQQvLM2htpN0RfFf51KBC49DAKBggqhkjOPQQDAzBfMQsw +CQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1T +ZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwHhcN +MjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5WjBfMQswCQYDVQQGEwJHQjEYMBYG +A1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1YmxpYyBT +ZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAR2+pmpbiDt+dd34wc7qNs9Xzjoq1WmVk/WSOrsfy2qw7LFeeyZYX8QeccC +WvkEN/U0NSt3zn8gj1KjAIns1aeibVvjS5KToID1AZTc8GgHHs3u/iVStSBDHBv+ +6xnOQ6OjQjBAMB0GA1UdDgQWBBTRItpMWfFLXyY4qp3W7usNw/upYTAOBgNVHQ8B +Af8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNnADBkAjAn7qRa +qCG76UeXlImldCBteU/IvZNeWBj7LRoAasm4PdCkT0RHlAFWovgzJQxC36oCMB3q +4S6ILuH5px0CMk7yn2xVdOOurvulGu7t0vzCAxHrRVxgED1cf5kDW21USAGKcw== +-----END CERTIFICATE----- + +# Issuer: CN=Sectigo Public Server Authentication Root R46 O=Sectigo Limited +# Subject: CN=Sectigo Public Server Authentication Root R46 O=Sectigo Limited +# 
Label: "Sectigo Public Server Authentication Root R46" +# Serial: 156256931880233212765902055439220583700 +# MD5 Fingerprint: 32:10:09:52:00:d5:7e:6c:43:df:15:c0:b1:16:93:e5 +# SHA1 Fingerprint: ad:98:f9:f3:e4:7d:75:3b:65:d4:82:b3:a4:52:17:bb:6e:f5:e4:38 +# SHA256 Fingerprint: 7b:b6:47:a6:2a:ee:ac:88:bf:25:7a:a5:22:d0:1f:fe:a3:95:e0:ab:45:c7:3f:93:f6:56:54:ec:38:f2:5a:06 +-----BEGIN CERTIFICATE----- +MIIFijCCA3KgAwIBAgIQdY39i658BwD6qSWn4cetFDANBgkqhkiG9w0BAQwFADBf +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQD +Ey1TZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYw +HhcNMjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5WjBfMQswCQYDVQQGEwJHQjEY +MBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1Ymxp +YyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCTvtU2UnXYASOgHEdCSe5jtrch/cSV1UgrJnwUUxDa +ef0rty2k1Cz66jLdScK5vQ9IPXtamFSvnl0xdE8H/FAh3aTPaE8bEmNtJZlMKpnz +SDBh+oF8HqcIStw+KxwfGExxqjWMrfhu6DtK2eWUAtaJhBOqbchPM8xQljeSM9xf +iOefVNlI8JhD1mb9nxc4Q8UBUQvX4yMPFF1bFOdLvt30yNoDN9HWOaEhUTCDsG3X +ME6WW5HwcCSrv0WBZEMNvSE6Lzzpng3LILVCJ8zab5vuZDCQOc2TZYEhMbUjUDM3 +IuM47fgxMMxF/mL50V0yeUKH32rMVhlATc6qu/m1dkmU8Sf4kaWD5QazYw6A3OAS +VYCmO2a0OYctyPDQ0RTp5A1NDvZdV3LFOxxHVp3i1fuBYYzMTYCQNFu31xR13NgE +SJ/AwSiItOkcyqex8Va3e0lMWeUgFaiEAin6OJRpmkkGj80feRQXEgyDet4fsZfu ++Zd4KKTIRJLpfSYFplhym3kT2BFfrsU4YjRosoYwjviQYZ4ybPUHNs2iTG7sijbt +8uaZFURww3y8nDnAtOFr94MlI1fZEoDlSfB1D++N6xybVCi0ITz8fAr/73trdf+L +HaAZBav6+CuBQug4urv7qv094PPK306Xlynt8xhW6aWWrL3DkJiy4Pmi1KZHQ3xt +zwIDAQABo0IwQDAdBgNVHQ4EFgQUVnNYZJX5khqwEioEYnmhQBWIIUkwDgYDVR0P +AQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAC9c +mTz8Bl6MlC5w6tIyMY208FHVvArzZJ8HXtXBc2hkeqK5Duj5XYUtqDdFqij0lgVQ +YKlJfp/imTYpE0RHap1VIDzYm/EDMrraQKFz6oOht0SmDpkBm+S8f74TlH7Kph52 +gDY9hAaLMyZlbcp+nv4fjFg4exqDsQ+8FxG75gbMY/qB8oFM2gsQa6H61SilzwZA +Fv97fRheORKkU55+MkIQpiGRqRxOF3yEvJ+M0ejf5lG5Nkc/kLnHvALcWxxPDkjB +JYOcCj+esQMzEhonrPcibCTRAUH4WAP+JWgiH5paPHxsnnVI84HxZmduTILA7rpX +DhjvLpr3Etiga+kFpaHpaPi8TD8SHkXoUsCjvxInebnMMTzD9joiFgOgyY9mpFui +TdaBJQbpdqQACj7LzTWb4OE4y2BThihCQRxEV+ioratF4yUQvNs+ZUH7G6aXD+u5 +dHn5HrwdVw1Hr8Mvn4dGp+smWg9WY7ViYG4A++MnESLn/pmPNPW56MORcr3Ywx65 +LvKRRFHQV80MNNVIIb/bE/FmJUNS0nAiNs2fxBx1IK1jcmMGDw4nztJqDby1ORrp +0XZ60Vzk50lJLVU3aPAaOpg+VBeHVOmmJ1CJeyAvP/+/oYtKR5j/K3tJPsMpRmAY +QqszKbrAKbkTidOIijlBO8n9pu0f9GBj39ItVQGL +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com TLS RSA Root CA 2022 O=SSL Corporation +# Subject: CN=SSL.com TLS RSA Root CA 2022 O=SSL Corporation +# Label: "SSL.com TLS RSA Root CA 2022" +# Serial: 148535279242832292258835760425842727825 +# MD5 Fingerprint: d8:4e:c6:59:30:d8:fe:a0:d6:7a:5a:2c:2c:69:78:da +# SHA1 Fingerprint: ec:2c:83:40:72:af:26:95:10:ff:0e:f2:03:ee:31:70:f6:78:9d:ca +# SHA256 Fingerprint: 8f:af:7d:2e:2c:b4:70:9b:b8:e0:b3:36:66:bf:75:a5:dd:45:b5:de:48:0f:8e:a8:d4:bf:e6:be:bc:17:f2:ed +-----BEGIN CERTIFICATE----- +MIIFiTCCA3GgAwIBAgIQb77arXO9CEDii02+1PdbkTANBgkqhkiG9w0BAQsFADBO +MQswCQYDVQQGEwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQD +DBxTU0wuY29tIFRMUyBSU0EgUm9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzQyMloX +DTQ2MDgxOTE2MzQyMVowTjELMAkGA1UEBhMCVVMxGDAWBgNVBAoMD1NTTCBDb3Jw +b3JhdGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgUlNBIFJvb3QgQ0EgMjAyMjCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANCkCXJPQIgSYT41I57u9nTP +L3tYPc48DRAokC+X94xI2KDYJbFMsBFMF3NQ0CJKY7uB0ylu1bUJPiYYf7ISf5OY +t6/wNr/y7hienDtSxUcZXXTzZGbVXcdotL8bHAajvI9AI7YexoS9UcQbOcGV0ins +S657Lb85/bRi3pZ7QcacoOAGcvvwB5cJOYF0r/c0WRFXCsJbwST0MXMwgsadugL3 
+PnxEX4MN8/HdIGkWCVDi1FW24IBydm5MR7d1VVm0U3TZlMZBrViKMWYPHqIbKUBO +L9975hYsLfy/7PO0+r4Y9ptJ1O4Fbtk085zx7AGL0SDGD6C1vBdOSHtRwvzpXGk3 +R2azaPgVKPC506QVzFpPulJwoxJF3ca6TvvC0PeoUidtbnm1jPx7jMEWTO6Af77w +dr5BUxIzrlo4QqvXDz5BjXYHMtWrifZOZ9mxQnUjbvPNQrL8VfVThxc7wDNY8VLS ++YCk8OjwO4s4zKTGkH8PnP2L0aPP2oOnaclQNtVcBdIKQXTbYxE3waWglksejBYS +d66UNHsef8JmAOSqg+qKkK3ONkRN0VHpvB/zagX9wHQfJRlAUW7qglFA35u5CCoG +AtUjHBPW6dvbxrB6y3snm/vg1UYk7RBLY0ulBY+6uB0rpvqR4pJSvezrZ5dtmi2f +gTIFZzL7SAg/2SW4BCUvAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAU+y437uOEeicuzRk1sTN8/9REQrkwHQYDVR0OBBYEFPsuN+7jhHonLs0Z +NbEzfP/UREK5MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAjYlt +hEUY8U+zoO9opMAdrDC8Z2awms22qyIZZtM7QbUQnRC6cm4pJCAcAZli05bg4vsM +QtfhWsSWTVTNj8pDU/0quOr4ZcoBwq1gaAafORpR2eCNJvkLTqVTJXojpBzOCBvf +R4iyrT7gJ4eLSYwfqUdYe5byiB0YrrPRpgqU+tvT5TgKa3kSM/tKWTcWQA673vWJ +DPFs0/dRa1419dvAJuoSc06pkZCmF8NsLzjUo3KUQyxi4U5cMj29TH0ZR6LDSeeW +P4+a0zvkEdiLA9z2tmBVGKaBUfPhqBVq6+AL8BQx1rmMRTqoENjwuSfr98t67wVy +lrXEj5ZzxOhWc5y8aVFjvO9nHEMaX3cZHxj4HCUp+UmZKbaSPaKDN7EgkaibMOlq +bLQjk2UEqxHzDh1TJElTHaE/nUiSEeJ9DU/1172iWD54nR4fK/4huxoTtrEoZP2w +AgDHbICivRZQIA9ygV/MlP+7mea6kMvq+cYMwq7FGc4zoWtcu358NFcXrfA/rs3q +r5nsLFR+jM4uElZI7xc7P0peYNLcdDa8pUNjyw9bowJWCZ4kLOGGgYz+qxcs+sji +Mho6/4UIyYOf8kpIEFR3N+2ivEC+5BB09+Rbu7nzifmPQdjH5FCQNYA+HLhNkNPU +98OwoX6EyneSMSy4kLGCenROmxMmtNVQZlR4rmA= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com TLS ECC Root CA 2022 O=SSL Corporation +# Subject: CN=SSL.com TLS ECC Root CA 2022 O=SSL Corporation +# Label: "SSL.com TLS ECC Root CA 2022" +# Serial: 26605119622390491762507526719404364228 +# MD5 Fingerprint: 99:d7:5c:f1:51:36:cc:e9:ce:d9:19:2e:77:71:56:c5 +# SHA1 Fingerprint: 9f:5f:d9:1a:54:6d:f5:0c:71:f0:ee:7a:bd:17:49:98:84:73:e2:39 +# SHA256 Fingerprint: c3:2f:fd:9f:46:f9:36:d1:6c:36:73:99:09:59:43:4b:9a:d6:0a:af:bb:9e:7c:f3:36:54:f1:44:cc:1b:a1:43 +-----BEGIN CERTIFICATE----- +MIICOjCCAcCgAwIBAgIQFAP1q/s3ixdAW+JDsqXRxDAKBggqhkjOPQQDAzBOMQsw +CQYDVQQGEwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQDDBxT +U0wuY29tIFRMUyBFQ0MgUm9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzM0OFoXDTQ2 +MDgxOTE2MzM0N1owTjELMAkGA1UEBhMCVVMxGDAWBgNVBAoMD1NTTCBDb3Jwb3Jh +dGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgRUNDIFJvb3QgQ0EgMjAyMjB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABEUpNXP6wrgjzhR9qLFNoFs27iosU8NgCTWyJGYm +acCzldZdkkAZDsalE3D07xJRKF3nzL35PIXBz5SQySvOkkJYWWf9lCcQZIxPBLFN +SeR7T5v15wj4A4j3p8OSSxlUgaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSME +GDAWgBSJjy+j6CugFFR781a4Jl9nOAuc0DAdBgNVHQ4EFgQUiY8vo+groBRUe/NW +uCZfZzgLnNAwDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMDA2gAMGUCMFXjIlbp +15IkWE8elDIPDAI2wv2sdDJO4fscgIijzPvX6yv/N33w7deedWo1dlJF4AIxAMeN +b0Igj762TVntd00pxCAgRWSGOlDGxK0tk/UYfXLtqc/ErFc2KAhl3zx5Zn6g6g== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot Root CA ECC TLS 2021 O=Atos +# Subject: CN=Atos TrustedRoot Root CA ECC TLS 2021 O=Atos +# Label: "Atos TrustedRoot Root CA ECC TLS 2021" +# Serial: 81873346711060652204712539181482831616 +# MD5 Fingerprint: 16:9f:ad:f1:70:ad:79:d6:ed:29:b4:d1:c5:79:70:a8 +# SHA1 Fingerprint: 9e:bc:75:10:42:b3:02:f3:81:f4:f7:30:62:d4:8f:c3:a7:51:b2:dd +# SHA256 Fingerprint: b2:fa:e5:3e:14:cc:d7:ab:92:12:06:47:01:ae:27:9c:1d:89:88:fa:cb:77:5f:a8:a0:08:91:4e:66:39:88:a8 +-----BEGIN CERTIFICATE----- +MIICFTCCAZugAwIBAgIQPZg7pmY9kGP3fiZXOATvADAKBggqhkjOPQQDAzBMMS4w +LAYDVQQDDCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgRUNDIFRMUyAyMDIxMQ0w +CwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0yMTA0MjIwOTI2MjNaFw00MTA0 +MTcwOTI2MjJaMEwxLjAsBgNVBAMMJUF0b3MgVHJ1c3RlZFJvb3QgUm9vdCBDQSBF 
+Q0MgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYTAkRFMHYwEAYHKoZI +zj0CAQYFK4EEACIDYgAEloZYKDcKZ9Cg3iQZGeHkBQcfl+3oZIK59sRxUM6KDP/X +tXa7oWyTbIOiaG6l2b4siJVBzV3dscqDY4PMwL502eCdpO5KTlbgmClBk1IQ1SQ4 +AjJn8ZQSb+/Xxd4u/RmAo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR2 +KCXWfeBmmnoJsmo7jjPXNtNPojAOBgNVHQ8BAf8EBAMCAYYwCgYIKoZIzj0EAwMD +aAAwZQIwW5kp85wxtolrbNa9d+F851F+uDrNozZffPc8dz7kUK2o59JZDCaOMDtu +CCrCp1rIAjEAmeMM56PDr9NJLkaCI2ZdyQAUEv049OGYa3cpetskz2VAv9LcjBHo +9H1/IISpQuQo +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot Root CA RSA TLS 2021 O=Atos +# Subject: CN=Atos TrustedRoot Root CA RSA TLS 2021 O=Atos +# Label: "Atos TrustedRoot Root CA RSA TLS 2021" +# Serial: 111436099570196163832749341232207667876 +# MD5 Fingerprint: d4:d3:46:b8:9a:c0:9c:76:5d:9e:3a:c3:b9:99:31:d2 +# SHA1 Fingerprint: 18:52:3b:0d:06:37:e4:d6:3a:df:23:e4:98:fb:5b:16:fb:86:74:48 +# SHA256 Fingerprint: 81:a9:08:8e:a5:9f:b3:64:c5:48:a6:f8:55:59:09:9b:6f:04:05:ef:bf:18:e5:32:4e:c9:f4:57:ba:00:11:2f +-----BEGIN CERTIFICATE----- +MIIFZDCCA0ygAwIBAgIQU9XP5hmTC/srBRLYwiqipDANBgkqhkiG9w0BAQwFADBM +MS4wLAYDVQQDDCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgUlNBIFRMUyAyMDIx +MQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0yMTA0MjIwOTIxMTBaFw00 +MTA0MTcwOTIxMDlaMEwxLjAsBgNVBAMMJUF0b3MgVHJ1c3RlZFJvb3QgUm9vdCBD +QSBSU0EgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYTAkRFMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtoAOxHm9BYx9sKOdTSJNy/BBl01Z +4NH+VoyX8te9j2y3I49f1cTYQcvyAh5x5en2XssIKl4w8i1mx4QbZFc4nXUtVsYv +Ye+W/CBGvevUez8/fEc4BKkbqlLfEzfTFRVOvV98r61jx3ncCHvVoOX3W3WsgFWZ +kmGbzSoXfduP9LVq6hdKZChmFSlsAvFr1bqjM9xaZ6cF4r9lthawEO3NUDPJcFDs +GY6wx/J0W2tExn2WuZgIWWbeKQGb9Cpt0xU6kGpn8bRrZtkh68rZYnxGEFzedUln +nkL5/nWpo63/dgpnQOPF943HhZpZnmKaau1Fh5hnstVKPNe0OwANwI8f4UDErmwh +3El+fsqyjW22v5MvoVw+j8rtgI5Y4dtXz4U2OLJxpAmMkokIiEjxQGMYsluMWuPD +0xeqqxmjLBvk1cbiZnrXghmmOxYsL3GHX0WelXOTwkKBIROW1527k2gV+p2kHYzy +geBYBr3JtuP2iV2J+axEoctr+hbxx1A9JNr3w+SH1VbxT5Aw+kUJWdo0zuATHAR8 +ANSbhqRAvNncTFd+rrcztl524WWLZt+NyteYr842mIycg5kDcPOvdO3GDjbnvezB +c6eUWsuSZIKmAMFwoW4sKeFYV+xafJlrJaSQOoD0IJ2azsct+bJLKZWD6TWNp0lI +pw9MGZHQ9b8Q4HECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +dEmZ0f+0emhFdcN+tNzMzjkz2ggwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +DAUAA4ICAQAjQ1MkYlxt/T7Cz1UAbMVWiLkO3TriJQ2VSpfKgInuKs1l+NsW4AmS +4BjHeJi78+xCUvuppILXTdiK/ORO/auQxDh1MoSf/7OwKwIzNsAQkG8dnK/haZPs +o0UvFJ/1TCplQ3IM98P4lYsU84UgYt1UU90s3BiVaU+DR3BAM1h3Egyi61IxHkzJ +qM7F78PRreBrAwA0JrRUITWXAdxfG/F851X6LWh3e9NpzNMOa7pNdkTWwhWaJuyw +xfW70Xp0wmzNxbVe9kzmWy2B27O3Opee7c9GslA9hGCZcbUztVdF5kJHdWoOsAgM +rr3e97sPWD2PAzHoPYJQyi9eDF20l74gNAf0xBLh7tew2VktafcxBPTy+av5EzH4 +AXcOPUIjJsyacmdRIXrMPIWo6iFqO9taPKU0nprALN+AnCng33eU0aKAQv9qTFsR +0PXNor6uzFFcw9VUewyu1rkGd4Di7wcaaMxZUa1+XGdrudviB0JbuAEFWDlN5LuY +o7Ey7Nmj1m+UI/87tyll5gfp77YZ6ufCOB0yiJA8EytuzO+rdwY0d4RPcuSBhPm5 +dDTedk+SKlOxJTnbPP/lPqYO5Wue/9vsL3SD3460s6neFE3/MaNFcyT6lSnMEpcE +oji2jbDwN/zIIX8/syQbPYtuzE2wFg2WHYMfRsCbvUOZ58SWLs5fyQ== +-----END CERTIFICATE----- + +# Issuer: CN=TrustAsia Global Root CA G3 O=TrustAsia Technologies, Inc. +# Subject: CN=TrustAsia Global Root CA G3 O=TrustAsia Technologies, Inc. 
+# Label: "TrustAsia Global Root CA G3" +# Serial: 576386314500428537169965010905813481816650257167 +# MD5 Fingerprint: 30:42:1b:b7:bb:81:75:35:e4:16:4f:53:d2:94:de:04 +# SHA1 Fingerprint: 63:cf:b6:c1:27:2b:56:e4:88:8e:1c:23:9a:b6:2e:81:47:24:c3:c7 +# SHA256 Fingerprint: e0:d3:22:6a:eb:11:63:c2:e4:8f:f9:be:3b:50:b4:c6:43:1b:e7:bb:1e:ac:c5:c3:6b:5d:5e:c5:09:03:9a:08 +-----BEGIN CERTIFICATE----- +MIIFpTCCA42gAwIBAgIUZPYOZXdhaqs7tOqFhLuxibhxkw8wDQYJKoZIhvcNAQEM +BQAwWjELMAkGA1UEBhMCQ04xJTAjBgNVBAoMHFRydXN0QXNpYSBUZWNobm9sb2dp +ZXMsIEluYy4xJDAiBgNVBAMMG1RydXN0QXNpYSBHbG9iYWwgUm9vdCBDQSBHMzAe +Fw0yMTA1MjAwMjEwMTlaFw00NjA1MTkwMjEwMTlaMFoxCzAJBgNVBAYTAkNOMSUw +IwYDVQQKDBxUcnVzdEFzaWEgVGVjaG5vbG9naWVzLCBJbmMuMSQwIgYDVQQDDBtU +cnVzdEFzaWEgR2xvYmFsIFJvb3QgQ0EgRzMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDAMYJhkuSUGwoqZdC+BqmHO1ES6nBBruL7dOoKjbmzTNyPtxNS +T1QY4SxzlZHFZjtqz6xjbYdT8PfxObegQ2OwxANdV6nnRM7EoYNl9lA+sX4WuDqK +AtCWHwDNBSHvBm3dIZwZQ0WhxeiAysKtQGIXBsaqvPPW5vxQfmZCHzyLpnl5hkA1 +nyDvP+uLRx+PjsXUjrYsyUQE49RDdT/VP68czH5GX6zfZBCK70bwkPAPLfSIC7Ep +qq+FqklYqL9joDiR5rPmd2jE+SoZhLsO4fWvieylL1AgdB4SQXMeJNnKziyhWTXA +yB1GJ2Faj/lN03J5Zh6fFZAhLf3ti1ZwA0pJPn9pMRJpxx5cynoTi+jm9WAPzJMs +hH/x/Gr8m0ed262IPfN2dTPXS6TIi/n1Q1hPy8gDVI+lhXgEGvNz8teHHUGf59gX +zhqcD0r83ERoVGjiQTz+LISGNzzNPy+i2+f3VANfWdP3kXjHi3dqFuVJhZBFcnAv +kV34PmVACxmZySYgWmjBNb9Pp1Hx2BErW+Canig7CjoKH8GB5S7wprlppYiU5msT +f9FkPz2ccEblooV7WIQn3MSAPmeamseaMQ4w7OYXQJXZRe0Blqq/DPNL0WP3E1jA +uPP6Z92bfW1K/zJMtSU7/xxnD4UiWQWRkUF3gdCFTIcQcf+eQxuulXUtgQIDAQAB +o2MwYTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEDk5PIj7zjKsK5Xf/Ih +MBY027ySMB0GA1UdDgQWBBRA5OTyI+84yrCuV3/yITAWNNu8kjAOBgNVHQ8BAf8E +BAMCAQYwDQYJKoZIhvcNAQEMBQADggIBACY7UeFNOPMyGLS0XuFlXsSUT9SnYaP4 +wM8zAQLpw6o1D/GUE3d3NZ4tVlFEbuHGLige/9rsR82XRBf34EzC4Xx8MnpmyFq2 +XFNFV1pF1AWZLy4jVe5jaN/TG3inEpQGAHUNcoTpLrxaatXeL1nHo+zSh2bbt1S1 +JKv0Q3jbSwTEb93mPmY+KfJLaHEih6D4sTNjduMNhXJEIlU/HHzp/LgV6FL6qj6j +ITk1dImmasI5+njPtqzn59ZW/yOSLlALqbUHM/Q4X6RJpstlcHboCoWASzY9M/eV +VHUl2qzEc4Jl6VL1XP04lQJqaTDFHApXB64ipCz5xUG3uOyfT0gA+QEEVcys+TIx +xHWVBqB/0Y0n3bOppHKH/lmLmnp0Ft0WpWIp6zqW3IunaFnT63eROfjXy9mPX1on +AX1daBli2MjN9LdyR75bl87yraKZk62Uy5P2EgmVtqvXO9A/EcswFi55gORngS1d +7XB4tmBZrOFdRWOPyN9yaFvqHbgB8X7754qz41SgOAngPN5C8sLtLpvzHzW2Ntjj +gKGLzZlkD8Kqq7HK9W+eQ42EVJmzbsASZthwEPEGNTNDqJwuuhQxzhB/HIbjj9LV ++Hfsm6vxL2PZQl/gZ4FkkfGXL/xuJvYz+NO1+MRiqzFRJQJ6+N1rZdVtTTDIZbpo +FGWsJwt0ivKH +-----END CERTIFICATE----- + +# Issuer: CN=TrustAsia Global Root CA G4 O=TrustAsia Technologies, Inc. +# Subject: CN=TrustAsia Global Root CA G4 O=TrustAsia Technologies, Inc. 
+# Label: "TrustAsia Global Root CA G4" +# Serial: 451799571007117016466790293371524403291602933463 +# MD5 Fingerprint: 54:dd:b2:d7:5f:d8:3e:ed:7c:e0:0b:2e:cc:ed:eb:eb +# SHA1 Fingerprint: 57:73:a5:61:5d:80:b2:e6:ac:38:82:fc:68:07:31:ac:9f:b5:92:5a +# SHA256 Fingerprint: be:4b:56:cb:50:56:c0:13:6a:52:6d:f4:44:50:8d:aa:36:a0:b5:4f:42:e4:ac:38:f7:2a:f4:70:e4:79:65:4c +-----BEGIN CERTIFICATE----- +MIICVTCCAdygAwIBAgIUTyNkuI6XY57GU4HBdk7LKnQV1tcwCgYIKoZIzj0EAwMw +WjELMAkGA1UEBhMCQ04xJTAjBgNVBAoMHFRydXN0QXNpYSBUZWNobm9sb2dpZXMs +IEluYy4xJDAiBgNVBAMMG1RydXN0QXNpYSBHbG9iYWwgUm9vdCBDQSBHNDAeFw0y +MTA1MjAwMjEwMjJaFw00NjA1MTkwMjEwMjJaMFoxCzAJBgNVBAYTAkNOMSUwIwYD +VQQKDBxUcnVzdEFzaWEgVGVjaG5vbG9naWVzLCBJbmMuMSQwIgYDVQQDDBtUcnVz +dEFzaWEgR2xvYmFsIFJvb3QgQ0EgRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATx +s8045CVD5d4ZCbuBeaIVXxVjAd7Cq92zphtnS4CDr5nLrBfbK5bKfFJV4hrhPVbw +LxYI+hW8m7tH5j/uqOFMjPXTNvk4XatwmkcN4oFBButJ+bAp3TPsUKV/eSm4IJij +YzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUpbtKl86zK3+kMd6Xg1mD +pm9xy94wHQYDVR0OBBYEFKW7SpfOsyt/pDHel4NZg6ZvccveMA4GA1UdDwEB/wQE +AwIBBjAKBggqhkjOPQQDAwNnADBkAjBe8usGzEkxn0AAbbd+NvBNEU/zy4k6LHiR +UKNbwMp1JvK/kF0LgoxgKJ/GcJpo5PECMFxYDlZ2z1jD1xCMuo6u47xkdUfFVZDj +/bpV6wfEU6s3qe4hsiFbYI89MvHVI5TWWA== +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust ECC Root-01 O=CommScope +# Subject: CN=CommScope Public Trust ECC Root-01 O=CommScope +# Label: "CommScope Public Trust ECC Root-01" +# Serial: 385011430473757362783587124273108818652468453534 +# MD5 Fingerprint: 3a:40:a7:fc:03:8c:9c:38:79:2f:3a:a2:6c:b6:0a:16 +# SHA1 Fingerprint: 07:86:c0:d8:dd:8e:c0:80:98:06:98:d0:58:7a:ef:de:a6:cc:a2:5d +# SHA256 Fingerprint: 11:43:7c:da:7b:b4:5e:41:36:5f:45:b3:9a:38:98:6b:0d:e0:0d:ef:34:8e:0c:7b:b0:87:36:33:80:0b:c3:8b +-----BEGIN CERTIFICATE----- +MIICHTCCAaOgAwIBAgIUQ3CCd89NXTTxyq4yLzf39H91oJ4wCgYIKoZIzj0EAwMw +TjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwiQ29t +bVNjb3BlIFB1YmxpYyBUcnVzdCBFQ0MgUm9vdC0wMTAeFw0yMTA0MjgxNzM1NDNa +Fw00NjA0MjgxNzM1NDJaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21tU2Nv +cGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgRUNDIFJvb3QtMDEw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAARLNumuV16ocNfQj3Rid8NeeqrltqLxeP0C +flfdkXmcbLlSiFS8LwS+uM32ENEp7LXQoMPwiXAZu1FlxUOcw5tjnSCDPgYLpkJE +hRGnSjot6dZoL0hOUysHP029uax3OVejQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSOB2LAUN3GGQYARnQE9/OufXVNMDAKBggq +hkjOPQQDAwNoADBlAjEAnDPfQeMjqEI2Jpc1XHvr20v4qotzVRVcrHgpD7oh2MSg +2NED3W3ROT3Ek2DS43KyAjB8xX6I01D1HiXo+k515liWpDVfG2XqYZpwI7UNo5uS +Um9poIyNStDuiw7LR47QjRE= +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust ECC Root-02 O=CommScope +# Subject: CN=CommScope Public Trust ECC Root-02 O=CommScope +# Label: "CommScope Public Trust ECC Root-02" +# Serial: 234015080301808452132356021271193974922492992893 +# MD5 Fingerprint: 59:b0:44:d5:65:4d:b8:5c:55:19:92:02:b6:d1:94:b2 +# SHA1 Fingerprint: 3c:3f:ef:57:0f:fe:65:93:86:9e:a0:fe:b0:f6:ed:8e:d1:13:c7:e5 +# SHA256 Fingerprint: 2f:fb:7f:81:3b:bb:b3:c8:9a:b4:e8:16:2d:0f:16:d7:15:09:a8:30:cc:9d:73:c2:62:e5:14:08:75:d1:ad:4a +-----BEGIN CERTIFICATE----- +MIICHDCCAaOgAwIBAgIUKP2ZYEFHpgE6yhR7H+/5aAiDXX0wCgYIKoZIzj0EAwMw +TjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwiQ29t +bVNjb3BlIFB1YmxpYyBUcnVzdCBFQ0MgUm9vdC0wMjAeFw0yMTA0MjgxNzQ0NTRa +Fw00NjA0MjgxNzQ0NTNaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21tU2Nv +cGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgRUNDIFJvb3QtMDIw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAAR4MIHoYx7l63FRD/cHB8o5mXxO1Q/MMDAL +j2aTPs+9xYa9+bG3tD60B8jzljHz7aRP+KNOjSkVWLjVb3/ubCK1sK9IRQq9qEmU 
+v4RDsNuESgMjGWdqb8FuvAY5N9GIIvejQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTmGHX/72DehKT1RsfeSlXjMjZ59TAKBggq +hkjOPQQDAwNnADBkAjAmc0l6tqvmSfR9Uj/UQQSugEODZXW5hYA4O9Zv5JOGq4/n +ich/m35rChJVYaoR4HkCMHfoMXGsPHED1oQmHhS48zs73u1Z/GtMMH9ZzkXpc2AV +mkzw5l4lIhVtwodZ0LKOag== +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust RSA Root-01 O=CommScope +# Subject: CN=CommScope Public Trust RSA Root-01 O=CommScope +# Label: "CommScope Public Trust RSA Root-01" +# Serial: 354030733275608256394402989253558293562031411421 +# MD5 Fingerprint: 0e:b4:15:bc:87:63:5d:5d:02:73:d4:26:38:68:73:d8 +# SHA1 Fingerprint: 6d:0a:5f:f7:b4:23:06:b4:85:b3:b7:97:64:fc:ac:75:f5:33:f2:93 +# SHA256 Fingerprint: 02:bd:f9:6e:2a:45:dd:9b:f1:8f:c7:e1:db:df:21:a0:37:9b:a3:c9:c2:61:03:44:cf:d8:d6:06:fe:c1:ed:81 +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIUPgNJgXUWdDGOTKvVxZAplsU5EN0wDQYJKoZIhvcNAQEL +BQAwTjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwi +Q29tbVNjb3BlIFB1YmxpYyBUcnVzdCBSU0EgUm9vdC0wMTAeFw0yMTA0MjgxNjQ1 +NTRaFw00NjA0MjgxNjQ1NTNaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21t +U2NvcGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgUlNBIFJvb3Qt +MDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwSGWjDR1C45FtnYSk +YZYSwu3D2iM0GXb26v1VWvZVAVMP8syMl0+5UMuzAURWlv2bKOx7dAvnQmtVzslh +suitQDy6uUEKBU8bJoWPQ7VAtYXR1HHcg0Hz9kXHgKKEUJdGzqAMxGBWBB0HW0al +DrJLpA6lfO741GIDuZNqihS4cPgugkY4Iw50x2tBt9Apo52AsH53k2NC+zSDO3Oj +WiE260f6GBfZumbCk6SP/F2krfxQapWsvCQz0b2If4b19bJzKo98rwjyGpg/qYFl +P8GMicWWMJoKz/TUyDTtnS+8jTiGU+6Xn6myY5QXjQ/cZip8UlF1y5mO6D1cv547 +KI2DAg+pn3LiLCuz3GaXAEDQpFSOm117RTYm1nJD68/A6g3czhLmfTifBSeolz7p +UcZsBSjBAg/pGG3svZwG1KdJ9FQFa2ww8esD1eo9anbCyxooSU1/ZOD6K9pzg4H/ +kQO9lLvkuI6cMmPNn7togbGEW682v3fuHX/3SZtS7NJ3Wn2RnU3COS3kuoL4b/JO +Hg9O5j9ZpSPcPYeoKFgo0fEbNttPxP/hjFtyjMcmAyejOQoBqsCyMWCDIqFPEgkB +Ea801M/XrmLTBQe0MXXgDW1XT2mH+VepuhX2yFJtocucH+X8eKg1mp9BFM6ltM6U +CBwJrVbl2rZJmkrqYxhTnCwuwwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUN12mmnQywsL5x6YVEFm45P3luG0wDQYJ +KoZIhvcNAQELBQADggIBAK+nz97/4L1CjU3lIpbfaOp9TSp90K09FlxD533Ahuh6 +NWPxzIHIxgvoLlI1pKZJkGNRrDSsBTtXAOnTYtPZKdVUvhwQkZyybf5Z/Xn36lbQ +nmhUQo8mUuJM3y+Xpi/SB5io82BdS5pYV4jvguX6r2yBS5KPQJqTRlnLX3gWsWc+ +QgvfKNmwrZggvkN80V4aCRckjXtdlemrwWCrWxhkgPut4AZ9HcpZuPN4KWfGVh2v +trV0KnahP/t1MJ+UXjulYPPLXAziDslg+MkfFoom3ecnf+slpoq9uC02EJqxWE2a +aE9gVOX2RhOOiKy8IUISrcZKiX2bwdgt6ZYD9KJ0DLwAHb/WNyVntHKLr4W96ioD +j8z7PEQkguIBpQtZtjSNMgsSDesnwv1B10A8ckYpwIzqug/xBpMu95yo9GA+o/E4 +Xo4TwbM6l4c/ksp4qRyv0LAbJh6+cOx69TOY6lz/KwsETkPdY34Op054A5U+1C0w +lREQKC6/oAI+/15Z0wUOlV9TRe9rh9VIzRamloPh37MG88EU26fsHItdkJANclHn +YfkUyq+Dj7+vsQpZXdxc1+SWrVtgHdqul7I52Qb1dgAT+GhMIbA1xNxVssnBQVoc +icCMb3SgazNNtQEo/a2tiRc7ppqEvOuM6sRxJKi6KfkIsidWNTJf6jn7MZrVGczw +-----END CERTIFICATE----- + +# Issuer: CN=CommScope Public Trust RSA Root-02 O=CommScope +# Subject: CN=CommScope Public Trust RSA Root-02 O=CommScope +# Label: "CommScope Public Trust RSA Root-02" +# Serial: 480062499834624527752716769107743131258796508494 +# MD5 Fingerprint: e1:29:f9:62:7b:76:e2:96:6d:f3:d4:d7:0f:ae:1f:aa +# SHA1 Fingerprint: ea:b0:e2:52:1b:89:93:4c:11:68:f2:d8:9a:ac:22:4c:a3:8a:57:ae +# SHA256 Fingerprint: ff:e9:43:d7:93:42:4b:4f:7c:44:0c:1c:3d:64:8d:53:63:f3:4b:82:dc:87:aa:7a:9f:11:8f:c5:de:e1:01:f1 +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIUVBa/O345lXGN0aoApYYNK496BU4wDQYJKoZIhvcNAQEL +BQAwTjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwi +Q29tbVNjb3BlIFB1YmxpYyBUcnVzdCBSU0EgUm9vdC0wMjAeFw0yMTA0MjgxNzE2 
+NDNaFw00NjA0MjgxNzE2NDJaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21t +U2NvcGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgUlNBIFJvb3Qt +MDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDh+g77aAASyE3VrCLE +NQE7xVTlWXZjpX/rwcRqmL0yjReA61260WI9JSMZNRTpf4mnG2I81lDnNJUDMrG0 +kyI9p+Kx7eZ7Ti6Hmw0zdQreqjXnfuU2mKKuJZ6VszKWpCtYHu8//mI0SFHRtI1C +rWDaSWqVcN3SAOLMV2MCe5bdSZdbkk6V0/nLKR8YSvgBKtJjCW4k6YnS5cciTNxz +hkcAqg2Ijq6FfUrpuzNPDlJwnZXjfG2WWy09X6GDRl224yW4fKcZgBzqZUPckXk2 +LHR88mcGyYnJ27/aaL8j7dxrrSiDeS/sOKUNNwFnJ5rpM9kzXzehxfCrPfp4sOcs +n/Y+n2Dg70jpkEUeBVF4GiwSLFworA2iI540jwXmojPOEXcT1A6kHkIfhs1w/tku +FT0du7jyU1fbzMZ0KZwYszZ1OC4PVKH4kh+Jlk+71O6d6Ts2QrUKOyrUZHk2EOH5 +kQMreyBUzQ0ZGshBMjTRsJnhkB4BQDa1t/qp5Xd1pCKBXbCL5CcSD1SIxtuFdOa3 +wNemKfrb3vOTlycEVS8KbzfFPROvCgCpLIscgSjX74Yxqa7ybrjKaixUR9gqiC6v +wQcQeKwRoi9C8DfF8rhW3Q5iLc4tVn5V8qdE9isy9COoR+jUKgF4z2rDN6ieZdIs +5fq6M8EGRPbmz6UNp2YINIos8wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUR9DnsSL/nSz12Vdgs7GxcJXvYXowDQYJ +KoZIhvcNAQELBQADggIBAIZpsU0v6Z9PIpNojuQhmaPORVMbc0RTAIFhzTHjCLqB +KCh6krm2qMhDnscTJk3C2OVVnJJdUNjCK9v+5qiXz1I6JMNlZFxHMaNlNRPDk7n3 ++VGXu6TwYofF1gbTl4MgqX67tiHCpQ2EAOHyJxCDut0DgdXdaMNmEMjRdrSzbyme +APnCKfWxkxlSaRosTKCL4BWaMS/TiJVZbuXEs1DIFAhKm4sTg7GkcrI7djNB3Nyq +pgdvHSQSn8h2vS/ZjvQs7rfSOBAkNlEv41xdgSGn2rtO/+YHqP65DSdsu3BaVXoT +6fEqSWnHX4dXTEN5bTpl6TBcQe7rd6VzEojov32u5cSoHw2OHG1QAk8mGEPej1WF +sQs3BWDJVTkSBKEqz3EWnzZRSb9wO55nnPt7eck5HHisd5FUmrh1CoFSl+NmYWvt +PjgelmFV4ZFUjO2MJB+ByRCac5krFk5yAD9UG/iNuovnFNa2RU9g7Jauwy8CTl2d +lklyALKrdVwPaFsdZcJfMw8eD/A7hvWwTruc9+olBdytoptLFwG+Qt81IR2tq670 +v64fG9PiO/yzcnMcmyiQiRM9HcEARwmWmjgb3bHPDcK0RPOWlc4yOo80nOAXx17O +rg3bhzjlP1v9mxnhMUF6cKojawHhRUzNlM47ni3niAIi9G7oyOzWPPO5std3eqx7 +-----END CERTIFICATE----- + +# Issuer: CN=Telekom Security TLS ECC Root 2020 O=Deutsche Telekom Security GmbH +# Subject: CN=Telekom Security TLS ECC Root 2020 O=Deutsche Telekom Security GmbH +# Label: "Telekom Security TLS ECC Root 2020" +# Serial: 72082518505882327255703894282316633856 +# MD5 Fingerprint: c1:ab:fe:6a:10:2c:03:8d:bc:1c:22:32:c0:85:a7:fd +# SHA1 Fingerprint: c0:f8:96:c5:a9:3b:01:06:21:07:da:18:42:48:bc:e9:9d:88:d5:ec +# SHA256 Fingerprint: 57:8a:f4:de:d0:85:3f:4e:59:98:db:4a:ea:f9:cb:ea:8d:94:5f:60:b6:20:a3:8d:1a:3c:13:b2:bc:7b:a8:e1 +-----BEGIN CERTIFICATE----- +MIICQjCCAcmgAwIBAgIQNjqWjMlcsljN0AFdxeVXADAKBggqhkjOPQQDAzBjMQsw +CQYDVQQGEwJERTEnMCUGA1UECgweRGV1dHNjaGUgVGVsZWtvbSBTZWN1cml0eSBH +bWJIMSswKQYDVQQDDCJUZWxla29tIFNlY3VyaXR5IFRMUyBFQ0MgUm9vdCAyMDIw +MB4XDTIwMDgyNTA3NDgyMFoXDTQ1MDgyNTIzNTk1OVowYzELMAkGA1UEBhMCREUx +JzAlBgNVBAoMHkRldXRzY2hlIFRlbGVrb20gU2VjdXJpdHkgR21iSDErMCkGA1UE +AwwiVGVsZWtvbSBTZWN1cml0eSBUTFMgRUNDIFJvb3QgMjAyMDB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABM6//leov9Wq9xCazbzREaK9Z0LMkOsVGJDZos0MKiXrPk/O +tdKPD/M12kOLAoC+b1EkHQ9rK8qfwm9QMuU3ILYg/4gND21Ju9sGpIeQkpT0CdDP +f8iAC8GXs7s1J8nCG6NCMEAwHQYDVR0OBBYEFONyzG6VmUex5rNhTNHLq+O6zd6f +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2cA +MGQCMHVSi7ekEE+uShCLsoRbQuHmKjYC2qBuGT8lv9pZMo7k+5Dck2TOrbRBR2Di +z6fLHgIwN0GMZt9Ba9aDAEH9L1r3ULRn0SyocddDypwnJJGDSA3PzfdUga/sf+Rn +27iQ7t0l +-----END CERTIFICATE----- + +# Issuer: CN=Telekom Security TLS RSA Root 2023 O=Deutsche Telekom Security GmbH +# Subject: CN=Telekom Security TLS RSA Root 2023 O=Deutsche Telekom Security GmbH +# Label: "Telekom Security TLS RSA Root 2023" +# Serial: 44676229530606711399881795178081572759 +# MD5 Fingerprint: bf:5b:eb:54:40:cd:48:71:c4:20:8d:7d:de:0a:42:f2 +# SHA1 Fingerprint: 54:d3:ac:b3:bd:57:56:f6:85:9d:ce:e5:c3:21:e2:d4:ad:83:d0:93 +# 
SHA256 Fingerprint: ef:c6:5c:ad:bb:59:ad:b6:ef:e8:4d:a2:23:11:b3:56:24:b7:1b:3b:1e:a0:da:8b:66:55:17:4e:c8:97:86:46 +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIQIZxULej27HF3+k7ow3BXlzANBgkqhkiG9w0BAQwFADBj +MQswCQYDVQQGEwJERTEnMCUGA1UECgweRGV1dHNjaGUgVGVsZWtvbSBTZWN1cml0 +eSBHbWJIMSswKQYDVQQDDCJUZWxla29tIFNlY3VyaXR5IFRMUyBSU0EgUm9vdCAy +MDIzMB4XDTIzMDMyODEyMTY0NVoXDTQ4MDMyNzIzNTk1OVowYzELMAkGA1UEBhMC +REUxJzAlBgNVBAoMHkRldXRzY2hlIFRlbGVrb20gU2VjdXJpdHkgR21iSDErMCkG +A1UEAwwiVGVsZWtvbSBTZWN1cml0eSBUTFMgUlNBIFJvb3QgMjAyMzCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAO01oYGA88tKaVvC+1GDrib94W7zgRJ9 +cUD/h3VCKSHtgVIs3xLBGYSJwb3FKNXVS2xE1kzbB5ZKVXrKNoIENqil/Cf2SfHV +cp6R+SPWcHu79ZvB7JPPGeplfohwoHP89v+1VmLhc2o0mD6CuKyVU/QBoCcHcqMA +U6DksquDOFczJZSfvkgdmOGjup5czQRxUX11eKvzWarE4GC+j4NSuHUaQTXtvPM6 +Y+mpFEXX5lLRbtLevOP1Czvm4MS9Q2QTps70mDdsipWol8hHD/BeEIvnHRz+sTug +BTNoBUGCwQMrAcjnj02r6LX2zWtEtefdi+zqJbQAIldNsLGyMcEWzv/9FIS3R/qy +8XDe24tsNlikfLMR0cN3f1+2JeANxdKz+bi4d9s3cXFH42AYTyS2dTd4uaNir73J +co4vzLuu2+QVUhkHM/tqty1LkCiCc/4YizWN26cEar7qwU02OxY2kTLvtkCJkUPg +8qKrBC7m8kwOFjQgrIfBLX7JZkcXFBGk8/ehJImr2BrIoVyxo/eMbcgByU/J7MT8 +rFEz0ciD0cmfHdRHNCk+y7AO+oMLKFjlKdw/fKifybYKu6boRhYPluV75Gp6SG12 +mAWl3G0eQh5C2hrgUve1g8Aae3g1LDj1H/1Joy7SWWO/gLCMk3PLNaaZlSJhZQNg ++y+TS/qanIA7AgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtqeX +gj10hZv3PJ+TmpV5dVKMbUcwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS2 +p5eCPXSFm/c8n5OalXl1UoxtRzANBgkqhkiG9w0BAQwFAAOCAgEAqMxhpr51nhVQ +pGv7qHBFfLp+sVr8WyP6Cnf4mHGCDG3gXkaqk/QeoMPhk9tLrbKmXauw1GLLXrtm +9S3ul0A8Yute1hTWjOKWi0FpkzXmuZlrYrShF2Y0pmtjxrlO8iLpWA1WQdH6DErw +M807u20hOq6OcrXDSvvpfeWxm4bu4uB9tPcy/SKE8YXJN3nptT+/XOR0so8RYgDd +GGah2XsjX/GO1WfoVNpbOms2b/mBsTNHM3dA+VKq3dSDz4V4mZqTuXNnQkYRIer+ +CqkbGmVps4+uFrb2S1ayLfmlyOw7YqPta9BO1UAJpB+Y1zqlklkg5LB9zVtzaL1t +xKITDmcZuI1CfmwMmm6gJC3VRRvcxAIU/oVbZZfKTpBQCHpCNfnqwmbU+AGuHrS+ +w6jv/naaoqYfRvaE7fzbzsQCzndILIyy7MMAo+wsVRjBfhnu4S/yrYObnqsZ38aK +L4x35bcF7DvB7L6Gs4a8wPfc5+pbrrLMtTWGS9DiP7bY+A4A7l3j941Y/8+LN+lj +X273CXE2whJdV/LItM3z7gLfEdxquVeEHVlNjM7IDiPCtyaaEBRx/pOyiriA8A4Q +ntOoUAw3gi/q4Iqd4Sw5/7W0cwDk90imc6y/st53BIe0o82bNSQ3+pCTE4FCxpgm +dTdmQRCsu/WU48IxK63nI1bMNSWSs1A= +-----END CERTIFICATE----- + +# Issuer: CN=FIRMAPROFESIONAL CA ROOT-A WEB O=Firmaprofesional SA +# Subject: CN=FIRMAPROFESIONAL CA ROOT-A WEB O=Firmaprofesional SA +# Label: "FIRMAPROFESIONAL CA ROOT-A WEB" +# Serial: 65916896770016886708751106294915943533 +# MD5 Fingerprint: 82:b2:ad:45:00:82:b0:66:63:f8:5f:c3:67:4e:ce:a3 +# SHA1 Fingerprint: a8:31:11:74:a6:14:15:0d:ca:77:dd:0e:e4:0c:5d:58:fc:a0:72:a5 +# SHA256 Fingerprint: be:f2:56:da:f2:6e:9c:69:bd:ec:16:02:35:97:98:f3:ca:f7:18:21:a0:3e:01:82:57:c5:3c:65:61:7f:3d:4a +-----BEGIN CERTIFICATE----- +MIICejCCAgCgAwIBAgIQMZch7a+JQn81QYehZ1ZMbTAKBggqhkjOPQQDAzBuMQsw +CQYDVQQGEwJFUzEcMBoGA1UECgwTRmlybWFwcm9mZXNpb25hbCBTQTEYMBYGA1UE +YQwPVkFURVMtQTYyNjM0MDY4MScwJQYDVQQDDB5GSVJNQVBST0ZFU0lPTkFMIENB +IFJPT1QtQSBXRUIwHhcNMjIwNDA2MDkwMTM2WhcNNDcwMzMxMDkwMTM2WjBuMQsw +CQYDVQQGEwJFUzEcMBoGA1UECgwTRmlybWFwcm9mZXNpb25hbCBTQTEYMBYGA1UE +YQwPVkFURVMtQTYyNjM0MDY4MScwJQYDVQQDDB5GSVJNQVBST0ZFU0lPTkFMIENB +IFJPT1QtQSBXRUIwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARHU+osEaR3xyrq89Zf +e9MEkVz6iMYiuYMQYneEMy3pA4jU4DP37XcsSmDq5G+tbbT4TIqk5B/K6k84Si6C +cyvHZpsKjECcfIr28jlgst7L7Ljkb+qbXbdTkBgyVcUgt5SjYzBhMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAUk+FDY1w8ndYn81LsF7Kpryz3dvgwHQYDVR0O +BBYEFJPhQ2NcPJ3WJ/NS7Beyqa8s93b4MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjO +PQQDAwNoADBlAjAdfKR7w4l1M+E7qUW/Runpod3JIha3RxEL2Jq68cgLcFBTApFw 
+hVmpHqTm6iMxoAACMQD94vizrxa5HnPEluPBMBnYfubDl94cT7iJLzPrSA8Z94dG +XSaQpYXFuXqUPoeovQA= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA CYBER Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA CYBER Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA CYBER Root CA" +# Serial: 85076849864375384482682434040119489222 +# MD5 Fingerprint: 0b:33:a0:97:52:95:d4:a9:fd:bb:db:6e:a3:55:5b:51 +# SHA1 Fingerprint: f6:b1:1c:1a:83:38:e9:7b:db:b3:a8:c8:33:24:e0:2d:9c:7f:26:66 +# SHA256 Fingerprint: 3f:63:bb:28:14:be:17:4e:c8:b6:43:9c:f0:8d:6d:56:f0:b7:c4:05:88:3a:56:48:a3:34:42:4d:6b:3e:c5:58 +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIQQAE0jMIAAAAAAAAAATzyxjANBgkqhkiG9w0BAQwFADBQ +MQswCQYDVQQGEwJUVzESMBAGA1UEChMJVEFJV0FOLUNBMRAwDgYDVQQLEwdSb290 +IENBMRswGQYDVQQDExJUV0NBIENZQkVSIFJvb3QgQ0EwHhcNMjIxMTIyMDY1NDI5 +WhcNNDcxMTIyMTU1OTU5WjBQMQswCQYDVQQGEwJUVzESMBAGA1UEChMJVEFJV0FO +LUNBMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJUV0NBIENZQkVSIFJvb3Qg +Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDG+Moe2Qkgfh1sTs6P +40czRJzHyWmqOlt47nDSkvgEs1JSHWdyKKHfi12VCv7qze33Kc7wb3+szT3vsxxF +avcokPFhV8UMxKNQXd7UtcsZyoC5dc4pztKFIuwCY8xEMCDa6pFbVuYdHNWdZsc/ +34bKS1PE2Y2yHer43CdTo0fhYcx9tbD47nORxc5zb87uEB8aBs/pJ2DFTxnk684i +JkXXYJndzk834H/nY62wuFm40AZoNWDTNq5xQwTxaWV4fPMf88oon1oglWa0zbfu +j3ikRRjpJi+NmykosaS3Om251Bw4ckVYsV7r8Cibt4LK/c/WMw+f+5eesRycnupf +Xtuq3VTpMCEobY5583WSjCb+3MX2w7DfRFlDo7YDKPYIMKoNM+HvnKkHIuNZW0CP +2oi3aQiotyMuRAlZN1vH4xfyIutuOVLF3lSnmMlLIJXcRolftBL5hSmO68gnFSDA +S9TMfAxsNAwmmyYxpjyn9tnQS6Jk/zuZQXLB4HCX8SS7K8R0IrGsayIyJNN4KsDA +oS/xUgXJP+92ZuJF2A09rZXIx4kmyA+upwMu+8Ff+iDhcK2wZSA3M2Cw1a/XDBzC +kHDXShi8fgGwsOsVHkQGzaRP6AzRwyAQ4VRlnrZR0Bp2a0JaWHY06rc3Ga4udfmW +5cFZ95RXKSWNOkyrTZpB0F8mAwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBSdhWEUfMFib5do5E83QOGt4A1WNzAd +BgNVHQ4EFgQUnYVhFHzBYm+XaORPN0DhreANVjcwDQYJKoZIhvcNAQEMBQADggIB +AGSPesRiDrWIzLjHhg6hShbNcAu3p4ULs3a2D6f/CIsLJc+o1IN1KriWiLb73y0t +tGlTITVX1olNc79pj3CjYcya2x6a4CD4bLubIp1dhDGaLIrdaqHXKGnK/nZVekZn +68xDiBaiA9a5F/gZbG0jAn/xX9AKKSM70aoK7akXJlQKTcKlTfjF/biBzysseKNn +TKkHmvPfXvt89YnNdJdhEGoHK4Fa0o635yDRIG4kqIQnoVesqlVYL9zZyvpoBJ7t +RCT5dEA7IzOrg1oYJkK2bVS1FmAwbLGg+LhBoF1JSdJlBTrq/p1hvIbZv97Tujqx +f36SNI7JAG7cmL3c7IAFrQI932XtCwP39xaEBDG6k5TY8hL4iuO/Qq+n1M0RFxbI +Qh0UqEL20kCGoE8jypZFVmAGzbdVAaYBlGX+bgUJurSkquLvWL69J1bY73NxW0Qz +8ppy6rBePm6pUlvscG21h483XjyMnM7k8M4MZ0HMzvaAq07MTFb1wWFZk7Q+ptq4 +NxKfKjLji7gh7MMrZQzvIt6IKTtM1/r+t+FHvpw+PoP7UV31aPcuIYXcv/Fa4nzX +xeSDwWrruoBa3lwtcHb4yOWHh8qgnaHlIhInD0Q9HWzq1MKLL295q39QpsQZp6F6 +t5b5wR9iWqJDB0BeJsas7a5wFsWqynKKTbDPAYsDP27X +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign Root CA12 O=Cybertrust Japan Co., Ltd. +# Subject: CN=SecureSign Root CA12 O=Cybertrust Japan Co., Ltd. 
+# Label: "SecureSign Root CA12" +# Serial: 587887345431707215246142177076162061960426065942 +# MD5 Fingerprint: c6:89:ca:64:42:9b:62:08:49:0b:1e:7f:e9:07:3d:e8 +# SHA1 Fingerprint: 7a:22:1e:3d:de:1b:06:ac:9e:c8:47:70:16:8e:3c:e5:f7:6b:06:f4 +# SHA256 Fingerprint: 3f:03:4b:b5:70:4d:44:b2:d0:85:45:a0:20:57:de:93:eb:f3:90:5f:ce:72:1a:cb:c7:30:c0:6d:da:ee:90:4e +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUZvnHwa/swlG07VOX5uaCwysckBYwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28u +LCBMdGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExMjAeFw0yMDA0MDgw +NTM2NDZaFw00MDA0MDgwNTM2NDZaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpD +eWJlcnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBS +b290IENBMTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6OcE3emhF +KxS06+QT61d1I02PJC0W6K6OyX2kVzsqdiUzg2zqMoqUm048luT9Ub+ZyZN+v/mt +p7JIKwccJ/VMvHASd6SFVLX9kHrko+RRWAPNEHl57muTH2SOa2SroxPjcf59q5zd +J1M3s6oYwlkm7Fsf0uZlfO+TvdhYXAvA42VvPMfKWeP+bl+sg779XSVOKik71gur +FzJ4pOE+lEa+Ym6b3kaosRbnhW70CEBFEaCeVESE99g2zvVQR9wsMJvuwPWW0v4J +hscGWa5Pro4RmHvzC1KqYiaqId+OJTN5lxZJjfU+1UefNzFJM3IFTQy2VYzxV4+K +h9GtxRESOaCtAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBRXNPN0zwRL1SXm8UC2LEzZLemgrTANBgkqhkiG9w0BAQsF +AAOCAQEAPrvbFxbS8hQBICw4g0utvsqFepq2m2um4fylOqyttCg6r9cBg0krY6Ld +mmQOmFxv3Y67ilQiLUoT865AQ9tPkbeGGuwAtEGBpE/6aouIs3YIcipJQMPTw4WJ +mBClnW8Zt7vPemVV2zfrPIpyMpcemik+rY3moxtt9XUa5rBouVui7mlHJzWhhpmA +8zNL4WukJsPvdFlseqJkth5Ew1DgDzk9qTPxpfPSvWKErI4cqc1avTc7bgoitPQV +55FYxTpE05Uo2cBl6XLK0A+9H7MV2anjpEcJnuDLN/v9vZfVvhgaaaI5gdka9at/ +yOPiZwud9AzqVN/Ssq+xIvEg37xEHA== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign Root CA14 O=Cybertrust Japan Co., Ltd. +# Subject: CN=SecureSign Root CA14 O=Cybertrust Japan Co., Ltd. +# Label: "SecureSign Root CA14" +# Serial: 575790784512929437950770173562378038616896959179 +# MD5 Fingerprint: 71:0d:72:fa:92:19:65:5e:89:04:ac:16:33:f0:bc:d5 +# SHA1 Fingerprint: dd:50:c0:f7:79:b3:64:2e:74:a2:b8:9d:9f:d3:40:dd:bb:f0:f2:4f +# SHA256 Fingerprint: 4b:00:9c:10:34:49:4f:9a:b5:6b:ba:3b:a1:d6:27:31:fc:4d:20:d8:95:5a:dc:ec:10:a9:25:60:72:61:e3:38 +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIUZNtaDCBO6Ncpd8hQJ6JaJ90t8sswDQYJKoZIhvcNAQEM +BQAwUTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28u +LCBMdGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExNDAeFw0yMDA0MDgw +NzA2MTlaFw00NTA0MDgwNzA2MTlaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpD +eWJlcnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBS +b290IENBMTQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDF0nqh1oq/ +FjHQmNE6lPxauG4iwWL3pwon71D2LrGeaBLwbCRjOfHw3xDG3rdSINVSW0KZnvOg +vlIfX8xnbacuUKLBl422+JX1sLrcneC+y9/3OPJH9aaakpUqYllQC6KxNedlsmGy +6pJxaeQp8E+BgQQ8sqVb1MWoWWd7VRxJq3qdwudzTe/NCcLEVxLbAQ4jeQkHO6Lo +/IrPj8BGJJw4J+CDnRugv3gVEOuGTgpa/d/aLIJ+7sr2KeH6caH3iGicnPCNvg9J +kdjqOvn90Ghx2+m1K06Ckm9mH+Dw3EzsytHqunQG+bOEkJTRX45zGRBdAuVwpcAQ +0BB8b8VYSbSwbprafZX1zNoCr7gsfXmPvkPx+SgojQlD+Ajda8iLLCSxjVIHvXib +y8posqTdDEx5YMaZ0ZPxMBoH064iwurO8YQJzOAUbn8/ftKChazcqRZOhaBgy/ac +18izju3Gm5h1DVXoX+WViwKkrkMpKBGk5hIwAUt1ax5mnXkvpXYvHUC0bcl9eQjs +0Wq2XSqypWa9a4X0dFbD9ed1Uigspf9mR6XU/v6eVL9lfgHWMI+lNpyiUBzuOIAB +SMbHdPTGrMNASRZhdCyvjG817XsYAFs2PJxQDcqSMxDxJklt33UkN4Ii1+iW/RVL +ApY+B3KVfqs9TC7XyvDf4Fg/LS8EmjijAQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUBpOjCl4oaTeqYR3r6/wtbyPk +86AwDQYJKoZIhvcNAQEMBQADggIBAJaAcgkGfpzMkwQWu6A6jZJOtxEaCnFxEM0E +rX+lRVAQZk5KQaID2RFPeje5S+LGjzJmdSX7684/AykmjbgWHfYfM25I5uj4V7Ib +ed87hwriZLoAymzvftAj63iP/2SbNDefNWWipAA9EiOWWF3KY4fGoweITedpdopT 
+zfFP7ELyk+OZpDc8h7hi2/DsHzc/N19DzFGdtfCXwreFamgLRB7lUe6TzktuhsHS +DCRZNhqfLJGP4xjblJUK7ZGqDpncllPjYYPGFrojutzdfhrGe0K22VoF3Jpf1d+4 +2kd92jjbrDnVHmtsKheMYc2xbXIBw8MgAGJoFjHVdqqGuw6qnsb58Nn4DSEC5MUo +FlkRudlpcyqSeLiSV5sI8jrlL5WwWLdrIBRtFO8KvH7YVdiI2i/6GaX7i+B/OfVy +K4XELKzvGUWSTLNhB9xNH27SgRNcmvMSZ4PPmz+Ln52kuaiWA3rF7iDeM9ovnhp6 +dB7h7sxaOgTdsxoEqBRjrLdHEoOabPXm6RUVkRqEGQ6UROcSjiVbgGcZ3GOTEAtl +Lor6CZpO2oYofaphNdgOpygau1LgePhsumywbrmHXumZNTfxPWQrqaA0k89jL9WB +365jJ6UeTo3cKXhZ+PmhIIynJkBugnLNeLLIjzwec+fBH7/PzqUqm9tEZDKgu39c +JRNItX+S +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign Root CA15 O=Cybertrust Japan Co., Ltd. +# Subject: CN=SecureSign Root CA15 O=Cybertrust Japan Co., Ltd. +# Label: "SecureSign Root CA15" +# Serial: 126083514594751269499665114766174399806381178503 +# MD5 Fingerprint: 13:30:fc:c4:62:a6:a9:de:b5:c1:68:af:b5:d2:31:47 +# SHA1 Fingerprint: cb:ba:83:c8:c1:5a:5d:f1:f9:73:6f:ca:d7:ef:28:13:06:4a:07:7d +# SHA256 Fingerprint: e7:78:f0:f0:95:fe:84:37:29:cd:1a:00:82:17:9e:53:14:a9:c2:91:44:28:05:e1:fb:1d:8f:b6:b8:88:6c:3a +-----BEGIN CERTIFICATE----- +MIICIzCCAamgAwIBAgIUFhXHw9hJp75pDIqI7fBw+d23PocwCgYIKoZIzj0EAwMw +UTELMAkGA1UEBhMCSlAxIzAhBgNVBAoTGkN5YmVydHJ1c3QgSmFwYW4gQ28uLCBM +dGQuMR0wGwYDVQQDExRTZWN1cmVTaWduIFJvb3QgQ0ExNTAeFw0yMDA0MDgwODMy +NTZaFw00NTA0MDgwODMyNTZaMFExCzAJBgNVBAYTAkpQMSMwIQYDVQQKExpDeWJl +cnRydXN0IEphcGFuIENvLiwgTHRkLjEdMBsGA1UEAxMUU2VjdXJlU2lnbiBSb290 +IENBMTUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQLUHSNZDKZmbPSYAi4Io5GdCx4 +wCtELW1fHcmuS1Iggz24FG1Th2CeX2yF2wYUleDHKP+dX+Sq8bOLbe1PL0vJSpSR +ZHX+AezB2Ot6lHhWGENfa4HL9rzatAy2KZMIaY+jQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTrQciu/NWeUUj1vYv0hyCTQSvT +9DAKBggqhkjOPQQDAwNoADBlAjEA2S6Jfl5OpBEHvVnCB96rMjhTKkZEBhd6zlHp +4P9mLQlO4E/0BdGF9jVg3PVys0Z9AjBEmEYagoUeYWmJSwdLZrWeqrqgHkHZAXQ6 +bkU6iYAZezKYVWOr62Nuk22rGwlgMU4= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST BR Root CA 2 2023 O=D-Trust GmbH +# Subject: CN=D-TRUST BR Root CA 2 2023 O=D-Trust GmbH +# Label: "D-TRUST BR Root CA 2 2023" +# Serial: 153168538924886464690566649552453098598 +# MD5 Fingerprint: e1:09:ed:d3:60:d4:56:1b:47:1f:b7:0c:5f:1b:5f:85 +# SHA1 Fingerprint: 2d:b0:70:ee:71:94:af:69:68:17:db:79:ce:58:9f:a0:6b:96:f7:87 +# SHA256 Fingerprint: 05:52:e6:f8:3f:df:65:e8:fa:96:70:e6:66:df:28:a4:e2:13:40:b5:10:cb:e5:25:66:f9:7c:4f:b9:4b:2b:d1 +-----BEGIN CERTIFICATE----- +MIIFqTCCA5GgAwIBAgIQczswBEhb2U14LnNLyaHcZjANBgkqhkiG9w0BAQ0FADBI +MQswCQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlE +LVRSVVNUIEJSIFJvb3QgQ0EgMiAyMDIzMB4XDTIzMDUwOTA4NTYzMVoXDTM4MDUw +OTA4NTYzMFowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEi +MCAGA1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDIgMjAyMzCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAK7/CVmRgApKaOYkP7in5Mg6CjoWzckjYaCTcfKr +i3OPoGdlYNJUa2NRb0kz4HIHE304zQaSBylSa053bATTlfrdTIzZXcFhfUvnKLNE +gXtRr90zsWh81k5M/itoucpmacTsXld/9w3HnDY25QdgrMBM6ghs7wZ8T1soegj8 +k12b9py0i4a6Ibn08OhZWiihNIQaJZG2tY/vsvmA+vk9PBFy2OMvhnbFeSzBqZCT +Rphny4NqoFAjpzv2gTng7fC5v2Xx2Mt6++9zA84A9H3X4F07ZrjcjrqDy4d2A/wl +2ecjbwb9Z/Pg/4S8R7+1FhhGaRTMBffb00msa8yr5LULQyReS2tNZ9/WtT5PeB+U +cSTq3nD88ZP+npNa5JRal1QMNXtfbO4AHyTsA7oC9Xb0n9Sa7YUsOCIvx9gvdhFP +/Wxc6PWOJ4d/GUohR5AdeY0cW/jPSoXk7bNbjb7EZChdQcRurDhaTyN0dKkSw/bS +uREVMweR2Ds3OmMwBtHFIjYoYiMQ4EbMl6zWK11kJNXuHA7e+whadSr2Y23OC0K+ +0bpwHJwh5Q8xaRfX/Aq03u2AnMuStIv13lmiWAmlY0cL4UEyNEHZmrHZqLAbWt4N +DfTisl01gLmB1IRpkQLLddCNxbU9CZEJjxShFHR5PtbJFR2kWVki3PaKRT08EtY+ +XTIvAgMBAAGjgY4wgYswDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUZ5Dw1t61 
+GNVGKX5cq/ieCLxklRAwDgYDVR0PAQH/BAQDAgEGMEkGA1UdHwRCMEAwPqA8oDqG +OGh0dHA6Ly9jcmwuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3RfYnJfcm9vdF9jYV8y +XzIwMjMuY3JsMA0GCSqGSIb3DQEBDQUAA4ICAQA097N3U9swFrktpSHxQCF16+tI +FoE9c+CeJyrrd6kTpGoKWloUMz1oH4Guaf2Mn2VsNELZLdB/eBaxOqwjMa1ef67n +riv6uvw8l5VAk1/DLQOj7aRvU9f6QA4w9QAgLABMjDu0ox+2v5Eyq6+SmNMW5tTR +VFxDWy6u71cqqLRvpO8NVhTaIasgdp4D/Ca4nj8+AybmTNudX0KEPUUDAxxZiMrc +LmEkWqTqJwtzEr5SswrPMhfiHocaFpVIbVrg0M8JkiZmkdijYQ6qgYF/6FKC0ULn +4B0Y+qSFNueG4A3rvNTJ1jxD8V1Jbn6Bm2m1iWKPiFLY1/4nwSPFyysCu7Ff/vtD +hQNGvl3GyiEm/9cCnnRK3PgTFbGBVzbLZVzRHTF36SXDw7IyN9XxmAnkbWOACKsG +koHU6XCPpz+y7YaMgmo1yEJagtFSGkUPFaUA8JR7ZSdXOUPPfH/mvTWze/EZTN46 +ls/pdu4D58JDUjxqgejBWoC9EV2Ta/vH5mQ/u2kc6d0li690yVRAysuTEwrt+2aS +Ecr1wPrYg1UDfNPFIkZ1cGt5SAYqgpq/5usWDiJFAbzdNpQ0qTUmiteXue4Icr80 +knCDgKs4qllo3UCkGJCy89UDyibK79XH4I9TjvAA46jtn/mtd+ArY0+ew+43u3gJ +hJ65bvspmZDogNOfJA== +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST EV Root CA 2 2023 O=D-Trust GmbH +# Subject: CN=D-TRUST EV Root CA 2 2023 O=D-Trust GmbH +# Label: "D-TRUST EV Root CA 2 2023" +# Serial: 139766439402180512324132425437959641711 +# MD5 Fingerprint: 96:b4:78:09:f0:09:cb:77:eb:bb:1b:4d:6f:36:bc:b6 +# SHA1 Fingerprint: a5:5b:d8:47:6c:8f:19:f7:4c:f4:6d:6b:b6:c2:79:82:22:df:54:8b +# SHA256 Fingerprint: 8e:82:21:b2:e7:d4:00:78:36:a1:67:2f:0d:cc:29:9c:33:bc:07:d3:16:f1:32:fa:1a:20:6d:58:71:50:f1:ce +-----BEGIN CERTIFICATE----- +MIIFqTCCA5GgAwIBAgIQaSYJfoBLTKCnjHhiU19abzANBgkqhkiG9w0BAQ0FADBI +MQswCQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlE +LVRSVVNUIEVWIFJvb3QgQ0EgMiAyMDIzMB4XDTIzMDUwOTA5MTAzM1oXDTM4MDUw +OTA5MTAzMlowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEi +MCAGA1UEAxMZRC1UUlVTVCBFViBSb290IENBIDIgMjAyMzCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANiOo4mAC7JXUtypU0w3uX9jFxPvp1sjW2l1sJkK +F8GLxNuo4MwxusLyzV3pt/gdr2rElYfXR8mV2IIEUD2BCP/kPbOx1sWy/YgJ25yE +7CUXFId/MHibaljJtnMoPDT3mfd/06b4HEV8rSyMlD/YZxBTfiLNTiVR8CUkNRFe +EMbsh2aJgWi6zCudR3Mfvc2RpHJqnKIbGKBv7FD0fUDCqDDPvXPIEysQEx6Lmqg6 +lHPTGGkKSv/BAQP/eX+1SH977ugpbzZMlWGG2Pmic4ruri+W7mjNPU0oQvlFKzIb +RlUWaqZLKfm7lVa/Rh3sHZMdwGWyH6FDrlaeoLGPaxK3YG14C8qKXO0elg6DpkiV +jTujIcSuWMYAsoS0I6SWhjW42J7YrDRJmGOVxcttSEfi8i4YHtAxq9107PncjLgc +jmgjutDzUNzPZY9zOjLHfP7KgiJPvo5iR2blzYfi6NUPGJ/lBHJLRjwQ8kTCZFZx +TnXonMkmdMV9WdEKWw9t/p51HBjGGjp82A0EzM23RWV6sY+4roRIPrN6TagD4uJ+ +ARZZaBhDM7DS3LAaQzXupdqpRlyuhoFBAUp0JuyfBr/CBTdkdXgpaP3F9ev+R/nk +hbDhezGdpn9yo7nELC7MmVcOIQxFAZRl62UJxmMiCzNJkkg8/M3OsD6Onov4/knF +NXJHAgMBAAGjgY4wgYswDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUqvyREBuH +kV8Wub9PS5FeAByxMoAwDgYDVR0PAQH/BAQDAgEGMEkGA1UdHwRCMEAwPqA8oDqG +OGh0dHA6Ly9jcmwuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3RfZXZfcm9vdF9jYV8y +XzIwMjMuY3JsMA0GCSqGSIb3DQEBDQUAA4ICAQCTy6UfmRHsmg1fLBWTxj++EI14 +QvBukEdHjqOSMo1wj/Zbjb6JzkcBahsgIIlbyIIQbODnmaprxiqgYzWRaoUlrRc4 +pZt+UPJ26oUFKidBK7GB0aL2QHWpDsvxVUjY7NHss+jOFKE17MJeNRqrphYBBo7q +3C+jisosketSjl8MmxfPy3MHGcRqwnNU73xDUmPBEcrCRbH0O1P1aa4846XerOhU +t7KR/aypH/KH5BfGSah82ApB9PI+53c0BFLd6IHyTS9URZ0V4U/M5d40VxDJI3IX +cI1QcB9WbMy5/zpaT2N6w25lBx2Eof+pDGOJbbJAiDnXH3dotfyc1dZnaVuodNv8 +ifYbMvekJKZ2t0dT741Jj6m2g1qllpBFYfXeA08mD6iL8AOWsKwV0HFaanuU5nCT +2vFp4LJiTZ6P/4mdm13NRemUAiKN4DV/6PEEeXFsVIP4M7kFMhtYVRFP0OUnR3Hs +7dpn1mKmS00PaaLJvOwiS5THaJQXfuKOKD62xur1NGyfN4gHONuGcfrNlUhDbqNP +gofXNJhuS5N5YHVpD/Aa1VP6IQzCP+k/HxiMkl14p3ZnGbuy6n/pcAlWVqOwDAst +Nl7F6cTVg8uGF5csbBNvh1qvSaYd2804BC5f4ko1Di1L+KIkBI3Y4WNeApI02phh +XBxvWHZks/wCuPWdCg== +-----END CERTIFICATE----- diff --git a/venv/Lib/site-packages/certifi/core.py b/venv/Lib/site-packages/certifi/core.py new file mode 100644 index 
00000000..91f538bb --- /dev/null +++ b/venv/Lib/site-packages/certifi/core.py @@ -0,0 +1,114 @@ +""" +certifi.py +~~~~~~~~~~ + +This module returns the installation location of cacert.pem or its contents. +""" +import sys +import atexit + +def exit_cacert_ctx() -> None: + _CACERT_CTX.__exit__(None, None, None) # type: ignore[union-attr] + + +if sys.version_info >= (3, 11): + + from importlib.resources import as_file, files + + _CACERT_CTX = None + _CACERT_PATH = None + + def where() -> str: + # This is slightly terrible, but we want to delay extracting the file + # in cases where we're inside of a zipimport situation until someone + # actually calls where(), but we don't want to re-extract the file + # on every call of where(), so we'll do it once then store it in a + # global variable. + global _CACERT_CTX + global _CACERT_PATH + if _CACERT_PATH is None: + # This is slightly janky, the importlib.resources API wants you to + # manage the cleanup of this file, so it doesn't actually return a + # path, it returns a context manager that will give you the path + # when you enter it and will do any cleanup when you leave it. In + # the common case of not needing a temporary file, it will just + # return the file system location and the __exit__() is a no-op. + # + # We also have to hold onto the actual context manager, because + # it will do the cleanup whenever it gets garbage collected, so + # we will also store that at the global level as well. + _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem")) + _CACERT_PATH = str(_CACERT_CTX.__enter__()) + atexit.register(exit_cacert_ctx) + + return _CACERT_PATH + + def contents() -> str: + return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii") + +elif sys.version_info >= (3, 7): + + from importlib.resources import path as get_path, read_text + + _CACERT_CTX = None + _CACERT_PATH = None + + def where() -> str: + # This is slightly terrible, but we want to delay extracting the + # file in cases where we're inside of a zipimport situation until + # someone actually calls where(), but we don't want to re-extract + # the file on every call of where(), so we'll do it once then store + # it in a global variable. + global _CACERT_CTX + global _CACERT_PATH + if _CACERT_PATH is None: + # This is slightly janky, the importlib.resources API wants you + # to manage the cleanup of this file, so it doesn't actually + # return a path, it returns a context manager that will give + # you the path when you enter it and will do any cleanup when + # you leave it. In the common case of not needing a temporary + # file, it will just return the file system location and the + # __exit__() is a no-op. + # + # We also have to hold onto the actual context manager, because + # it will do the cleanup whenever it gets garbage collected, so + # we will also store that at the global level as well. + _CACERT_CTX = get_path("certifi", "cacert.pem") + _CACERT_PATH = str(_CACERT_CTX.__enter__()) + atexit.register(exit_cacert_ctx) + + return _CACERT_PATH + + def contents() -> str: + return read_text("certifi", "cacert.pem", encoding="ascii") + +else: + import os + import types + from typing import Union + + Package = Union[types.ModuleType, str] + Resource = Union[str, "os.PathLike"] + + # This fallback will work for Python versions prior to 3.7 that lack the + # importlib.resources module but relies on the existing `where` function + # so won't address issues with environments like PyOxidizer that don't set + # __file__ on modules. 
+ def read_text( + package: Package, + resource: Resource, + encoding: str = 'utf-8', + errors: str = 'strict' + ) -> str: + with open(where(), encoding=encoding) as data: + return data.read() + + # If we don't have importlib.resources, then we will just do the old logic + # of assuming we're on the filesystem and munge the path directly. + def where() -> str: + f = os.path.dirname(__file__) + + return os.path.join(f, "cacert.pem") + + def contents() -> str: + return read_text("certifi", "cacert.pem", encoding="ascii") diff --git a/venv/Lib/site-packages/certifi/py.typed b/venv/Lib/site-packages/certifi/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/INSTALLER b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/METADATA b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/METADATA new file mode 100644 index 00000000..573d88b9 --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/METADATA @@ -0,0 +1,731 @@ +Metadata-Version: 2.4 +Name: charset-normalizer +Version: 3.4.2 +Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet. +Author-email: "Ahmed R. TAHRI" +Maintainer-email: "Ahmed R. TAHRI" +License: MIT +Project-URL: Changelog, https://github.com/jawah/charset_normalizer/blob/master/CHANGELOG.md +Project-URL: Documentation, https://charset-normalizer.readthedocs.io/ +Project-URL: Code, https://github.com/jawah/charset_normalizer +Project-URL: Issue tracker, https://github.com/jawah/charset_normalizer/issues +Keywords: encoding,charset,charset-detector,detector,normalization,unicode,chardet,detect +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Text Processing :: Linguistic +Classifier: Topic :: Utilities +Classifier: Typing :: Typed +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +Provides-Extra: unicode-backport +Dynamic: license-file + +

+# Charset Detection, for Everyone 👋
+
+*The Real First Universal Charset Detector*
+
+[Badge links omitted: download counts, featured packages, and unofficial community ports to other languages.]
+
+> A library that helps you read text from an unknown charset encoding.
+> Motivated by `chardet`, I'm trying to resolve the issue by taking a new approach.
+> All IANA character set names for which the Python core library provides codecs are supported.
+

+>>>>> 👉 Try Me Online Now, Then Adopt Me 👈 <<<<<

+
+This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.
+
+| Feature | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
+|--------------------------------------------------|:---:|:---:|:---:|
+| `Fast` | ❌ | ✅ | ✅ |
+| `Universal**` | ❌ | ✅ | ❌ |
+| `Reliable` **without** distinguishable standards | ❌ | ✅ | ✅ |
+| `Reliable` **with** distinguishable standards | ✅ | ✅ | ✅ |
+| `License` | LGPL-2.1 (_restrictive_) | MIT | MPL-1.1 (_restrictive_) |
+| `Native Python` | ✅ | ✅ | ❌ |
+| `Detect spoken language` | ❌ | ✅ | N/A |
+| `UnicodeDecodeError Safety` | ❌ | ✅ | ❌ |
+| `Whl Size (min)` | 193.6 kB | 42 kB | ~200 kB |
+| `Supported Encoding` | 33 | 🎉 [99](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40 |


+
+*\*\* : They clearly use code specific to each encoding, even if that code covers most of the encodings in use.*
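+
+As a quick illustration of the `UnicodeDecodeError Safety` row above, here is a minimal sketch (the byte payload is invented for the example): the API hands back candidate matches instead of raising on awkward input.
+
+```python
+from charset_normalizer import from_bytes
+
+# A UTF-8 BOM followed by accented UTF-8 text.
+payload = b"\xef\xbb\xbfLe caf\xc3\xa9 fran\xc3\xa7ais."
+
+matches = from_bytes(payload)  # never raises UnicodeDecodeError
+best = matches.best()          # may be None if nothing fits at all
+
+if best is not None:
+    print(best.encoding)  # e.g. "utf_8"
+    print(str(best))      # the decoded, normalized text
+```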
+
+## ⚡ Performance
+
+This package offers better performance than its counterpart, Chardet. Here are some numbers.
+
+| Package | Accuracy | Mean per file (ms) | File per sec (est) |
+|-----------------------------------------------|:--------:|:------------------:|:------------------:|
+| [chardet](https://github.com/chardet/chardet) | 86 % | 63 ms | 16 file/sec |
+| charset-normalizer | **98 %** | **10 ms** | 100 file/sec |
+
+| Package | 99th percentile | 95th percentile | 50th percentile |
+|-----------------------------------------------|:---------------:|:---------------:|:---------------:|
+| [chardet](https://github.com/chardet/chardet) | 265 ms | 71 ms | 7 ms |
+| charset-normalizer | 100 ms | 50 ms | 5 ms |
+
+_updated as of December 2024 using CPython 3.12_
+
+Chardet's performance on larger files (1MB+) is very poor. Expect a huge difference on large payloads.
+
+> Stats are generated using 400+ files with default parameters. For more details on the files used, see the GHA workflows.
+> And yes, these results might change at any time. The dataset can be updated to include more files.
+> The actual delays depend heavily on your CPU capabilities. The factors should remain the same.
+> Keep in mind that the stats are generous and that Chardet's accuracy versus ours is measured using Chardet's initial
+> capability (e.g. supported encodings). Challenge them if you want.
+
+## ✨ Installation
+
+Using pip:
+
+```sh
+pip install charset-normalizer -U
+```
+
+## 🚀 Basic Usage
+
+### CLI
+This package comes with a CLI.
+
+```
+usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
+                  file [file ...]
+
+The Real First Universal Charset Detector. Discover originating encoding used
+on text file. Normalize text to unicode.
+
+positional arguments:
+  files                 File(s) to be analysed
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -v, --verbose         Display complementary information about file if any.
+                        Stdout will contain logs about the detection process.
+  -a, --with-alternative
+                        Output complementary possibilities if any. Top-level
+                        JSON WILL be a list.
+  -n, --normalize       Permit to normalize input file. If not set, program
+                        does not write anything.
+  -m, --minimal         Only output the charset detected to STDOUT. Disabling
+                        JSON output.
+  -r, --replace         Replace file when trying to normalize it instead of
+                        creating a new one.
+  -f, --force           Replace file without asking if you are sure, use this
+                        flag with caution.
+  -t THRESHOLD, --threshold THRESHOLD
+                        Define a custom maximum amount of chaos allowed in
+                        decoded content. 0. <= chaos <= 1.
+  --version             Show version information and exit.
+```
+
+```bash
+normalizer ./data/sample.1.fr.srt
+```
+
+or
+
+```bash
+python -m charset_normalizer ./data/sample.1.fr.srt
+```
+
+🎉 Since version 1.4.0 the CLI produces an easily usable stdout result in JSON format.
+
+```json
+{
+    "path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
+    "encoding": "cp1252",
+    "encoding_aliases": [
+        "1252",
+        "windows_1252"
+    ],
+    "alternative_encodings": [
+        "cp1254",
+        "cp1256",
+        "cp1258",
+        "iso8859_14",
+        "iso8859_15",
+        "iso8859_16",
+        "iso8859_3",
+        "iso8859_9",
+        "latin_1",
+        "mbcs"
+    ],
+    "language": "French",
+    "alphabets": [
+        "Basic Latin",
+        "Latin-1 Supplement"
+    ],
+    "has_sig_or_bom": false,
+    "chaos": 0.149,
+    "coherence": 97.152,
+    "unicode_path": null,
+    "is_preferred": true
+}
+```
+
+### Python
+*Just print out normalized text*
+
+```python
+from charset_normalizer import from_path
+
+results = from_path('./my_subtitle.srt')
+
+print(str(results.best()))
+```
+
+*Upgrade your code without effort*
+
+```python
+from charset_normalizer import detect
+```
+
+The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) BC result possible.
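+
+As a minimal sketch of that drop-in path (the byte payload and printed values are illustrative):
+
+```python
+from charset_normalizer import detect
+
+# Same call shape as chardet.detect(): feed raw bytes, get a dict back.
+result = detect("Où est le café ?".encode("cp1252"))
+
+print(result["encoding"])    # guessed codec name, e.g. "cp1252"
+print(result["language"])    # best-effort language guess, e.g. "French"
+print(result["confidence"])  # float between 0.0 and 1.0
+```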
+
+See the docs for advanced usage: [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)
+
+## 😇 Why
+
+When I started using Chardet, I noticed that it did not suit my expectations, and I wanted to propose a
+reliable alternative using a completely different method. Also! I never back down from a good challenge!
+
+I **don't care** about the **originating charset** encoding, because **two different tables** can
+produce **two identical rendered strings.**
+What I want is to get readable text, the best I can.
+
+In a way, **I'm brute forcing text decoding.** How cool is that? 😎
+
+Don't confuse the package **ftfy** with charset-normalizer or chardet. ftfy's goal is to repair broken Unicode strings, whereas charset-normalizer's is to convert a raw file in an unknown encoding to Unicode.
+
+## 🍰 How
+
+ - Discard all charset encoding tables that could not fit the binary content.
+ - Measure the noise, or the mess, once opened (by chunks) with a corresponding charset encoding.
+ - Extract the matches with the lowest mess detected.
+ - Additionally, we measure coherence / probe for a language.
+
+**Wait a minute**, what is noise/mess and coherence according to **YOU?**
+
+*Noise:* I opened hundreds of text files, **written by humans**, with the wrong encoding table. **I observed**, then
+**I established** some ground rules about **what is obvious** when **it seems like** a mess (aka. defining noise in rendered text).
+I know that my interpretation of what is noise is probably incomplete; feel free to contribute in order to
+improve or rewrite it.
+
+*Coherence:* For each language on earth, we have computed ranked letter-appearance occurrences (the best we can). So I thought
+that intel is worth something here. So I use those records against decoded text to check if I can detect intelligent design.
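+
+To make the noise idea concrete, here is a deliberately tiny sketch of that discard-then-score loop. It is **not** the library's actual algorithm (the real probes are far richer); the candidate shortlist and the mess heuristic below are invented for illustration:
+
+```python
+# Toy illustration only -- NOT charset-normalizer's real implementation.
+CANDIDATES = ["utf_8", "cp1252", "latin_1"]  # hypothetical shortlist
+
+def mess_ratio(text: str) -> float:
+    """Crude noise probe: fraction of characters that rarely appear in
+    human-written text (stray control codes and the like)."""
+    suspicious = sum(
+        1 for ch in text
+        if not ch.isprintable() and ch not in "\r\n\t"
+    )
+    return suspicious / max(len(text), 1)
+
+def guess(payload: bytes) -> str | None:
+    scored = []
+    for codec in CANDIDATES:
+        try:
+            decoded = payload.decode(codec)  # strict: a bad fit raises
+        except UnicodeDecodeError:
+            continue  # table could not fit the binary content: discard
+        scored.append((mess_ratio(decoded), codec))
+    # Keep the candidate with the lowest mess detected.
+    return min(scored)[1] if scored else None
+
+print(guess("Le café".encode("cp1252")))  # "cp1252" (ties break alphabetically in this toy)
+```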
+
+## ⚡ Known limitations
+
+ - Language detection is unreliable when text contains two or more languages sharing identical letters. (e.g. HTML (English tags) + Turkish content (sharing Latin characters))
+ - Every charset detector heavily depends on sufficient content. In common cases, do not bother running detection on very tiny content.
+
+## ⚠️ About Python EOLs
+
+**If you are running:**
+
+- Python >=2.7,<3.5: Unsupported
+- Python 3.5: charset-normalizer < 2.1
+- Python 3.6: charset-normalizer < 3.1
+- Python 3.7: charset-normalizer < 4.0
+
+Upgrade your Python interpreter as soon as possible.
+
+## 👤 Contributing
+
+Contributions, issues and feature requests are very much welcome.
+Feel free to check the [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute.
+
+## 📝 License
+
+Copyright © [Ahmed TAHRI @Ousret](https://github.com/Ousret).
+This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed. + +Characters frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/) + +## 💼 For Enterprise + +Professional support for charset-normalizer is available as part of the [Tidelift +Subscription][1]. Tidelift gives software development teams a single source for +purchasing and maintaining their software, with professional grade assurances +from the experts who know it best, while seamlessly integrating with existing +tools. + +[1]: https://tidelift.com/subscription/pkg/pypi-charset-normalizer?utm_source=pypi-charset-normalizer&utm_medium=readme + +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/7297/badge)](https://www.bestpractices.dev/projects/7297) + +# Changelog +All notable changes to charset-normalizer will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +## [3.4.2](https://github.com/Ousret/charset_normalizer/compare/3.4.1...3.4.2) (2025-05-02) + +### Fixed +- Addressed the DeprecationWarning in our CLI regarding `argparse.FileType` by backporting the target class into the package. (#591) +- Improved the overall reliability of the detector with CJK Ideographs. (#605) (#587) + +### Changed +- Optional mypyc compilation upgraded to version 1.15 for Python >= 3.8 + +## [3.4.1](https://github.com/Ousret/charset_normalizer/compare/3.4.0...3.4.1) (2024-12-24) + +### Changed +- Project metadata are now stored using `pyproject.toml` instead of `setup.cfg` using setuptools as the build backend. +- Enforce annotation delayed loading for a simpler and consistent types in the project. +- Optional mypyc compilation upgraded to version 1.14 for Python >= 3.8 + +### Added +- pre-commit configuration. +- noxfile. + +### Removed +- `build-requirements.txt` as per using `pyproject.toml` native build configuration. +- `bin/integration.py` and `bin/serve.py` in favor of downstream integration test (see noxfile). +- `setup.cfg` in favor of `pyproject.toml` metadata configuration. +- Unused `utils.range_scan` function. + +### Fixed +- Converting content to Unicode bytes may insert `utf_8` instead of preferred `utf-8`. (#572) +- Deprecation warning "'count' is passed as positional argument" when converting to Unicode bytes on Python 3.13+ + +## [3.4.0](https://github.com/Ousret/charset_normalizer/compare/3.3.2...3.4.0) (2024-10-08) + +### Added +- Argument `--no-preemptive` in the CLI to prevent the detector to search for hints. +- Support for Python 3.13 (#512) + +### Fixed +- Relax the TypeError exception thrown when trying to compare a CharsetMatch with anything else than a CharsetMatch. +- Improved the general reliability of the detector based on user feedbacks. (#520) (#509) (#498) (#407) (#537) +- Declared charset in content (preemptive detection) not changed when converting to utf-8 bytes. 
+
+## [3.3.2](https://github.com/Ousret/charset_normalizer/compare/3.3.1...3.3.2) (2023-10-31)
+
+### Fixed
+- Unintentional memory usage regression when using a large payload that matches several encodings (#376)
+- Regression on some detection cases showcased in the documentation (#371)
+
+### Added
+- Noise (md) probe that identifies malformed Arabic representation due to the presence of letters in isolated form (credit to my wife)
+
+## [3.3.1](https://github.com/Ousret/charset_normalizer/compare/3.3.0...3.3.1) (2023-10-22)
+
+### Changed
+- Optional mypyc compilation upgraded to version 1.6.1 for Python >= 3.8
+- Improved the general detection reliability based on reports from the community
+
+## [3.3.0](https://github.com/Ousret/charset_normalizer/compare/3.2.0...3.3.0) (2023-09-30)
+
+### Added
+- Allow executing the CLI (e.g. normalizer) through `python -m charset_normalizer.cli` or `python -m charset_normalizer`
+- Support for 9 forgotten encodings that are supported by Python but unlisted in `encoding.aliases`, as they have no alias (#323)
+
+### Removed
+- (internal) Redundant utils.is_ascii function and unused function is_private_use_only
+- (internal) charset_normalizer.assets is moved inside charset_normalizer.constant
+
+### Changed
+- (internal) Unicode code blocks in constants are updated using the latest v15.0.0 definition to improve detection
+- Optional mypyc compilation upgraded to version 1.5.1 for Python >= 3.8
+
+### Fixed
+- Unable to properly sort a CharsetMatch when both chaos/noise and coherence were close, due to an unreachable condition in \_\_lt\_\_ (#350)
+
+## [3.2.0](https://github.com/Ousret/charset_normalizer/compare/3.1.0...3.2.0) (2023-06-07)
+
+### Changed
+- The typehint for function `from_path` no longer enforces `PathLike` as its first argument
+- Minor improvement over the global detection reliability
+
+### Added
+- Introduce function `is_binary`, which relies on the main capabilities and is optimized to detect binaries
+- Propagate the `enable_fallback` argument throughout `from_bytes`, `from_path`, and `from_fp`, allowing deeper control over the detection (default True)
+- Explicit support for Python 3.12
+
+### Fixed
+- Edge case detection failure where a file would contain a 'very-long' camel-cased word (Issue #289)
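+
+A minimal sketch of the `is_binary` helper introduced above (the sample payloads are arbitrary assumptions):
+
+```python
+from charset_normalizer import is_binary
+
+# A PNG-like header padded with NUL bytes: expected to be flagged as binary.
+print(is_binary(b"\x89PNG\r\n\x1a\n" + bytes(64)))
+
+# Plain readable text: expected not to be flagged as binary.
+print(is_binary("Ceci n'est pas un binaire.".encode("utf-8")))
+```
+
+It also accepts a file path or an already-opened binary file pointer, mirroring `from_path` and `from_fp`.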
+
+## [3.1.0](https://github.com/Ousret/charset_normalizer/compare/3.0.1...3.1.0) (2023-03-06)
+
+### Added
+- Argument `should_rename_legacy` for the legacy function `detect`; any new arguments are also disregarded without errors (PR #262)
+
+### Removed
+- Support for Python 3.6 (PR #260)
+
+### Changed
+- Optional speedup provided by mypy/c 1.0.1
+
+## [3.0.1](https://github.com/Ousret/charset_normalizer/compare/3.0.0...3.0.1) (2022-11-18)
+
+### Fixed
+- The multi-byte cutter/chunk generator did not always cut correctly (PR #233)
+
+### Changed
+- Speedup provided by mypy/c 0.990 on Python >= 3.7
+
+## [3.0.0](https://github.com/Ousret/charset_normalizer/compare/2.1.1...3.0.0) (2022-10-20)
+
+### Added
+- Extend the capability of explain=True: when cp_isolation contains at most two entries (min. one), the Mess-detector results are logged in detail
+- Support for alternative language frequency sets in charset_normalizer.assets.FREQUENCIES
+- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
+- `normalizer --version` now specifies whether the current version provides the extra speedup (i.e. a mypyc-compiled wheel)
+
+### Changed
+- Build with static metadata using the 'build' frontend
+- Make the language detection stricter
+- Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup, up to 4x faster than v2.1
+
+### Fixed
+- CLI with opt --normalize failed when using a full path for files
+- TooManyAccentuatedPlugin induced false positives on the mess detection when too few alpha characters had been fed to it
+- Sphinx warnings when generating the documentation
+
+### Removed
+- Coherence detector no longer returns 'Simple English'; it returns 'English' instead
+- Coherence detector no longer returns 'Classical Chinese'; it returns 'Chinese' instead
+- Breaking: Methods `first()` and `best()` from CharsetMatch
+- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (it is unreliable/conflicts with ASCII)
+- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
+- Breaking: Top-level function `normalize`
+- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
+- Support for the backport `unicodedata2`
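+
+A minimal sketch of the `language_threshold` parameter added in 3.0.0 (the value shown is an arbitrary assumption, not a recommendation):
+
+```python
+from charset_normalizer import from_bytes
+
+payload = ("Örnek bir Türkçe metin; yeterince uzun olsun diye "
+           "birkaç kez tekrarlanır. ").encode("cp1254") * 10
+
+# Raise the minimum coherence ratio a language guess must reach to be kept.
+best = from_bytes(payload, language_threshold=0.2).best()
+if best:
+    print(best.encoding, best.language)
+```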
+
+## [3.0.0rc1](https://github.com/Ousret/charset_normalizer/compare/3.0.0b2...3.0.0rc1) (2022-10-18)
+
+### Added
+- Extend the capability of explain=True: when cp_isolation contains at most two entries (min. one), the Mess-detector results are logged in detail
+- Support for alternative language frequency sets in charset_normalizer.assets.FREQUENCIES
+- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
+
+### Changed
+- Build with static metadata using the 'build' frontend
+- Make the language detection stricter
+
+### Fixed
+- CLI with opt --normalize failed when using a full path for files
+- TooManyAccentuatedPlugin induced false positives on the mess detection when too few alpha characters had been fed to it
+
+### Removed
+- Coherence detector no longer returns 'Simple English'; it returns 'English' instead
+- Coherence detector no longer returns 'Classical Chinese'; it returns 'Chinese' instead
+
+## [3.0.0b2](https://github.com/Ousret/charset_normalizer/compare/3.0.0b1...3.0.0b2) (2022-08-21)
+
+### Added
+- `normalizer --version` now specifies whether the current version provides the extra speedup (i.e. a mypyc-compiled wheel)
+
+### Removed
+- Breaking: Methods `first()` and `best()` from CharsetMatch
+- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (it is unreliable/conflicts with ASCII)
+
+### Fixed
+- Sphinx warnings when generating the documentation
+
+## [3.0.0b1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...3.0.0b1) (2022-08-15)
+
+### Changed
+- Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup, up to 4x faster than v2.1
+
+### Removed
+- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
+- Breaking: Top-level function `normalize`
+- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
+- Support for the backport `unicodedata2`
+
+## [2.1.1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...2.1.1) (2022-08-19)
+
+### Deprecated
+- Function `normalize` scheduled for removal in 3.0
+
+### Changed
+- Removed a useless call to decode in fn is_unprintable (#206)
+
+### Fixed
+- Third-party library (i18n xgettext) crashing by not recognizing utf_8 (PEP 263) with an underscore, from [@aleksandernovikov](https://github.com/aleksandernovikov) (#204)
+
+## [2.1.0](https://github.com/Ousret/charset_normalizer/compare/2.0.12...2.1.0) (2022-06-19)
+
+### Added
+- Output the Unicode table version when running the CLI with `--version` (PR #194)
+
+### Changed
+- Re-use the decoded buffer for single-byte character sets, from [@nijel](https://github.com/nijel) (PR #175)
+- Fixed some performance bottlenecks, from [@deedy5](https://github.com/deedy5) (PR #183)
+
+### Fixed
+- Workaround for a potential bug in cpython: Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space (PR #175)
+- CLI default threshold aligned with the API threshold, from [@oleksandr-kuzmenko](https://github.com/oleksandr-kuzmenko) (PR #181)
+
+### Removed
+- Support for Python 3.5 (PR #192)
+
+### Deprecated
+- Use of backport unicodedata from `unicodedata2`, as Python is quickly catching up; scheduled for removal in 3.0 (PR #194)
+
+## [2.0.12](https://github.com/Ousret/charset_normalizer/compare/2.0.11...2.0.12) (2022-02-12)
+
+### Fixed
+- ASCII mis-detection in rare cases (PR #170)
+
+## [2.0.11](https://github.com/Ousret/charset_normalizer/compare/2.0.10...2.0.11) (2022-01-30)
+
+### Added
+- Explicit support for Python 3.11 (PR #164)
+
+### Changed
+- The logging behavior has been completely reviewed, now using only TRACE and DEBUG levels (PR #163 #165)
+
+## [2.0.10](https://github.com/Ousret/charset_normalizer/compare/2.0.9...2.0.10) (2022-01-04)
+
+### Fixed
+- Fallback match entries might lead to UnicodeDecodeError for large byte sequences (PR #154)
+
+### Changed
+- Skipping the language detection (CD) on ASCII (PR #155)
+
+## [2.0.9](https://github.com/Ousret/charset_normalizer/compare/2.0.8...2.0.9) (2021-12-03)
+
+### Changed
+- Moderating the logging impact (since 2.0.8) for specific environments (PR #147)
+
+### Fixed
+- Wrong logging level applied when setting kwarg `explain` to True (PR #146)
+
+## [2.0.8](https://github.com/Ousret/charset_normalizer/compare/2.0.7...2.0.8) (2021-11-24)
+### Changed
+- Improvement over Vietnamese detection (PR #126)
+- MD improvement on trailing data and long foreign (non-pure Latin) data (PR #124)
+- Efficiency improvements in cd/alphabet_languages, from [@adbar](https://github.com/adbar) (PR #122)
+- Call sum() without an intermediary list, following PEP 289 recommendations, from [@adbar](https://github.com/adbar) (PR #129)
+- Code style as refactored by Sourcery-AI (PR #131)
+- Minor adjustment of the MD around European words (PR #133)
+- Remove and replace SRTs from assets / tests (PR #139)
+- Initialize the library logger with a `NullHandler` by default, from [@nmaynes](https://github.com/nmaynes) (PR #135)
+- Setting kwarg `explain` to True will provisionally add (bound to the function lifespan) a specific stream handler (PR #135)
+
+### Fixed
+- Fix large (misleading) sequences giving UnicodeDecodeError (PR #137)
+- Avoid using too-insignificant chunks (PR #137)
+
+### Added
+- Add and expose function `set_logging_handler` to configure a specific StreamHandler, from [@nmaynes](https://github.com/nmaynes) (PR #135)
+- Add `CHANGELOG.md` entries; the format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) (PR #141)
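+
+A minimal sketch of the `set_logging_handler` helper added above (called here without arguments; the logger name, level and format string can be customized per the upstream docs):
+
+```python
+from charset_normalizer import set_logging_handler
+
+# Attach a StreamHandler to the 'charset_normalizer' logger so that
+# detection internals become visible without passing explain=True.
+set_logging_handler()
+```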
+
+## [2.0.7](https://github.com/Ousret/charset_normalizer/compare/2.0.6...2.0.7) (2021-10-11)
+### Added
+- Add support for Kazakh (Cyrillic) language detection (PR #109)
+
+### Changed
+- Further improve inferring the language from a given single-byte code page (PR #112)
+- Vainly trying to leverage PEP 263 when PEP 3120 is not supported (PR #116)
+- Refactoring for potential performance improvements in loops, from [@adbar](https://github.com/adbar) (PR #113)
+- Various detection improvements (MD+CD) (PR #117)
+
+### Removed
+- Remove redundant logging entry about detected language(s) (PR #115)
+
+### Fixed
+- Fix a minor inconsistency between Python 3.5 and other versions regarding language detection (PR #117 #102)
+
+## [2.0.6](https://github.com/Ousret/charset_normalizer/compare/2.0.5...2.0.6) (2021-09-18)
+### Fixed
+- Unforeseen regression with the loss of backward compatibility with some older minor releases of Python 3.5.x (PR #100)
+- Fix CLI crash when using --minimal output in certain cases (PR #103)
+
+### Changed
+- Minor improvement to the detection efficiency (less than 1%) (PR #106 #101)
+
+## [2.0.5](https://github.com/Ousret/charset_normalizer/compare/2.0.4...2.0.5) (2021-09-14)
+### Changed
+- The project now complies with flake8, mypy, isort and black to ensure better overall quality (PR #81)
+- The BC support with v1.x was improved; the old staticmethods are restored (PR #82)
+- The Unicode detection is slightly improved (PR #93)
+- Add syntax sugar \_\_bool\_\_ for the results CharsetMatches list-container (PR #91)
+
+### Removed
+- The project no longer raises a warning on tiny content given for detection; it is simply logged as a warning instead (PR #92)
+
+### Fixed
+- In some rare cases, the chunks extractor could cut in the middle of a multi-byte character and mislead the mess detection (PR #95)
+- Some rare 'space' characters could trip up the UnprintablePlugin/Mess detection (PR #96)
+- The MANIFEST.in was not exhaustive (PR #78)
+
+## [2.0.4](https://github.com/Ousret/charset_normalizer/compare/2.0.3...2.0.4) (2021-07-30)
+### Fixed
+- The CLI no longer raises an unexpected exception when no encoding has been found (PR #70)
+- Fix accessing the 'alphabets' property when the payload contains surrogate characters (PR #68)
+- The logger could mislead (explain=True) on detected languages and the impact of one MBCS match (PR #72)
+- Submatch factoring could be wrong in rare edge cases (PR #72)
+- Multiple files given to the CLI were ignored when publishing results to STDOUT (after the first path) (PR #72)
+- Fix line endings from CRLF to LF for certain project files (PR #67)
+
+### Changed
+- Adjust the MD to lower the sensitivity, thus improving the global detection reliability (PR #69 #76)
+- Allow fallback on the specified encoding, if any (PR #71)
+
+## [2.0.3](https://github.com/Ousret/charset_normalizer/compare/2.0.2...2.0.3) (2021-07-16)
+### Changed
+- Part of the detection mechanism has been improved to be less sensitive, resulting in more accurate detection results, especially for ASCII. (PR #63)
+- According to community wishes, the detection will fall back on ASCII or UTF-8 in a last-resort case. (PR #64)
+
+## [2.0.2](https://github.com/Ousret/charset_normalizer/compare/2.0.1...2.0.2) (2021-07-15)
+### Fixed
+- Empty/too-small JSON payload mis-detection fixed. Report from [@tseaver](https://github.com/tseaver) (PR #59)
+
+### Changed
+- Don't inject unicodedata2 into sys.modules, from [@akx](https://github.com/akx) (PR #57)
+
+## [2.0.1](https://github.com/Ousret/charset_normalizer/compare/2.0.0...2.0.1) (2021-07-13)
+### Fixed
+- Make it work where there isn't a filesystem available by dropping assets frequencies.json. Report from [@sethmlarson](https://github.com/sethmlarson). (PR #55)
+- Using explain=False permanently disabled the verbose output in the current runtime (PR #47)
+- One log entry (language target preemptive) was not shown in logs when using explain=True (PR #47)
+- Fix undesired exception (ValueError) on getitem of instance CharsetMatches (PR #52)
+
+### Changed
+- The default argument values of the public function normalize were not aligned with from_bytes (PR #53)
+
+### Added
+- You may now use charset aliases in the cp_isolation and cp_exclusion arguments (PR #47)
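+
+A minimal sketch of the charset-alias support mentioned above (the alias spellings are assumptions based on common codec aliases):
+
+```python
+from charset_normalizer import from_bytes
+
+payload = "Un texte d'exemple, accentué comme il se doit.".encode("cp1252")
+
+# Aliases such as 'latin-1' or 'windows-1252' resolve to their IANA names.
+matches = from_bytes(payload, cp_isolation=["latin-1", "windows-1252"])
+best = matches.best()
+print(best.encoding if best else None)
+```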
+
+## [2.0.0](https://github.com/Ousret/charset_normalizer/compare/1.4.1...2.0.0) (2021-07-02)
+### Changed
+- 4 to 5 times faster than the previous 1.4.0 release, and at least 2x faster than Chardet.
+- Emphasis has been put on UTF-8 detection, which should perform nearly instantaneously.
+- The backward compatibility with Chardet has been greatly improved. The legacy detect function returns an identical charset name whenever possible.
+- The detection mechanism has been slightly improved; Turkish content is now detected correctly (most of the time).
+- The program has been rewritten to improve readability and maintainability (plus static typing).
+- utf_7 detection has been reinstated.
+
+### Removed
+- This package no longer requires anything when used with Python 3.5 (dropped cached_property).
+- Removed support for these languages: Catalan, Esperanto, Kazakh, Basque, Volapük, Azeri, Galician, Nynorsk, Macedonian, and Serbo-Croatian.
+- The exception hook on UnicodeDecodeError has been removed.
+
+### Deprecated
+- Methods coherence_non_latin, w_counter, chaos_secondary_pass of the class CharsetMatch are now deprecated and scheduled for removal in v3.0
+
+### Fixed
+- The CLI output used the relative path of the file(s); it should be absolute.
+
+## [1.4.1](https://github.com/Ousret/charset_normalizer/compare/1.4.0...1.4.1) (2021-05-28)
+### Fixed
+- Logger configuration/usage no longer conflicts with others (PR #44)
+
+## [1.4.0](https://github.com/Ousret/charset_normalizer/compare/1.3.9...1.4.0) (2021-05-21)
+### Removed
+- Using standard logging instead of the package loguru.
+- Dropping the nose test framework in favor of the maintained pytest.
+- Chose not to use the dragonmapper package to help with gibberish Chinese/CJK text.
+- Require cached_property only for Python 3.5 due to a constraint; dropping it for every other interpreter version.
+- Stop supporting UTF-7 that does not contain a SIG.
+- Dropping PrettyTable, replaced with pure JSON output in the CLI.
+
+### Fixed
+- The BOM marker in a CharsetNormalizerMatch instance could be False in rare cases even if obviously present, due to the sub-match factoring process.
+- Not searching properly for the BOM when trying the utf32/16 parent codec.
+
+### Changed
+- Improving the package's final size by compressing frequencies.json.
+- Huge improvement on the largest payloads.
+
+### Added
+- The CLI now produces JSON-consumable output.
+- Return ASCII if the given sequences fit, given reasonable confidence.
+
+## [1.3.9](https://github.com/Ousret/charset_normalizer/compare/1.3.8...1.3.9) (2021-05-13)
+
+### Fixed
+- In some very rare cases, you may end up getting encode/decode errors due to a bad bytes payload (PR #40)
+
+## [1.3.8](https://github.com/Ousret/charset_normalizer/compare/1.3.7...1.3.8) (2021-05-12)
+
+### Fixed
+- An empty payload given for detection may cause an exception when trying to access the `alphabets` property. (PR #39)
+
+## [1.3.7](https://github.com/Ousret/charset_normalizer/compare/1.3.6...1.3.7) (2021-05-12)
+
+### Fixed
+- The legacy detect function should return UTF-8-SIG if a sig is present in the payload. (PR #38)
+
+## [1.3.6](https://github.com/Ousret/charset_normalizer/compare/1.3.5...1.3.6) (2021-02-09)
+
+### Changed
+- Amend the previous release to allow prettytable 2.0 (PR #35)
+
+## [1.3.5](https://github.com/Ousret/charset_normalizer/compare/1.3.4...1.3.5) (2021-02-08)
+
+### Fixed
+- Fix an error when using the package with a Python pre-release interpreter (PR #33)
+
+### Changed
+- Dependencies refactoring, constraints revised.
+
+### Added
+- Add Python 3.9 and 3.10 to the supported interpreters
+
+MIT License
+
+Copyright (c) 2025 TAHRI Ahmed R.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/RECORD b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/RECORD new file mode 100644 index 00000000..ae3c1ba4 --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/RECORD @@ -0,0 +1,35 @@ +../../Scripts/normalizer.exe,sha256=pth8QWAVwrCEVK7gQtY8dGyJaiXb8q3y9oQzb-e2gI0,108414 +charset_normalizer-3.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +charset_normalizer-3.4.2.dist-info/METADATA,sha256=WneNNyl9QvsRZYzK1FeEC6Wwag4iIFoTAoevPgpZFTY,36474 +charset_normalizer-3.4.2.dist-info/RECORD,, +charset_normalizer-3.4.2.dist-info/WHEEL,sha256=PqPfOxA7mGIdQ4POxRbyNp_0DDxNzJM1gpSaGcLLjlo,101 +charset_normalizer-3.4.2.dist-info/entry_points.txt,sha256=8C-Y3iXIfyXQ83Tpir2B8t-XLJYpxF5xbb38d_js-h4,65 +charset_normalizer-3.4.2.dist-info/licenses/LICENSE,sha256=GFd0hdNwTxpHne2OVzwJds_tMV_S_ReYP6mI2kwvcNE,1092 +charset_normalizer-3.4.2.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19 +charset_normalizer/__init__.py,sha256=0NT8MHi7SKq3juMqYfOdrkzjisK0L73lneNHH4qaUAs,1638 +charset_normalizer/__main__.py,sha256=2sj_BS6H0sU25C1bMqz9DVwa6kOK9lchSEbSU-_iu7M,115 +charset_normalizer/__pycache__/__init__.cpython-312.pyc,, +charset_normalizer/__pycache__/__main__.cpython-312.pyc,, +charset_normalizer/__pycache__/api.cpython-312.pyc,, +charset_normalizer/__pycache__/cd.cpython-312.pyc,, +charset_normalizer/__pycache__/constant.cpython-312.pyc,, +charset_normalizer/__pycache__/legacy.cpython-312.pyc,, +charset_normalizer/__pycache__/md.cpython-312.pyc,, +charset_normalizer/__pycache__/models.cpython-312.pyc,, +charset_normalizer/__pycache__/utils.cpython-312.pyc,, +charset_normalizer/__pycache__/version.cpython-312.pyc,, +charset_normalizer/api.py,sha256=2a0p2Gnhbdo9O6C04CNxTSN23fIbgOF20nxb0pWPNFM,23285 +charset_normalizer/cd.py,sha256=uq8nVxRpR6Guc16ACvOWtL8KO3w7vYaCh8hHisuOyTg,12917 +charset_normalizer/cli/__init__.py,sha256=d9MUx-1V_qD3x9igIy4JT4oC5CU0yjulk7QyZWeRFhg,144 +charset_normalizer/cli/__main__.py,sha256=-pdJCyPywouPyFsC8_eTSgTmvh1YEvgjsvy1WZ0XjaA,13027 +charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc,, +charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc,, +charset_normalizer/constant.py,sha256=mCJmYzpBU27Ut9kiNWWoBbhhxQ-aRVw3K7LSwoFwBGI,44728 +charset_normalizer/legacy.py,sha256=NgK-8ZQa_M9FHgQjdNSiYzMaB332QGuElZSfCf2y2sQ,2351 +charset_normalizer/md.cp312-win_amd64.pyd,sha256=Rl7qVeM9DRj8hZuyoyQjaRKsZ6zkpojb7Nx1nNTbjr8,10752 +charset_normalizer/md.py,sha256=LSuW2hNgXSgF7JGdRapLAHLuj6pABHiP85LTNAYmu7c,20780 +charset_normalizer/md__mypyc.cp312-win_amd64.pyd,sha256=WXIlB5bZkRXY2k4r70Pmyf7eLRw6zLib-dXXkRIrOZA,125952 +charset_normalizer/models.py,sha256=ZR2PE-fqf6dASZfqdE5Uhkmr0o1MciSdXOjuNqwkmvg,12754 +charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +charset_normalizer/utils.py,sha256=XtWIQeOuz7cnGebMzyi4Vvi1JtA84QBSIeR9PDzF7pw,12584 +charset_normalizer/version.py,sha256=wtpyUZ7M57rCLclP3QjzRD0Nj2hvnMOzLZI-vwfTdWs,123 diff --git a/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/WHEEL b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/WHEEL new file mode 100644 index 00000000..97266467 --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.1.0) +Root-Is-Purelib: false +Tag: cp312-cp312-win_amd64 + diff --git a/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/entry_points.txt 
b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/entry_points.txt new file mode 100644 index 00000000..ec920125 --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +normalizer = charset_normalizer:cli.cli_detect diff --git a/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/licenses/LICENSE b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/licenses/LICENSE new file mode 100644 index 00000000..9725772c --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 TAHRI Ahmed R. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/top_level.txt b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/top_level.txt new file mode 100644 index 00000000..66958f0a --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer-3.4.2.dist-info/top_level.txt @@ -0,0 +1 @@ +charset_normalizer diff --git a/venv/Lib/site-packages/charset_normalizer/__init__.py b/venv/Lib/site-packages/charset_normalizer/__init__.py new file mode 100644 index 00000000..0d3a3799 --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/__init__.py @@ -0,0 +1,48 @@ +""" +Charset-Normalizer +~~~~~~~~~~~~~~ +The Real First Universal Charset Detector. +A library that helps you read text from an unknown charset encoding. +Motivated by chardet, This package is trying to resolve the issue by taking a new approach. +All IANA character set names for which the Python core library provides codecs are supported. + +Basic usage: + >>> from charset_normalizer import from_bytes + >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8')) + >>> best_guess = results.best() + >>> str(best_guess) + 'Bсеки човек има право на образование. Oбразованието!' + +Others methods and usages are available - see the full documentation +at . +:copyright: (c) 2021 by Ahmed TAHRI +:license: MIT, see LICENSE for more details. 
+""" + +from __future__ import annotations + +import logging + +from .api import from_bytes, from_fp, from_path, is_binary +from .legacy import detect +from .models import CharsetMatch, CharsetMatches +from .utils import set_logging_handler +from .version import VERSION, __version__ + +__all__ = ( + "from_fp", + "from_path", + "from_bytes", + "is_binary", + "detect", + "CharsetMatch", + "CharsetMatches", + "__version__", + "VERSION", + "set_logging_handler", +) + +# Attach a NullHandler to the top level logger by default +# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library + +logging.getLogger("charset_normalizer").addHandler(logging.NullHandler()) diff --git a/venv/Lib/site-packages/charset_normalizer/__main__.py b/venv/Lib/site-packages/charset_normalizer/__main__.py new file mode 100644 index 00000000..e0e76f7b --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/__main__.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from .cli import cli_detect + +if __name__ == "__main__": + cli_detect() diff --git a/venv/Lib/site-packages/charset_normalizer/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..41543fcd Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/__pycache__/__main__.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 00000000..6e80f4fc Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/__pycache__/__main__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/__pycache__/api.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/__pycache__/api.cpython-312.pyc new file mode 100644 index 00000000..e074b4f8 Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/__pycache__/api.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/__pycache__/cd.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/__pycache__/cd.cpython-312.pyc new file mode 100644 index 00000000..e1e3efcb Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/__pycache__/cd.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/__pycache__/constant.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/__pycache__/constant.cpython-312.pyc new file mode 100644 index 00000000..cf74696c Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/__pycache__/constant.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/__pycache__/legacy.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/__pycache__/legacy.cpython-312.pyc new file mode 100644 index 00000000..a4328f62 Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/__pycache__/legacy.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/__pycache__/md.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/__pycache__/md.cpython-312.pyc new file mode 100644 index 00000000..e765f78f Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/__pycache__/md.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/__pycache__/models.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/__pycache__/models.cpython-312.pyc new file mode 100644 index 00000000..baa48db3 
Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/__pycache__/models.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..a042299f Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/__pycache__/version.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/__pycache__/version.cpython-312.pyc new file mode 100644 index 00000000..0632ade4 Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/__pycache__/version.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/api.py b/venv/Lib/site-packages/charset_normalizer/api.py new file mode 100644 index 00000000..2c8c0618 --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/api.py @@ -0,0 +1,668 @@ +from __future__ import annotations + +import logging +from os import PathLike +from typing import BinaryIO + +from .cd import ( + coherence_ratio, + encoding_languages, + mb_encoding_languages, + merge_coherence_ratios, +) +from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE +from .md import mess_ratio +from .models import CharsetMatch, CharsetMatches +from .utils import ( + any_specified_encoding, + cut_sequence_chunks, + iana_name, + identify_sig_or_bom, + is_cp_similar, + is_multi_byte_encoding, + should_strip_sig_or_bom, +) + +logger = logging.getLogger("charset_normalizer") +explain_handler = logging.StreamHandler() +explain_handler.setFormatter( + logging.Formatter("%(asctime)s | %(levelname)s | %(message)s") +) + + +def from_bytes( + sequences: bytes | bytearray, + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.2, + cp_isolation: list[str] | None = None, + cp_exclusion: list[str] | None = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Given a raw bytes sequence, return the best possibles charset usable to render str objects. + If there is no results, it is a strong indicator that the source is binary/not text. + By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence. + And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will. + + The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page + but never take it for granted. Can improve the performance. + + You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that + purpose. + + This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32. + By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain' + toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging. + Custom logging format and handler can be set manually. 
+ """ + + if not isinstance(sequences, (bytearray, bytes)): + raise TypeError( + "Expected object of type bytes or bytearray, got: {}".format( + type(sequences) + ) + ) + + if explain: + previous_logger_level: int = logger.level + logger.addHandler(explain_handler) + logger.setLevel(TRACE) + + length: int = len(sequences) + + if length == 0: + logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.") + if explain: # Defensive: ensure exit path clean handler + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level or logging.WARNING) + return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")]) + + if cp_isolation is not None: + logger.log( + TRACE, + "cp_isolation is set. use this flag for debugging purpose. " + "limited list of encoding allowed : %s.", + ", ".join(cp_isolation), + ) + cp_isolation = [iana_name(cp, False) for cp in cp_isolation] + else: + cp_isolation = [] + + if cp_exclusion is not None: + logger.log( + TRACE, + "cp_exclusion is set. use this flag for debugging purpose. " + "limited list of encoding excluded : %s.", + ", ".join(cp_exclusion), + ) + cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion] + else: + cp_exclusion = [] + + if length <= (chunk_size * steps): + logger.log( + TRACE, + "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.", + steps, + chunk_size, + length, + ) + steps = 1 + chunk_size = length + + if steps > 1 and length / steps < chunk_size: + chunk_size = int(length / steps) + + is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE + is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE + + if is_too_small_sequence: + logger.log( + TRACE, + "Trying to detect encoding from a tiny portion of ({}) byte(s).".format( + length + ), + ) + elif is_too_large_sequence: + logger.log( + TRACE, + "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format( + length + ), + ) + + prioritized_encodings: list[str] = [] + + specified_encoding: str | None = ( + any_specified_encoding(sequences) if preemptive_behaviour else None + ) + + if specified_encoding is not None: + prioritized_encodings.append(specified_encoding) + logger.log( + TRACE, + "Detected declarative mark in sequence. Priority +1 given for %s.", + specified_encoding, + ) + + tested: set[str] = set() + tested_but_hard_failure: list[str] = [] + tested_but_soft_failure: list[str] = [] + + fallback_ascii: CharsetMatch | None = None + fallback_u8: CharsetMatch | None = None + fallback_specified: CharsetMatch | None = None + + results: CharsetMatches = CharsetMatches() + + early_stop_results: CharsetMatches = CharsetMatches() + + sig_encoding, sig_payload = identify_sig_or_bom(sequences) + + if sig_encoding is not None: + prioritized_encodings.append(sig_encoding) + logger.log( + TRACE, + "Detected a SIG or BOM mark on first %i byte(s). 
Priority +1 given for %s.", + len(sig_payload), + sig_encoding, + ) + + prioritized_encodings.append("ascii") + + if "utf_8" not in prioritized_encodings: + prioritized_encodings.append("utf_8") + + for encoding_iana in prioritized_encodings + IANA_SUPPORTED: + if cp_isolation and encoding_iana not in cp_isolation: + continue + + if cp_exclusion and encoding_iana in cp_exclusion: + continue + + if encoding_iana in tested: + continue + + tested.add(encoding_iana) + + decoded_payload: str | None = None + bom_or_sig_available: bool = sig_encoding == encoding_iana + strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom( + encoding_iana + ) + + if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available: + logger.log( + TRACE, + "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.", + encoding_iana, + ) + continue + if encoding_iana in {"utf_7"} and not bom_or_sig_available: + logger.log( + TRACE, + "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.", + encoding_iana, + ) + continue + + try: + is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana) + except (ModuleNotFoundError, ImportError): + logger.log( + TRACE, + "Encoding %s does not provide an IncrementalDecoder", + encoding_iana, + ) + continue + + try: + if is_too_large_sequence and is_multi_byte_decoder is False: + str( + ( + sequences[: int(50e4)] + if strip_sig_or_bom is False + else sequences[len(sig_payload) : int(50e4)] + ), + encoding=encoding_iana, + ) + else: + decoded_payload = str( + ( + sequences + if strip_sig_or_bom is False + else sequences[len(sig_payload) :] + ), + encoding=encoding_iana, + ) + except (UnicodeDecodeError, LookupError) as e: + if not isinstance(e, LookupError): + logger.log( + TRACE, + "Code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + tested_but_hard_failure.append(encoding_iana) + continue + + similar_soft_failure_test: bool = False + + for encoding_soft_failed in tested_but_soft_failure: + if is_cp_similar(encoding_iana, encoding_soft_failed): + similar_soft_failure_test = True + break + + if similar_soft_failure_test: + logger.log( + TRACE, + "%s is deemed too similar to code page %s and was consider unsuited already. 
Continuing!", + encoding_iana, + encoding_soft_failed, + ) + continue + + r_ = range( + 0 if not bom_or_sig_available else len(sig_payload), + length, + int(length / steps), + ) + + multi_byte_bonus: bool = ( + is_multi_byte_decoder + and decoded_payload is not None + and len(decoded_payload) < length + ) + + if multi_byte_bonus: + logger.log( + TRACE, + "Code page %s is a multi byte encoding table and it appear that at least one character " + "was encoded using n-bytes.", + encoding_iana, + ) + + max_chunk_gave_up: int = int(len(r_) / 4) + + max_chunk_gave_up = max(max_chunk_gave_up, 2) + early_stop_count: int = 0 + lazy_str_hard_failure = False + + md_chunks: list[str] = [] + md_ratios = [] + + try: + for chunk in cut_sequence_chunks( + sequences, + encoding_iana, + r_, + chunk_size, + bom_or_sig_available, + strip_sig_or_bom, + sig_payload, + is_multi_byte_decoder, + decoded_payload, + ): + md_chunks.append(chunk) + + md_ratios.append( + mess_ratio( + chunk, + threshold, + explain is True and 1 <= len(cp_isolation) <= 2, + ) + ) + + if md_ratios[-1] >= threshold: + early_stop_count += 1 + + if (early_stop_count >= max_chunk_gave_up) or ( + bom_or_sig_available and strip_sig_or_bom is False + ): + break + except ( + UnicodeDecodeError + ) as e: # Lazy str loading may have missed something there + logger.log( + TRACE, + "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + early_stop_count = max_chunk_gave_up + lazy_str_hard_failure = True + + # We might want to check the sequence again with the whole content + # Only if initial MD tests passes + if ( + not lazy_str_hard_failure + and is_too_large_sequence + and not is_multi_byte_decoder + ): + try: + sequences[int(50e3) :].decode(encoding_iana, errors="strict") + except UnicodeDecodeError as e: + logger.log( + TRACE, + "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s", + encoding_iana, + str(e), + ) + tested_but_hard_failure.append(encoding_iana) + continue + + mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0 + if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up: + tested_but_soft_failure.append(encoding_iana) + logger.log( + TRACE, + "%s was excluded because of initial chaos probing. Gave up %i time(s). " + "Computed mean chaos is %f %%.", + encoding_iana, + early_stop_count, + round(mean_mess_ratio * 100, ndigits=3), + ) + # Preparing those fallbacks in case we got nothing. + if ( + enable_fallback + and encoding_iana in ["ascii", "utf_8", specified_encoding] + and not lazy_str_hard_failure + ): + fallback_entry = CharsetMatch( + sequences, + encoding_iana, + threshold, + False, + [], + decoded_payload, + preemptive_declaration=specified_encoding, + ) + if encoding_iana == specified_encoding: + fallback_specified = fallback_entry + elif encoding_iana == "ascii": + fallback_ascii = fallback_entry + else: + fallback_u8 = fallback_entry + continue + + logger.log( + TRACE, + "%s passed initial chaos probing. 
Mean measured chaos is %f %%", + encoding_iana, + round(mean_mess_ratio * 100, ndigits=3), + ) + + if not is_multi_byte_decoder: + target_languages: list[str] = encoding_languages(encoding_iana) + else: + target_languages = mb_encoding_languages(encoding_iana) + + if target_languages: + logger.log( + TRACE, + "{} should target any language(s) of {}".format( + encoding_iana, str(target_languages) + ), + ) + + cd_ratios = [] + + # We shall skip the CD when its about ASCII + # Most of the time its not relevant to run "language-detection" on it. + if encoding_iana != "ascii": + for chunk in md_chunks: + chunk_languages = coherence_ratio( + chunk, + language_threshold, + ",".join(target_languages) if target_languages else None, + ) + + cd_ratios.append(chunk_languages) + + cd_ratios_merged = merge_coherence_ratios(cd_ratios) + + if cd_ratios_merged: + logger.log( + TRACE, + "We detected language {} using {}".format( + cd_ratios_merged, encoding_iana + ), + ) + + current_match = CharsetMatch( + sequences, + encoding_iana, + mean_mess_ratio, + bom_or_sig_available, + cd_ratios_merged, + ( + decoded_payload + if ( + is_too_large_sequence is False + or encoding_iana in [specified_encoding, "ascii", "utf_8"] + ) + else None + ), + preemptive_declaration=specified_encoding, + ) + + results.append(current_match) + + if ( + encoding_iana in [specified_encoding, "ascii", "utf_8"] + and mean_mess_ratio < 0.1 + ): + # If md says nothing to worry about, then... stop immediately! + if mean_mess_ratio == 0.0: + logger.debug( + "Encoding detection: %s is most likely the one.", + current_match.encoding, + ) + if explain: # Defensive: ensure exit path clean handler + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + return CharsetMatches([current_match]) + + early_stop_results.append(current_match) + + if ( + len(early_stop_results) + and (specified_encoding is None or specified_encoding in tested) + and "ascii" in tested + and "utf_8" in tested + ): + probable_result: CharsetMatch = early_stop_results.best() # type: ignore[assignment] + logger.debug( + "Encoding detection: %s is most likely the one.", + probable_result.encoding, + ) + if explain: # Defensive: ensure exit path clean handler + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + + return CharsetMatches([probable_result]) + + if encoding_iana == sig_encoding: + logger.debug( + "Encoding detection: %s is most likely the one as we detected a BOM or SIG within " + "the beginning of the sequence.", + encoding_iana, + ) + if explain: # Defensive: ensure exit path clean handler + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + return CharsetMatches([results[encoding_iana]]) + + if len(results) == 0: + if fallback_u8 or fallback_ascii or fallback_specified: + logger.log( + TRACE, + "Nothing got out of the detection process. 
Using ASCII/UTF-8/Specified fallback.", + ) + + if fallback_specified: + logger.debug( + "Encoding detection: %s will be used as a fallback match", + fallback_specified.encoding, + ) + results.append(fallback_specified) + elif ( + (fallback_u8 and fallback_ascii is None) + or ( + fallback_u8 + and fallback_ascii + and fallback_u8.fingerprint != fallback_ascii.fingerprint + ) + or (fallback_u8 is not None) + ): + logger.debug("Encoding detection: utf_8 will be used as a fallback match") + results.append(fallback_u8) + elif fallback_ascii: + logger.debug("Encoding detection: ascii will be used as a fallback match") + results.append(fallback_ascii) + + if results: + logger.debug( + "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.", + results.best().encoding, # type: ignore + len(results) - 1, + ) + else: + logger.debug("Encoding detection: Unable to determine any suitable charset.") + + if explain: + logger.removeHandler(explain_handler) + logger.setLevel(previous_logger_level) + + return results + + +def from_fp( + fp: BinaryIO, + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: list[str] | None = None, + cp_exclusion: list[str] | None = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Same thing than the function from_bytes but using a file pointer that is already ready. + Will not close the file pointer. + """ + return from_bytes( + fp.read(), + steps, + chunk_size, + threshold, + cp_isolation, + cp_exclusion, + preemptive_behaviour, + explain, + language_threshold, + enable_fallback, + ) + + +def from_path( + path: str | bytes | PathLike, # type: ignore[type-arg] + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: list[str] | None = None, + cp_exclusion: list[str] | None = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = True, +) -> CharsetMatches: + """ + Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode. + Can raise IOError. + """ + with open(path, "rb") as fp: + return from_fp( + fp, + steps, + chunk_size, + threshold, + cp_isolation, + cp_exclusion, + preemptive_behaviour, + explain, + language_threshold, + enable_fallback, + ) + + +def is_binary( + fp_or_path_or_payload: PathLike | str | BinaryIO | bytes, # type: ignore[type-arg] + steps: int = 5, + chunk_size: int = 512, + threshold: float = 0.20, + cp_isolation: list[str] | None = None, + cp_exclusion: list[str] | None = None, + preemptive_behaviour: bool = True, + explain: bool = False, + language_threshold: float = 0.1, + enable_fallback: bool = False, +) -> bool: + """ + Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string. + Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match + are disabled to be stricter around ASCII-compatible but unlikely to be a string. 
+ """ + if isinstance(fp_or_path_or_payload, (str, PathLike)): + guesses = from_path( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + elif isinstance( + fp_or_path_or_payload, + ( + bytes, + bytearray, + ), + ): + guesses = from_bytes( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + else: + guesses = from_fp( + fp_or_path_or_payload, + steps=steps, + chunk_size=chunk_size, + threshold=threshold, + cp_isolation=cp_isolation, + cp_exclusion=cp_exclusion, + preemptive_behaviour=preemptive_behaviour, + explain=explain, + language_threshold=language_threshold, + enable_fallback=enable_fallback, + ) + + return not guesses diff --git a/venv/Lib/site-packages/charset_normalizer/cd.py b/venv/Lib/site-packages/charset_normalizer/cd.py new file mode 100644 index 00000000..71a3ed51 --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/cd.py @@ -0,0 +1,395 @@ +from __future__ import annotations + +import importlib +from codecs import IncrementalDecoder +from collections import Counter +from functools import lru_cache +from typing import Counter as TypeCounter + +from .constant import ( + FREQUENCIES, + KO_NAMES, + LANGUAGE_SUPPORTED_COUNT, + TOO_SMALL_SEQUENCE, + ZH_NAMES, +) +from .md import is_suspiciously_successive_range +from .models import CoherenceMatches +from .utils import ( + is_accentuated, + is_latin, + is_multi_byte_encoding, + is_unicode_range_secondary, + unicode_range, +) + + +def encoding_unicode_range(iana_name: str) -> list[str]: + """ + Return associated unicode ranges in a single byte code page. + """ + if is_multi_byte_encoding(iana_name): + raise OSError("Function not supported on multi-byte code page") + + decoder = importlib.import_module(f"encodings.{iana_name}").IncrementalDecoder + + p: IncrementalDecoder = decoder(errors="ignore") + seen_ranges: dict[str, int] = {} + character_count: int = 0 + + for i in range(0x40, 0xFF): + chunk: str = p.decode(bytes([i])) + + if chunk: + character_range: str | None = unicode_range(chunk) + + if character_range is None: + continue + + if is_unicode_range_secondary(character_range) is False: + if character_range not in seen_ranges: + seen_ranges[character_range] = 0 + seen_ranges[character_range] += 1 + character_count += 1 + + return sorted( + [ + character_range + for character_range in seen_ranges + if seen_ranges[character_range] / character_count >= 0.15 + ] + ) + + +def unicode_range_languages(primary_range: str) -> list[str]: + """ + Return inferred languages used with a unicode range. + """ + languages: list[str] = [] + + for language, characters in FREQUENCIES.items(): + for character in characters: + if unicode_range(character) == primary_range: + languages.append(language) + break + + return languages + + +@lru_cache() +def encoding_languages(iana_name: str) -> list[str]: + """ + Single-byte encoding language association. Some code page are heavily linked to particular language(s). + This function does the correspondence. 
+ """ + unicode_ranges: list[str] = encoding_unicode_range(iana_name) + primary_range: str | None = None + + for specified_range in unicode_ranges: + if "Latin" not in specified_range: + primary_range = specified_range + break + + if primary_range is None: + return ["Latin Based"] + + return unicode_range_languages(primary_range) + + +@lru_cache() +def mb_encoding_languages(iana_name: str) -> list[str]: + """ + Multi-byte encoding language association. Some code page are heavily linked to particular language(s). + This function does the correspondence. + """ + if ( + iana_name.startswith("shift_") + or iana_name.startswith("iso2022_jp") + or iana_name.startswith("euc_j") + or iana_name == "cp932" + ): + return ["Japanese"] + if iana_name.startswith("gb") or iana_name in ZH_NAMES: + return ["Chinese"] + if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: + return ["Korean"] + + return [] + + +@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) +def get_target_features(language: str) -> tuple[bool, bool]: + """ + Determine main aspects from a supported language if it contains accents and if is pure Latin. + """ + target_have_accents: bool = False + target_pure_latin: bool = True + + for character in FREQUENCIES[language]: + if not target_have_accents and is_accentuated(character): + target_have_accents = True + if target_pure_latin and is_latin(character) is False: + target_pure_latin = False + + return target_have_accents, target_pure_latin + + +def alphabet_languages( + characters: list[str], ignore_non_latin: bool = False +) -> list[str]: + """ + Return associated languages associated to given characters. + """ + languages: list[tuple[str, float]] = [] + + source_have_accents = any(is_accentuated(character) for character in characters) + + for language, language_characters in FREQUENCIES.items(): + target_have_accents, target_pure_latin = get_target_features(language) + + if ignore_non_latin and target_pure_latin is False: + continue + + if target_have_accents is False and source_have_accents: + continue + + character_count: int = len(language_characters) + + character_match_count: int = len( + [c for c in language_characters if c in characters] + ) + + ratio: float = character_match_count / character_count + + if ratio >= 0.2: + languages.append((language, ratio)) + + languages = sorted(languages, key=lambda x: x[1], reverse=True) + + return [compatible_language[0] for compatible_language in languages] + + +def characters_popularity_compare( + language: str, ordered_characters: list[str] +) -> float: + """ + Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. + The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). + Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) 
+ """ + if language not in FREQUENCIES: + raise ValueError(f"{language} not available") + + character_approved_count: int = 0 + FREQUENCIES_language_set = set(FREQUENCIES[language]) + + ordered_characters_count: int = len(ordered_characters) + target_language_characters_count: int = len(FREQUENCIES[language]) + + large_alphabet: bool = target_language_characters_count > 26 + + for character, character_rank in zip( + ordered_characters, range(0, ordered_characters_count) + ): + if character not in FREQUENCIES_language_set: + continue + + character_rank_in_language: int = FREQUENCIES[language].index(character) + expected_projection_ratio: float = ( + target_language_characters_count / ordered_characters_count + ) + character_rank_projection: int = int(character_rank * expected_projection_ratio) + + if ( + large_alphabet is False + and abs(character_rank_projection - character_rank_in_language) > 4 + ): + continue + + if ( + large_alphabet is True + and abs(character_rank_projection - character_rank_in_language) + < target_language_characters_count / 3 + ): + character_approved_count += 1 + continue + + characters_before_source: list[str] = FREQUENCIES[language][ + 0:character_rank_in_language + ] + characters_after_source: list[str] = FREQUENCIES[language][ + character_rank_in_language: + ] + characters_before: list[str] = ordered_characters[0:character_rank] + characters_after: list[str] = ordered_characters[character_rank:] + + before_match_count: int = len( + set(characters_before) & set(characters_before_source) + ) + + after_match_count: int = len( + set(characters_after) & set(characters_after_source) + ) + + if len(characters_before_source) == 0 and before_match_count <= 4: + character_approved_count += 1 + continue + + if len(characters_after_source) == 0 and after_match_count <= 4: + character_approved_count += 1 + continue + + if ( + before_match_count / len(characters_before_source) >= 0.4 + or after_match_count / len(characters_after_source) >= 0.4 + ): + character_approved_count += 1 + continue + + return character_approved_count / len(ordered_characters) + + +def alpha_unicode_split(decoded_sequence: str) -> list[str]: + """ + Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. + Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; + One containing the latin letters and the other hebrew. + """ + layers: dict[str, str] = {} + + for character in decoded_sequence: + if character.isalpha() is False: + continue + + character_range: str | None = unicode_range(character) + + if character_range is None: + continue + + layer_target_range: str | None = None + + for discovered_range in layers: + if ( + is_suspiciously_successive_range(discovered_range, character_range) + is False + ): + layer_target_range = discovered_range + break + + if layer_target_range is None: + layer_target_range = character_range + + if layer_target_range not in layers: + layers[layer_target_range] = character.lower() + continue + + layers[layer_target_range] += character.lower() + + return list(layers.values()) + + +def merge_coherence_ratios(results: list[CoherenceMatches]) -> CoherenceMatches: + """ + This function merge results previously given by the function coherence_ratio. + The return type is the same as coherence_ratio. 
+ """ + per_language_ratios: dict[str, list[float]] = {} + for result in results: + for sub_result in result: + language, ratio = sub_result + if language not in per_language_ratios: + per_language_ratios[language] = [ratio] + continue + per_language_ratios[language].append(ratio) + + merge = [ + ( + language, + round( + sum(per_language_ratios[language]) / len(per_language_ratios[language]), + 4, + ), + ) + for language in per_language_ratios + ] + + return sorted(merge, key=lambda x: x[1], reverse=True) + + +def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches: + """ + We shall NOT return "English—" in CoherenceMatches because it is an alternative + of "English". This function only keeps the best match and remove the em-dash in it. + """ + index_results: dict[str, list[float]] = dict() + + for result in results: + language, ratio = result + no_em_name: str = language.replace("—", "") + + if no_em_name not in index_results: + index_results[no_em_name] = [] + + index_results[no_em_name].append(ratio) + + if any(len(index_results[e]) > 1 for e in index_results): + filtered_results: CoherenceMatches = [] + + for language in index_results: + filtered_results.append((language, max(index_results[language]))) + + return filtered_results + + return results + + +@lru_cache(maxsize=2048) +def coherence_ratio( + decoded_sequence: str, threshold: float = 0.1, lg_inclusion: str | None = None +) -> CoherenceMatches: + """ + Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers. + A layer = Character extraction by alphabets/ranges. + """ + + results: list[tuple[str, float]] = [] + ignore_non_latin: bool = False + + sufficient_match_count: int = 0 + + lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else [] + if "Latin Based" in lg_inclusion_list: + ignore_non_latin = True + lg_inclusion_list.remove("Latin Based") + + for layer in alpha_unicode_split(decoded_sequence): + sequence_frequencies: TypeCounter[str] = Counter(layer) + most_common = sequence_frequencies.most_common() + + character_count: int = sum(o for c, o in most_common) + + if character_count <= TOO_SMALL_SEQUENCE: + continue + + popular_character_ordered: list[str] = [c for c, o in most_common] + + for language in lg_inclusion_list or alphabet_languages( + popular_character_ordered, ignore_non_latin + ): + ratio: float = characters_popularity_compare( + language, popular_character_ordered + ) + + if ratio < threshold: + continue + elif ratio >= 0.8: + sufficient_match_count += 1 + + results.append((language, round(ratio, 4))) + + if sufficient_match_count >= 3: + break + + return sorted( + filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True + ) diff --git a/venv/Lib/site-packages/charset_normalizer/cli/__init__.py b/venv/Lib/site-packages/charset_normalizer/cli/__init__.py new file mode 100644 index 00000000..543a5a4d --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/cli/__init__.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from .__main__ import cli_detect, query_yes_no + +__all__ = ( + "cli_detect", + "query_yes_no", +) diff --git a/venv/Lib/site-packages/charset_normalizer/cli/__main__.py b/venv/Lib/site-packages/charset_normalizer/cli/__main__.py new file mode 100644 index 00000000..cb64156a --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/cli/__main__.py @@ -0,0 +1,381 @@ +from __future__ import annotations + +import argparse +import sys +import typing +from json import dumps +from 
diff --git a/venv/Lib/site-packages/charset_normalizer/cli/__init__.py b/venv/Lib/site-packages/charset_normalizer/cli/__init__.py
new file mode 100644
index 00000000..543a5a4d
--- /dev/null
+++ b/venv/Lib/site-packages/charset_normalizer/cli/__init__.py
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+from .__main__ import cli_detect, query_yes_no
+
+__all__ = (
+    "cli_detect",
+    "query_yes_no",
+)
diff --git a/venv/Lib/site-packages/charset_normalizer/cli/__main__.py b/venv/Lib/site-packages/charset_normalizer/cli/__main__.py
new file mode 100644
index 00000000..cb64156a
--- /dev/null
+++ b/venv/Lib/site-packages/charset_normalizer/cli/__main__.py
@@ -0,0 +1,381 @@
+from __future__ import annotations
+
+import argparse
+import sys
+import typing
+from json import dumps
+from os.path import abspath, basename, dirname, join, realpath
+from platform import python_version
+from unicodedata import unidata_version
+
+import charset_normalizer.md as md_module
+from charset_normalizer import from_fp
+from charset_normalizer.models import CliDetectionResult
+from charset_normalizer.version import __version__
+
+
+def query_yes_no(question: str, default: str = "yes") -> bool:
+    """Ask a yes/no question via input() and return the answer.
+
+    "question" is a string that is presented to the user.
+    "default" is the presumed answer if the user just hits <Enter>.
+    It must be "yes" (the default), "no" or None (meaning
+    an answer is required of the user).
+
+    The "answer" return value is True for "yes" or False for "no".
+
+    Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
+    """
+    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
+    if default is None:
+        prompt = " [y/n] "
+    elif default == "yes":
+        prompt = " [Y/n] "
+    elif default == "no":
+        prompt = " [y/N] "
+    else:
+        raise ValueError("invalid default answer: '%s'" % default)
+
+    while True:
+        sys.stdout.write(question + prompt)
+        choice = input().lower()
+        if default is not None and choice == "":
+            return valid[default]
+        elif choice in valid:
+            return valid[choice]
+        else:
+            sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
+
+
+class FileType:
+    """Factory for creating file object types
+
+    Instances of FileType are typically passed as type= arguments to the
+    ArgumentParser add_argument() method.
+
+    Keyword Arguments:
+        - mode -- A string indicating how the file is to be opened. Accepts the
+            same values as the builtin open() function.
+        - bufsize -- The file's desired buffer size. Accepts the same values as
+            the builtin open() function.
+        - encoding -- The file's encoding. Accepts the same values as the
+            builtin open() function.
+        - errors -- A string indicating how encoding and decoding errors are to
+            be handled. Accepts the same value as the builtin open() function.
+ + Backported from CPython 3.12 + """ + + def __init__( + self, + mode: str = "r", + bufsize: int = -1, + encoding: str | None = None, + errors: str | None = None, + ): + self._mode = mode + self._bufsize = bufsize + self._encoding = encoding + self._errors = errors + + def __call__(self, string: str) -> typing.IO: # type: ignore[type-arg] + # the special argument "-" means sys.std{in,out} + if string == "-": + if "r" in self._mode: + return sys.stdin.buffer if "b" in self._mode else sys.stdin + elif any(c in self._mode for c in "wax"): + return sys.stdout.buffer if "b" in self._mode else sys.stdout + else: + msg = f'argument "-" with mode {self._mode}' + raise ValueError(msg) + + # all other arguments are used as file names + try: + return open(string, self._mode, self._bufsize, self._encoding, self._errors) + except OSError as e: + message = f"can't open '{string}': {e}" + raise argparse.ArgumentTypeError(message) + + def __repr__(self) -> str: + args = self._mode, self._bufsize + kwargs = [("encoding", self._encoding), ("errors", self._errors)] + args_str = ", ".join( + [repr(arg) for arg in args if arg != -1] + + [f"{kw}={arg!r}" for kw, arg in kwargs if arg is not None] + ) + return f"{type(self).__name__}({args_str})" + + +def cli_detect(argv: list[str] | None = None) -> int: + """ + CLI assistant using ARGV and ArgumentParser + :param argv: + :return: 0 if everything is fine, anything else equal trouble + """ + parser = argparse.ArgumentParser( + description="The Real First Universal Charset Detector. " + "Discover originating encoding used on text file. " + "Normalize text to unicode." + ) + + parser.add_argument( + "files", type=FileType("rb"), nargs="+", help="File(s) to be analysed" + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + default=False, + dest="verbose", + help="Display complementary information about file if any. " + "Stdout will contain logs about the detection process.", + ) + parser.add_argument( + "-a", + "--with-alternative", + action="store_true", + default=False, + dest="alternatives", + help="Output complementary possibilities if any. Top-level JSON WILL be a list.", + ) + parser.add_argument( + "-n", + "--normalize", + action="store_true", + default=False, + dest="normalize", + help="Permit to normalize input file. If not set, program does not write anything.", + ) + parser.add_argument( + "-m", + "--minimal", + action="store_true", + default=False, + dest="minimal", + help="Only output the charset detected to STDOUT. Disabling JSON output.", + ) + parser.add_argument( + "-r", + "--replace", + action="store_true", + default=False, + dest="replace", + help="Replace file when trying to normalize it instead of creating a new one.", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + dest="force", + help="Replace file without asking if you are sure, use this flag with caution.", + ) + parser.add_argument( + "-i", + "--no-preemptive", + action="store_true", + default=False, + dest="no_preemptive", + help="Disable looking at a charset declaration to hint the detector.", + ) + parser.add_argument( + "-t", + "--threshold", + action="store", + default=0.2, + type=float, + dest="threshold", + help="Define a custom maximum amount of noise allowed in decoded content. 0. 
<= noise <= 1.", + ) + parser.add_argument( + "--version", + action="version", + version="Charset-Normalizer {} - Python {} - Unicode {} - SpeedUp {}".format( + __version__, + python_version(), + unidata_version, + "OFF" if md_module.__file__.lower().endswith(".py") else "ON", + ), + help="Show version information and exit.", + ) + + args = parser.parse_args(argv) + + if args.replace is True and args.normalize is False: + if args.files: + for my_file in args.files: + my_file.close() + print("Use --replace in addition of --normalize only.", file=sys.stderr) + return 1 + + if args.force is True and args.replace is False: + if args.files: + for my_file in args.files: + my_file.close() + print("Use --force in addition of --replace only.", file=sys.stderr) + return 1 + + if args.threshold < 0.0 or args.threshold > 1.0: + if args.files: + for my_file in args.files: + my_file.close() + print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr) + return 1 + + x_ = [] + + for my_file in args.files: + matches = from_fp( + my_file, + threshold=args.threshold, + explain=args.verbose, + preemptive_behaviour=args.no_preemptive is False, + ) + + best_guess = matches.best() + + if best_guess is None: + print( + 'Unable to identify originating encoding for "{}". {}'.format( + my_file.name, + ( + "Maybe try increasing maximum amount of chaos." + if args.threshold < 1.0 + else "" + ), + ), + file=sys.stderr, + ) + x_.append( + CliDetectionResult( + abspath(my_file.name), + None, + [], + [], + "Unknown", + [], + False, + 1.0, + 0.0, + None, + True, + ) + ) + else: + x_.append( + CliDetectionResult( + abspath(my_file.name), + best_guess.encoding, + best_guess.encoding_aliases, + [ + cp + for cp in best_guess.could_be_from_charset + if cp != best_guess.encoding + ], + best_guess.language, + best_guess.alphabets, + best_guess.bom, + best_guess.percent_chaos, + best_guess.percent_coherence, + None, + True, + ) + ) + + if len(matches) > 1 and args.alternatives: + for el in matches: + if el != best_guess: + x_.append( + CliDetectionResult( + abspath(my_file.name), + el.encoding, + el.encoding_aliases, + [ + cp + for cp in el.could_be_from_charset + if cp != el.encoding + ], + el.language, + el.alphabets, + el.bom, + el.percent_chaos, + el.percent_coherence, + None, + False, + ) + ) + + if args.normalize is True: + if best_guess.encoding.startswith("utf") is True: + print( + '"{}" file does not need to be normalized, as it already came from unicode.'.format( + my_file.name + ), + file=sys.stderr, + ) + if my_file.closed is False: + my_file.close() + continue + + dir_path = dirname(realpath(my_file.name)) + file_name = basename(realpath(my_file.name)) + + o_: list[str] = file_name.split(".") + + if args.replace is False: + o_.insert(-1, best_guess.encoding) + if my_file.closed is False: + my_file.close() + elif ( + args.force is False + and query_yes_no( + 'Are you sure to normalize "{}" by replacing it ?'.format( + my_file.name + ), + "no", + ) + is False + ): + if my_file.closed is False: + my_file.close() + continue + + try: + x_[0].unicode_path = join(dir_path, ".".join(o_)) + + with open(x_[0].unicode_path, "wb") as fp: + fp.write(best_guess.output()) + except OSError as e: + print(str(e), file=sys.stderr) + if my_file.closed is False: + my_file.close() + return 2 + + if my_file.closed is False: + my_file.close() + + if args.minimal is False: + print( + dumps( + [el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__, + ensure_ascii=True, + indent=4, + ) + ) + else: + for my_file in 
args.files: + print( + ", ".join( + [ + el.encoding or "undefined" + for el in x_ + if el.path == abspath(my_file.name) + ] + ) + ) + + return 0 + + +if __name__ == "__main__": + cli_detect() diff --git a/venv/Lib/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..75ac6afb Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc b/venv/Lib/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 00000000..9770a811 Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/charset_normalizer/constant.py b/venv/Lib/site-packages/charset_normalizer/constant.py new file mode 100644 index 00000000..cc71a019 --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/constant.py @@ -0,0 +1,2015 @@ +from __future__ import annotations + +from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE +from encodings.aliases import aliases +from re import IGNORECASE +from re import compile as re_compile + +# Contain for each eligible encoding a list of/item bytes SIG/BOM +ENCODING_MARKS: dict[str, bytes | list[bytes]] = { + "utf_8": BOM_UTF8, + "utf_7": [ + b"\x2b\x2f\x76\x38", + b"\x2b\x2f\x76\x39", + b"\x2b\x2f\x76\x2b", + b"\x2b\x2f\x76\x2f", + b"\x2b\x2f\x76\x38\x2d", + ], + "gb18030": b"\x84\x31\x95\x33", + "utf_32": [BOM_UTF32_BE, BOM_UTF32_LE], + "utf_16": [BOM_UTF16_BE, BOM_UTF16_LE], +} + +TOO_SMALL_SEQUENCE: int = 32 +TOO_BIG_SEQUENCE: int = int(10e6) + +UTF8_MAXIMAL_ALLOCATION: int = 1_112_064 + +# Up-to-date Unicode ucd/15.0.0 +UNICODE_RANGES_COMBINED: dict[str, range] = { + "Control character": range(32), + "Basic Latin": range(32, 128), + "Latin-1 Supplement": range(128, 256), + "Latin Extended-A": range(256, 384), + "Latin Extended-B": range(384, 592), + "IPA Extensions": range(592, 688), + "Spacing Modifier Letters": range(688, 768), + "Combining Diacritical Marks": range(768, 880), + "Greek and Coptic": range(880, 1024), + "Cyrillic": range(1024, 1280), + "Cyrillic Supplement": range(1280, 1328), + "Armenian": range(1328, 1424), + "Hebrew": range(1424, 1536), + "Arabic": range(1536, 1792), + "Syriac": range(1792, 1872), + "Arabic Supplement": range(1872, 1920), + "Thaana": range(1920, 1984), + "NKo": range(1984, 2048), + "Samaritan": range(2048, 2112), + "Mandaic": range(2112, 2144), + "Syriac Supplement": range(2144, 2160), + "Arabic Extended-B": range(2160, 2208), + "Arabic Extended-A": range(2208, 2304), + "Devanagari": range(2304, 2432), + "Bengali": range(2432, 2560), + "Gurmukhi": range(2560, 2688), + "Gujarati": range(2688, 2816), + "Oriya": range(2816, 2944), + "Tamil": range(2944, 3072), + "Telugu": range(3072, 3200), + "Kannada": range(3200, 3328), + "Malayalam": range(3328, 3456), + "Sinhala": range(3456, 3584), + "Thai": range(3584, 3712), + "Lao": range(3712, 3840), + "Tibetan": range(3840, 4096), + "Myanmar": range(4096, 4256), + "Georgian": range(4256, 4352), + "Hangul Jamo": range(4352, 4608), + "Ethiopic": range(4608, 4992), + "Ethiopic Supplement": range(4992, 5024), + "Cherokee": range(5024, 5120), + "Unified Canadian Aboriginal Syllabics": range(5120, 5760), + "Ogham": range(5760, 5792), + "Runic": 
range(5792, 5888), + "Tagalog": range(5888, 5920), + "Hanunoo": range(5920, 5952), + "Buhid": range(5952, 5984), + "Tagbanwa": range(5984, 6016), + "Khmer": range(6016, 6144), + "Mongolian": range(6144, 6320), + "Unified Canadian Aboriginal Syllabics Extended": range(6320, 6400), + "Limbu": range(6400, 6480), + "Tai Le": range(6480, 6528), + "New Tai Lue": range(6528, 6624), + "Khmer Symbols": range(6624, 6656), + "Buginese": range(6656, 6688), + "Tai Tham": range(6688, 6832), + "Combining Diacritical Marks Extended": range(6832, 6912), + "Balinese": range(6912, 7040), + "Sundanese": range(7040, 7104), + "Batak": range(7104, 7168), + "Lepcha": range(7168, 7248), + "Ol Chiki": range(7248, 7296), + "Cyrillic Extended-C": range(7296, 7312), + "Georgian Extended": range(7312, 7360), + "Sundanese Supplement": range(7360, 7376), + "Vedic Extensions": range(7376, 7424), + "Phonetic Extensions": range(7424, 7552), + "Phonetic Extensions Supplement": range(7552, 7616), + "Combining Diacritical Marks Supplement": range(7616, 7680), + "Latin Extended Additional": range(7680, 7936), + "Greek Extended": range(7936, 8192), + "General Punctuation": range(8192, 8304), + "Superscripts and Subscripts": range(8304, 8352), + "Currency Symbols": range(8352, 8400), + "Combining Diacritical Marks for Symbols": range(8400, 8448), + "Letterlike Symbols": range(8448, 8528), + "Number Forms": range(8528, 8592), + "Arrows": range(8592, 8704), + "Mathematical Operators": range(8704, 8960), + "Miscellaneous Technical": range(8960, 9216), + "Control Pictures": range(9216, 9280), + "Optical Character Recognition": range(9280, 9312), + "Enclosed Alphanumerics": range(9312, 9472), + "Box Drawing": range(9472, 9600), + "Block Elements": range(9600, 9632), + "Geometric Shapes": range(9632, 9728), + "Miscellaneous Symbols": range(9728, 9984), + "Dingbats": range(9984, 10176), + "Miscellaneous Mathematical Symbols-A": range(10176, 10224), + "Supplemental Arrows-A": range(10224, 10240), + "Braille Patterns": range(10240, 10496), + "Supplemental Arrows-B": range(10496, 10624), + "Miscellaneous Mathematical Symbols-B": range(10624, 10752), + "Supplemental Mathematical Operators": range(10752, 11008), + "Miscellaneous Symbols and Arrows": range(11008, 11264), + "Glagolitic": range(11264, 11360), + "Latin Extended-C": range(11360, 11392), + "Coptic": range(11392, 11520), + "Georgian Supplement": range(11520, 11568), + "Tifinagh": range(11568, 11648), + "Ethiopic Extended": range(11648, 11744), + "Cyrillic Extended-A": range(11744, 11776), + "Supplemental Punctuation": range(11776, 11904), + "CJK Radicals Supplement": range(11904, 12032), + "Kangxi Radicals": range(12032, 12256), + "Ideographic Description Characters": range(12272, 12288), + "CJK Symbols and Punctuation": range(12288, 12352), + "Hiragana": range(12352, 12448), + "Katakana": range(12448, 12544), + "Bopomofo": range(12544, 12592), + "Hangul Compatibility Jamo": range(12592, 12688), + "Kanbun": range(12688, 12704), + "Bopomofo Extended": range(12704, 12736), + "CJK Strokes": range(12736, 12784), + "Katakana Phonetic Extensions": range(12784, 12800), + "Enclosed CJK Letters and Months": range(12800, 13056), + "CJK Compatibility": range(13056, 13312), + "CJK Unified Ideographs Extension A": range(13312, 19904), + "Yijing Hexagram Symbols": range(19904, 19968), + "CJK Unified Ideographs": range(19968, 40960), + "Yi Syllables": range(40960, 42128), + "Yi Radicals": range(42128, 42192), + "Lisu": range(42192, 42240), + "Vai": range(42240, 42560), + "Cyrillic Extended-B": 
range(42560, 42656), + "Bamum": range(42656, 42752), + "Modifier Tone Letters": range(42752, 42784), + "Latin Extended-D": range(42784, 43008), + "Syloti Nagri": range(43008, 43056), + "Common Indic Number Forms": range(43056, 43072), + "Phags-pa": range(43072, 43136), + "Saurashtra": range(43136, 43232), + "Devanagari Extended": range(43232, 43264), + "Kayah Li": range(43264, 43312), + "Rejang": range(43312, 43360), + "Hangul Jamo Extended-A": range(43360, 43392), + "Javanese": range(43392, 43488), + "Myanmar Extended-B": range(43488, 43520), + "Cham": range(43520, 43616), + "Myanmar Extended-A": range(43616, 43648), + "Tai Viet": range(43648, 43744), + "Meetei Mayek Extensions": range(43744, 43776), + "Ethiopic Extended-A": range(43776, 43824), + "Latin Extended-E": range(43824, 43888), + "Cherokee Supplement": range(43888, 43968), + "Meetei Mayek": range(43968, 44032), + "Hangul Syllables": range(44032, 55216), + "Hangul Jamo Extended-B": range(55216, 55296), + "High Surrogates": range(55296, 56192), + "High Private Use Surrogates": range(56192, 56320), + "Low Surrogates": range(56320, 57344), + "Private Use Area": range(57344, 63744), + "CJK Compatibility Ideographs": range(63744, 64256), + "Alphabetic Presentation Forms": range(64256, 64336), + "Arabic Presentation Forms-A": range(64336, 65024), + "Variation Selectors": range(65024, 65040), + "Vertical Forms": range(65040, 65056), + "Combining Half Marks": range(65056, 65072), + "CJK Compatibility Forms": range(65072, 65104), + "Small Form Variants": range(65104, 65136), + "Arabic Presentation Forms-B": range(65136, 65280), + "Halfwidth and Fullwidth Forms": range(65280, 65520), + "Specials": range(65520, 65536), + "Linear B Syllabary": range(65536, 65664), + "Linear B Ideograms": range(65664, 65792), + "Aegean Numbers": range(65792, 65856), + "Ancient Greek Numbers": range(65856, 65936), + "Ancient Symbols": range(65936, 66000), + "Phaistos Disc": range(66000, 66048), + "Lycian": range(66176, 66208), + "Carian": range(66208, 66272), + "Coptic Epact Numbers": range(66272, 66304), + "Old Italic": range(66304, 66352), + "Gothic": range(66352, 66384), + "Old Permic": range(66384, 66432), + "Ugaritic": range(66432, 66464), + "Old Persian": range(66464, 66528), + "Deseret": range(66560, 66640), + "Shavian": range(66640, 66688), + "Osmanya": range(66688, 66736), + "Osage": range(66736, 66816), + "Elbasan": range(66816, 66864), + "Caucasian Albanian": range(66864, 66928), + "Vithkuqi": range(66928, 67008), + "Linear A": range(67072, 67456), + "Latin Extended-F": range(67456, 67520), + "Cypriot Syllabary": range(67584, 67648), + "Imperial Aramaic": range(67648, 67680), + "Palmyrene": range(67680, 67712), + "Nabataean": range(67712, 67760), + "Hatran": range(67808, 67840), + "Phoenician": range(67840, 67872), + "Lydian": range(67872, 67904), + "Meroitic Hieroglyphs": range(67968, 68000), + "Meroitic Cursive": range(68000, 68096), + "Kharoshthi": range(68096, 68192), + "Old South Arabian": range(68192, 68224), + "Old North Arabian": range(68224, 68256), + "Manichaean": range(68288, 68352), + "Avestan": range(68352, 68416), + "Inscriptional Parthian": range(68416, 68448), + "Inscriptional Pahlavi": range(68448, 68480), + "Psalter Pahlavi": range(68480, 68528), + "Old Turkic": range(68608, 68688), + "Old Hungarian": range(68736, 68864), + "Hanifi Rohingya": range(68864, 68928), + "Rumi Numeral Symbols": range(69216, 69248), + "Yezidi": range(69248, 69312), + "Arabic Extended-C": range(69312, 69376), + "Old Sogdian": range(69376, 69424), + 
"Sogdian": range(69424, 69488), + "Old Uyghur": range(69488, 69552), + "Chorasmian": range(69552, 69600), + "Elymaic": range(69600, 69632), + "Brahmi": range(69632, 69760), + "Kaithi": range(69760, 69840), + "Sora Sompeng": range(69840, 69888), + "Chakma": range(69888, 69968), + "Mahajani": range(69968, 70016), + "Sharada": range(70016, 70112), + "Sinhala Archaic Numbers": range(70112, 70144), + "Khojki": range(70144, 70224), + "Multani": range(70272, 70320), + "Khudawadi": range(70320, 70400), + "Grantha": range(70400, 70528), + "Newa": range(70656, 70784), + "Tirhuta": range(70784, 70880), + "Siddham": range(71040, 71168), + "Modi": range(71168, 71264), + "Mongolian Supplement": range(71264, 71296), + "Takri": range(71296, 71376), + "Ahom": range(71424, 71504), + "Dogra": range(71680, 71760), + "Warang Citi": range(71840, 71936), + "Dives Akuru": range(71936, 72032), + "Nandinagari": range(72096, 72192), + "Zanabazar Square": range(72192, 72272), + "Soyombo": range(72272, 72368), + "Unified Canadian Aboriginal Syllabics Extended-A": range(72368, 72384), + "Pau Cin Hau": range(72384, 72448), + "Devanagari Extended-A": range(72448, 72544), + "Bhaiksuki": range(72704, 72816), + "Marchen": range(72816, 72896), + "Masaram Gondi": range(72960, 73056), + "Gunjala Gondi": range(73056, 73136), + "Makasar": range(73440, 73472), + "Kawi": range(73472, 73568), + "Lisu Supplement": range(73648, 73664), + "Tamil Supplement": range(73664, 73728), + "Cuneiform": range(73728, 74752), + "Cuneiform Numbers and Punctuation": range(74752, 74880), + "Early Dynastic Cuneiform": range(74880, 75088), + "Cypro-Minoan": range(77712, 77824), + "Egyptian Hieroglyphs": range(77824, 78896), + "Egyptian Hieroglyph Format Controls": range(78896, 78944), + "Anatolian Hieroglyphs": range(82944, 83584), + "Bamum Supplement": range(92160, 92736), + "Mro": range(92736, 92784), + "Tangsa": range(92784, 92880), + "Bassa Vah": range(92880, 92928), + "Pahawh Hmong": range(92928, 93072), + "Medefaidrin": range(93760, 93856), + "Miao": range(93952, 94112), + "Ideographic Symbols and Punctuation": range(94176, 94208), + "Tangut": range(94208, 100352), + "Tangut Components": range(100352, 101120), + "Khitan Small Script": range(101120, 101632), + "Tangut Supplement": range(101632, 101760), + "Kana Extended-B": range(110576, 110592), + "Kana Supplement": range(110592, 110848), + "Kana Extended-A": range(110848, 110896), + "Small Kana Extension": range(110896, 110960), + "Nushu": range(110960, 111360), + "Duployan": range(113664, 113824), + "Shorthand Format Controls": range(113824, 113840), + "Znamenny Musical Notation": range(118528, 118736), + "Byzantine Musical Symbols": range(118784, 119040), + "Musical Symbols": range(119040, 119296), + "Ancient Greek Musical Notation": range(119296, 119376), + "Kaktovik Numerals": range(119488, 119520), + "Mayan Numerals": range(119520, 119552), + "Tai Xuan Jing Symbols": range(119552, 119648), + "Counting Rod Numerals": range(119648, 119680), + "Mathematical Alphanumeric Symbols": range(119808, 120832), + "Sutton SignWriting": range(120832, 121520), + "Latin Extended-G": range(122624, 122880), + "Glagolitic Supplement": range(122880, 122928), + "Cyrillic Extended-D": range(122928, 123024), + "Nyiakeng Puachue Hmong": range(123136, 123216), + "Toto": range(123536, 123584), + "Wancho": range(123584, 123648), + "Nag Mundari": range(124112, 124160), + "Ethiopic Extended-B": range(124896, 124928), + "Mende Kikakui": range(124928, 125152), + "Adlam": range(125184, 125280), + "Indic Siyaq Numbers": 
range(126064, 126144), + "Ottoman Siyaq Numbers": range(126208, 126288), + "Arabic Mathematical Alphabetic Symbols": range(126464, 126720), + "Mahjong Tiles": range(126976, 127024), + "Domino Tiles": range(127024, 127136), + "Playing Cards": range(127136, 127232), + "Enclosed Alphanumeric Supplement": range(127232, 127488), + "Enclosed Ideographic Supplement": range(127488, 127744), + "Miscellaneous Symbols and Pictographs": range(127744, 128512), + "Emoticons range(Emoji)": range(128512, 128592), + "Ornamental Dingbats": range(128592, 128640), + "Transport and Map Symbols": range(128640, 128768), + "Alchemical Symbols": range(128768, 128896), + "Geometric Shapes Extended": range(128896, 129024), + "Supplemental Arrows-C": range(129024, 129280), + "Supplemental Symbols and Pictographs": range(129280, 129536), + "Chess Symbols": range(129536, 129648), + "Symbols and Pictographs Extended-A": range(129648, 129792), + "Symbols for Legacy Computing": range(129792, 130048), + "CJK Unified Ideographs Extension B": range(131072, 173792), + "CJK Unified Ideographs Extension C": range(173824, 177984), + "CJK Unified Ideographs Extension D": range(177984, 178208), + "CJK Unified Ideographs Extension E": range(178208, 183984), + "CJK Unified Ideographs Extension F": range(183984, 191472), + "CJK Compatibility Ideographs Supplement": range(194560, 195104), + "CJK Unified Ideographs Extension G": range(196608, 201552), + "CJK Unified Ideographs Extension H": range(201552, 205744), + "Tags": range(917504, 917632), + "Variation Selectors Supplement": range(917760, 918000), + "Supplementary Private Use Area-A": range(983040, 1048576), + "Supplementary Private Use Area-B": range(1048576, 1114112), +} + + +UNICODE_SECONDARY_RANGE_KEYWORD: list[str] = [ + "Supplement", + "Extended", + "Extensions", + "Modifier", + "Marks", + "Punctuation", + "Symbols", + "Forms", + "Operators", + "Miscellaneous", + "Drawing", + "Block", + "Shapes", + "Supplemental", + "Tags", +] + +RE_POSSIBLE_ENCODING_INDICATION = re_compile( + r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)", + IGNORECASE, +) + +IANA_NO_ALIASES = [ + "cp720", + "cp737", + "cp856", + "cp874", + "cp875", + "cp1006", + "koi8_r", + "koi8_t", + "koi8_u", +] + +IANA_SUPPORTED: list[str] = sorted( + filter( + lambda x: x.endswith("_codec") is False + and x not in {"rot_13", "tactis", "mbcs"}, + list(set(aliases.values())) + IANA_NO_ALIASES, + ) +) + +IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED) + +# pre-computed code page that are similar using the function cp_similarity. 
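+# e.g. cp037 and cp500 are EBCDIC variants that decode most byte values identically,
+# which is why each appears in the other's similarity list below.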
+IANA_SUPPORTED_SIMILAR: dict[str, list[str]] = { + "cp037": ["cp1026", "cp1140", "cp273", "cp500"], + "cp1026": ["cp037", "cp1140", "cp273", "cp500"], + "cp1125": ["cp866"], + "cp1140": ["cp037", "cp1026", "cp273", "cp500"], + "cp1250": ["iso8859_2"], + "cp1251": ["kz1048", "ptcp154"], + "cp1252": ["iso8859_15", "iso8859_9", "latin_1"], + "cp1253": ["iso8859_7"], + "cp1254": ["iso8859_15", "iso8859_9", "latin_1"], + "cp1257": ["iso8859_13"], + "cp273": ["cp037", "cp1026", "cp1140", "cp500"], + "cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"], + "cp500": ["cp037", "cp1026", "cp1140", "cp273"], + "cp850": ["cp437", "cp857", "cp858", "cp865"], + "cp857": ["cp850", "cp858", "cp865"], + "cp858": ["cp437", "cp850", "cp857", "cp865"], + "cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"], + "cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"], + "cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"], + "cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"], + "cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"], + "cp866": ["cp1125"], + "iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"], + "iso8859_11": ["tis_620"], + "iso8859_13": ["cp1257"], + "iso8859_14": [ + "iso8859_10", + "iso8859_15", + "iso8859_16", + "iso8859_3", + "iso8859_9", + "latin_1", + ], + "iso8859_15": [ + "cp1252", + "cp1254", + "iso8859_10", + "iso8859_14", + "iso8859_16", + "iso8859_3", + "iso8859_9", + "latin_1", + ], + "iso8859_16": [ + "iso8859_14", + "iso8859_15", + "iso8859_2", + "iso8859_3", + "iso8859_9", + "latin_1", + ], + "iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"], + "iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"], + "iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"], + "iso8859_7": ["cp1253"], + "iso8859_9": [ + "cp1252", + "cp1254", + "cp1258", + "iso8859_10", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "iso8859_3", + "iso8859_4", + "latin_1", + ], + "kz1048": ["cp1251", "ptcp154"], + "latin_1": [ + "cp1252", + "cp1254", + "cp1258", + "iso8859_10", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "iso8859_3", + "iso8859_4", + "iso8859_9", + ], + "mac_iceland": ["mac_roman", "mac_turkish"], + "mac_roman": ["mac_iceland", "mac_turkish"], + "mac_turkish": ["mac_iceland", "mac_roman"], + "ptcp154": ["cp1251", "kz1048"], + "tis_620": ["iso8859_11"], +} + + +CHARDET_CORRESPONDENCE: dict[str, str] = { + "iso2022_kr": "ISO-2022-KR", + "iso2022_jp": "ISO-2022-JP", + "euc_kr": "EUC-KR", + "tis_620": "TIS-620", + "utf_32": "UTF-32", + "euc_jp": "EUC-JP", + "koi8_r": "KOI8-R", + "iso8859_1": "ISO-8859-1", + "iso8859_2": "ISO-8859-2", + "iso8859_5": "ISO-8859-5", + "iso8859_6": "ISO-8859-6", + "iso8859_7": "ISO-8859-7", + "iso8859_8": "ISO-8859-8", + "utf_16": "UTF-16", + "cp855": "IBM855", + "mac_cyrillic": "MacCyrillic", + "gb2312": "GB2312", + "gb18030": "GB18030", + "cp932": "CP932", + "cp866": "IBM866", + "utf_8": "utf-8", + "utf_8_sig": "UTF-8-SIG", + "shift_jis": "SHIFT_JIS", + "big5": "Big5", + "cp1250": "windows-1250", + "cp1251": "windows-1251", + "cp1252": "Windows-1252", + "cp1253": "windows-1253", + "cp1255": "windows-1255", + "cp1256": "windows-1256", + "cp1254": "Windows-1254", + "cp949": "CP949", +} + + +COMMON_SAFE_ASCII_CHARACTERS: set[str] = { + "<", + ">", + "=", + ":", + "/", + "&", + ";", + "{", + "}", + "[", + "]", + ",", + "|", + '"', + "-", + "(", + ")", +} + +# Sample character sets — replace with full lists if needed +COMMON_CHINESE_CHARACTERS = 
"的一是在不了有和人这中大为上个国我以要他时来用们生到作地于出就分对成会可主发年动同工也能下过子说产种面而方后多定行学法所民得经十三之进着等部度家电力里如水化高自二理起小物现实加量都两体制机当使点从业本去把性好应开它合还因由其些然前外天政四日那社义事平形相全表间样与关各重新线内数正心反你明看原又么利比或但质气第向道命此变条只没结解问意建月公无系军很情者最立代想已通并提直题党程展五果料象员革位入常文总次品式活设及管特件长求老头基资边流路级少图山统接知较将组见计别她手角期根论运农指几九区强放决西被干做必战先回则任取据处队南给色光门即保治北造百规热领七海口东导器压志世金增争济阶油思术极交受联什认六共权收证改清己美再采转更单风切打白教速花带安场身车例真务具万每目至达走积示议声报斗完类八离华名确才科张信马节话米整空元况今集温传土许步群广石记需段研界拉林律叫且究观越织装影算低持音众书布复容儿须际商非验连断深难近矿千周委素技备半办青省列习响约支般史感劳便团往酸历市克何除消构府太准精值号率族维划选标写存候毛亲快效斯院查江型眼王按格养易置派层片始却专状育厂京识适属圆包火住调满县局照参红细引听该铁价严龙飞" + +COMMON_JAPANESE_CHARACTERS = "日一国年大十二本中長出三時行見月分後前生五間上東四今金九入学高円子外八六下来気小七山話女北午百書先名川千水半男西電校語土木聞食車何南万毎白天母火右読友左休父雨" + +COMMON_KOREAN_CHARACTERS = "一二三四五六七八九十百千萬上下左右中人女子大小山川日月火水木金土父母天地國名年時文校學生" + +# Combine all into a set +COMMON_CJK_CHARACTERS = set( + "".join( + [ + COMMON_CHINESE_CHARACTERS, + COMMON_JAPANESE_CHARACTERS, + COMMON_KOREAN_CHARACTERS, + ] + ) +) + +KO_NAMES: set[str] = {"johab", "cp949", "euc_kr"} +ZH_NAMES: set[str] = {"big5", "cp950", "big5hkscs", "hz"} + +# Logging LEVEL below DEBUG +TRACE: int = 5 + + +# Language label that contain the em dash "—" +# character are to be considered alternative seq to origin +FREQUENCIES: dict[str, list[str]] = { + "English": [ + "e", + "a", + "t", + "i", + "o", + "n", + "s", + "r", + "h", + "l", + "d", + "c", + "u", + "m", + "f", + "p", + "g", + "w", + "y", + "b", + "v", + "k", + "x", + "j", + "z", + "q", + ], + "English—": [ + "e", + "a", + "t", + "i", + "o", + "n", + "s", + "r", + "h", + "l", + "d", + "c", + "m", + "u", + "f", + "p", + "g", + "w", + "b", + "y", + "v", + "k", + "j", + "x", + "z", + "q", + ], + "German": [ + "e", + "n", + "i", + "r", + "s", + "t", + "a", + "d", + "h", + "u", + "l", + "g", + "o", + "c", + "m", + "b", + "f", + "k", + "w", + "z", + "p", + "v", + "ü", + "ä", + "ö", + "j", + ], + "French": [ + "e", + "a", + "s", + "n", + "i", + "t", + "r", + "l", + "u", + "o", + "d", + "c", + "p", + "m", + "é", + "v", + "g", + "f", + "b", + "h", + "q", + "à", + "x", + "è", + "y", + "j", + ], + "Dutch": [ + "e", + "n", + "a", + "i", + "r", + "t", + "o", + "d", + "s", + "l", + "g", + "h", + "v", + "m", + "u", + "k", + "c", + "p", + "b", + "w", + "j", + "z", + "f", + "y", + "x", + "ë", + ], + "Italian": [ + "e", + "i", + "a", + "o", + "n", + "l", + "t", + "r", + "s", + "c", + "d", + "u", + "p", + "m", + "g", + "v", + "f", + "b", + "z", + "h", + "q", + "è", + "à", + "k", + "y", + "ò", + ], + "Polish": [ + "a", + "i", + "o", + "e", + "n", + "r", + "z", + "w", + "s", + "c", + "t", + "k", + "y", + "d", + "p", + "m", + "u", + "l", + "j", + "ł", + "g", + "b", + "h", + "ą", + "ę", + "ó", + ], + "Spanish": [ + "e", + "a", + "o", + "n", + "s", + "r", + "i", + "l", + "d", + "t", + "c", + "u", + "m", + "p", + "b", + "g", + "v", + "f", + "y", + "ó", + "h", + "q", + "í", + "j", + "z", + "á", + ], + "Russian": [ + "о", + "а", + "е", + "и", + "н", + "с", + "т", + "р", + "в", + "л", + "к", + "м", + "д", + "п", + "у", + "г", + "я", + "ы", + "з", + "б", + "й", + "ь", + "ч", + "х", + "ж", + "ц", + ], + # Jap-Kanji + "Japanese": [ + "人", + "一", + "大", + "亅", + "丁", + "丨", + "竹", + "笑", + "口", + "日", + "今", + "二", + "彳", + "行", + "十", + "土", + "丶", + "寸", + "寺", + "時", + "乙", + "丿", + "乂", + "气", + "気", + "冂", + "巾", + "亠", + "市", + "目", + "儿", + "見", + "八", + "小", + "凵", + "県", + "月", + "彐", + "門", + "間", + "木", + "東", + "山", + "出", + "本", + "中", + "刀", + "分", + "耳", + "又", + "取", + "最", + "言", + "田", + "心", + "思", + "刂", + "前", + "京", + "尹", + "事", + "生", + "厶", + "云", + "会", + "未", + "来", + "白", + "冫", + "楽", + "灬", + "馬", + "尸", + "尺", + "駅", + "明", + 
"耂", + "者", + "了", + "阝", + "都", + "高", + "卜", + "占", + "厂", + "广", + "店", + "子", + "申", + "奄", + "亻", + "俺", + "上", + "方", + "冖", + "学", + "衣", + "艮", + "食", + "自", + ], + # Jap-Katakana + "Japanese—": [ + "ー", + "ン", + "ス", + "・", + "ル", + "ト", + "リ", + "イ", + "ア", + "ラ", + "ッ", + "ク", + "ド", + "シ", + "レ", + "ジ", + "タ", + "フ", + "ロ", + "カ", + "テ", + "マ", + "ィ", + "グ", + "バ", + "ム", + "プ", + "オ", + "コ", + "デ", + "ニ", + "ウ", + "メ", + "サ", + "ビ", + "ナ", + "ブ", + "ャ", + "エ", + "ュ", + "チ", + "キ", + "ズ", + "ダ", + "パ", + "ミ", + "ェ", + "ョ", + "ハ", + "セ", + "ベ", + "ガ", + "モ", + "ツ", + "ネ", + "ボ", + "ソ", + "ノ", + "ァ", + "ヴ", + "ワ", + "ポ", + "ペ", + "ピ", + "ケ", + "ゴ", + "ギ", + "ザ", + "ホ", + "ゲ", + "ォ", + "ヤ", + "ヒ", + "ユ", + "ヨ", + "ヘ", + "ゼ", + "ヌ", + "ゥ", + "ゾ", + "ヶ", + "ヂ", + "ヲ", + "ヅ", + "ヵ", + "ヱ", + "ヰ", + "ヮ", + "ヽ", + "゠", + "ヾ", + "ヷ", + "ヿ", + "ヸ", + "ヹ", + "ヺ", + ], + # Jap-Hiragana + "Japanese——": [ + "の", + "に", + "る", + "た", + "と", + "は", + "し", + "い", + "を", + "で", + "て", + "が", + "な", + "れ", + "か", + "ら", + "さ", + "っ", + "り", + "す", + "あ", + "も", + "こ", + "ま", + "う", + "く", + "よ", + "き", + "ん", + "め", + "お", + "け", + "そ", + "つ", + "だ", + "や", + "え", + "ど", + "わ", + "ち", + "み", + "せ", + "じ", + "ば", + "へ", + "び", + "ず", + "ろ", + "ほ", + "げ", + "む", + "べ", + "ひ", + "ょ", + "ゆ", + "ぶ", + "ご", + "ゃ", + "ね", + "ふ", + "ぐ", + "ぎ", + "ぼ", + "ゅ", + "づ", + "ざ", + "ぞ", + "ぬ", + "ぜ", + "ぱ", + "ぽ", + "ぷ", + "ぴ", + "ぃ", + "ぁ", + "ぇ", + "ぺ", + "ゞ", + "ぢ", + "ぉ", + "ぅ", + "ゐ", + "ゝ", + "ゑ", + "゛", + "゜", + "ゎ", + "ゔ", + "゚", + "ゟ", + "゙", + "ゕ", + "ゖ", + ], + "Portuguese": [ + "a", + "e", + "o", + "s", + "i", + "r", + "d", + "n", + "t", + "m", + "u", + "c", + "l", + "p", + "g", + "v", + "b", + "f", + "h", + "ã", + "q", + "é", + "ç", + "á", + "z", + "í", + ], + "Swedish": [ + "e", + "a", + "n", + "r", + "t", + "s", + "i", + "l", + "d", + "o", + "m", + "k", + "g", + "v", + "h", + "f", + "u", + "p", + "ä", + "c", + "b", + "ö", + "å", + "y", + "j", + "x", + ], + "Chinese": [ + "的", + "一", + "是", + "不", + "了", + "在", + "人", + "有", + "我", + "他", + "这", + "个", + "们", + "中", + "来", + "上", + "大", + "为", + "和", + "国", + "地", + "到", + "以", + "说", + "时", + "要", + "就", + "出", + "会", + "可", + "也", + "你", + "对", + "生", + "能", + "而", + "子", + "那", + "得", + "于", + "着", + "下", + "自", + "之", + "年", + "过", + "发", + "后", + "作", + "里", + "用", + "道", + "行", + "所", + "然", + "家", + "种", + "事", + "成", + "方", + "多", + "经", + "么", + "去", + "法", + "学", + "如", + "都", + "同", + "现", + "当", + "没", + "动", + "面", + "起", + "看", + "定", + "天", + "分", + "还", + "进", + "好", + "小", + "部", + "其", + "些", + "主", + "样", + "理", + "心", + "她", + "本", + "前", + "开", + "但", + "因", + "只", + "从", + "想", + "实", + ], + "Ukrainian": [ + "о", + "а", + "н", + "і", + "и", + "р", + "в", + "т", + "е", + "с", + "к", + "л", + "у", + "д", + "м", + "п", + "з", + "я", + "ь", + "б", + "г", + "й", + "ч", + "х", + "ц", + "ї", + ], + "Norwegian": [ + "e", + "r", + "n", + "t", + "a", + "s", + "i", + "o", + "l", + "d", + "g", + "k", + "m", + "v", + "f", + "p", + "u", + "b", + "h", + "å", + "y", + "j", + "ø", + "c", + "æ", + "w", + ], + "Finnish": [ + "a", + "i", + "n", + "t", + "e", + "s", + "l", + "o", + "u", + "k", + "ä", + "m", + "r", + "v", + "j", + "h", + "p", + "y", + "d", + "ö", + "g", + "c", + "b", + "f", + "w", + "z", + ], + "Vietnamese": [ + "n", + "h", + "t", + "i", + "c", + "g", + "a", + "o", + "u", + "m", + "l", + "r", + "à", + "đ", + "s", + "e", + "v", + "p", + "b", + "y", + "ư", + "d", + "á", + "k", + "ộ", + "ế", + ], + "Czech": [ + "o", + "e", + "a", + 
"n", + "t", + "s", + "i", + "l", + "v", + "r", + "k", + "d", + "u", + "m", + "p", + "í", + "c", + "h", + "z", + "á", + "y", + "j", + "b", + "ě", + "é", + "ř", + ], + "Hungarian": [ + "e", + "a", + "t", + "l", + "s", + "n", + "k", + "r", + "i", + "o", + "z", + "á", + "é", + "g", + "m", + "b", + "y", + "v", + "d", + "h", + "u", + "p", + "j", + "ö", + "f", + "c", + ], + "Korean": [ + "이", + "다", + "에", + "의", + "는", + "로", + "하", + "을", + "가", + "고", + "지", + "서", + "한", + "은", + "기", + "으", + "년", + "대", + "사", + "시", + "를", + "리", + "도", + "인", + "스", + "일", + ], + "Indonesian": [ + "a", + "n", + "e", + "i", + "r", + "t", + "u", + "s", + "d", + "k", + "m", + "l", + "g", + "p", + "b", + "o", + "h", + "y", + "j", + "c", + "w", + "f", + "v", + "z", + "x", + "q", + ], + "Turkish": [ + "a", + "e", + "i", + "n", + "r", + "l", + "ı", + "k", + "d", + "t", + "s", + "m", + "y", + "u", + "o", + "b", + "ü", + "ş", + "v", + "g", + "z", + "h", + "c", + "p", + "ç", + "ğ", + ], + "Romanian": [ + "e", + "i", + "a", + "r", + "n", + "t", + "u", + "l", + "o", + "c", + "s", + "d", + "p", + "m", + "ă", + "f", + "v", + "î", + "g", + "b", + "ș", + "ț", + "z", + "h", + "â", + "j", + ], + "Farsi": [ + "ا", + "ی", + "ر", + "د", + "ن", + "ه", + "و", + "م", + "ت", + "ب", + "س", + "ل", + "ک", + "ش", + "ز", + "ف", + "گ", + "ع", + "خ", + "ق", + "ج", + "آ", + "پ", + "ح", + "ط", + "ص", + ], + "Arabic": [ + "ا", + "ل", + "ي", + "م", + "و", + "ن", + "ر", + "ت", + "ب", + "ة", + "ع", + "د", + "س", + "ف", + "ه", + "ك", + "ق", + "أ", + "ح", + "ج", + "ش", + "ط", + "ص", + "ى", + "خ", + "إ", + ], + "Danish": [ + "e", + "r", + "n", + "t", + "a", + "i", + "s", + "d", + "l", + "o", + "g", + "m", + "k", + "f", + "v", + "u", + "b", + "h", + "p", + "å", + "y", + "ø", + "æ", + "c", + "j", + "w", + ], + "Serbian": [ + "а", + "и", + "о", + "е", + "н", + "р", + "с", + "у", + "т", + "к", + "ј", + "в", + "д", + "м", + "п", + "л", + "г", + "з", + "б", + "a", + "i", + "e", + "o", + "n", + "ц", + "ш", + ], + "Lithuanian": [ + "i", + "a", + "s", + "o", + "r", + "e", + "t", + "n", + "u", + "k", + "m", + "l", + "p", + "v", + "d", + "j", + "g", + "ė", + "b", + "y", + "ų", + "š", + "ž", + "c", + "ą", + "į", + ], + "Slovene": [ + "e", + "a", + "i", + "o", + "n", + "r", + "s", + "l", + "t", + "j", + "v", + "k", + "d", + "p", + "m", + "u", + "z", + "b", + "g", + "h", + "č", + "c", + "š", + "ž", + "f", + "y", + ], + "Slovak": [ + "o", + "a", + "e", + "n", + "i", + "r", + "v", + "t", + "s", + "l", + "k", + "d", + "m", + "p", + "u", + "c", + "h", + "j", + "b", + "z", + "á", + "y", + "ý", + "í", + "č", + "é", + ], + "Hebrew": [ + "י", + "ו", + "ה", + "ל", + "ר", + "ב", + "ת", + "מ", + "א", + "ש", + "נ", + "ע", + "ם", + "ד", + "ק", + "ח", + "פ", + "ס", + "כ", + "ג", + "ט", + "צ", + "ן", + "ז", + "ך", + ], + "Bulgarian": [ + "а", + "и", + "о", + "е", + "н", + "т", + "р", + "с", + "в", + "л", + "к", + "д", + "п", + "м", + "з", + "г", + "я", + "ъ", + "у", + "б", + "ч", + "ц", + "й", + "ж", + "щ", + "х", + ], + "Croatian": [ + "a", + "i", + "o", + "e", + "n", + "r", + "j", + "s", + "t", + "u", + "k", + "l", + "v", + "d", + "m", + "p", + "g", + "z", + "b", + "c", + "č", + "h", + "š", + "ž", + "ć", + "f", + ], + "Hindi": [ + "क", + "र", + "स", + "न", + "त", + "म", + "ह", + "प", + "य", + "ल", + "व", + "ज", + "द", + "ग", + "ब", + "श", + "ट", + "अ", + "ए", + "थ", + "भ", + "ड", + "च", + "ध", + "ष", + "इ", + ], + "Estonian": [ + "a", + "i", + "e", + "s", + "t", + "l", + "u", + "n", + "o", + "k", + "r", + "d", + "m", + "v", + "g", + "p", + "j", + "h", + "ä", + "b", + "õ", 
+ "ü", + "f", + "c", + "ö", + "y", + ], + "Thai": [ + "า", + "น", + "ร", + "อ", + "ก", + "เ", + "ง", + "ม", + "ย", + "ล", + "ว", + "ด", + "ท", + "ส", + "ต", + "ะ", + "ป", + "บ", + "ค", + "ห", + "แ", + "จ", + "พ", + "ช", + "ข", + "ใ", + ], + "Greek": [ + "α", + "τ", + "ο", + "ι", + "ε", + "ν", + "ρ", + "σ", + "κ", + "η", + "π", + "ς", + "υ", + "μ", + "λ", + "ί", + "ό", + "ά", + "γ", + "έ", + "δ", + "ή", + "ω", + "χ", + "θ", + "ύ", + ], + "Tamil": [ + "க", + "த", + "ப", + "ட", + "ர", + "ம", + "ல", + "ன", + "வ", + "ற", + "ய", + "ள", + "ச", + "ந", + "இ", + "ண", + "அ", + "ஆ", + "ழ", + "ங", + "எ", + "உ", + "ஒ", + "ஸ", + ], + "Kazakh": [ + "а", + "ы", + "е", + "н", + "т", + "р", + "л", + "і", + "д", + "с", + "м", + "қ", + "к", + "о", + "б", + "и", + "у", + "ғ", + "ж", + "ң", + "з", + "ш", + "й", + "п", + "г", + "ө", + ], +} + +LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES) diff --git a/venv/Lib/site-packages/charset_normalizer/legacy.py b/venv/Lib/site-packages/charset_normalizer/legacy.py new file mode 100644 index 00000000..e221beca --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/legacy.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any +from warnings import warn + +from .api import from_bytes +from .constant import CHARDET_CORRESPONDENCE + +# TODO: remove this check when dropping Python 3.7 support +if TYPE_CHECKING: + from typing_extensions import TypedDict + + class ResultDict(TypedDict): + encoding: str | None + language: str + confidence: float | None + + +def detect( + byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any +) -> ResultDict: + """ + chardet legacy method + Detect the encoding of the given byte string. It should be mostly backward-compatible. + Encoding name will match Chardet own writing whenever possible. (Not on encoding name unsupported by it) + This function is deprecated and should be used to migrate your project easily, consult the documentation for + further information. Not planned for removal. + + :param byte_str: The byte sequence to examine. + :param should_rename_legacy: Should we rename legacy encodings + to their more modern equivalents? + """ + if len(kwargs): + warn( + f"charset-normalizer disregard arguments '{','.join(list(kwargs.keys()))}' in legacy function detect()" + ) + + if not isinstance(byte_str, (bytearray, bytes)): + raise TypeError( # pragma: nocover + f"Expected object of type bytes or bytearray, got: {type(byte_str)}" + ) + + if isinstance(byte_str, bytearray): + byte_str = bytes(byte_str) + + r = from_bytes(byte_str).best() + + encoding = r.encoding if r is not None else None + language = r.language if r is not None and r.language != "Unknown" else "" + confidence = 1.0 - r.chaos if r is not None else None + + # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig get stripped in the detection/normalization process + # but chardet does return 'utf-8-sig' and it is a valid codec name. 
+    if r is not None and encoding == "utf_8" and r.bom:
+        encoding += "_sig"
+
+    if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE:
+        encoding = CHARDET_CORRESPONDENCE[encoding]
+
+    return {
+        "encoding": encoding,
+        "language": language,
+        "confidence": confidence,
+    }
diff --git a/venv/Lib/site-packages/charset_normalizer/md.cp312-win_amd64.pyd b/venv/Lib/site-packages/charset_normalizer/md.cp312-win_amd64.pyd
new file mode 100644
index 00000000..4c6c2cf5
Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/md.cp312-win_amd64.pyd differ
diff --git a/venv/Lib/site-packages/charset_normalizer/md.py b/venv/Lib/site-packages/charset_normalizer/md.py
new file mode 100644
index 00000000..12ce024b
--- /dev/null
+++ b/venv/Lib/site-packages/charset_normalizer/md.py
@@ -0,0 +1,635 @@
+from __future__ import annotations
+
+from functools import lru_cache
+from logging import getLogger
+
+from .constant import (
+    COMMON_SAFE_ASCII_CHARACTERS,
+    TRACE,
+    UNICODE_SECONDARY_RANGE_KEYWORD,
+)
+from .utils import (
+    is_accentuated,
+    is_arabic,
+    is_arabic_isolated_form,
+    is_case_variable,
+    is_cjk,
+    is_emoticon,
+    is_hangul,
+    is_hiragana,
+    is_katakana,
+    is_latin,
+    is_punctuation,
+    is_separator,
+    is_symbol,
+    is_thai,
+    is_unprintable,
+    remove_accent,
+    unicode_range,
+    is_cjk_uncommon,
+)
+
+
+class MessDetectorPlugin:
+    """
+    Base abstract class used for mess detection plugins.
+    All detectors MUST extend and implement given methods.
+    """
+
+    def eligible(self, character: str) -> bool:
+        """
+        Determine if given character should be fed in.
+        """
+        raise NotImplementedError  # pragma: nocover
+
+    def feed(self, character: str) -> None:
+        """
+        The main routine to be executed upon character.
+        Insert the logic in which the text would be considered chaotic.
+        """
+        raise NotImplementedError  # pragma: nocover
+
+    def reset(self) -> None:  # pragma: no cover
+        """
+        Permit to reset the plugin to the initial state.
+        """
+        raise NotImplementedError
+
+    @property
+    def ratio(self) -> float:
+        """
+        Compute the chaos ratio based on what your feed() has seen.
+        Must NOT be lower than 0.; there is no upper restriction.
+ """ + raise NotImplementedError # pragma: nocover + + +class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._punctuation_count: int = 0 + self._symbol_count: int = 0 + self._character_count: int = 0 + + self._last_printable_char: str | None = None + self._frenzy_symbol_in_word: bool = False + + def eligible(self, character: str) -> bool: + return character.isprintable() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if ( + character != self._last_printable_char + and character not in COMMON_SAFE_ASCII_CHARACTERS + ): + if is_punctuation(character): + self._punctuation_count += 1 + elif ( + character.isdigit() is False + and is_symbol(character) + and is_emoticon(character) is False + ): + self._symbol_count += 2 + + self._last_printable_char = character + + def reset(self) -> None: # Abstract + self._punctuation_count = 0 + self._character_count = 0 + self._symbol_count = 0 + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + ratio_of_punctuation: float = ( + self._punctuation_count + self._symbol_count + ) / self._character_count + + return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0 + + +class TooManyAccentuatedPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._character_count: int = 0 + self._accentuated_count: int = 0 + + def eligible(self, character: str) -> bool: + return character.isalpha() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if is_accentuated(character): + self._accentuated_count += 1 + + def reset(self) -> None: # Abstract + self._character_count = 0 + self._accentuated_count = 0 + + @property + def ratio(self) -> float: + if self._character_count < 8: + return 0.0 + + ratio_of_accentuation: float = self._accentuated_count / self._character_count + return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0 + + +class UnprintablePlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._unprintable_count: int = 0 + self._character_count: int = 0 + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + if is_unprintable(character): + self._unprintable_count += 1 + self._character_count += 1 + + def reset(self) -> None: # Abstract + self._unprintable_count = 0 + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return (self._unprintable_count * 8) / self._character_count + + +class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._successive_count: int = 0 + self._character_count: int = 0 + + self._last_latin_character: str | None = None + + def eligible(self, character: str) -> bool: + return character.isalpha() and is_latin(character) + + def feed(self, character: str) -> None: + self._character_count += 1 + if ( + self._last_latin_character is not None + and is_accentuated(character) + and is_accentuated(self._last_latin_character) + ): + if character.isupper() and self._last_latin_character.isupper(): + self._successive_count += 1 + # Worse if its the same char duplicated with different accent. 
+ if remove_accent(character) == remove_accent(self._last_latin_character): + self._successive_count += 1 + self._last_latin_character = character + + def reset(self) -> None: # Abstract + self._successive_count = 0 + self._character_count = 0 + self._last_latin_character = None + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return (self._successive_count * 2) / self._character_count + + +class SuspiciousRange(MessDetectorPlugin): + def __init__(self) -> None: + self._suspicious_successive_range_count: int = 0 + self._character_count: int = 0 + self._last_printable_seen: str | None = None + + def eligible(self, character: str) -> bool: + return character.isprintable() + + def feed(self, character: str) -> None: + self._character_count += 1 + + if ( + character.isspace() + or is_punctuation(character) + or character in COMMON_SAFE_ASCII_CHARACTERS + ): + self._last_printable_seen = None + return + + if self._last_printable_seen is None: + self._last_printable_seen = character + return + + unicode_range_a: str | None = unicode_range(self._last_printable_seen) + unicode_range_b: str | None = unicode_range(character) + + if is_suspiciously_successive_range(unicode_range_a, unicode_range_b): + self._suspicious_successive_range_count += 1 + + self._last_printable_seen = character + + def reset(self) -> None: # Abstract + self._character_count = 0 + self._suspicious_successive_range_count = 0 + self._last_printable_seen = None + + @property + def ratio(self) -> float: + if self._character_count <= 13: + return 0.0 + + ratio_of_suspicious_range_usage: float = ( + self._suspicious_successive_range_count * 2 + ) / self._character_count + + return ratio_of_suspicious_range_usage + + +class SuperWeirdWordPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._word_count: int = 0 + self._bad_word_count: int = 0 + self._foreign_long_count: int = 0 + + self._is_current_word_bad: bool = False + self._foreign_long_watch: bool = False + + self._character_count: int = 0 + self._bad_character_count: int = 0 + + self._buffer: str = "" + self._buffer_accent_count: int = 0 + self._buffer_glyph_count: int = 0 + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + if character.isalpha(): + self._buffer += character + if is_accentuated(character): + self._buffer_accent_count += 1 + if ( + self._foreign_long_watch is False + and (is_latin(character) is False or is_accentuated(character)) + and is_cjk(character) is False + and is_hangul(character) is False + and is_katakana(character) is False + and is_hiragana(character) is False + and is_thai(character) is False + ): + self._foreign_long_watch = True + if ( + is_cjk(character) + or is_hangul(character) + or is_katakana(character) + or is_hiragana(character) + or is_thai(character) + ): + self._buffer_glyph_count += 1 + return + if not self._buffer: + return + if ( + character.isspace() or is_punctuation(character) or is_separator(character) + ) and self._buffer: + self._word_count += 1 + buffer_length: int = len(self._buffer) + + self._character_count += buffer_length + + if buffer_length >= 4: + if self._buffer_accent_count / buffer_length >= 0.5: + self._is_current_word_bad = True + # Word/Buffer ending with an upper case accentuated letter are so rare, + # that we will consider them all as suspicious. Same weight as foreign_long suspicious. 
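+                # e.g. "parlÉ" ends with an upper-case accentuated letter inside a
+                # mixed-case word; a fully upper-cased word such as "CAFÉ" is exempted
+                # by the all(...isupper()) guard below.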
+ elif ( + is_accentuated(self._buffer[-1]) + and self._buffer[-1].isupper() + and all(_.isupper() for _ in self._buffer) is False + ): + self._foreign_long_count += 1 + self._is_current_word_bad = True + elif self._buffer_glyph_count == 1: + self._is_current_word_bad = True + self._foreign_long_count += 1 + if buffer_length >= 24 and self._foreign_long_watch: + camel_case_dst = [ + i + for c, i in zip(self._buffer, range(0, buffer_length)) + if c.isupper() + ] + probable_camel_cased: bool = False + + if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3): + probable_camel_cased = True + + if not probable_camel_cased: + self._foreign_long_count += 1 + self._is_current_word_bad = True + + if self._is_current_word_bad: + self._bad_word_count += 1 + self._bad_character_count += len(self._buffer) + self._is_current_word_bad = False + + self._foreign_long_watch = False + self._buffer = "" + self._buffer_accent_count = 0 + self._buffer_glyph_count = 0 + elif ( + character not in {"<", ">", "-", "=", "~", "|", "_"} + and character.isdigit() is False + and is_symbol(character) + ): + self._is_current_word_bad = True + self._buffer += character + + def reset(self) -> None: # Abstract + self._buffer = "" + self._is_current_word_bad = False + self._foreign_long_watch = False + self._bad_word_count = 0 + self._word_count = 0 + self._character_count = 0 + self._bad_character_count = 0 + self._foreign_long_count = 0 + + @property + def ratio(self) -> float: + if self._word_count <= 10 and self._foreign_long_count == 0: + return 0.0 + + return self._bad_character_count / self._character_count + + +class CjkUncommonPlugin(MessDetectorPlugin): + """ + Detect messy CJK text that probably means nothing. + """ + + def __init__(self) -> None: + self._character_count: int = 0 + self._uncommon_count: int = 0 + + def eligible(self, character: str) -> bool: + return is_cjk(character) + + def feed(self, character: str) -> None: + self._character_count += 1 + + if is_cjk_uncommon(character): + self._uncommon_count += 1 + return + + def reset(self) -> None: # Abstract + self._character_count = 0 + self._uncommon_count = 0 + + @property + def ratio(self) -> float: + if self._character_count < 8: + return 0.0 + + uncommon_form_usage: float = self._uncommon_count / self._character_count + + # we can be pretty sure it's garbage when uncommon characters are widely + # used. otherwise it could just be traditional chinese for example. 
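+        # e.g. uncommon_form_usage == 0.6 contributes only 0.06 to the chaos score;
+        # at or below 0.5 the text is presumed legitimate and scores 0.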
+ return uncommon_form_usage / 10 if uncommon_form_usage > 0.5 else 0.0 + + +class ArchaicUpperLowerPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._buf: bool = False + + self._character_count_since_last_sep: int = 0 + + self._successive_upper_lower_count: int = 0 + self._successive_upper_lower_count_final: int = 0 + + self._character_count: int = 0 + + self._last_alpha_seen: str | None = None + self._current_ascii_only: bool = True + + def eligible(self, character: str) -> bool: + return True + + def feed(self, character: str) -> None: + is_concerned = character.isalpha() and is_case_variable(character) + chunk_sep = is_concerned is False + + if chunk_sep and self._character_count_since_last_sep > 0: + if ( + self._character_count_since_last_sep <= 64 + and character.isdigit() is False + and self._current_ascii_only is False + ): + self._successive_upper_lower_count_final += ( + self._successive_upper_lower_count + ) + + self._successive_upper_lower_count = 0 + self._character_count_since_last_sep = 0 + self._last_alpha_seen = None + self._buf = False + self._character_count += 1 + self._current_ascii_only = True + + return + + if self._current_ascii_only is True and character.isascii() is False: + self._current_ascii_only = False + + if self._last_alpha_seen is not None: + if (character.isupper() and self._last_alpha_seen.islower()) or ( + character.islower() and self._last_alpha_seen.isupper() + ): + if self._buf is True: + self._successive_upper_lower_count += 2 + self._buf = False + else: + self._buf = True + else: + self._buf = False + + self._character_count += 1 + self._character_count_since_last_sep += 1 + self._last_alpha_seen = character + + def reset(self) -> None: # Abstract + self._character_count = 0 + self._character_count_since_last_sep = 0 + self._successive_upper_lower_count = 0 + self._successive_upper_lower_count_final = 0 + self._last_alpha_seen = None + self._buf = False + self._current_ascii_only = True + + @property + def ratio(self) -> float: + if self._character_count == 0: + return 0.0 + + return self._successive_upper_lower_count_final / self._character_count + + +class ArabicIsolatedFormPlugin(MessDetectorPlugin): + def __init__(self) -> None: + self._character_count: int = 0 + self._isolated_form_count: int = 0 + + def reset(self) -> None: # Abstract + self._character_count = 0 + self._isolated_form_count = 0 + + def eligible(self, character: str) -> bool: + return is_arabic(character) + + def feed(self, character: str) -> None: + self._character_count += 1 + + if is_arabic_isolated_form(character): + self._isolated_form_count += 1 + + @property + def ratio(self) -> float: + if self._character_count < 8: + return 0.0 + + isolated_form_usage: float = self._isolated_form_count / self._character_count + + return isolated_form_usage + + +@lru_cache(maxsize=1024) +def is_suspiciously_successive_range( + unicode_range_a: str | None, unicode_range_b: str | None +) -> bool: + """ + Determine if two Unicode range seen next to each other can be considered as suspicious. + """ + if unicode_range_a is None or unicode_range_b is None: + return True + + if unicode_range_a == unicode_range_b: + return False + + if "Latin" in unicode_range_a and "Latin" in unicode_range_b: + return False + + if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b: + return False + + # Latin characters can be accompanied with a combining diacritical mark + # eg. Vietnamese. 
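+    # e.g. "Basic Latin" followed by "Combining Diacritical Marks" is a normal pairing.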
+ if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and ( + "Combining" in unicode_range_a or "Combining" in unicode_range_b + ): + return False + + keywords_range_a, keywords_range_b = ( + unicode_range_a.split(" "), + unicode_range_b.split(" "), + ) + + for el in keywords_range_a: + if el in UNICODE_SECONDARY_RANGE_KEYWORD: + continue + if el in keywords_range_b: + return False + + # Japanese Exception + range_a_jp_chars, range_b_jp_chars = ( + unicode_range_a + in ( + "Hiragana", + "Katakana", + ), + unicode_range_b in ("Hiragana", "Katakana"), + ) + if (range_a_jp_chars or range_b_jp_chars) and ( + "CJK" in unicode_range_a or "CJK" in unicode_range_b + ): + return False + if range_a_jp_chars and range_b_jp_chars: + return False + + if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b: + if "CJK" in unicode_range_a or "CJK" in unicode_range_b: + return False + if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": + return False + + # Chinese/Japanese use dedicated range for punctuation and/or separators. + if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or ( + unicode_range_a in ["Katakana", "Hiragana"] + and unicode_range_b in ["Katakana", "Hiragana"] + ): + if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b: + return False + if "Forms" in unicode_range_a or "Forms" in unicode_range_b: + return False + if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": + return False + + return True + + +@lru_cache(maxsize=2048) +def mess_ratio( + decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False +) -> float: + """ + Compute a mess ratio given a decoded bytes sequence. The maximum threshold does stop the computation earlier. + """ + + detectors: list[MessDetectorPlugin] = [ + md_class() for md_class in MessDetectorPlugin.__subclasses__() + ] + + length: int = len(decoded_sequence) + 1 + + mean_mess_ratio: float = 0.0 + + if length < 512: + intermediary_mean_mess_ratio_calc: int = 32 + elif length <= 1024: + intermediary_mean_mess_ratio_calc = 64 + else: + intermediary_mean_mess_ratio_calc = 128 + + for character, index in zip(decoded_sequence + "\n", range(length)): + for detector in detectors: + if detector.eligible(character): + detector.feed(character) + + if ( + index > 0 and index % intermediary_mean_mess_ratio_calc == 0 + ) or index == length - 1: + mean_mess_ratio = sum(dt.ratio for dt in detectors) + + if mean_mess_ratio >= maximum_threshold: + break + + if debug: + logger = getLogger("charset_normalizer") + + logger.log( + TRACE, + "Mess-detector extended-analysis start. 
" + f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} " + f"maximum_threshold={maximum_threshold}", + ) + + if len(decoded_sequence) > 16: + logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}") + logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}") + + for dt in detectors: + logger.log(TRACE, f"{dt.__class__}: {dt.ratio}") + + return round(mean_mess_ratio, 3) diff --git a/venv/Lib/site-packages/charset_normalizer/md__mypyc.cp312-win_amd64.pyd b/venv/Lib/site-packages/charset_normalizer/md__mypyc.cp312-win_amd64.pyd new file mode 100644 index 00000000..18e8e829 Binary files /dev/null and b/venv/Lib/site-packages/charset_normalizer/md__mypyc.cp312-win_amd64.pyd differ diff --git a/venv/Lib/site-packages/charset_normalizer/models.py b/venv/Lib/site-packages/charset_normalizer/models.py new file mode 100644 index 00000000..1042758f --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/models.py @@ -0,0 +1,360 @@ +from __future__ import annotations + +from encodings.aliases import aliases +from hashlib import sha256 +from json import dumps +from re import sub +from typing import Any, Iterator, List, Tuple + +from .constant import RE_POSSIBLE_ENCODING_INDICATION, TOO_BIG_SEQUENCE +from .utils import iana_name, is_multi_byte_encoding, unicode_range + + +class CharsetMatch: + def __init__( + self, + payload: bytes, + guessed_encoding: str, + mean_mess_ratio: float, + has_sig_or_bom: bool, + languages: CoherenceMatches, + decoded_payload: str | None = None, + preemptive_declaration: str | None = None, + ): + self._payload: bytes = payload + + self._encoding: str = guessed_encoding + self._mean_mess_ratio: float = mean_mess_ratio + self._languages: CoherenceMatches = languages + self._has_sig_or_bom: bool = has_sig_or_bom + self._unicode_ranges: list[str] | None = None + + self._leaves: list[CharsetMatch] = [] + self._mean_coherence_ratio: float = 0.0 + + self._output_payload: bytes | None = None + self._output_encoding: str | None = None + + self._string: str | None = decoded_payload + + self._preemptive_declaration: str | None = preemptive_declaration + + def __eq__(self, other: object) -> bool: + if not isinstance(other, CharsetMatch): + if isinstance(other, str): + return iana_name(other) == self.encoding + return False + return self.encoding == other.encoding and self.fingerprint == other.fingerprint + + def __lt__(self, other: object) -> bool: + """ + Implemented to make sorted available upon CharsetMatches items. + """ + if not isinstance(other, CharsetMatch): + raise ValueError + + chaos_difference: float = abs(self.chaos - other.chaos) + coherence_difference: float = abs(self.coherence - other.coherence) + + # Below 1% difference --> Use Coherence + if chaos_difference < 0.01 and coherence_difference > 0.02: + return self.coherence > other.coherence + elif chaos_difference < 0.01 and coherence_difference <= 0.02: + # When having a difficult decision, use the result that decoded as many multi-byte as possible. + # preserve RAM usage! 
+            if len(self._payload) >= TOO_BIG_SEQUENCE:
+                return self.chaos < other.chaos
+            return self.multi_byte_usage > other.multi_byte_usage
+
+        return self.chaos < other.chaos
+
+    @property
+    def multi_byte_usage(self) -> float:
+        return 1.0 - (len(str(self)) / len(self.raw))
+
+    def __str__(self) -> str:
+        # Lazy Str Loading
+        if self._string is None:
+            self._string = str(self._payload, self._encoding, "strict")
+        return self._string
+
+    def __repr__(self) -> str:
+        return f"<CharsetMatch '{self.encoding}' bytes({self.fingerprint})>"
+
+    def add_submatch(self, other: CharsetMatch) -> None:
+        if not isinstance(other, CharsetMatch) or other == self:
+            raise ValueError(
+                "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
+                    other.__class__
+                )
+            )
+
+        other._string = None  # Unload RAM usage; dirty trick.
+        self._leaves.append(other)
+
+    @property
+    def encoding(self) -> str:
+        return self._encoding
+
+    @property
+    def encoding_aliases(self) -> list[str]:
+        """
+        An encoding is known by many names. This helps, for example, when searching for IBM855 while it's listed as CP855.
+        """
+        also_known_as: list[str] = []
+        for u, p in aliases.items():
+            if self.encoding == u:
+                also_known_as.append(p)
+            elif self.encoding == p:
+                also_known_as.append(u)
+        return also_known_as
+
+    @property
+    def bom(self) -> bool:
+        return self._has_sig_or_bom
+
+    @property
+    def byte_order_mark(self) -> bool:
+        return self._has_sig_or_bom
+
+    @property
+    def languages(self) -> list[str]:
+        """
+        Return the complete list of possible languages found in the decoded sequence.
+        Usually not really useful. The returned list may be empty even if the 'language' property returns something != 'Unknown'.
+        """
+        return [e[0] for e in self._languages]
+
+    @property
+    def language(self) -> str:
+        """
+        Most probable language found in the decoded sequence. If none were detected or inferred, the property will return
+        "Unknown".
+        """
+        if not self._languages:
+            # Trying to infer the language based on the given encoding
+            # It's either English or we should not commit ourselves in certain cases.
+            if "ascii" in self.could_be_from_charset:
+                return "English"
+
+            # doing it there to avoid circular import
+            from charset_normalizer.cd import encoding_languages, mb_encoding_languages
+
+            languages = (
+                mb_encoding_languages(self.encoding)
+                if is_multi_byte_encoding(self.encoding)
+                else encoding_languages(self.encoding)
+            )
+
+            if len(languages) == 0 or "Latin Based" in languages:
+                return "Unknown"
+
+            return languages[0]
+
+        return self._languages[0][0]
+
+    @property
+    def chaos(self) -> float:
+        return self._mean_mess_ratio
+
+    @property
+    def coherence(self) -> float:
+        if not self._languages:
+            return 0.0
+        return self._languages[0][1]
+
+    @property
+    def percent_chaos(self) -> float:
+        return round(self.chaos * 100, ndigits=3)
+
+    @property
+    def percent_coherence(self) -> float:
+        return round(self.coherence * 100, ndigits=3)
+
+    @property
+    def raw(self) -> bytes:
+        """
+        Original untouched bytes.
+ """ + return self._payload + + @property + def submatch(self) -> list[CharsetMatch]: + return self._leaves + + @property + def has_submatch(self) -> bool: + return len(self._leaves) > 0 + + @property + def alphabets(self) -> list[str]: + if self._unicode_ranges is not None: + return self._unicode_ranges + # list detected ranges + detected_ranges: list[str | None] = [unicode_range(char) for char in str(self)] + # filter and sort + self._unicode_ranges = sorted(list({r for r in detected_ranges if r})) + return self._unicode_ranges + + @property + def could_be_from_charset(self) -> list[str]: + """ + The complete list of encoding that output the exact SAME str result and therefore could be the originating + encoding. + This list does include the encoding available in property 'encoding'. + """ + return [self._encoding] + [m.encoding for m in self._leaves] + + def output(self, encoding: str = "utf_8") -> bytes: + """ + Method to get re-encoded bytes payload using given target encoding. Default to UTF-8. + Any errors will be simply ignored by the encoder NOT replaced. + """ + if self._output_encoding is None or self._output_encoding != encoding: + self._output_encoding = encoding + decoded_string = str(self) + if ( + self._preemptive_declaration is not None + and self._preemptive_declaration.lower() + not in ["utf-8", "utf8", "utf_8"] + ): + patched_header = sub( + RE_POSSIBLE_ENCODING_INDICATION, + lambda m: m.string[m.span()[0] : m.span()[1]].replace( + m.groups()[0], + iana_name(self._output_encoding).replace("_", "-"), # type: ignore[arg-type] + ), + decoded_string[:8192], + count=1, + ) + + decoded_string = patched_header + decoded_string[8192:] + + self._output_payload = decoded_string.encode(encoding, "replace") + + return self._output_payload # type: ignore + + @property + def fingerprint(self) -> str: + """ + Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one. + """ + return sha256(self.output()).hexdigest() + + +class CharsetMatches: + """ + Container with every CharsetMatch items ordered by default from most probable to the less one. + Act like a list(iterable) but does not implements all related methods. + """ + + def __init__(self, results: list[CharsetMatch] | None = None): + self._results: list[CharsetMatch] = sorted(results) if results else [] + + def __iter__(self) -> Iterator[CharsetMatch]: + yield from self._results + + def __getitem__(self, item: int | str) -> CharsetMatch: + """ + Retrieve a single item either by its position or encoding name (alias may be used here). + Raise KeyError upon invalid index or encoding not present in results. + """ + if isinstance(item, int): + return self._results[item] + if isinstance(item, str): + item = iana_name(item, False) + for result in self._results: + if item in result.could_be_from_charset: + return result + raise KeyError + + def __len__(self) -> int: + return len(self._results) + + def __bool__(self) -> bool: + return len(self._results) > 0 + + def append(self, item: CharsetMatch) -> None: + """ + Insert a single match. Will be inserted accordingly to preserve sort. + Can be inserted as a submatch. 
+ """ + if not isinstance(item, CharsetMatch): + raise ValueError( + "Cannot append instance '{}' to CharsetMatches".format( + str(item.__class__) + ) + ) + # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage) + if len(item.raw) < TOO_BIG_SEQUENCE: + for match in self._results: + if match.fingerprint == item.fingerprint and match.chaos == item.chaos: + match.add_submatch(item) + return + self._results.append(item) + self._results = sorted(self._results) + + def best(self) -> CharsetMatch | None: + """ + Simply return the first match. Strict equivalent to matches[0]. + """ + if not self._results: + return None + return self._results[0] + + def first(self) -> CharsetMatch | None: + """ + Redundant method, call the method best(). Kept for BC reasons. + """ + return self.best() + + +CoherenceMatch = Tuple[str, float] +CoherenceMatches = List[CoherenceMatch] + + +class CliDetectionResult: + def __init__( + self, + path: str, + encoding: str | None, + encoding_aliases: list[str], + alternative_encodings: list[str], + language: str, + alphabets: list[str], + has_sig_or_bom: bool, + chaos: float, + coherence: float, + unicode_path: str | None, + is_preferred: bool, + ): + self.path: str = path + self.unicode_path: str | None = unicode_path + self.encoding: str | None = encoding + self.encoding_aliases: list[str] = encoding_aliases + self.alternative_encodings: list[str] = alternative_encodings + self.language: str = language + self.alphabets: list[str] = alphabets + self.has_sig_or_bom: bool = has_sig_or_bom + self.chaos: float = chaos + self.coherence: float = coherence + self.is_preferred: bool = is_preferred + + @property + def __dict__(self) -> dict[str, Any]: # type: ignore + return { + "path": self.path, + "encoding": self.encoding, + "encoding_aliases": self.encoding_aliases, + "alternative_encodings": self.alternative_encodings, + "language": self.language, + "alphabets": self.alphabets, + "has_sig_or_bom": self.has_sig_or_bom, + "chaos": self.chaos, + "coherence": self.coherence, + "unicode_path": self.unicode_path, + "is_preferred": self.is_preferred, + } + + def to_json(self) -> str: + return dumps(self.__dict__, ensure_ascii=True, indent=4) diff --git a/venv/Lib/site-packages/charset_normalizer/py.typed b/venv/Lib/site-packages/charset_normalizer/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/charset_normalizer/utils.py b/venv/Lib/site-packages/charset_normalizer/utils.py new file mode 100644 index 00000000..6bf0384c --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/utils.py @@ -0,0 +1,414 @@ +from __future__ import annotations + +import importlib +import logging +import unicodedata +from codecs import IncrementalDecoder +from encodings.aliases import aliases +from functools import lru_cache +from re import findall +from typing import Generator + +from _multibytecodec import ( # type: ignore[import-not-found,import] + MultibyteIncrementalDecoder, +) + +from .constant import ( + ENCODING_MARKS, + IANA_SUPPORTED_SIMILAR, + RE_POSSIBLE_ENCODING_INDICATION, + UNICODE_RANGES_COMBINED, + UNICODE_SECONDARY_RANGE_KEYWORD, + UTF8_MAXIMAL_ALLOCATION, + COMMON_CJK_CHARACTERS, +) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_accentuated(character: str) -> bool: + try: + description: str = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? 
+ return False + return ( + "WITH GRAVE" in description + or "WITH ACUTE" in description + or "WITH CEDILLA" in description + or "WITH DIAERESIS" in description + or "WITH CIRCUMFLEX" in description + or "WITH TILDE" in description + or "WITH MACRON" in description + or "WITH RING ABOVE" in description + ) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def remove_accent(character: str) -> str: + decomposed: str = unicodedata.decomposition(character) + if not decomposed: + return character + + codes: list[str] = decomposed.split(" ") + + return chr(int(codes[0], 16)) + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def unicode_range(character: str) -> str | None: + """ + Retrieve the Unicode range official name from a single character. + """ + character_ord: int = ord(character) + + for range_name, ord_range in UNICODE_RANGES_COMBINED.items(): + if character_ord in ord_range: + return range_name + + return None + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_latin(character: str) -> bool: + try: + description: str = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? + return False + return "LATIN" in description + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_punctuation(character: str) -> bool: + character_category: str = unicodedata.category(character) + + if "P" in character_category: + return True + + character_range: str | None = unicode_range(character) + + if character_range is None: + return False + + return "Punctuation" in character_range + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_symbol(character: str) -> bool: + character_category: str = unicodedata.category(character) + + if "S" in character_category or "N" in character_category: + return True + + character_range: str | None = unicode_range(character) + + if character_range is None: + return False + + return "Forms" in character_range and character_category != "Lo" + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_emoticon(character: str) -> bool: + character_range: str | None = unicode_range(character) + + if character_range is None: + return False + + return "Emoticons" in character_range or "Pictographs" in character_range + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_separator(character: str) -> bool: + if character.isspace() or character in {"|", "+", "<", ">"}: + return True + + character_category: str = unicodedata.category(character) + + return "Z" in character_category or character_category in {"Po", "Pd", "Pc"} + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_case_variable(character: str) -> bool: + return character.islower() != character.isupper() + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_cjk(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? + return False + + return "CJK" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_hiragana(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? + return False + + return "HIRAGANA" in character_name + + +@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) +def is_katakana(character: str) -> bool: + try: + character_name = unicodedata.name(character) + except ValueError: # Defensive: unicode database outdated? 
+        return False
+
+    return "KATAKANA" in character_name
+
+
+@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
+def is_hangul(character: str) -> bool:
+    try:
+        character_name = unicodedata.name(character)
+    except ValueError:  # Defensive: unicode database outdated?
+        return False
+
+    return "HANGUL" in character_name
+
+
+@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
+def is_thai(character: str) -> bool:
+    try:
+        character_name = unicodedata.name(character)
+    except ValueError:  # Defensive: unicode database outdated?
+        return False
+
+    return "THAI" in character_name
+
+
+@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
+def is_arabic(character: str) -> bool:
+    try:
+        character_name = unicodedata.name(character)
+    except ValueError:  # Defensive: unicode database outdated?
+        return False
+
+    return "ARABIC" in character_name
+
+
+@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
+def is_arabic_isolated_form(character: str) -> bool:
+    try:
+        character_name = unicodedata.name(character)
+    except ValueError:  # Defensive: unicode database outdated?
+        return False
+
+    return "ARABIC" in character_name and "ISOLATED FORM" in character_name
+
+
+@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
+def is_cjk_uncommon(character: str) -> bool:
+    return character not in COMMON_CJK_CHARACTERS
+
+
+@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
+def is_unicode_range_secondary(range_name: str) -> bool:
+    return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
+
+
+@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
+def is_unprintable(character: str) -> bool:
+    return (
+        character.isspace() is False  # includes \n \t \r \v
+        and character.isprintable() is False
+        and character != "\x1a"  # Why? It's the ASCII substitute character.
+        and character != "\ufeff"  # bug discovered in Python,
+        # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
+    )
+
+
+def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> str | None:
+    """
+    Extract any specified encoding found in the first n bytes, using an ASCII-only decoder.
+    """
+    if not isinstance(sequence, bytes):
+        raise TypeError
+
+    seq_len: int = len(sequence)
+
+    results: list[str] = findall(
+        RE_POSSIBLE_ENCODING_INDICATION,
+        sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
+    )
+
+    if len(results) == 0:
+        return None
+
+    for specified_encoding in results:
+        specified_encoding = specified_encoding.lower().replace("-", "_")
+
+        encoding_alias: str
+        encoding_iana: str
+
+        for encoding_alias, encoding_iana in aliases.items():
+            if encoding_alias == specified_encoding:
+                return encoding_iana
+            if encoding_iana == specified_encoding:
+                return encoding_iana
+
+    return None
+
+
+@lru_cache(maxsize=128)
+def is_multi_byte_encoding(name: str) -> bool:
+    """
+    Verify whether a specific encoding is a multi-byte one, based on its IANA name.
+    """
+    return name in {
+        "utf_8",
+        "utf_8_sig",
+        "utf_16",
+        "utf_16_be",
+        "utf_16_le",
+        "utf_32",
+        "utf_32_le",
+        "utf_32_be",
+        "utf_7",
+    } or issubclass(
+        importlib.import_module(f"encodings.{name}").IncrementalDecoder,
+        MultibyteIncrementalDecoder,
+    )
+
+
+def identify_sig_or_bom(sequence: bytes) -> tuple[str | None, bytes]:
+    """
+    Identify and extract SIG/BOM in given sequence.
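+
+    Illustrative example (assuming the UTF-8 BOM is registered in ENCODING_MARKS)::
+
+        identify_sig_or_bom(b"\xef\xbb\xbfhello")  # -> ("utf_8", b"\xef\xbb\xbf")
+        identify_sig_or_bom(b"hello")              # -> (None, b"")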
+ """ + + for iana_encoding in ENCODING_MARKS: + marks: bytes | list[bytes] = ENCODING_MARKS[iana_encoding] + + if isinstance(marks, bytes): + marks = [marks] + + for mark in marks: + if sequence.startswith(mark): + return iana_encoding, mark + + return None, b"" + + +def should_strip_sig_or_bom(iana_encoding: str) -> bool: + return iana_encoding not in {"utf_16", "utf_32"} + + +def iana_name(cp_name: str, strict: bool = True) -> str: + """Returns the Python normalized encoding name (Not the IANA official name).""" + cp_name = cp_name.lower().replace("-", "_") + + encoding_alias: str + encoding_iana: str + + for encoding_alias, encoding_iana in aliases.items(): + if cp_name in [encoding_alias, encoding_iana]: + return encoding_iana + + if strict: + raise ValueError(f"Unable to retrieve IANA for '{cp_name}'") + + return cp_name + + +def cp_similarity(iana_name_a: str, iana_name_b: str) -> float: + if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b): + return 0.0 + + decoder_a = importlib.import_module(f"encodings.{iana_name_a}").IncrementalDecoder + decoder_b = importlib.import_module(f"encodings.{iana_name_b}").IncrementalDecoder + + id_a: IncrementalDecoder = decoder_a(errors="ignore") + id_b: IncrementalDecoder = decoder_b(errors="ignore") + + character_match_count: int = 0 + + for i in range(255): + to_be_decoded: bytes = bytes([i]) + if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded): + character_match_count += 1 + + return character_match_count / 254 + + +def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool: + """ + Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using + the function cp_similarity. + """ + return ( + iana_name_a in IANA_SUPPORTED_SIMILAR + and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a] + ) + + +def set_logging_handler( + name: str = "charset_normalizer", + level: int = logging.INFO, + format_string: str = "%(asctime)s | %(levelname)s | %(message)s", +) -> None: + logger = logging.getLogger(name) + logger.setLevel(level) + + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter(format_string)) + logger.addHandler(handler) + + +def cut_sequence_chunks( + sequences: bytes, + encoding_iana: str, + offsets: range, + chunk_size: int, + bom_or_sig_available: bool, + strip_sig_or_bom: bool, + sig_payload: bytes, + is_multi_byte_decoder: bool, + decoded_payload: str | None = None, +) -> Generator[str, None, None]: + if decoded_payload and is_multi_byte_decoder is False: + for i in offsets: + chunk = decoded_payload[i : i + chunk_size] + if not chunk: + break + yield chunk + else: + for i in offsets: + chunk_end = i + chunk_size + if chunk_end > len(sequences) + 8: + continue + + cut_sequence = sequences[i : i + chunk_size] + + if bom_or_sig_available and strip_sig_or_bom is False: + cut_sequence = sig_payload + cut_sequence + + chunk = cut_sequence.decode( + encoding_iana, + errors="ignore" if is_multi_byte_decoder else "strict", + ) + + # multi-byte bad cutting detector and adjustment + # not the cleanest way to perform that fix but clever enough for now. 
+ if is_multi_byte_decoder and i > 0: + chunk_partial_size_chk: int = min(chunk_size, 16) + + if ( + decoded_payload + and chunk[:chunk_partial_size_chk] not in decoded_payload + ): + for j in range(i, i - 4, -1): + cut_sequence = sequences[j:chunk_end] + + if bom_or_sig_available and strip_sig_or_bom is False: + cut_sequence = sig_payload + cut_sequence + + chunk = cut_sequence.decode(encoding_iana, errors="ignore") + + if chunk[:chunk_partial_size_chk] in decoded_payload: + break + + yield chunk diff --git a/venv/Lib/site-packages/charset_normalizer/version.py b/venv/Lib/site-packages/charset_normalizer/version.py new file mode 100644 index 00000000..e5687e3c --- /dev/null +++ b/venv/Lib/site-packages/charset_normalizer/version.py @@ -0,0 +1,8 @@ +""" +Expose version +""" + +from __future__ import annotations + +__version__ = "3.4.2" +VERSION = __version__.split(".") diff --git a/venv/Lib/site-packages/click-8.2.0.dist-info/INSTALLER b/venv/Lib/site-packages/click-8.2.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/click-8.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/click-8.2.0.dist-info/METADATA b/venv/Lib/site-packages/click-8.2.0.dist-info/METADATA new file mode 100644 index 00000000..37ae3308 --- /dev/null +++ b/venv/Lib/site-packages/click-8.2.0.dist-info/METADATA @@ -0,0 +1,82 @@ +Metadata-Version: 2.4 +Name: click +Version: 8.2.0 +Summary: Composable command line interface toolkit +Maintainer-email: Pallets +Requires-Python: >=3.10 +Description-Content-Type: text/markdown +License-Expression: BSD-3-Clause +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Typing :: Typed +License-File: LICENSE.txt +Requires-Dist: colorama; platform_system == 'Windows' +Project-URL: Changes, https://click.palletsprojects.com/changes/ +Project-URL: Chat, https://discord.gg/pallets +Project-URL: Documentation, https://click.palletsprojects.com/ +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Source, https://github.com/pallets/click/ + +# $ click_ + +Click is a Python package for creating beautiful command line interfaces +in a composable way with as little code as necessary. It's the "Command +Line Interface Creation Kit". It's highly configurable but comes with +sensible defaults out of the box. + +It aims to make the process of writing command line tools quick and fun +while also preventing any frustration caused by the inability to +implement an intended CLI API. + +Click in three points: + +- Arbitrary nesting of commands +- Automatic help page generation +- Supports lazy loading of subcommands at runtime + + +## A Simple Example + +```python +import click + +@click.command() +@click.option("--count", default=1, help="Number of greetings.") +@click.option("--name", prompt="Your name", help="The person to greet.") +def hello(count, name): + """Simple program that greets NAME for a total of COUNT times.""" + for _ in range(count): + click.echo(f"Hello, {name}!") + +if __name__ == '__main__': + hello() +``` + +``` +$ python hello.py --count=3 +Your name: Click +Hello, Click! +Hello, Click! +Hello, Click! +``` + + +## Donate + +The Pallets organization develops and supports Click and other popular +packages. 
In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, [please +donate today][]. + +[please donate today]: https://palletsprojects.com/donate + +## Contributing + +See our [detailed contributing documentation][contrib] for many ways to +contribute, including reporting issues, requesting features, asking or answering +questions, and making PRs. + +[contrib]: https://palletsprojects.com/contributing/ + diff --git a/venv/Lib/site-packages/click-8.2.0.dist-info/RECORD b/venv/Lib/site-packages/click-8.2.0.dist-info/RECORD new file mode 100644 index 00000000..7f9a2670 --- /dev/null +++ b/venv/Lib/site-packages/click-8.2.0.dist-info/RECORD @@ -0,0 +1,38 @@ +click-8.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +click-8.2.0.dist-info/METADATA,sha256=b1wO_9Blfi0EN6Dvjb6YKj6xj7jthLcEgTADVm3qiUc,2466 +click-8.2.0.dist-info/RECORD,, +click-8.2.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 +click-8.2.0.dist-info/licenses/LICENSE.txt,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475 +click/__init__.py,sha256=6YyS1aeyknZ0LYweWozNZy0A9nZ_11wmYIhv3cbQrYo,4473 +click/__pycache__/__init__.cpython-312.pyc,, +click/__pycache__/_compat.cpython-312.pyc,, +click/__pycache__/_termui_impl.cpython-312.pyc,, +click/__pycache__/_textwrap.cpython-312.pyc,, +click/__pycache__/_winconsole.cpython-312.pyc,, +click/__pycache__/core.cpython-312.pyc,, +click/__pycache__/decorators.cpython-312.pyc,, +click/__pycache__/exceptions.cpython-312.pyc,, +click/__pycache__/formatting.cpython-312.pyc,, +click/__pycache__/globals.cpython-312.pyc,, +click/__pycache__/parser.cpython-312.pyc,, +click/__pycache__/shell_completion.cpython-312.pyc,, +click/__pycache__/termui.cpython-312.pyc,, +click/__pycache__/testing.cpython-312.pyc,, +click/__pycache__/types.cpython-312.pyc,, +click/__pycache__/utils.cpython-312.pyc,, +click/_compat.py,sha256=v3xBZkFbvA1BXPRkFfBJc6-pIwPI7345m-kQEnpVAs4,18693 +click/_termui_impl.py,sha256=ASXhLi9IQIc0Js9KQSS-3-SLZcPet3VqysBf9WgbbpI,26712 +click/_textwrap.py,sha256=BOae0RQ6vg3FkNgSJyOoGzG1meGMxJ_ukWVZKx_v-0o,1400 +click/_winconsole.py,sha256=6hzKWpPTXRv-v9SuH3-SwBpuMl3W9Dgox5QHPJFc7e4,8488 +click/core.py,sha256=yI_kKz2ewr2hFHnovdXUXZ5lFr88vFUkShCo7qrHLZo,117338 +click/decorators.py,sha256=5P7abhJtAQYp_KHgjUvhMv464ERwOzrv2enNknlwHyQ,18461 +click/exceptions.py,sha256=1rdtXgHJ1b3OjGkN-UpXB9t_HCBihJvh_DtpmLmwn9s,9891 +click/formatting.py,sha256=Bhqx4QXdKQ9W4WKknIwj5KPKFmtduGOuGq1yw_THLZ8,9726 +click/globals.py,sha256=gM-Nh6A4M0HB_SgkaF5M4ncGGMDHc_flHXu9_oh4GEU,1923 +click/parser.py,sha256=nU1Ah2p11q29ul1vNdU9swPo_PUuKrxU6YXToi71q1c,18979 +click/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +click/shell_completion.py,sha256=-WBu9SXYo85hNdO1syMu4OgY1gA9F_yhY0iE9eSogMs,19857 +click/termui.py,sha256=vAYrKC2a7f_NfEIhAThEVYfa__ib5XQbTSCGtJlABRA,30847 +click/testing.py,sha256=dqC7F_EYJ-z84bcIVF5NTN5fP3dAuF7dq6hYace6PcQ,18487 +click/types.py,sha256=KBTRxN28cR1VZ5mb9iJX98MQSw_p9SGzljqfEI8z5Tw,38389 +click/utils.py,sha256=b1Mm-usEDBHtEwcPltPIn3zSK4nw2KTp5GC7_oSTlLo,20245 diff --git a/venv/Lib/site-packages/click-8.2.0.dist-info/WHEEL b/venv/Lib/site-packages/click-8.2.0.dist-info/WHEEL new file mode 100644 index 00000000..d8b9936d --- /dev/null +++ b/venv/Lib/site-packages/click-8.2.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.12.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/Lib/site-packages/click-8.2.0.dist-info/licenses/LICENSE.txt 
b/venv/Lib/site-packages/click-8.2.0.dist-info/licenses/LICENSE.txt new file mode 100644 index 00000000..d12a8491 --- /dev/null +++ b/venv/Lib/site-packages/click-8.2.0.dist-info/licenses/LICENSE.txt @@ -0,0 +1,28 @@ +Copyright 2014 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/Lib/site-packages/click/__init__.py b/venv/Lib/site-packages/click/__init__.py new file mode 100644 index 00000000..1aa547c5 --- /dev/null +++ b/venv/Lib/site-packages/click/__init__.py @@ -0,0 +1,123 @@ +""" +Click is a simple Python module inspired by the stdlib optparse to make +writing command line scripts fun. Unlike other modules, it's based +around a simple API that does not come with too much magic and is +composable. 
+""" + +from __future__ import annotations + +from .core import Argument as Argument +from .core import Command as Command +from .core import CommandCollection as CommandCollection +from .core import Context as Context +from .core import Group as Group +from .core import Option as Option +from .core import Parameter as Parameter +from .decorators import argument as argument +from .decorators import command as command +from .decorators import confirmation_option as confirmation_option +from .decorators import group as group +from .decorators import help_option as help_option +from .decorators import make_pass_decorator as make_pass_decorator +from .decorators import option as option +from .decorators import pass_context as pass_context +from .decorators import pass_obj as pass_obj +from .decorators import password_option as password_option +from .decorators import version_option as version_option +from .exceptions import Abort as Abort +from .exceptions import BadArgumentUsage as BadArgumentUsage +from .exceptions import BadOptionUsage as BadOptionUsage +from .exceptions import BadParameter as BadParameter +from .exceptions import ClickException as ClickException +from .exceptions import FileError as FileError +from .exceptions import MissingParameter as MissingParameter +from .exceptions import NoSuchOption as NoSuchOption +from .exceptions import UsageError as UsageError +from .formatting import HelpFormatter as HelpFormatter +from .formatting import wrap_text as wrap_text +from .globals import get_current_context as get_current_context +from .termui import clear as clear +from .termui import confirm as confirm +from .termui import echo_via_pager as echo_via_pager +from .termui import edit as edit +from .termui import getchar as getchar +from .termui import launch as launch +from .termui import pause as pause +from .termui import progressbar as progressbar +from .termui import prompt as prompt +from .termui import secho as secho +from .termui import style as style +from .termui import unstyle as unstyle +from .types import BOOL as BOOL +from .types import Choice as Choice +from .types import DateTime as DateTime +from .types import File as File +from .types import FLOAT as FLOAT +from .types import FloatRange as FloatRange +from .types import INT as INT +from .types import IntRange as IntRange +from .types import ParamType as ParamType +from .types import Path as Path +from .types import STRING as STRING +from .types import Tuple as Tuple +from .types import UNPROCESSED as UNPROCESSED +from .types import UUID as UUID +from .utils import echo as echo +from .utils import format_filename as format_filename +from .utils import get_app_dir as get_app_dir +from .utils import get_binary_stream as get_binary_stream +from .utils import get_text_stream as get_text_stream +from .utils import open_file as open_file + + +def __getattr__(name: str) -> object: + import warnings + + if name == "BaseCommand": + from .core import _BaseCommand + + warnings.warn( + "'BaseCommand' is deprecated and will be removed in Click 9.0. Use" + " 'Command' instead.", + DeprecationWarning, + stacklevel=2, + ) + return _BaseCommand + + if name == "MultiCommand": + from .core import _MultiCommand + + warnings.warn( + "'MultiCommand' is deprecated and will be removed in Click 9.0. Use" + " 'Group' instead.", + DeprecationWarning, + stacklevel=2, + ) + return _MultiCommand + + if name == "OptionParser": + from .parser import _OptionParser + + warnings.warn( + "'OptionParser' is deprecated and will be removed in Click 9.0. 
The" + " old parser is available in 'optparse'.", + DeprecationWarning, + stacklevel=2, + ) + return _OptionParser + + if name == "__version__": + import importlib.metadata + import warnings + + warnings.warn( + "The '__version__' attribute is deprecated and will be removed in" + " Click 9.1. Use feature detection or" + " 'importlib.metadata.version(\"click\")' instead.", + DeprecationWarning, + stacklevel=2, + ) + return importlib.metadata.version("click") + + raise AttributeError(name) diff --git a/venv/Lib/site-packages/click/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..7e48c219 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/_compat.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/_compat.cpython-312.pyc new file mode 100644 index 00000000..77592769 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/_compat.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/_termui_impl.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/_termui_impl.cpython-312.pyc new file mode 100644 index 00000000..660540e9 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/_termui_impl.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/_textwrap.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/_textwrap.cpython-312.pyc new file mode 100644 index 00000000..e1931dc3 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/_textwrap.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/_winconsole.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/_winconsole.cpython-312.pyc new file mode 100644 index 00000000..f0f4396d Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/_winconsole.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/core.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/core.cpython-312.pyc new file mode 100644 index 00000000..4ab108e2 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/core.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/decorators.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/decorators.cpython-312.pyc new file mode 100644 index 00000000..2a71c748 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/decorators.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/exceptions.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 00000000..f015f546 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/exceptions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/formatting.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/formatting.cpython-312.pyc new file mode 100644 index 00000000..394cf89a Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/formatting.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/globals.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/globals.cpython-312.pyc new file mode 100644 index 00000000..a759c067 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/globals.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/parser.cpython-312.pyc 
b/venv/Lib/site-packages/click/__pycache__/parser.cpython-312.pyc new file mode 100644 index 00000000..69417bbc Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/parser.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/shell_completion.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/shell_completion.cpython-312.pyc new file mode 100644 index 00000000..1d80768e Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/shell_completion.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/termui.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/termui.cpython-312.pyc new file mode 100644 index 00000000..ca34b4f8 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/termui.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/testing.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/testing.cpython-312.pyc new file mode 100644 index 00000000..d9c37211 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/testing.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/types.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/types.cpython-312.pyc new file mode 100644 index 00000000..9aa83ad5 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/types.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/click/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..46639092 Binary files /dev/null and b/venv/Lib/site-packages/click/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/click/_compat.py b/venv/Lib/site-packages/click/_compat.py new file mode 100644 index 00000000..f2726b93 --- /dev/null +++ b/venv/Lib/site-packages/click/_compat.py @@ -0,0 +1,622 @@ +from __future__ import annotations + +import codecs +import collections.abc as cabc +import io +import os +import re +import sys +import typing as t +from types import TracebackType +from weakref import WeakKeyDictionary + +CYGWIN = sys.platform.startswith("cygwin") +WIN = sys.platform.startswith("win") +auto_wrap_for_ansi: t.Callable[[t.TextIO], t.TextIO] | None = None +_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]") + + +def _make_text_stream( + stream: t.BinaryIO, + encoding: str | None, + errors: str | None, + force_readable: bool = False, + force_writable: bool = False, +) -> t.TextIO: + if encoding is None: + encoding = get_best_encoding(stream) + if errors is None: + errors = "replace" + return _NonClosingTextIOWrapper( + stream, + encoding, + errors, + line_buffering=True, + force_readable=force_readable, + force_writable=force_writable, + ) + + +def is_ascii_encoding(encoding: str) -> bool: + """Checks if a given encoding is ascii.""" + try: + return codecs.lookup(encoding).name == "ascii" + except LookupError: + return False + + +def get_best_encoding(stream: t.IO[t.Any]) -> str: + """Returns the default stream encoding if not found.""" + rv = getattr(stream, "encoding", None) or sys.getdefaultencoding() + if is_ascii_encoding(rv): + return "utf-8" + return rv + + +class _NonClosingTextIOWrapper(io.TextIOWrapper): + def __init__( + self, + stream: t.BinaryIO, + encoding: str | None, + errors: str | None, + force_readable: bool = False, + force_writable: bool = False, + **extra: t.Any, + ) -> None: + self._stream = stream = t.cast( + t.BinaryIO, _FixupStream(stream, force_readable, force_writable) + ) + 
super().__init__(stream, encoding, errors, **extra) + + def __del__(self) -> None: + try: + self.detach() + except Exception: + pass + + def isatty(self) -> bool: + # https://bitbucket.org/pypy/pypy/issue/1803 + return self._stream.isatty() + + +class _FixupStream: + """The new io interface needs more from streams than streams + traditionally implement. As such, this fix-up code is necessary in + some circumstances. + + The forcing of readable and writable flags are there because some tools + put badly patched objects on sys (one such offender are certain version + of jupyter notebook). + """ + + def __init__( + self, + stream: t.BinaryIO, + force_readable: bool = False, + force_writable: bool = False, + ): + self._stream = stream + self._force_readable = force_readable + self._force_writable = force_writable + + def __getattr__(self, name: str) -> t.Any: + return getattr(self._stream, name) + + def read1(self, size: int) -> bytes: + f = getattr(self._stream, "read1", None) + + if f is not None: + return t.cast(bytes, f(size)) + + return self._stream.read(size) + + def readable(self) -> bool: + if self._force_readable: + return True + x = getattr(self._stream, "readable", None) + if x is not None: + return t.cast(bool, x()) + try: + self._stream.read(0) + except Exception: + return False + return True + + def writable(self) -> bool: + if self._force_writable: + return True + x = getattr(self._stream, "writable", None) + if x is not None: + return t.cast(bool, x()) + try: + self._stream.write(b"") + except Exception: + try: + self._stream.write(b"") + except Exception: + return False + return True + + def seekable(self) -> bool: + x = getattr(self._stream, "seekable", None) + if x is not None: + return t.cast(bool, x()) + try: + self._stream.seek(self._stream.tell()) + except Exception: + return False + return True + + +def _is_binary_reader(stream: t.IO[t.Any], default: bool = False) -> bool: + try: + return isinstance(stream.read(0), bytes) + except Exception: + return default + # This happens in some cases where the stream was already + # closed. In this case, we assume the default. + + +def _is_binary_writer(stream: t.IO[t.Any], default: bool = False) -> bool: + try: + stream.write(b"") + except Exception: + try: + stream.write("") + return False + except Exception: + pass + return default + return True + + +def _find_binary_reader(stream: t.IO[t.Any]) -> t.BinaryIO | None: + # We need to figure out if the given stream is already binary. + # This can happen because the official docs recommend detaching + # the streams to get binary streams. Some code might do this, so + # we need to deal with this case explicitly. + if _is_binary_reader(stream, False): + return t.cast(t.BinaryIO, stream) + + buf = getattr(stream, "buffer", None) + + # Same situation here; this time we assume that the buffer is + # actually binary in case it's closed. + if buf is not None and _is_binary_reader(buf, True): + return t.cast(t.BinaryIO, buf) + + return None + + +def _find_binary_writer(stream: t.IO[t.Any]) -> t.BinaryIO | None: + # We need to figure out if the given stream is already binary. + # This can happen because the official docs recommend detaching + # the streams to get binary streams. Some code might do this, so + # we need to deal with this case explicitly. + if _is_binary_writer(stream, False): + return t.cast(t.BinaryIO, stream) + + buf = getattr(stream, "buffer", None) + + # Same situation here; this time we assume that the buffer is + # actually binary in case it's closed. 
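+    # Typical case (illustrative): sys.stdout is a TextIOWrapper whose .buffer
+    # attribute is the underlying BufferedWriter; that buffer is what gets returned.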
+ if buf is not None and _is_binary_writer(buf, True): + return t.cast(t.BinaryIO, buf) + + return None + + +def _stream_is_misconfigured(stream: t.TextIO) -> bool: + """A stream is misconfigured if its encoding is ASCII.""" + # If the stream does not have an encoding set, we assume it's set + # to ASCII. This appears to happen in certain unittest + # environments. It's not quite clear what the correct behavior is + # but this at least will force Click to recover somehow. + return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii") + + +def _is_compat_stream_attr(stream: t.TextIO, attr: str, value: str | None) -> bool: + """A stream attribute is compatible if it is equal to the + desired value or the desired value is unset and the attribute + has a value. + """ + stream_value = getattr(stream, attr, None) + return stream_value == value or (value is None and stream_value is not None) + + +def _is_compatible_text_stream( + stream: t.TextIO, encoding: str | None, errors: str | None +) -> bool: + """Check if a stream's encoding and errors attributes are + compatible with the desired values. + """ + return _is_compat_stream_attr( + stream, "encoding", encoding + ) and _is_compat_stream_attr(stream, "errors", errors) + + +def _force_correct_text_stream( + text_stream: t.IO[t.Any], + encoding: str | None, + errors: str | None, + is_binary: t.Callable[[t.IO[t.Any], bool], bool], + find_binary: t.Callable[[t.IO[t.Any]], t.BinaryIO | None], + force_readable: bool = False, + force_writable: bool = False, +) -> t.TextIO: + if is_binary(text_stream, False): + binary_reader = t.cast(t.BinaryIO, text_stream) + else: + text_stream = t.cast(t.TextIO, text_stream) + # If the stream looks compatible, and won't default to a + # misconfigured ascii encoding, return it as-is. + if _is_compatible_text_stream(text_stream, encoding, errors) and not ( + encoding is None and _stream_is_misconfigured(text_stream) + ): + return text_stream + + # Otherwise, get the underlying binary reader. + possible_binary_reader = find_binary(text_stream) + + # If that's not possible, silently use the original reader + # and get mojibake instead of exceptions. + if possible_binary_reader is None: + return text_stream + + binary_reader = possible_binary_reader + + # Default errors to replace instead of strict in order to get + # something that works. + if errors is None: + errors = "replace" + + # Wrap the binary stream in a text stream with the correct + # encoding parameters. 
+ return _make_text_stream( + binary_reader, + encoding, + errors, + force_readable=force_readable, + force_writable=force_writable, + ) + + +def _force_correct_text_reader( + text_reader: t.IO[t.Any], + encoding: str | None, + errors: str | None, + force_readable: bool = False, +) -> t.TextIO: + return _force_correct_text_stream( + text_reader, + encoding, + errors, + _is_binary_reader, + _find_binary_reader, + force_readable=force_readable, + ) + + +def _force_correct_text_writer( + text_writer: t.IO[t.Any], + encoding: str | None, + errors: str | None, + force_writable: bool = False, +) -> t.TextIO: + return _force_correct_text_stream( + text_writer, + encoding, + errors, + _is_binary_writer, + _find_binary_writer, + force_writable=force_writable, + ) + + +def get_binary_stdin() -> t.BinaryIO: + reader = _find_binary_reader(sys.stdin) + if reader is None: + raise RuntimeError("Was not able to determine binary stream for sys.stdin.") + return reader + + +def get_binary_stdout() -> t.BinaryIO: + writer = _find_binary_writer(sys.stdout) + if writer is None: + raise RuntimeError("Was not able to determine binary stream for sys.stdout.") + return writer + + +def get_binary_stderr() -> t.BinaryIO: + writer = _find_binary_writer(sys.stderr) + if writer is None: + raise RuntimeError("Was not able to determine binary stream for sys.stderr.") + return writer + + +def get_text_stdin(encoding: str | None = None, errors: str | None = None) -> t.TextIO: + rv = _get_windows_console_stream(sys.stdin, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_reader(sys.stdin, encoding, errors, force_readable=True) + + +def get_text_stdout(encoding: str | None = None, errors: str | None = None) -> t.TextIO: + rv = _get_windows_console_stream(sys.stdout, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_writer(sys.stdout, encoding, errors, force_writable=True) + + +def get_text_stderr(encoding: str | None = None, errors: str | None = None) -> t.TextIO: + rv = _get_windows_console_stream(sys.stderr, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_writer(sys.stderr, encoding, errors, force_writable=True) + + +def _wrap_io_open( + file: str | os.PathLike[str] | int, + mode: str, + encoding: str | None, + errors: str | None, +) -> t.IO[t.Any]: + """Handles not passing ``encoding`` and ``errors`` in binary mode.""" + if "b" in mode: + return open(file, mode) + + return open(file, mode, encoding=encoding, errors=errors) + + +def open_stream( + filename: str | os.PathLike[str], + mode: str = "r", + encoding: str | None = None, + errors: str | None = "strict", + atomic: bool = False, +) -> tuple[t.IO[t.Any], bool]: + binary = "b" in mode + filename = os.fspath(filename) + + # Standard streams first. These are simple because they ignore the + # atomic flag. Use fsdecode to handle Path("-"). + if os.fsdecode(filename) == "-": + if any(m in mode for m in ["w", "a", "x"]): + if binary: + return get_binary_stdout(), False + return get_text_stdout(encoding=encoding, errors=errors), False + if binary: + return get_binary_stdin(), False + return get_text_stdin(encoding=encoding, errors=errors), False + + # Non-atomic writes directly go out through the regular open functions. 
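+    # Illustrative call: open_stream("out.txt", "w", atomic=True) instead writes to
+    # a hidden sibling temp file and os.replace()s it over "out.txt" on close, so
+    # readers never observe a half-written file.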
+ if not atomic: + return _wrap_io_open(filename, mode, encoding, errors), True + + # Some usability stuff for atomic writes + if "a" in mode: + raise ValueError( + "Appending to an existing file is not supported, because that" + " would involve an expensive `copy`-operation to a temporary" + " file. Open the file in normal `w`-mode and copy explicitly" + " if that's what you're after." + ) + if "x" in mode: + raise ValueError("Use the `overwrite`-parameter instead.") + if "w" not in mode: + raise ValueError("Atomic writes only make sense with `w`-mode.") + + # Atomic writes are more complicated. They work by opening a file + # as a proxy in the same folder and then using the fdopen + # functionality to wrap it in a Python file. Then we wrap it in an + # atomic file that moves the file over on close. + import errno + import random + + try: + perm: int | None = os.stat(filename).st_mode + except OSError: + perm = None + + flags = os.O_RDWR | os.O_CREAT | os.O_EXCL + + if binary: + flags |= getattr(os, "O_BINARY", 0) + + while True: + tmp_filename = os.path.join( + os.path.dirname(filename), + f".__atomic-write{random.randrange(1 << 32):08x}", + ) + try: + fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm) + break + except OSError as e: + if e.errno == errno.EEXIST or ( + os.name == "nt" + and e.errno == errno.EACCES + and os.path.isdir(e.filename) + and os.access(e.filename, os.W_OK) + ): + continue + raise + + if perm is not None: + os.chmod(tmp_filename, perm) # in case perm includes bits in umask + + f = _wrap_io_open(fd, mode, encoding, errors) + af = _AtomicFile(f, tmp_filename, os.path.realpath(filename)) + return t.cast(t.IO[t.Any], af), True + + +class _AtomicFile: + def __init__(self, f: t.IO[t.Any], tmp_filename: str, real_filename: str) -> None: + self._f = f + self._tmp_filename = tmp_filename + self._real_filename = real_filename + self.closed = False + + @property + def name(self) -> str: + return self._real_filename + + def close(self, delete: bool = False) -> None: + if self.closed: + return + self._f.close() + os.replace(self._tmp_filename, self._real_filename) + self.closed = True + + def __getattr__(self, name: str) -> t.Any: + return getattr(self._f, name) + + def __enter__(self) -> _AtomicFile: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> None: + self.close(delete=exc_type is not None) + + def __repr__(self) -> str: + return repr(self._f) + + +def strip_ansi(value: str) -> str: + return _ansi_re.sub("", value) + + +def _is_jupyter_kernel_output(stream: t.IO[t.Any]) -> bool: + while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)): + stream = stream._stream + + return stream.__class__.__module__.startswith("ipykernel.") + + +def should_strip_ansi( + stream: t.IO[t.Any] | None = None, color: bool | None = None +) -> bool: + if color is None: + if stream is None: + stream = sys.stdin + return not isatty(stream) and not _is_jupyter_kernel_output(stream) + return not color + + +# On Windows, wrap the output streams with colorama to support ANSI +# color codes. 
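+# Rough mental model (colorama internals, not defined here): AnsiToWin32 parses the
+# ANSI escape sequences Click emits and translates them into Win32 console calls,
+# or strips them entirely when the target stream is not a terminal.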
+# NOTE: double check is needed so mypy does not analyze this on Linux +if sys.platform.startswith("win") and WIN: + from ._winconsole import _get_windows_console_stream + + def _get_argv_encoding() -> str: + import locale + + return locale.getpreferredencoding() + + _ansi_stream_wrappers: cabc.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() + + def auto_wrap_for_ansi(stream: t.TextIO, color: bool | None = None) -> t.TextIO: + """Support ANSI color and style codes on Windows by wrapping a + stream with colorama. + """ + try: + cached = _ansi_stream_wrappers.get(stream) + except Exception: + cached = None + + if cached is not None: + return cached + + import colorama + + strip = should_strip_ansi(stream, color) + ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) + rv = t.cast(t.TextIO, ansi_wrapper.stream) + _write = rv.write + + def _safe_write(s: str) -> int: + try: + return _write(s) + except BaseException: + ansi_wrapper.reset_all() + raise + + rv.write = _safe_write # type: ignore[method-assign] + + try: + _ansi_stream_wrappers[stream] = rv + except Exception: + pass + + return rv + +else: + + def _get_argv_encoding() -> str: + return getattr(sys.stdin, "encoding", None) or sys.getfilesystemencoding() + + def _get_windows_console_stream( + f: t.TextIO, encoding: str | None, errors: str | None + ) -> t.TextIO | None: + return None + + +def term_len(x: str) -> int: + return len(strip_ansi(x)) + + +def isatty(stream: t.IO[t.Any]) -> bool: + try: + return stream.isatty() + except Exception: + return False + + +def _make_cached_stream_func( + src_func: t.Callable[[], t.TextIO | None], + wrapper_func: t.Callable[[], t.TextIO], +) -> t.Callable[[], t.TextIO | None]: + cache: cabc.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() + + def func() -> t.TextIO | None: + stream = src_func() + + if stream is None: + return None + + try: + rv = cache.get(stream) + except Exception: + rv = None + if rv is not None: + return rv + rv = wrapper_func() + try: + cache[stream] = rv + except Exception: + pass + return rv + + return func + + +_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin) +_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout) +_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr) + + +binary_streams: cabc.Mapping[str, t.Callable[[], t.BinaryIO]] = { + "stdin": get_binary_stdin, + "stdout": get_binary_stdout, + "stderr": get_binary_stderr, +} + +text_streams: cabc.Mapping[str, t.Callable[[str | None, str | None], t.TextIO]] = { + "stdin": get_text_stdin, + "stdout": get_text_stdout, + "stderr": get_text_stderr, +} diff --git a/venv/Lib/site-packages/click/_termui_impl.py b/venv/Lib/site-packages/click/_termui_impl.py new file mode 100644 index 00000000..51fd9bf3 --- /dev/null +++ b/venv/Lib/site-packages/click/_termui_impl.py @@ -0,0 +1,839 @@ +""" +This module contains implementations for the termui module. To keep the +import time of Click down, some infrequently used functionality is +placed in this module and only imported as needed. 
+""" + +from __future__ import annotations + +import collections.abc as cabc +import contextlib +import math +import os +import shlex +import sys +import time +import typing as t +from gettext import gettext as _ +from io import StringIO +from pathlib import Path +from shutil import which +from types import TracebackType + +from ._compat import _default_text_stdout +from ._compat import CYGWIN +from ._compat import get_best_encoding +from ._compat import isatty +from ._compat import open_stream +from ._compat import strip_ansi +from ._compat import term_len +from ._compat import WIN +from .exceptions import ClickException +from .utils import echo + +V = t.TypeVar("V") + +if os.name == "nt": + BEFORE_BAR = "\r" + AFTER_BAR = "\n" +else: + BEFORE_BAR = "\r\033[?25l" + AFTER_BAR = "\033[?25h\n" + + +class ProgressBar(t.Generic[V]): + def __init__( + self, + iterable: cabc.Iterable[V] | None, + length: int | None = None, + fill_char: str = "#", + empty_char: str = " ", + bar_template: str = "%(bar)s", + info_sep: str = " ", + hidden: bool = False, + show_eta: bool = True, + show_percent: bool | None = None, + show_pos: bool = False, + item_show_func: t.Callable[[V | None], str | None] | None = None, + label: str | None = None, + file: t.TextIO | None = None, + color: bool | None = None, + update_min_steps: int = 1, + width: int = 30, + ) -> None: + self.fill_char = fill_char + self.empty_char = empty_char + self.bar_template = bar_template + self.info_sep = info_sep + self.hidden = hidden + self.show_eta = show_eta + self.show_percent = show_percent + self.show_pos = show_pos + self.item_show_func = item_show_func + self.label: str = label or "" + + if file is None: + file = _default_text_stdout() + + # There are no standard streams attached to write to. For example, + # pythonw on Windows. + if file is None: + file = StringIO() + + self.file = file + self.color = color + self.update_min_steps = update_min_steps + self._completed_intervals = 0 + self.width: int = width + self.autowidth: bool = width == 0 + + if length is None: + from operator import length_hint + + length = length_hint(iterable, -1) + + if length == -1: + length = None + if iterable is None: + if length is None: + raise TypeError("iterable or length is required") + iterable = t.cast("cabc.Iterable[V]", range(length)) + self.iter: cabc.Iterable[V] = iter(iterable) + self.length = length + self.pos: int = 0 + self.avg: list[float] = [] + self.last_eta: float + self.start: float + self.start = self.last_eta = time.time() + self.eta_known: bool = False + self.finished: bool = False + self.max_width: int | None = None + self.entered: bool = False + self.current_item: V | None = None + self._is_atty = isatty(self.file) + self._last_line: str | None = None + + def __enter__(self) -> ProgressBar[V]: + self.entered = True + self.render_progress() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> None: + self.render_finish() + + def __iter__(self) -> cabc.Iterator[V]: + if not self.entered: + raise RuntimeError("You need to use progress bars in a with block.") + self.render_progress() + return self.generator() + + def __next__(self) -> V: + # Iteration is defined in terms of a generator function, + # returned by iter(self); use that to define next(). This works + # because `self.iter` is an iterable consumed by that generator, + # so it is re-entry safe. Calling `next(self.generator())` + # twice works and does "what you want". 
+ return next(iter(self)) + + def render_finish(self) -> None: + if self.hidden or not self._is_atty: + return + self.file.write(AFTER_BAR) + self.file.flush() + + @property + def pct(self) -> float: + if self.finished: + return 1.0 + return min(self.pos / (float(self.length or 1) or 1), 1.0) + + @property + def time_per_iteration(self) -> float: + if not self.avg: + return 0.0 + return sum(self.avg) / float(len(self.avg)) + + @property + def eta(self) -> float: + if self.length is not None and not self.finished: + return self.time_per_iteration * (self.length - self.pos) + return 0.0 + + def format_eta(self) -> str: + if self.eta_known: + t = int(self.eta) + seconds = t % 60 + t //= 60 + minutes = t % 60 + t //= 60 + hours = t % 24 + t //= 24 + if t > 0: + return f"{t}d {hours:02}:{minutes:02}:{seconds:02}" + else: + return f"{hours:02}:{minutes:02}:{seconds:02}" + return "" + + def format_pos(self) -> str: + pos = str(self.pos) + if self.length is not None: + pos += f"/{self.length}" + return pos + + def format_pct(self) -> str: + return f"{int(self.pct * 100): 4}%"[1:] + + def format_bar(self) -> str: + if self.length is not None: + bar_length = int(self.pct * self.width) + bar = self.fill_char * bar_length + bar += self.empty_char * (self.width - bar_length) + elif self.finished: + bar = self.fill_char * self.width + else: + chars = list(self.empty_char * (self.width or 1)) + if self.time_per_iteration != 0: + chars[ + int( + (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5) + * self.width + ) + ] = self.fill_char + bar = "".join(chars) + return bar + + def format_progress_line(self) -> str: + show_percent = self.show_percent + + info_bits = [] + if self.length is not None and show_percent is None: + show_percent = not self.show_pos + + if self.show_pos: + info_bits.append(self.format_pos()) + if show_percent: + info_bits.append(self.format_pct()) + if self.show_eta and self.eta_known and not self.finished: + info_bits.append(self.format_eta()) + if self.item_show_func is not None: + item_info = self.item_show_func(self.current_item) + if item_info is not None: + info_bits.append(item_info) + + return ( + self.bar_template + % { + "label": self.label, + "bar": self.format_bar(), + "info": self.info_sep.join(info_bits), + } + ).rstrip() + + def render_progress(self) -> None: + import shutil + + if self.hidden: + return + + if not self._is_atty: + # Only output the label once if the output is not a TTY. + if self._last_line != self.label: + self._last_line = self.label + echo(self.label, file=self.file, color=self.color) + return + + buf = [] + # Update width in case the terminal has been resized + if self.autowidth: + old_width = self.width + self.width = 0 + clutter_length = term_len(self.format_progress_line()) + new_width = max(0, shutil.get_terminal_size().columns - clutter_length) + if new_width < old_width and self.max_width is not None: + buf.append(BEFORE_BAR) + buf.append(" " * self.max_width) + self.max_width = new_width + self.width = new_width + + clear_width = self.width + if self.max_width is not None: + clear_width = self.max_width + + buf.append(BEFORE_BAR) + line = self.format_progress_line() + line_len = term_len(line) + if self.max_width is None or self.max_width < line_len: + self.max_width = line_len + + buf.append(line) + buf.append(" " * (clear_width - line_len)) + line = "".join(buf) + # Render the line only if it changed. 
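+        # Skipping identical lines avoids flicker and redundant writes when
+        # update() is called more often than the rendered content changes.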
+ + if line != self._last_line: + self._last_line = line + echo(line, file=self.file, color=self.color, nl=False) + self.file.flush() + + def make_step(self, n_steps: int) -> None: + self.pos += n_steps + if self.length is not None and self.pos >= self.length: + self.finished = True + + if (time.time() - self.last_eta) < 1.0: + return + + self.last_eta = time.time() + + # self.avg is a rolling list of length <= 7 of steps where steps are + # defined as time elapsed divided by the total progress through + # self.length. + if self.pos: + step = (time.time() - self.start) / self.pos + else: + step = time.time() - self.start + + self.avg = self.avg[-6:] + [step] + + self.eta_known = self.length is not None + + def update(self, n_steps: int, current_item: V | None = None) -> None: + """Update the progress bar by advancing a specified number of + steps, and optionally set the ``current_item`` for this new + position. + + :param n_steps: Number of steps to advance. + :param current_item: Optional item to set as ``current_item`` + for the updated position. + + .. versionchanged:: 8.0 + Added the ``current_item`` optional parameter. + + .. versionchanged:: 8.0 + Only render when the number of steps meets the + ``update_min_steps`` threshold. + """ + if current_item is not None: + self.current_item = current_item + + self._completed_intervals += n_steps + + if self._completed_intervals >= self.update_min_steps: + self.make_step(self._completed_intervals) + self.render_progress() + self._completed_intervals = 0 + + def finish(self) -> None: + self.eta_known = False + self.current_item = None + self.finished = True + + def generator(self) -> cabc.Iterator[V]: + """Return a generator which yields the items added to the bar + during construction, and updates the progress bar *after* the + yielded block returns. + """ + # WARNING: the iterator interface for `ProgressBar` relies on + # this and only works because this is a simple generator which + # doesn't create or manage additional state. If this function + # changes, the impact should be evaluated both against + # `iter(bar)` and `next(bar)`. `next()` in particular may call + # `self.generator()` repeatedly, and this must remain safe in + # order for that interface to work. + if not self.entered: + raise RuntimeError("You need to use progress bars in a with block.") + + if not self._is_atty: + yield from self.iter + else: + for rv in self.iter: + self.current_item = rv + + # This allows show_item_func to be updated before the + # item is processed. Only trigger at the beginning of + # the update interval. + if self._completed_intervals == 0: + self.render_progress() + + yield rv + self.update(1) + + self.finish() + self.render_progress() + + +def pager(generator: cabc.Iterable[str], color: bool | None = None) -> None: + """Decide what method to use for paging through text.""" + stdout = _default_text_stdout() + + # There are no standard streams attached to write to. For example, + # pythonw on Windows. + if stdout is None: + stdout = StringIO() + + if not isatty(sys.stdin) or not isatty(stdout): + return _nullpager(stdout, generator, color) + + # Split and normalize the pager command into parts. 
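+    # posix=False keeps backslashes in Windows paths from being treated
+    # as shell escape sequences.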
+ pager_cmd_parts = shlex.split(os.environ.get("PAGER", ""), posix=False) + if pager_cmd_parts: + if WIN: + if _tempfilepager(generator, pager_cmd_parts, color): + return + elif _pipepager(generator, pager_cmd_parts, color): + return + + if os.environ.get("TERM") in ("dumb", "emacs"): + return _nullpager(stdout, generator, color) + if (WIN or sys.platform.startswith("os2")) and _tempfilepager( + generator, ["more"], color + ): + return + if _pipepager(generator, ["less"], color): + return + + import tempfile + + fd, filename = tempfile.mkstemp() + os.close(fd) + try: + if _pipepager(generator, ["more"], color): + return + return _nullpager(stdout, generator, color) + finally: + os.unlink(filename) + + +def _pipepager( + generator: cabc.Iterable[str], cmd_parts: list[str], color: bool | None +) -> bool: + """Page through text by feeding it to another program. Invoking a + pager through this might support colors. + + Returns `True` if the command was found, `False` otherwise and thus another + pager should be attempted. + """ + # Split the command into the invoked CLI and its parameters. + if not cmd_parts: + return False + cmd = cmd_parts[0] + cmd_params = cmd_parts[1:] + + cmd_filepath = which(cmd) + if not cmd_filepath: + return False + # Resolves symlinks and produces a normalized absolute path string. + cmd_path = Path(cmd_filepath).resolve() + cmd_name = cmd_path.name + + import subprocess + + # Make a local copy of the environment to not affect the global one. + env = dict(os.environ) + + # If we're piping to less and the user hasn't decided on colors, we enable + # them by default we find the -R flag in the command line arguments. + if color is None and cmd_name == "less": + less_flags = f"{os.environ.get('LESS', '')}{' '.join(cmd_params)}" + if not less_flags: + env["LESS"] = "-R" + color = True + elif "r" in less_flags or "R" in less_flags: + color = True + + c = subprocess.Popen( + [str(cmd_path)] + cmd_params, + shell=True, + stdin=subprocess.PIPE, + env=env, + errors="replace", + text=True, + ) + assert c.stdin is not None + try: + for text in generator: + if not color: + text = strip_ansi(text) + + c.stdin.write(text) + except BrokenPipeError: + # In case the pager exited unexpectedly, ignore the broken pipe error. + pass + except Exception as e: + # In case there is an exception we want to close the pager immediately + # and let the caller handle it. + # Otherwise the pager will keep running, and the user may not notice + # the error message, or worse yet it may leave the terminal in a broken state. + c.terminate() + raise e + finally: + # We must close stdin and wait for the pager to exit before we continue + try: + c.stdin.close() + # Close implies flush, so it might throw a BrokenPipeError if the pager + # process exited already. + except BrokenPipeError: + pass + + # Less doesn't respect ^C, but catches it for its own UI purposes (aborting + # search or other commands inside less). + # + # That means when the user hits ^C, the parent process (click) terminates, + # but less is still alive, paging the output and messing up the terminal. + # + # If the user wants to make the pager exit on ^C, they should set + # `LESS='-K'`. It's not our decision to make. + while True: + try: + c.wait() + except KeyboardInterrupt: + pass + else: + break + + return True + + +def _tempfilepager( + generator: cabc.Iterable[str], cmd_parts: list[str], color: bool | None +) -> bool: + """Page through text by invoking a program on a temporary file. 
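+    ``pager()`` above prefers this over piping on Windows and OS/2.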
+ + Returns `True` if the command was found, `False` otherwise and thus another + pager should be attempted. + """ + # Split the command into the invoked CLI and its parameters. + if not cmd_parts: + return False + cmd = cmd_parts[0] + + cmd_filepath = which(cmd) + if not cmd_filepath: + return False + # Resolves symlinks and produces a normalized absolute path string. + cmd_path = Path(cmd_filepath).resolve() + + import subprocess + import tempfile + + fd, filename = tempfile.mkstemp() + # TODO: This never terminates if the passed generator never terminates. + text = "".join(generator) + if not color: + text = strip_ansi(text) + encoding = get_best_encoding(sys.stdout) + with open_stream(filename, "wb")[0] as f: + f.write(text.encode(encoding)) + try: + subprocess.call([str(cmd_path), filename]) + except OSError: + # Command not found + pass + finally: + os.close(fd) + os.unlink(filename) + + return True + + +def _nullpager( + stream: t.TextIO, generator: cabc.Iterable[str], color: bool | None +) -> None: + """Simply print unformatted text. This is the ultimate fallback.""" + for text in generator: + if not color: + text = strip_ansi(text) + stream.write(text) + + +class Editor: + def __init__( + self, + editor: str | None = None, + env: cabc.Mapping[str, str] | None = None, + require_save: bool = True, + extension: str = ".txt", + ) -> None: + self.editor = editor + self.env = env + self.require_save = require_save + self.extension = extension + + def get_editor(self) -> str: + if self.editor is not None: + return self.editor + for key in "VISUAL", "EDITOR": + rv = os.environ.get(key) + if rv: + return rv + if WIN: + return "notepad" + for editor in "sensible-editor", "vim", "nano": + if which(editor) is not None: + return editor + return "vi" + + def edit_files(self, filenames: cabc.Iterable[str]) -> None: + import subprocess + + editor = self.get_editor() + environ: dict[str, str] | None = None + + if self.env: + environ = os.environ.copy() + environ.update(self.env) + + exc_filename = " ".join(f'"{filename}"' for filename in filenames) + + try: + c = subprocess.Popen( + args=f"{editor} {exc_filename}", env=environ, shell=True + ) + exit_code = c.wait() + if exit_code != 0: + raise ClickException( + _("{editor}: Editing failed").format(editor=editor) + ) + except OSError as e: + raise ClickException( + _("{editor}: Editing failed: {e}").format(editor=editor, e=e) + ) from e + + @t.overload + def edit(self, text: bytes | bytearray) -> bytes | None: ... + + # We cannot know whether or not the type expected is str or bytes when None + # is passed, so str is returned as that was what was done before. + @t.overload + def edit(self, text: str | None) -> str | None: ... + + def edit(self, text: str | bytes | bytearray | None) -> str | bytes | None: + import tempfile + + if text is None: + data = b"" + elif isinstance(text, (bytes, bytearray)): + data = text + else: + if text and not text.endswith("\n"): + text += "\n" + + if WIN: + data = text.replace("\n", "\r\n").encode("utf-8-sig") + else: + data = text.encode("utf-8") + + fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension) + f: t.BinaryIO + + try: + with os.fdopen(fd, "wb") as f: + f.write(data) + + # If the filesystem resolution is 1 second, like Mac OS + # 10.12 Extended, or 2 seconds, like FAT32, and the editor + # closes very fast, require_save can fail. Set the modified + # time to be 2 seconds in the past to work around this. 
+ os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2)) + # Depending on the resolution, the exact value might not be + # recorded, so get the new recorded value. + timestamp = os.path.getmtime(name) + + self.edit_files((name,)) + + if self.require_save and os.path.getmtime(name) == timestamp: + return None + + with open(name, "rb") as f: + rv = f.read() + + if isinstance(text, (bytes, bytearray)): + return rv + + return rv.decode("utf-8-sig").replace("\r\n", "\n") + finally: + os.unlink(name) + + +def open_url(url: str, wait: bool = False, locate: bool = False) -> int: + import subprocess + + def _unquote_file(url: str) -> str: + from urllib.parse import unquote + + if url.startswith("file://"): + url = unquote(url[7:]) + + return url + + if sys.platform == "darwin": + args = ["open"] + if wait: + args.append("-W") + if locate: + args.append("-R") + args.append(_unquote_file(url)) + null = open("/dev/null", "w") + try: + return subprocess.Popen(args, stderr=null).wait() + finally: + null.close() + elif WIN: + if locate: + url = _unquote_file(url) + args = ["explorer", f"/select,{url}"] + else: + args = ["start"] + if wait: + args.append("/WAIT") + args.append("") + args.append(url) + try: + return subprocess.call(args) + except OSError: + # Command not found + return 127 + elif CYGWIN: + if locate: + url = _unquote_file(url) + args = ["cygstart", os.path.dirname(url)] + else: + args = ["cygstart"] + if wait: + args.append("-w") + args.append(url) + try: + return subprocess.call(args) + except OSError: + # Command not found + return 127 + + try: + if locate: + url = os.path.dirname(_unquote_file(url)) or "." + else: + url = _unquote_file(url) + c = subprocess.Popen(["xdg-open", url]) + if wait: + return c.wait() + return 0 + except OSError: + if url.startswith(("http://", "https://")) and not locate and not wait: + import webbrowser + + webbrowser.open(url) + return 0 + return 1 + + +def _translate_ch_to_exc(ch: str) -> None: + if ch == "\x03": + raise KeyboardInterrupt() + + if ch == "\x04" and not WIN: # Unix-like, Ctrl+D + raise EOFError() + + if ch == "\x1a" and WIN: # Windows, Ctrl+Z + raise EOFError() + + return None + + +if sys.platform == "win32": + import msvcrt + + @contextlib.contextmanager + def raw_terminal() -> cabc.Iterator[int]: + yield -1 + + def getchar(echo: bool) -> str: + # The function `getch` will return a bytes object corresponding to + # the pressed character. Since Windows 10 build 1803, it will also + # return \x00 when called a second time after pressing a regular key. + # + # `getwch` does not share this probably-bugged behavior. Moreover, it + # returns a Unicode object by default, which is what we want. + # + # Either of these functions will return \x00 or \xe0 to indicate + # a special key, and you need to call the same function again to get + # the "rest" of the code. The fun part is that \u00e0 is + # "latin small letter a with grave", so if you type that on a French + # keyboard, you _also_ get a \xe0. + # E.g., consider the Up arrow. This returns \xe0 and then \x48. The + # resulting Unicode string reads as "a with grave" + "capital H". + # This is indistinguishable from when the user actually types + # "a with grave" and then "capital H". + # + # When \xe0 is returned, we assume it's part of a special-key sequence + # and call `getwch` again, but that means that when the user types + # the \u00e0 character, `getchar` doesn't return until a second + # character is typed. 
+ # The alternative is returning immediately, but that would mess up + # cross-platform handling of arrow keys and others that start with + # \xe0. Another option is using `getch`, but then we can't reliably + # read non-ASCII characters, because return values of `getch` are + # limited to the current 8-bit codepage. + # + # Anyway, Click doesn't claim to do this Right(tm), and using `getwch` + # is doing the right thing in more situations than with `getch`. + + if echo: + func = t.cast(t.Callable[[], str], msvcrt.getwche) + else: + func = t.cast(t.Callable[[], str], msvcrt.getwch) + + rv = func() + + if rv in ("\x00", "\xe0"): + # \x00 and \xe0 are control characters that indicate special key, + # see above. + rv += func() + + _translate_ch_to_exc(rv) + return rv + +else: + import termios + import tty + + @contextlib.contextmanager + def raw_terminal() -> cabc.Iterator[int]: + f: t.TextIO | None + fd: int + + if not isatty(sys.stdin): + f = open("/dev/tty") + fd = f.fileno() + else: + fd = sys.stdin.fileno() + f = None + + try: + old_settings = termios.tcgetattr(fd) + + try: + tty.setraw(fd) + yield fd + finally: + termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) + sys.stdout.flush() + + if f is not None: + f.close() + except termios.error: + pass + + def getchar(echo: bool) -> str: + with raw_terminal() as fd: + ch = os.read(fd, 32).decode(get_best_encoding(sys.stdin), "replace") + + if echo and isatty(sys.stdout): + sys.stdout.write(ch) + + _translate_ch_to_exc(ch) + return ch diff --git a/venv/Lib/site-packages/click/_textwrap.py b/venv/Lib/site-packages/click/_textwrap.py new file mode 100644 index 00000000..97fbee3d --- /dev/null +++ b/venv/Lib/site-packages/click/_textwrap.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +import collections.abc as cabc +import textwrap +from contextlib import contextmanager + + +class TextWrapper(textwrap.TextWrapper): + def _handle_long_word( + self, + reversed_chunks: list[str], + cur_line: list[str], + cur_len: int, + width: int, + ) -> None: + space_left = max(width - cur_len, 1) + + if self.break_long_words: + last = reversed_chunks[-1] + cut = last[:space_left] + res = last[space_left:] + cur_line.append(cut) + reversed_chunks[-1] = res + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + @contextmanager + def extra_indent(self, indent: str) -> cabc.Iterator[None]: + old_initial_indent = self.initial_indent + old_subsequent_indent = self.subsequent_indent + self.initial_indent += indent + self.subsequent_indent += indent + + try: + yield + finally: + self.initial_indent = old_initial_indent + self.subsequent_indent = old_subsequent_indent + + def indent_only(self, text: str) -> str: + rv = [] + + for idx, line in enumerate(text.splitlines()): + indent = self.initial_indent + + if idx > 0: + indent = self.subsequent_indent + + rv.append(f"{indent}{line}") + + return "\n".join(rv) diff --git a/venv/Lib/site-packages/click/_winconsole.py b/venv/Lib/site-packages/click/_winconsole.py new file mode 100644 index 00000000..566dd9c5 --- /dev/null +++ b/venv/Lib/site-packages/click/_winconsole.py @@ -0,0 +1,295 @@ +# This module is based on the excellent work by Adam Bartoš who +# provided a lot of what went into the implementation here in +# the discussion to issue1602 in the Python bug tracker. +# +# There are some general differences in regards to how this works +# compared to the original patches as we do not need to patch +# the entire interpreter but just work in our little world of +# echo and prompt. 
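+#
+# The approach: detect when a stream is attached to a real console
+# handle, then route reads and writes through ReadConsoleW/WriteConsoleW
+# as UTF-16-LE so Unicode survives regardless of the console codepage.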
+from __future__ import annotations + +import collections.abc as cabc +import io +import sys +import time +import typing as t +from ctypes import Array +from ctypes import byref +from ctypes import c_char +from ctypes import c_char_p +from ctypes import c_int +from ctypes import c_ssize_t +from ctypes import c_ulong +from ctypes import c_void_p +from ctypes import POINTER +from ctypes import py_object +from ctypes import Structure +from ctypes.wintypes import DWORD +from ctypes.wintypes import HANDLE +from ctypes.wintypes import LPCWSTR +from ctypes.wintypes import LPWSTR + +from ._compat import _NonClosingTextIOWrapper + +assert sys.platform == "win32" +import msvcrt # noqa: E402 +from ctypes import windll # noqa: E402 +from ctypes import WINFUNCTYPE # noqa: E402 + +c_ssize_p = POINTER(c_ssize_t) + +kernel32 = windll.kernel32 +GetStdHandle = kernel32.GetStdHandle +ReadConsoleW = kernel32.ReadConsoleW +WriteConsoleW = kernel32.WriteConsoleW +GetConsoleMode = kernel32.GetConsoleMode +GetLastError = kernel32.GetLastError +GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32)) +CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( + ("CommandLineToArgvW", windll.shell32) +) +LocalFree = WINFUNCTYPE(c_void_p, c_void_p)(("LocalFree", windll.kernel32)) + +STDIN_HANDLE = GetStdHandle(-10) +STDOUT_HANDLE = GetStdHandle(-11) +STDERR_HANDLE = GetStdHandle(-12) + +PyBUF_SIMPLE = 0 +PyBUF_WRITABLE = 1 + +ERROR_SUCCESS = 0 +ERROR_NOT_ENOUGH_MEMORY = 8 +ERROR_OPERATION_ABORTED = 995 + +STDIN_FILENO = 0 +STDOUT_FILENO = 1 +STDERR_FILENO = 2 + +EOF = b"\x1a" +MAX_BYTES_WRITTEN = 32767 + +if t.TYPE_CHECKING: + try: + # Using `typing_extensions.Buffer` instead of `collections.abc` + # on Windows for some reason does not have `Sized` implemented. + from collections.abc import Buffer # type: ignore + except ImportError: + from typing_extensions import Buffer + +try: + from ctypes import pythonapi +except ImportError: + # On PyPy we cannot get buffers so our ability to operate here is + # severely limited. 
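+    # Leaving get_buffer as None makes _get_windows_console_stream below
+    # give up and fall back to the regular text stream machinery.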
+    get_buffer = None
+else:
+
+    class Py_buffer(Structure):
+        _fields_ = [  # noqa: RUF012
+            ("buf", c_void_p),
+            ("obj", py_object),
+            ("len", c_ssize_t),
+            ("itemsize", c_ssize_t),
+            ("readonly", c_int),
+            ("ndim", c_int),
+            ("format", c_char_p),
+            ("shape", c_ssize_p),
+            ("strides", c_ssize_p),
+            ("suboffsets", c_ssize_p),
+            ("internal", c_void_p),
+        ]
+
+    PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
+    PyBuffer_Release = pythonapi.PyBuffer_Release
+
+    def get_buffer(obj: Buffer, writable: bool = False) -> Array[c_char]:
+        buf = Py_buffer()
+        flags: int = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
+        PyObject_GetBuffer(py_object(obj), byref(buf), flags)
+
+        try:
+            buffer_type: Array[c_char] = c_char * buf.len
+            return buffer_type.from_address(buf.buf)  # type: ignore[attr-defined, no-any-return]
+        finally:
+            PyBuffer_Release(byref(buf))
+
+
+class _WindowsConsoleRawIOBase(io.RawIOBase):
+    def __init__(self, handle: int | None) -> None:
+        self.handle = handle
+
+    def isatty(self) -> t.Literal[True]:
+        super().isatty()
+        return True
+
+
+class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
+    def readable(self) -> t.Literal[True]:
+        return True
+
+    def readinto(self, b: Buffer) -> int:
+        bytes_to_be_read = len(b)
+        if not bytes_to_be_read:
+            return 0
+        elif bytes_to_be_read % 2:
+            raise ValueError(
+                "cannot read odd number of bytes from UTF-16-LE encoded console"
+            )
+
+        buffer = get_buffer(b, writable=True)
+        code_units_to_be_read = bytes_to_be_read // 2
+        code_units_read = c_ulong()
+
+        rv = ReadConsoleW(
+            HANDLE(self.handle),
+            buffer,
+            code_units_to_be_read,
+            byref(code_units_read),
+            None,
+        )
+        if GetLastError() == ERROR_OPERATION_ABORTED:
+            # wait for KeyboardInterrupt
+            time.sleep(0.1)
+        if not rv:
+            raise OSError(f"Windows error: {GetLastError()}")
+
+        if buffer[0] == EOF:
+            return 0
+        return 2 * code_units_read.value
+
+
+class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
+    def writable(self) -> t.Literal[True]:
+        return True
+
+    @staticmethod
+    def _get_error_message(errno: int) -> str:
+        if errno == ERROR_SUCCESS:
+            return "ERROR_SUCCESS"
+        elif errno == ERROR_NOT_ENOUGH_MEMORY:
+            return "ERROR_NOT_ENOUGH_MEMORY"
+        return f"Windows error {errno}"
+
+    def write(self, b: Buffer) -> int:
+        bytes_to_be_written = len(b)
+        buf = get_buffer(b)
+        code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
+        code_units_written = c_ulong()
+
+        WriteConsoleW(
+            HANDLE(self.handle),
+            buf,
+            code_units_to_be_written,
+            byref(code_units_written),
+            None,
+        )
+        bytes_written = 2 * code_units_written.value
+
+        if bytes_written == 0 and bytes_to_be_written > 0:
+            raise OSError(self._get_error_message(GetLastError()))
+        return bytes_written
+
+
+class ConsoleStream:
+    def __init__(self, text_stream: t.TextIO, byte_stream: t.BinaryIO) -> None:
+        self._text_stream = text_stream
+        self.buffer = byte_stream
+
+    @property
+    def name(self) -> str:
+        return self.buffer.name
+
+    def write(self, x: t.AnyStr) -> int:
+        if isinstance(x, str):
+            return self._text_stream.write(x)
+        try:
+            self.flush()
+        except Exception:
+            pass
+        return self.buffer.write(x)
+
+    def writelines(self, lines: cabc.Iterable[t.AnyStr]) -> None:
+        for line in lines:
+            self.write(line)
+
+    def __getattr__(self, name: str) -> t.Any:
+        return getattr(self._text_stream, name)
+
+    def isatty(self) -> bool:
+        return self.buffer.isatty()
+
+    def __repr__(self) -> str:
+        return f"<ConsoleStream name={self.name!r} encoding={self.encoding!r}>"
+
+
+def _get_text_stdin(buffer_stream: t.BinaryIO) -> t.TextIO:
+    text_stream = _NonClosingTextIOWrapper(
io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), + "utf-16-le", + "strict", + line_buffering=True, + ) + return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) + + +def _get_text_stdout(buffer_stream: t.BinaryIO) -> t.TextIO: + text_stream = _NonClosingTextIOWrapper( + io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)), + "utf-16-le", + "strict", + line_buffering=True, + ) + return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) + + +def _get_text_stderr(buffer_stream: t.BinaryIO) -> t.TextIO: + text_stream = _NonClosingTextIOWrapper( + io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)), + "utf-16-le", + "strict", + line_buffering=True, + ) + return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) + + +_stream_factories: cabc.Mapping[int, t.Callable[[t.BinaryIO], t.TextIO]] = { + 0: _get_text_stdin, + 1: _get_text_stdout, + 2: _get_text_stderr, +} + + +def _is_console(f: t.TextIO) -> bool: + if not hasattr(f, "fileno"): + return False + + try: + fileno = f.fileno() + except (OSError, io.UnsupportedOperation): + return False + + handle = msvcrt.get_osfhandle(fileno) + return bool(GetConsoleMode(handle, byref(DWORD()))) + + +def _get_windows_console_stream( + f: t.TextIO, encoding: str | None, errors: str | None +) -> t.TextIO | None: + if ( + get_buffer is None + or encoding not in {"utf-16-le", None} + or errors not in {"strict", None} + or not _is_console(f) + ): + return None + + func = _stream_factories.get(f.fileno()) + if func is None: + return None + + b = getattr(f, "buffer", None) + + if b is None: + return None + + return func(b) diff --git a/venv/Lib/site-packages/click/core.py b/venv/Lib/site-packages/click/core.py new file mode 100644 index 00000000..4745b533 --- /dev/null +++ b/venv/Lib/site-packages/click/core.py @@ -0,0 +1,3134 @@ +from __future__ import annotations + +import collections.abc as cabc +import enum +import errno +import inspect +import os +import sys +import typing as t +from collections import abc +from collections import Counter +from contextlib import AbstractContextManager +from contextlib import contextmanager +from contextlib import ExitStack +from functools import update_wrapper +from gettext import gettext as _ +from gettext import ngettext +from itertools import repeat +from types import TracebackType + +from . import types +from .exceptions import Abort +from .exceptions import BadParameter +from .exceptions import ClickException +from .exceptions import Exit +from .exceptions import MissingParameter +from .exceptions import NoArgsIsHelpError +from .exceptions import UsageError +from .formatting import HelpFormatter +from .formatting import join_options +from .globals import pop_context +from .globals import push_context +from .parser import _flag_needs_value +from .parser import _OptionParser +from .parser import _split_opt +from .termui import confirm +from .termui import prompt +from .termui import style +from .utils import _detect_program_name +from .utils import _expand_args +from .utils import echo +from .utils import make_default_short_help +from .utils import make_str +from .utils import PacifyFlushWrapper + +if t.TYPE_CHECKING: + from .shell_completion import CompletionItem + +F = t.TypeVar("F", bound="t.Callable[..., t.Any]") +V = t.TypeVar("V") + + +def _complete_visible_commands( + ctx: Context, incomplete: str +) -> cabc.Iterator[tuple[str, Command]]: + """List all the subcommands of a group that start with the + incomplete value and aren't hidden. 
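+    Used when computing shell completions for group subcommands.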
+ + :param ctx: Invocation context for the group. + :param incomplete: Value being completed. May be empty. + """ + multi = t.cast(Group, ctx.command) + + for name in multi.list_commands(ctx): + if name.startswith(incomplete): + command = multi.get_command(ctx, name) + + if command is not None and not command.hidden: + yield name, command + + +def _check_nested_chain( + base_command: Group, cmd_name: str, cmd: Command, register: bool = False +) -> None: + if not base_command.chain or not isinstance(cmd, Group): + return + + if register: + message = ( + f"It is not possible to add the group {cmd_name!r} to another" + f" group {base_command.name!r} that is in chain mode." + ) + else: + message = ( + f"Found the group {cmd_name!r} as subcommand to another group " + f" {base_command.name!r} that is in chain mode. This is not supported." + ) + + raise RuntimeError(message) + + +def batch(iterable: cabc.Iterable[V], batch_size: int) -> list[tuple[V, ...]]: + return list(zip(*repeat(iter(iterable), batch_size), strict=False)) + + +@contextmanager +def augment_usage_errors( + ctx: Context, param: Parameter | None = None +) -> cabc.Iterator[None]: + """Context manager that attaches extra information to exceptions.""" + try: + yield + except BadParameter as e: + if e.ctx is None: + e.ctx = ctx + if param is not None and e.param is None: + e.param = param + raise + except UsageError as e: + if e.ctx is None: + e.ctx = ctx + raise + + +def iter_params_for_processing( + invocation_order: cabc.Sequence[Parameter], + declaration_order: cabc.Sequence[Parameter], +) -> list[Parameter]: + """Returns all declared parameters in the order they should be processed. + + The declared parameters are re-shuffled depending on the order in which + they were invoked, as well as the eagerness of each parameters. + + The invocation order takes precedence over the declaration order. I.e. the + order in which the user provided them to the CLI is respected. + + This behavior and its effect on callback evaluation is detailed at: + https://click.palletsprojects.com/en/stable/advanced/#callback-evaluation-order + """ + + def sort_key(item: Parameter) -> tuple[bool, float]: + try: + idx: float = invocation_order.index(item) + except ValueError: + idx = float("inf") + + return not item.is_eager, idx + + return sorted(declaration_order, key=sort_key) + + +class ParameterSource(enum.Enum): + """This is an :class:`~enum.Enum` that indicates the source of a + parameter's value. + + Use :meth:`click.Context.get_parameter_source` to get the + source for a parameter by name. + + .. versionchanged:: 8.0 + Use :class:`~enum.Enum` and drop the ``validate`` method. + + .. versionchanged:: 8.0 + Added the ``PROMPT`` value. + """ + + COMMANDLINE = enum.auto() + """The value was provided by the command line args.""" + ENVIRONMENT = enum.auto() + """The value was provided with an environment variable.""" + DEFAULT = enum.auto() + """Used the default specified by the parameter.""" + DEFAULT_MAP = enum.auto() + """Used a default provided by :attr:`Context.default_map`.""" + PROMPT = enum.auto() + """Used a prompt to confirm a default or provide a value.""" + + +class Context: + """The context is a special internal object that holds state relevant + for the script execution at every single level. It's normally invisible + to commands unless they opt-in to getting access to it. + + The context is useful as it can pass internal objects around and can + control special execution features such as reading data from + environment variables. 
+ + A context can be used as context manager in which case it will call + :meth:`close` on teardown. + + :param command: the command class for this context. + :param parent: the parent context. + :param info_name: the info name for this invocation. Generally this + is the most descriptive name for the script or + command. For the toplevel script it is usually + the name of the script, for commands below it it's + the name of the script. + :param obj: an arbitrary object of user data. + :param auto_envvar_prefix: the prefix to use for automatic environment + variables. If this is `None` then reading + from environment variables is disabled. This + does not affect manually set environment + variables which are always read. + :param default_map: a dictionary (like object) with default values + for parameters. + :param terminal_width: the width of the terminal. The default is + inherit from parent context. If no context + defines the terminal width then auto + detection will be applied. + :param max_content_width: the maximum width for content rendered by + Click (this currently only affects help + pages). This defaults to 80 characters if + not overridden. In other words: even if the + terminal is larger than that, Click will not + format things wider than 80 characters by + default. In addition to that, formatters might + add some safety mapping on the right. + :param resilient_parsing: if this flag is enabled then Click will + parse without any interactivity or callback + invocation. Default values will also be + ignored. This is useful for implementing + things such as completion support. + :param allow_extra_args: if this is set to `True` then extra arguments + at the end will not raise an error and will be + kept on the context. The default is to inherit + from the command. + :param allow_interspersed_args: if this is set to `False` then options + and arguments cannot be mixed. The + default is to inherit from the command. + :param ignore_unknown_options: instructs click to ignore options it does + not know and keeps them for later + processing. + :param help_option_names: optionally a list of strings that define how + the default help parameter is named. The + default is ``['--help']``. + :param token_normalize_func: an optional function that is used to + normalize tokens (options, choices, + etc.). This for instance can be used to + implement case insensitive behavior. + :param color: controls if the terminal supports ANSI colors or not. The + default is autodetection. This is only needed if ANSI + codes are used in texts that Click prints which is by + default not the case. This for instance would affect + help output. + :param show_default: Show the default value for commands. If this + value is not set, it defaults to the value from the parent + context. ``Command.show_default`` overrides this default for the + specific command. + + .. versionchanged:: 8.2 + The ``protected_args`` attribute is deprecated and will be removed in + Click 9.0. ``args`` will contain remaining unparsed tokens. + + .. versionchanged:: 8.1 + The ``show_default`` parameter is overridden by + ``Command.show_default``, instead of the other way around. + + .. versionchanged:: 8.0 + The ``show_default`` parameter defaults to the value from the + parent context. + + .. versionchanged:: 7.1 + Added the ``show_default`` parameter. + + .. versionchanged:: 4.0 + Added the ``color``, ``ignore_unknown_options``, and + ``max_content_width`` parameters. + + .. 
versionchanged:: 3.0 + Added the ``allow_extra_args`` and ``allow_interspersed_args`` + parameters. + + .. versionchanged:: 2.0 + Added the ``resilient_parsing``, ``help_option_names``, and + ``token_normalize_func`` parameters. + """ + + #: The formatter class to create with :meth:`make_formatter`. + #: + #: .. versionadded:: 8.0 + formatter_class: type[HelpFormatter] = HelpFormatter + + def __init__( + self, + command: Command, + parent: Context | None = None, + info_name: str | None = None, + obj: t.Any | None = None, + auto_envvar_prefix: str | None = None, + default_map: cabc.MutableMapping[str, t.Any] | None = None, + terminal_width: int | None = None, + max_content_width: int | None = None, + resilient_parsing: bool = False, + allow_extra_args: bool | None = None, + allow_interspersed_args: bool | None = None, + ignore_unknown_options: bool | None = None, + help_option_names: list[str] | None = None, + token_normalize_func: t.Callable[[str], str] | None = None, + color: bool | None = None, + show_default: bool | None = None, + ) -> None: + #: the parent context or `None` if none exists. + self.parent = parent + #: the :class:`Command` for this context. + self.command = command + #: the descriptive information name + self.info_name = info_name + #: Map of parameter names to their parsed values. Parameters + #: with ``expose_value=False`` are not stored. + self.params: dict[str, t.Any] = {} + #: the leftover arguments. + self.args: list[str] = [] + #: protected arguments. These are arguments that are prepended + #: to `args` when certain parsing scenarios are encountered but + #: must be never propagated to another arguments. This is used + #: to implement nested parsing. + self._protected_args: list[str] = [] + #: the collected prefixes of the command's options. + self._opt_prefixes: set[str] = set(parent._opt_prefixes) if parent else set() + + if obj is None and parent is not None: + obj = parent.obj + + #: the user object stored. + self.obj: t.Any = obj + self._meta: dict[str, t.Any] = getattr(parent, "meta", {}) + + #: A dictionary (-like object) with defaults for parameters. + if ( + default_map is None + and info_name is not None + and parent is not None + and parent.default_map is not None + ): + default_map = parent.default_map.get(info_name) + + self.default_map: cabc.MutableMapping[str, t.Any] | None = default_map + + #: This flag indicates if a subcommand is going to be executed. A + #: group callback can use this information to figure out if it's + #: being executed directly or because the execution flow passes + #: onwards to a subcommand. By default it's None, but it can be + #: the name of the subcommand to execute. + #: + #: If chaining is enabled this will be set to ``'*'`` in case + #: any commands are executed. It is however not possible to + #: figure out which ones. If you require this knowledge you + #: should use a :func:`result_callback`. + self.invoked_subcommand: str | None = None + + if terminal_width is None and parent is not None: + terminal_width = parent.terminal_width + + #: The width of the terminal (None is autodetection). + self.terminal_width: int | None = terminal_width + + if max_content_width is None and parent is not None: + max_content_width = parent.max_content_width + + #: The maximum width of formatted content (None implies a sensible + #: default which is 80 for most things). 
+ self.max_content_width: int | None = max_content_width + + if allow_extra_args is None: + allow_extra_args = command.allow_extra_args + + #: Indicates if the context allows extra args or if it should + #: fail on parsing. + #: + #: .. versionadded:: 3.0 + self.allow_extra_args = allow_extra_args + + if allow_interspersed_args is None: + allow_interspersed_args = command.allow_interspersed_args + + #: Indicates if the context allows mixing of arguments and + #: options or not. + #: + #: .. versionadded:: 3.0 + self.allow_interspersed_args: bool = allow_interspersed_args + + if ignore_unknown_options is None: + ignore_unknown_options = command.ignore_unknown_options + + #: Instructs click to ignore options that a command does not + #: understand and will store it on the context for later + #: processing. This is primarily useful for situations where you + #: want to call into external programs. Generally this pattern is + #: strongly discouraged because it's not possibly to losslessly + #: forward all arguments. + #: + #: .. versionadded:: 4.0 + self.ignore_unknown_options: bool = ignore_unknown_options + + if help_option_names is None: + if parent is not None: + help_option_names = parent.help_option_names + else: + help_option_names = ["--help"] + + #: The names for the help options. + self.help_option_names: list[str] = help_option_names + + if token_normalize_func is None and parent is not None: + token_normalize_func = parent.token_normalize_func + + #: An optional normalization function for tokens. This is + #: options, choices, commands etc. + self.token_normalize_func: t.Callable[[str], str] | None = token_normalize_func + + #: Indicates if resilient parsing is enabled. In that case Click + #: will do its best to not cause any failures and default values + #: will be ignored. Useful for completion. + self.resilient_parsing: bool = resilient_parsing + + # If there is no envvar prefix yet, but the parent has one and + # the command on this level has a name, we can expand the envvar + # prefix automatically. + if auto_envvar_prefix is None: + if ( + parent is not None + and parent.auto_envvar_prefix is not None + and self.info_name is not None + ): + auto_envvar_prefix = ( + f"{parent.auto_envvar_prefix}_{self.info_name.upper()}" + ) + else: + auto_envvar_prefix = auto_envvar_prefix.upper() + + if auto_envvar_prefix is not None: + auto_envvar_prefix = auto_envvar_prefix.replace("-", "_") + + self.auto_envvar_prefix: str | None = auto_envvar_prefix + + if color is None and parent is not None: + color = parent.color + + #: Controls if styling output is wanted or not. + self.color: bool | None = color + + if show_default is None and parent is not None: + show_default = parent.show_default + + #: Show option default values when formatting help text. + self.show_default: bool | None = show_default + + self._close_callbacks: list[t.Callable[[], t.Any]] = [] + self._depth = 0 + self._parameter_source: dict[str, ParameterSource] = {} + self._exit_stack = ExitStack() + + @property + def protected_args(self) -> list[str]: + import warnings + + warnings.warn( + "'protected_args' is deprecated and will be removed in Click 9.0." + " 'args' will contain remaining unparsed tokens.", + DeprecationWarning, + stacklevel=2, + ) + return self._protected_args + + def to_info_dict(self) -> dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. This traverses the entire CLI + structure. + + .. 
code-block:: python + + with Context(cli) as ctx: + info = ctx.to_info_dict() + + .. versionadded:: 8.0 + """ + return { + "command": self.command.to_info_dict(self), + "info_name": self.info_name, + "allow_extra_args": self.allow_extra_args, + "allow_interspersed_args": self.allow_interspersed_args, + "ignore_unknown_options": self.ignore_unknown_options, + "auto_envvar_prefix": self.auto_envvar_prefix, + } + + def __enter__(self) -> Context: + self._depth += 1 + push_context(self) + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> None: + self._depth -= 1 + if self._depth == 0: + self.close() + pop_context() + + @contextmanager + def scope(self, cleanup: bool = True) -> cabc.Iterator[Context]: + """This helper method can be used with the context object to promote + it to the current thread local (see :func:`get_current_context`). + The default behavior of this is to invoke the cleanup functions which + can be disabled by setting `cleanup` to `False`. The cleanup + functions are typically used for things such as closing file handles. + + If the cleanup is intended the context object can also be directly + used as a context manager. + + Example usage:: + + with ctx.scope(): + assert get_current_context() is ctx + + This is equivalent:: + + with ctx: + assert get_current_context() is ctx + + .. versionadded:: 5.0 + + :param cleanup: controls if the cleanup functions should be run or + not. The default is to run these functions. In + some situations the context only wants to be + temporarily pushed in which case this can be disabled. + Nested pushes automatically defer the cleanup. + """ + if not cleanup: + self._depth += 1 + try: + with self as rv: + yield rv + finally: + if not cleanup: + self._depth -= 1 + + @property + def meta(self) -> dict[str, t.Any]: + """This is a dictionary which is shared with all the contexts + that are nested. It exists so that click utilities can store some + state here if they need to. It is however the responsibility of + that code to manage this dictionary well. + + The keys are supposed to be unique dotted strings. For instance + module paths are a good choice for it. What is stored in there is + irrelevant for the operation of click. However what is important is + that code that places data here adheres to the general semantics of + the system. + + Example usage:: + + LANG_KEY = f'{__name__}.lang' + + def set_language(value): + ctx = get_current_context() + ctx.meta[LANG_KEY] = value + + def get_language(): + return get_current_context().meta.get(LANG_KEY, 'en_US') + + .. versionadded:: 5.0 + """ + return self._meta + + def make_formatter(self) -> HelpFormatter: + """Creates the :class:`~click.HelpFormatter` for the help and + usage output. + + To quickly customize the formatter class used without overriding + this method, set the :attr:`formatter_class` attribute. + + .. versionchanged:: 8.0 + Added the :attr:`formatter_class` attribute. + """ + return self.formatter_class( + width=self.terminal_width, max_width=self.max_content_width + ) + + def with_resource(self, context_manager: AbstractContextManager[V]) -> V: + """Register a resource as if it were used in a ``with`` + statement. The resource will be cleaned up when the context is + popped. + + Uses :meth:`contextlib.ExitStack.enter_context`. It calls the + resource's ``__enter__()`` method and returns the result. 
When + the context is popped, it closes the stack, which calls the + resource's ``__exit__()`` method. + + To register a cleanup function for something that isn't a + context manager, use :meth:`call_on_close`. Or use something + from :mod:`contextlib` to turn it into a context manager first. + + .. code-block:: python + + @click.group() + @click.option("--name") + @click.pass_context + def cli(ctx): + ctx.obj = ctx.with_resource(connect_db(name)) + + :param context_manager: The context manager to enter. + :return: Whatever ``context_manager.__enter__()`` returns. + + .. versionadded:: 8.0 + """ + return self._exit_stack.enter_context(context_manager) + + def call_on_close(self, f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: + """Register a function to be called when the context tears down. + + This can be used to close resources opened during the script + execution. Resources that support Python's context manager + protocol which would be used in a ``with`` statement should be + registered with :meth:`with_resource` instead. + + :param f: The function to execute on teardown. + """ + return self._exit_stack.callback(f) + + def close(self) -> None: + """Invoke all close callbacks registered with + :meth:`call_on_close`, and exit all context managers entered + with :meth:`with_resource`. + """ + self._exit_stack.close() + # In case the context is reused, create a new exit stack. + self._exit_stack = ExitStack() + + @property + def command_path(self) -> str: + """The computed command path. This is used for the ``usage`` + information on the help page. It's automatically created by + combining the info names of the chain of contexts to the root. + """ + rv = "" + if self.info_name is not None: + rv = self.info_name + if self.parent is not None: + parent_command_path = [self.parent.command_path] + + if isinstance(self.parent.command, Command): + for param in self.parent.command.get_params(self): + parent_command_path.extend(param.get_usage_pieces(self)) + + rv = f"{' '.join(parent_command_path)} {rv}" + return rv.lstrip() + + def find_root(self) -> Context: + """Finds the outermost context.""" + node = self + while node.parent is not None: + node = node.parent + return node + + def find_object(self, object_type: type[V]) -> V | None: + """Finds the closest object of a given type.""" + node: Context | None = self + + while node is not None: + if isinstance(node.obj, object_type): + return node.obj + + node = node.parent + + return None + + def ensure_object(self, object_type: type[V]) -> V: + """Like :meth:`find_object` but sets the innermost object to a + new instance of `object_type` if it does not exist. + """ + rv = self.find_object(object_type) + if rv is None: + self.obj = rv = object_type() + return rv + + @t.overload + def lookup_default( + self, name: str, call: t.Literal[True] = True + ) -> t.Any | None: ... + + @t.overload + def lookup_default( + self, name: str, call: t.Literal[False] = ... + ) -> t.Any | t.Callable[[], t.Any] | None: ... + + def lookup_default(self, name: str, call: bool = True) -> t.Any | None: + """Get the default for a parameter from :attr:`default_map`. + + :param name: Name of the parameter. + :param call: If the default is a callable, call it. Disable to + return the callable instead. + + .. versionchanged:: 8.0 + Added the ``call`` parameter. 
+ """ + if self.default_map is not None: + value = self.default_map.get(name) + + if call and callable(value): + return value() + + return value + + return None + + def fail(self, message: str) -> t.NoReturn: + """Aborts the execution of the program with a specific error + message. + + :param message: the error message to fail with. + """ + raise UsageError(message, self) + + def abort(self) -> t.NoReturn: + """Aborts the script.""" + raise Abort() + + def exit(self, code: int = 0) -> t.NoReturn: + """Exits the application with a given exit code. + + .. versionchanged:: 8.2 + Callbacks and context managers registered with :meth:`call_on_close` + and :meth:`with_resource` are closed before exiting. + """ + self.close() + raise Exit(code) + + def get_usage(self) -> str: + """Helper method to get formatted usage string for the current + context and command. + """ + return self.command.get_usage(self) + + def get_help(self) -> str: + """Helper method to get formatted help page for the current + context and command. + """ + return self.command.get_help(self) + + def _make_sub_context(self, command: Command) -> Context: + """Create a new context of the same type as this context, but + for a new command. + + :meta private: + """ + return type(self)(command, info_name=command.name, parent=self) + + @t.overload + def invoke( + self, callback: t.Callable[..., V], /, *args: t.Any, **kwargs: t.Any + ) -> V: ... + + @t.overload + def invoke(self, callback: Command, /, *args: t.Any, **kwargs: t.Any) -> t.Any: ... + + def invoke( + self, callback: Command | t.Callable[..., V], /, *args: t.Any, **kwargs: t.Any + ) -> t.Any | V: + """Invokes a command callback in exactly the way it expects. There + are two ways to invoke this method: + + 1. the first argument can be a callback and all other arguments and + keyword arguments are forwarded directly to the function. + 2. the first argument is a click command object. In that case all + arguments are forwarded as well but proper click parameters + (options and click arguments) must be keyword arguments and Click + will fill in defaults. + + .. versionchanged:: 8.0 + All ``kwargs`` are tracked in :attr:`params` so they will be + passed if :meth:`forward` is called at multiple levels. + + .. versionchanged:: 3.2 + A new context is created, and missing arguments use default values. + """ + if isinstance(callback, Command): + other_cmd = callback + + if other_cmd.callback is None: + raise TypeError( + "The given command does not have a callback that can be invoked." + ) + else: + callback = t.cast("t.Callable[..., V]", other_cmd.callback) + + ctx = self._make_sub_context(other_cmd) + + for param in other_cmd.params: + if param.name not in kwargs and param.expose_value: + kwargs[param.name] = param.type_cast_value( # type: ignore + ctx, param.get_default(ctx) + ) + + # Track all kwargs as params, so that forward() will pass + # them on in subsequent calls. + ctx.params.update(kwargs) + else: + ctx = self + + with augment_usage_errors(self): + with ctx: + return callback(*args, **kwargs) + + def forward(self, cmd: Command, /, *args: t.Any, **kwargs: t.Any) -> t.Any: + """Similar to :meth:`invoke` but fills in default keyword + arguments from the current context if the other command expects + it. This cannot invoke callbacks directly, only other commands. + + .. versionchanged:: 8.0 + All ``kwargs`` are tracked in :attr:`params` so they will be + passed if ``forward`` is called at multiple levels. + """ + # Can only forward to other commands, not direct callbacks. 
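+        # A sketch of typical use (hypothetical commands): a callback that
+        # received ``count`` can hand off with ``ctx.forward(other_cmd)``;
+        # any of other_cmd's parameters already present in ``self.params``
+        # are filled in below without being passed again.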
+ if not isinstance(cmd, Command): + raise TypeError("Callback is not a command.") + + for param in self.params: + if param not in kwargs: + kwargs[param] = self.params[param] + + return self.invoke(cmd, *args, **kwargs) + + def set_parameter_source(self, name: str, source: ParameterSource) -> None: + """Set the source of a parameter. This indicates the location + from which the value of the parameter was obtained. + + :param name: The name of the parameter. + :param source: A member of :class:`~click.core.ParameterSource`. + """ + self._parameter_source[name] = source + + def get_parameter_source(self, name: str) -> ParameterSource | None: + """Get the source of a parameter. This indicates the location + from which the value of the parameter was obtained. + + This can be useful for determining when a user specified a value + on the command line that is the same as the default value. It + will be :attr:`~click.core.ParameterSource.DEFAULT` only if the + value was actually taken from the default. + + :param name: The name of the parameter. + :rtype: ParameterSource + + .. versionchanged:: 8.0 + Returns ``None`` if the parameter was not provided from any + source. + """ + return self._parameter_source.get(name) + + +class Command: + """Commands are the basic building block of command line interfaces in + Click. A basic command handles command line parsing and might dispatch + more parsing to commands nested below it. + + :param name: the name of the command to use unless a group overrides it. + :param context_settings: an optional dictionary with defaults that are + passed to the context object. + :param callback: the callback to invoke. This is optional. + :param params: the parameters to register with this command. This can + be either :class:`Option` or :class:`Argument` objects. + :param help: the help string to use for this command. + :param epilog: like the help string but it's printed at the end of the + help page after everything else. + :param short_help: the short help to use for this command. This is + shown on the command listing of the parent command. + :param add_help_option: by default each command registers a ``--help`` + option. This can be disabled by this parameter. + :param no_args_is_help: this controls what happens if no arguments are + provided. This option is disabled by default. + If enabled this will add ``--help`` as argument + if no arguments are passed + :param hidden: hide this command from help outputs. + :param deprecated: If ``True`` or non-empty string, issues a message + indicating that the command is deprecated and highlights + its deprecation in --help. The message can be customized + by using a string as the value. + + .. versionchanged:: 8.2 + This is the base class for all commands, not ``BaseCommand``. + ``deprecated`` can be set to a string as well to customize the + deprecation message. + + .. versionchanged:: 8.1 + ``help``, ``epilog``, and ``short_help`` are stored unprocessed, + all formatting is done when outputting help text, not at init, + and is done even if not using the ``@command`` decorator. + + .. versionchanged:: 8.0 + Added a ``repr`` showing the command name. + + .. versionchanged:: 7.1 + Added the ``no_args_is_help`` parameter. + + .. versionchanged:: 2.0 + Added the ``context_settings`` parameter. + """ + + #: The context class to create with :meth:`make_context`. + #: + #: .. versionadded:: 8.0 + context_class: type[Context] = Context + + #: the default for the :attr:`Context.allow_extra_args` flag. 
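+    #: Subclasses such as :class:`Group` override this default (``Group``
+    #: sets it to ``True``) to change how extra arguments are handled.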
+ allow_extra_args = False + + #: the default for the :attr:`Context.allow_interspersed_args` flag. + allow_interspersed_args = True + + #: the default for the :attr:`Context.ignore_unknown_options` flag. + ignore_unknown_options = False + + def __init__( + self, + name: str | None, + context_settings: cabc.MutableMapping[str, t.Any] | None = None, + callback: t.Callable[..., t.Any] | None = None, + params: list[Parameter] | None = None, + help: str | None = None, + epilog: str | None = None, + short_help: str | None = None, + options_metavar: str | None = "[OPTIONS]", + add_help_option: bool = True, + no_args_is_help: bool = False, + hidden: bool = False, + deprecated: bool | str = False, + ) -> None: + #: the name the command thinks it has. Upon registering a command + #: on a :class:`Group` the group will default the command name + #: with this information. You should instead use the + #: :class:`Context`\'s :attr:`~Context.info_name` attribute. + self.name = name + + if context_settings is None: + context_settings = {} + + #: an optional dictionary with defaults passed to the context. + self.context_settings: cabc.MutableMapping[str, t.Any] = context_settings + + #: the callback to execute when the command fires. This might be + #: `None` in which case nothing happens. + self.callback = callback + #: the list of parameters for this command in the order they + #: should show up in the help page and execute. Eager parameters + #: will automatically be handled before non eager ones. + self.params: list[Parameter] = params or [] + self.help = help + self.epilog = epilog + self.options_metavar = options_metavar + self.short_help = short_help + self.add_help_option = add_help_option + self._help_option = None + self.no_args_is_help = no_args_is_help + self.hidden = hidden + self.deprecated = deprecated + + def to_info_dict(self, ctx: Context) -> dict[str, t.Any]: + return { + "name": self.name, + "params": [param.to_info_dict() for param in self.get_params(ctx)], + "help": self.help, + "epilog": self.epilog, + "short_help": self.short_help, + "hidden": self.hidden, + "deprecated": self.deprecated, + } + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.name}>" + + def get_usage(self, ctx: Context) -> str: + """Formats the usage line into a string and returns it. + + Calls :meth:`format_usage` internally. + """ + formatter = ctx.make_formatter() + self.format_usage(ctx, formatter) + return formatter.getvalue().rstrip("\n") + + def get_params(self, ctx: Context) -> list[Parameter]: + params = self.params + help_option = self.get_help_option(ctx) + + if help_option is not None: + params = [*params, help_option] + + if __debug__: + import warnings + + opts = [opt for param in params for opt in param.opts] + opts_counter = Counter(opts) + duplicate_opts = (opt for opt, count in opts_counter.items() if count > 1) + + for duplicate_opt in duplicate_opts: + warnings.warn( + ( + f"The parameter {duplicate_opt} is used more than once. " + "Remove its duplicate as parameters should be unique." + ), + stacklevel=3, + ) + + return params + + def format_usage(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the usage line into the formatter. + + This is a low-level method called by :meth:`get_usage`. + """ + pieces = self.collect_usage_pieces(ctx) + formatter.write_usage(ctx.command_path, " ".join(pieces)) + + def collect_usage_pieces(self, ctx: Context) -> list[str]: + """Returns all the pieces that go into the usage line and returns + it as a list of strings. 
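+
+        For a command with two required arguments this might yield
+        (illustrative)::
+
+            ['[OPTIONS]', 'SRC', 'DST']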
+        """
+        rv = [self.options_metavar] if self.options_metavar else []
+
+        for param in self.get_params(ctx):
+            rv.extend(param.get_usage_pieces(ctx))
+
+        return rv
+
+    def get_help_option_names(self, ctx: Context) -> list[str]:
+        """Returns the names for the help option."""
+        all_names = set(ctx.help_option_names)
+        for param in self.params:
+            all_names.difference_update(param.opts)
+            all_names.difference_update(param.secondary_opts)
+        return list(all_names)
+
+    def get_help_option(self, ctx: Context) -> Option | None:
+        """Returns the help option object.
+
+        Skipped if :attr:`add_help_option` is ``False``.
+
+        .. versionchanged:: 8.1.8
+            The help option is now cached to avoid creating it multiple times.
+        """
+        help_option_names = self.get_help_option_names(ctx)
+
+        if not help_option_names or not self.add_help_option:
+            return None
+
+        # Cache the help option object in the private _help_option attribute
+        # to avoid creating it multiple times. Not doing this will break the
+        # callback ordering by iter_params_for_processing(), which relies on
+        # object comparison.
+        if self._help_option is None:
+            # Avoid circular import.
+            from .decorators import help_option
+
+            # Apply the help_option decorator and pop the resulting option.
+            help_option(*help_option_names)(self)
+            self._help_option = self.params.pop()  # type: ignore[assignment]
+
+        return self._help_option
+
+    def make_parser(self, ctx: Context) -> _OptionParser:
+        """Creates the underlying option parser for this command."""
+        parser = _OptionParser(ctx)
+        for param in self.get_params(ctx):
+            param.add_to_parser(parser, ctx)
+        return parser
+
+    def get_help(self, ctx: Context) -> str:
+        """Formats the help into a string and returns it.
+
+        Calls :meth:`format_help` internally.
+        """
+        formatter = ctx.make_formatter()
+        self.format_help(ctx, formatter)
+        return formatter.getvalue().rstrip("\n")
+
+    def get_short_help_str(self, limit: int = 45) -> str:
+        """Gets short help for the command or makes it by shortening the
+        long help string.
+        """
+        if self.short_help:
+            text = inspect.cleandoc(self.short_help)
+        elif self.help:
+            text = make_default_short_help(self.help, limit)
+        else:
+            text = ""
+
+        if self.deprecated:
+            deprecated_message = (
+                f"(DEPRECATED: {self.deprecated})"
+                if isinstance(self.deprecated, str)
+                else "(DEPRECATED)"
+            )
+            text = _("{text} {deprecated_message}").format(
+                text=text, deprecated_message=deprecated_message
+            )
+
+        return text.strip()
+
+    def format_help(self, ctx: Context, formatter: HelpFormatter) -> None:
+        """Writes the help into the formatter if it exists.
+
+        This is a low-level method called by :meth:`get_help`.
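+
+        To customize a single section, override one of the ``format_*``
+        hooks it calls; a minimal sketch (hypothetical subclass)::
+
+            class BannerCommand(Command):
+                def format_help_text(self, ctx, formatter):
+                    formatter.write_text("*** my tool ***")
+                    super().format_help_text(ctx, formatter)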
+ + This calls the following methods: + + - :meth:`format_usage` + - :meth:`format_help_text` + - :meth:`format_options` + - :meth:`format_epilog` + """ + self.format_usage(ctx, formatter) + self.format_help_text(ctx, formatter) + self.format_options(ctx, formatter) + self.format_epilog(ctx, formatter) + + def format_help_text(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the help text to the formatter if it exists.""" + if self.help is not None: + # truncate the help text to the first form feed + text = inspect.cleandoc(self.help).partition("\f")[0] + else: + text = "" + + if self.deprecated: + deprecated_message = ( + f"(DEPRECATED: {self.deprecated})" + if isinstance(self.deprecated, str) + else "(DEPRECATED)" + ) + text = _("{text} {deprecated_message}").format( + text=text, deprecated_message=deprecated_message + ) + + if text: + formatter.write_paragraph() + + with formatter.indentation(): + formatter.write_text(text) + + def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes all the options into the formatter if they exist.""" + opts = [] + for param in self.get_params(ctx): + rv = param.get_help_record(ctx) + if rv is not None: + opts.append(rv) + + if opts: + with formatter.section(_("Options")): + formatter.write_dl(opts) + + def format_epilog(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the epilog into the formatter if it exists.""" + if self.epilog: + epilog = inspect.cleandoc(self.epilog) + formatter.write_paragraph() + + with formatter.indentation(): + formatter.write_text(epilog) + + def make_context( + self, + info_name: str | None, + args: list[str], + parent: Context | None = None, + **extra: t.Any, + ) -> Context: + """This function when given an info name and arguments will kick + off the parsing and create a new :class:`Context`. It does not + invoke the actual command callback though. + + To quickly customize the context class used without overriding + this method, set the :attr:`context_class` attribute. + + :param info_name: the info name for this invocation. Generally this + is the most descriptive name for the script or + command. For the toplevel script it's usually + the name of the script, for commands below it's + the name of the command. + :param args: the arguments to parse as list of strings. + :param parent: the parent context if available. + :param extra: extra keyword arguments forwarded to the context + constructor. + + .. versionchanged:: 8.0 + Added the :attr:`context_class` attribute. 
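+
+        A minimal sketch of manual invocation (hypothetical command and
+        arguments)::
+
+            with cli.make_context("cli", ["--debug", "sync"]) as ctx:
+                cli.invoke(ctx)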
+ """ + for key, value in self.context_settings.items(): + if key not in extra: + extra[key] = value + + ctx = self.context_class(self, info_name=info_name, parent=parent, **extra) + + with ctx.scope(cleanup=False): + self.parse_args(ctx, args) + return ctx + + def parse_args(self, ctx: Context, args: list[str]) -> list[str]: + if not args and self.no_args_is_help and not ctx.resilient_parsing: + raise NoArgsIsHelpError(ctx) + + parser = self.make_parser(ctx) + opts, args, param_order = parser.parse_args(args=args) + + for param in iter_params_for_processing(param_order, self.get_params(ctx)): + value, args = param.handle_parse_result(ctx, opts, args) + + if args and not ctx.allow_extra_args and not ctx.resilient_parsing: + ctx.fail( + ngettext( + "Got unexpected extra argument ({args})", + "Got unexpected extra arguments ({args})", + len(args), + ).format(args=" ".join(map(str, args))) + ) + + ctx.args = args + ctx._opt_prefixes.update(parser._opt_prefixes) + return args + + def invoke(self, ctx: Context) -> t.Any: + """Given a context, this invokes the attached callback (if it exists) + in the right way. + """ + if self.deprecated: + extra_message = ( + f" {self.deprecated}" if isinstance(self.deprecated, str) else "" + ) + message = _( + "DeprecationWarning: The command {name!r} is deprecated.{extra_message}" + ).format(name=self.name, extra_message=extra_message) + echo(style(message, fg="red"), err=True) + + if self.callback is not None: + return ctx.invoke(self.callback, **ctx.params) + + def shell_complete(self, ctx: Context, incomplete: str) -> list[CompletionItem]: + """Return a list of completions for the incomplete value. Looks + at the names of options and chained multi-commands. + + Any command could be part of a chained multi-command, so sibling + commands are valid at any point during command completion. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + results: list[CompletionItem] = [] + + if incomplete and not incomplete[0].isalnum(): + for param in self.get_params(ctx): + if ( + not isinstance(param, Option) + or param.hidden + or ( + not param.multiple + and ctx.get_parameter_source(param.name) # type: ignore + is ParameterSource.COMMANDLINE + ) + ): + continue + + results.extend( + CompletionItem(name, help=param.help) + for name in [*param.opts, *param.secondary_opts] + if name.startswith(incomplete) + ) + + while ctx.parent is not None: + ctx = ctx.parent + + if isinstance(ctx.command, Group) and ctx.command.chain: + results.extend( + CompletionItem(name, help=command.get_short_help_str()) + for name, command in _complete_visible_commands(ctx, incomplete) + if name not in ctx._protected_args + ) + + return results + + @t.overload + def main( + self, + args: cabc.Sequence[str] | None = None, + prog_name: str | None = None, + complete_var: str | None = None, + standalone_mode: t.Literal[True] = True, + **extra: t.Any, + ) -> t.NoReturn: ... + + @t.overload + def main( + self, + args: cabc.Sequence[str] | None = None, + prog_name: str | None = None, + complete_var: str | None = None, + standalone_mode: bool = ..., + **extra: t.Any, + ) -> t.Any: ... 
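+
+    # A sketch of non-standalone use (illustrative): exceptions propagate
+    # to the caller and the return value of invoke() is returned instead
+    # of the interpreter being shut down:
+    #
+    #     rv = cli.main(["--name", "x"], standalone_mode=False)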
+
+    def main(
+        self,
+        args: cabc.Sequence[str] | None = None,
+        prog_name: str | None = None,
+        complete_var: str | None = None,
+        standalone_mode: bool = True,
+        windows_expand_args: bool = True,
+        **extra: t.Any,
+    ) -> t.Any:
+        """This is the way to invoke a script with all the bells and
+        whistles as a command line application. This will always terminate
+        the application after a call. If this is not wanted, ``SystemExit``
+        needs to be caught.
+
+        This method is also available by directly calling the instance of
+        a :class:`Command`.
+
+        :param args: the arguments that should be used for parsing. If not
+                     provided, ``sys.argv[1:]`` is used.
+        :param prog_name: the program name that should be used. By default
+                          the program name is constructed by taking the file
+                          name from ``sys.argv[0]``.
+        :param complete_var: the environment variable that controls the
+                             bash completion support. The default is
+                             ``"_<prog_name>_COMPLETE"`` with prog_name in
+                             uppercase.
+        :param standalone_mode: the default behavior is to invoke the script
+                                in standalone mode. Click will then
+                                handle exceptions and convert them into
+                                error messages and the function will never
+                                return but shut down the interpreter. If
+                                this is set to `False` they will be
+                                propagated to the caller and the return
+                                value of this function is the return value
+                                of :meth:`invoke`.
+        :param windows_expand_args: Expand glob patterns, user dir, and
+            env vars in command line args on Windows.
+        :param extra: extra keyword arguments are forwarded to the context
+                      constructor. See :class:`Context` for more information.
+
+        .. versionchanged:: 8.0.1
+            Added the ``windows_expand_args`` parameter to allow
+            disabling command line arg expansion on Windows.
+
+        .. versionchanged:: 8.0
+            When taking arguments from ``sys.argv`` on Windows, glob
+            patterns, user dir, and env vars are expanded.
+
+        .. versionchanged:: 3.0
+            Added the ``standalone_mode`` parameter.
+        """
+        if args is None:
+            args = sys.argv[1:]
+
+            if os.name == "nt" and windows_expand_args:
+                args = _expand_args(args)
+        else:
+            args = list(args)
+
+        if prog_name is None:
+            prog_name = _detect_program_name()
+
+        # Process shell completion requests and exit early.
+        self._main_shell_completion(extra, prog_name, complete_var)
+
+        try:
+            try:
+                with self.make_context(prog_name, args, **extra) as ctx:
+                    rv = self.invoke(ctx)
+                    if not standalone_mode:
+                        return rv
+                    # it's not safe to `ctx.exit(rv)` here!
+ # note that `rv` may actually contain data like "1" which + # has obvious effects + # more subtle case: `rv=[None, None]` can come out of + # chained commands which all returned `None` -- so it's not + # even always obvious that `rv` indicates success/failure + # by its truthiness/falsiness + ctx.exit() + except (EOFError, KeyboardInterrupt) as e: + echo(file=sys.stderr) + raise Abort() from e + except ClickException as e: + if not standalone_mode: + raise + e.show() + sys.exit(e.exit_code) + except OSError as e: + if e.errno == errno.EPIPE: + sys.stdout = t.cast(t.TextIO, PacifyFlushWrapper(sys.stdout)) + sys.stderr = t.cast(t.TextIO, PacifyFlushWrapper(sys.stderr)) + sys.exit(1) + else: + raise + except Exit as e: + if standalone_mode: + sys.exit(e.exit_code) + else: + # in non-standalone mode, return the exit code + # note that this is only reached if `self.invoke` above raises + # an Exit explicitly -- thus bypassing the check there which + # would return its result + # the results of non-standalone execution may therefore be + # somewhat ambiguous: if there are codepaths which lead to + # `ctx.exit(1)` and to `return 1`, the caller won't be able to + # tell the difference between the two + return e.exit_code + except Abort: + if not standalone_mode: + raise + echo(_("Aborted!"), file=sys.stderr) + sys.exit(1) + + def _main_shell_completion( + self, + ctx_args: cabc.MutableMapping[str, t.Any], + prog_name: str, + complete_var: str | None = None, + ) -> None: + """Check if the shell is asking for tab completion, process + that, then exit early. Called from :meth:`main` before the + program is invoked. + + :param prog_name: Name of the executable in the shell. + :param complete_var: Name of the environment variable that holds + the completion instruction. Defaults to + ``_{PROG_NAME}_COMPLETE``. + + .. versionchanged:: 8.2.0 + Dots (``.``) in ``prog_name`` are replaced with underscores (``_``). + """ + if complete_var is None: + complete_name = prog_name.replace("-", "_").replace(".", "_") + complete_var = f"_{complete_name}_COMPLETE".upper() + + instruction = os.environ.get(complete_var) + + if not instruction: + return + + from .shell_completion import shell_complete + + rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction) + sys.exit(rv) + + def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any: + """Alias for :meth:`main`.""" + return self.main(*args, **kwargs) + + +class _FakeSubclassCheck(type): + def __subclasscheck__(cls, subclass: type) -> bool: + return issubclass(subclass, cls.__bases__[0]) + + def __instancecheck__(cls, instance: t.Any) -> bool: + return isinstance(instance, cls.__bases__[0]) + + +class _BaseCommand(Command, metaclass=_FakeSubclassCheck): + """ + .. deprecated:: 8.2 + Will be removed in Click 9.0. Use ``Command`` instead. + """ + + +class Group(Command): + """A group is a command that nests other commands (or more groups). + + :param name: The name of the group command. + :param commands: Map names to :class:`Command` objects. Can be a list, which + will use :attr:`Command.name` as the keys. + :param invoke_without_command: Invoke the group's callback even if a + subcommand is not given. + :param no_args_is_help: If no arguments are given, show the group's help and + exit. Defaults to the opposite of ``invoke_without_command``. + :param subcommand_metavar: How to represent the subcommand argument in help. + The default will represent whether ``chain`` is set or not. + :param chain: Allow passing more than one subcommand argument. 
After parsing + a command's arguments, if any arguments remain another command will be + matched, and so on. + :param result_callback: A function to call after the group's and + subcommand's callbacks. The value returned by the subcommand is passed. + If ``chain`` is enabled, the value will be a list of values returned by + all the commands. If ``invoke_without_command`` is enabled, the value + will be the value returned by the group's callback, or an empty list if + ``chain`` is enabled. + :param kwargs: Other arguments passed to :class:`Command`. + + .. versionchanged:: 8.0 + The ``commands`` argument can be a list of command objects. + + .. versionchanged:: 8.2 + Merged with and replaces the ``MultiCommand`` base class. + """ + + allow_extra_args = True + allow_interspersed_args = False + + #: If set, this is used by the group's :meth:`command` decorator + #: as the default :class:`Command` class. This is useful to make all + #: subcommands use a custom command class. + #: + #: .. versionadded:: 8.0 + command_class: type[Command] | None = None + + #: If set, this is used by the group's :meth:`group` decorator + #: as the default :class:`Group` class. This is useful to make all + #: subgroups use a custom group class. + #: + #: If set to the special value :class:`type` (literally + #: ``group_class = type``), this group's class will be used as the + #: default class. This makes a custom group class continue to make + #: custom groups. + #: + #: .. versionadded:: 8.0 + group_class: type[Group] | type[type] | None = None + # Literal[type] isn't valid, so use Type[type] + + def __init__( + self, + name: str | None = None, + commands: cabc.MutableMapping[str, Command] + | cabc.Sequence[Command] + | None = None, + invoke_without_command: bool = False, + no_args_is_help: bool | None = None, + subcommand_metavar: str | None = None, + chain: bool = False, + result_callback: t.Callable[..., t.Any] | None = None, + **kwargs: t.Any, + ) -> None: + super().__init__(name, **kwargs) + + if commands is None: + commands = {} + elif isinstance(commands, abc.Sequence): + commands = {c.name: c for c in commands if c.name is not None} + + #: The registered subcommands by their exported names. + self.commands: cabc.MutableMapping[str, Command] = commands + + if no_args_is_help is None: + no_args_is_help = not invoke_without_command + + self.no_args_is_help = no_args_is_help + self.invoke_without_command = invoke_without_command + + if subcommand_metavar is None: + if chain: + subcommand_metavar = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..." + else: + subcommand_metavar = "COMMAND [ARGS]..." + + self.subcommand_metavar = subcommand_metavar + self.chain = chain + # The result callback that is stored. This can be set or + # overridden with the :func:`result_callback` decorator. + self._result_callback = result_callback + + if self.chain: + for param in self.params: + if isinstance(param, Argument) and not param.required: + raise RuntimeError( + "A group in chain mode cannot have optional arguments." 
+ ) + + def to_info_dict(self, ctx: Context) -> dict[str, t.Any]: + info_dict = super().to_info_dict(ctx) + commands = {} + + for name in self.list_commands(ctx): + command = self.get_command(ctx, name) + + if command is None: + continue + + sub_ctx = ctx._make_sub_context(command) + + with sub_ctx.scope(cleanup=False): + commands[name] = command.to_info_dict(sub_ctx) + + info_dict.update(commands=commands, chain=self.chain) + return info_dict + + def add_command(self, cmd: Command, name: str | None = None) -> None: + """Registers another :class:`Command` with this group. If the name + is not provided, the name of the command is used. + """ + name = name or cmd.name + if name is None: + raise TypeError("Command has no name.") + _check_nested_chain(self, name, cmd, register=True) + self.commands[name] = cmd + + @t.overload + def command(self, __func: t.Callable[..., t.Any]) -> Command: ... + + @t.overload + def command( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], Command]: ... + + def command( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], Command] | Command: + """A shortcut decorator for declaring and attaching a command to + the group. This takes the same arguments as :func:`command` and + immediately registers the created command with this group by + calling :meth:`add_command`. + + To customize the command class used, set the + :attr:`command_class` attribute. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.0 + Added the :attr:`command_class` attribute. + """ + from .decorators import command + + func: t.Callable[..., t.Any] | None = None + + if args and callable(args[0]): + assert len(args) == 1 and not kwargs, ( + "Use 'command(**kwargs)(callable)' to provide arguments." + ) + (func,) = args + args = () + + if self.command_class and kwargs.get("cls") is None: + kwargs["cls"] = self.command_class + + def decorator(f: t.Callable[..., t.Any]) -> Command: + cmd: Command = command(*args, **kwargs)(f) + self.add_command(cmd) + return cmd + + if func is not None: + return decorator(func) + + return decorator + + @t.overload + def group(self, __func: t.Callable[..., t.Any]) -> Group: ... + + @t.overload + def group( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], Group]: ... + + def group( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], Group] | Group: + """A shortcut decorator for declaring and attaching a group to + the group. This takes the same arguments as :func:`group` and + immediately registers the created group with this group by + calling :meth:`add_command`. + + To customize the group class used, set the :attr:`group_class` + attribute. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.0 + Added the :attr:`group_class` attribute. + """ + from .decorators import group + + func: t.Callable[..., t.Any] | None = None + + if args and callable(args[0]): + assert len(args) == 1 and not kwargs, ( + "Use 'group(**kwargs)(callable)' to provide arguments." 
+ ) + (func,) = args + args = () + + if self.group_class is not None and kwargs.get("cls") is None: + if self.group_class is type: + kwargs["cls"] = type(self) + else: + kwargs["cls"] = self.group_class + + def decorator(f: t.Callable[..., t.Any]) -> Group: + cmd: Group = group(*args, **kwargs)(f) + self.add_command(cmd) + return cmd + + if func is not None: + return decorator(func) + + return decorator + + def result_callback(self, replace: bool = False) -> t.Callable[[F], F]: + """Adds a result callback to the command. By default if a + result callback is already registered this will chain them but + this can be disabled with the `replace` parameter. The result + callback is invoked with the return value of the subcommand + (or the list of return values from all subcommands if chaining + is enabled) as well as the parameters as they would be passed + to the main callback. + + Example:: + + @click.group() + @click.option('-i', '--input', default=23) + def cli(input): + return 42 + + @cli.result_callback() + def process_result(result, input): + return result + input + + :param replace: if set to `True` an already existing result + callback will be removed. + + .. versionchanged:: 8.0 + Renamed from ``resultcallback``. + + .. versionadded:: 3.0 + """ + + def decorator(f: F) -> F: + old_callback = self._result_callback + + if old_callback is None or replace: + self._result_callback = f + return f + + def function(value: t.Any, /, *args: t.Any, **kwargs: t.Any) -> t.Any: + inner = old_callback(value, *args, **kwargs) + return f(inner, *args, **kwargs) + + self._result_callback = rv = update_wrapper(t.cast(F, function), f) + return rv # type: ignore[return-value] + + return decorator + + def get_command(self, ctx: Context, cmd_name: str) -> Command | None: + """Given a context and a command name, this returns a :class:`Command` + object if it exists or returns ``None``. + """ + return self.commands.get(cmd_name) + + def list_commands(self, ctx: Context) -> list[str]: + """Returns a list of subcommand names in the order they should appear.""" + return sorted(self.commands) + + def collect_usage_pieces(self, ctx: Context) -> list[str]: + rv = super().collect_usage_pieces(ctx) + rv.append(self.subcommand_metavar) + return rv + + def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: + super().format_options(ctx, formatter) + self.format_commands(ctx, formatter) + + def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None: + """Extra format methods for multi methods that adds all the commands + after the options. + """ + commands = [] + for subcommand in self.list_commands(ctx): + cmd = self.get_command(ctx, subcommand) + # What is this, the tool lied about a command. 
Ignore it + if cmd is None: + continue + if cmd.hidden: + continue + + commands.append((subcommand, cmd)) + + # allow for 3 times the default spacing + if len(commands): + limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) + + rows = [] + for subcommand, cmd in commands: + help = cmd.get_short_help_str(limit) + rows.append((subcommand, help)) + + if rows: + with formatter.section(_("Commands")): + formatter.write_dl(rows) + + def parse_args(self, ctx: Context, args: list[str]) -> list[str]: + if not args and self.no_args_is_help and not ctx.resilient_parsing: + raise NoArgsIsHelpError(ctx) + + rest = super().parse_args(ctx, args) + + if self.chain: + ctx._protected_args = rest + ctx.args = [] + elif rest: + ctx._protected_args, ctx.args = rest[:1], rest[1:] + + return ctx.args + + def invoke(self, ctx: Context) -> t.Any: + def _process_result(value: t.Any) -> t.Any: + if self._result_callback is not None: + value = ctx.invoke(self._result_callback, value, **ctx.params) + return value + + if not ctx._protected_args: + if self.invoke_without_command: + # No subcommand was invoked, so the result callback is + # invoked with the group return value for regular + # groups, or an empty list for chained groups. + with ctx: + rv = super().invoke(ctx) + return _process_result([] if self.chain else rv) + ctx.fail(_("Missing command.")) + + # Fetch args back out + args = [*ctx._protected_args, *ctx.args] + ctx.args = [] + ctx._protected_args = [] + + # If we're not in chain mode, we only allow the invocation of a + # single command but we also inform the current context about the + # name of the command to invoke. + if not self.chain: + # Make sure the context is entered so we do not clean up + # resources until the result processor has worked. + with ctx: + cmd_name, cmd, args = self.resolve_command(ctx, args) + assert cmd is not None + ctx.invoked_subcommand = cmd_name + super().invoke(ctx) + sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) + with sub_ctx: + return _process_result(sub_ctx.command.invoke(sub_ctx)) + + # In chain mode we create the contexts step by step, but after the + # base command has been invoked. Because at that point we do not + # know the subcommands yet, the invoked subcommand attribute is + # set to ``*`` to inform the command that subcommands are executed + # but nothing else. + with ctx: + ctx.invoked_subcommand = "*" if args else None + super().invoke(ctx) + + # Otherwise we make every single context and invoke them in a + # chain. In that case the return value to the result processor + # is the list of all invoked subcommand's results. + contexts = [] + while args: + cmd_name, cmd, args = self.resolve_command(ctx, args) + assert cmd is not None + sub_ctx = cmd.make_context( + cmd_name, + args, + parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False, + ) + contexts.append(sub_ctx) + args, sub_ctx.args = sub_ctx.args, [] + + rv = [] + for sub_ctx in contexts: + with sub_ctx: + rv.append(sub_ctx.command.invoke(sub_ctx)) + return _process_result(rv) + + def resolve_command( + self, ctx: Context, args: list[str] + ) -> tuple[str | None, Command | None, list[str]]: + cmd_name = make_str(args[0]) + original_cmd_name = cmd_name + + # Get the command + cmd = self.get_command(ctx, cmd_name) + + # If we can't find the command but there is a normalization + # function available, we try with that one. 
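+        # e.g. with token_normalize_func=str.lower (illustrative), a user
+        # typing "SYNC" still resolves the command registered as "sync".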
+ if cmd is None and ctx.token_normalize_func is not None: + cmd_name = ctx.token_normalize_func(cmd_name) + cmd = self.get_command(ctx, cmd_name) + + # If we don't find the command we want to show an error message + # to the user that it was not provided. However, there is + # something else we should do: if the first argument looks like + # an option we want to kick off parsing again for arguments to + # resolve things like --help which now should go to the main + # place. + if cmd is None and not ctx.resilient_parsing: + if _split_opt(cmd_name)[0]: + self.parse_args(ctx, args) + ctx.fail(_("No such command {name!r}.").format(name=original_cmd_name)) + return cmd_name if cmd else None, cmd, args[1:] + + def shell_complete(self, ctx: Context, incomplete: str) -> list[CompletionItem]: + """Return a list of completions for the incomplete value. Looks + at the names of options, subcommands, and chained + multi-commands. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + results = [ + CompletionItem(name, help=command.get_short_help_str()) + for name, command in _complete_visible_commands(ctx, incomplete) + ] + results.extend(super().shell_complete(ctx, incomplete)) + return results + + +class _MultiCommand(Group, metaclass=_FakeSubclassCheck): + """ + .. deprecated:: 8.2 + Will be removed in Click 9.0. Use ``Group`` instead. + """ + + +class CommandCollection(Group): + """A :class:`Group` that looks up subcommands on other groups. If a command + is not found on this group, each registered source is checked in order. + Parameters on a source are not added to this group, and a source's callback + is not invoked when invoking its commands. In other words, this "flattens" + commands in many groups into this one group. + + :param name: The name of the group command. + :param sources: A list of :class:`Group` objects to look up commands from. + :param kwargs: Other arguments passed to :class:`Group`. + + .. versionchanged:: 8.2 + This is a subclass of ``Group``. Commands are looked up first on this + group, then each of its sources. + """ + + def __init__( + self, + name: str | None = None, + sources: list[Group] | None = None, + **kwargs: t.Any, + ) -> None: + super().__init__(name, **kwargs) + #: The list of registered groups. + self.sources: list[Group] = sources or [] + + def add_source(self, group: Group) -> None: + """Add a group as a source of commands.""" + self.sources.append(group) + + def get_command(self, ctx: Context, cmd_name: str) -> Command | None: + rv = super().get_command(ctx, cmd_name) + + if rv is not None: + return rv + + for source in self.sources: + rv = source.get_command(ctx, cmd_name) + + if rv is not None: + if self.chain: + _check_nested_chain(self, cmd_name, rv) + + return rv + + return None + + def list_commands(self, ctx: Context) -> list[str]: + rv: set[str] = set(super().list_commands(ctx)) + + for source in self.sources: + rv.update(source.list_commands(ctx)) + + return sorted(rv) + + +def _check_iter(value: t.Any) -> cabc.Iterator[t.Any]: + """Check if the value is iterable but not a string. Raises a type + error, or return an iterator over the value. + """ + if isinstance(value, str): + raise TypeError + + return iter(value) + + +class Parameter: + r"""A parameter to a command comes in two versions: they are either + :class:`Option`\s or :class:`Argument`\s. 
Other subclasses are currently
+    not supported by design as some of the internals for parsing are
+    intentionally not finalized.
+
+    Some settings are supported by both options and arguments.
+
+    :param param_decls: the parameter declarations for this option or
+                        argument. This is a list of flags or argument
+                        names.
+    :param type: the type that should be used. Either a :class:`ParamType`
+                 or a Python type. The latter is converted into the former
+                 automatically if supported.
+    :param required: controls if this is optional or not.
+    :param default: the default value if omitted. This can also be a callable,
+                    in which case it's invoked when the default is needed
+                    without any arguments.
+    :param callback: A function to further process or validate the value
+        after type conversion. It is called as ``f(ctx, param, value)``
+        and must return the value. It is called for all sources,
+        including prompts.
+    :param nargs: the number of arguments to match. If not ``1`` the return
+                  value is a tuple instead of single value. The default for
+                  nargs is ``1`` (except if the type is a tuple, then it's
+                  the arity of the tuple). If ``nargs=-1``, all remaining
+                  parameters are collected.
+    :param metavar: how the value is represented in the help page.
+    :param expose_value: if this is `True` then the value is passed onwards
+                         to the command callback and stored on the context,
+                         otherwise it's skipped.
+    :param is_eager: eager values are processed before non eager ones. This
+                     should not be set for arguments or it will inverse the
+                     order of processing.
+    :param envvar: a string or list of strings that are environment variables
+                   that should be checked.
+    :param shell_complete: A function that returns custom shell
+        completions. Used instead of the param's type completion if
+        given. Takes ``ctx, param, incomplete`` and must return a list
+        of :class:`~click.shell_completion.CompletionItem` or a list of
+        strings.
+    :param deprecated: If ``True`` or non-empty string, issues a message
+        indicating that the argument is deprecated and highlights
+        its deprecation in --help. The message can be customized
+        by using a string as the value. A deprecated parameter
+        cannot be required, a ValueError will be raised otherwise.
+
+    .. versionchanged:: 8.2.0
+        Introduction of ``deprecated``.
+
+    .. versionchanged:: 8.2
+        Adding duplicate parameter names to a :class:`~click.core.Command` will
+        result in a ``UserWarning`` being shown.
+
+    .. versionchanged:: 8.0
+        ``process_value`` validates required parameters and bounded
+        ``nargs``, and invokes the parameter callback before returning
+        the value. This allows the callback to validate prompts.
+        ``full_process_value`` is removed.
+
+    .. versionchanged:: 8.0
+        ``autocompletion`` is renamed to ``shell_complete`` and has new
+        semantics described above. The old name is deprecated and will
+        be removed in 8.1, until then it will be wrapped to match the
+        new requirements.
+
+    .. versionchanged:: 8.0
+        For ``multiple=True, nargs>1``, the default must be a list of
+        tuples.
+
+    .. versionchanged:: 8.0
+        Setting a default is no longer required for ``nargs>1``, it will
+        default to ``None``. ``multiple=True`` or ``nargs=-1`` will
+        default to ``()``.
+
+    .. versionchanged:: 7.1
+        Empty environment variables are ignored rather than taking the
+        empty string value.
This makes it possible for scripts to clear + variables if they can't unset them. + + .. versionchanged:: 2.0 + Changed signature for parameter callback to also be passed the + parameter. The old callback format will still work, but it will + raise a warning to give you a chance to migrate the code easier. + """ + + param_type_name = "parameter" + + def __init__( + self, + param_decls: cabc.Sequence[str] | None = None, + type: types.ParamType | t.Any | None = None, + required: bool = False, + default: t.Any | t.Callable[[], t.Any] | None = None, + callback: t.Callable[[Context, Parameter, t.Any], t.Any] | None = None, + nargs: int | None = None, + multiple: bool = False, + metavar: str | None = None, + expose_value: bool = True, + is_eager: bool = False, + envvar: str | cabc.Sequence[str] | None = None, + shell_complete: t.Callable[ + [Context, Parameter, str], list[CompletionItem] | list[str] + ] + | None = None, + deprecated: bool | str = False, + ) -> None: + self.name: str | None + self.opts: list[str] + self.secondary_opts: list[str] + self.name, self.opts, self.secondary_opts = self._parse_decls( + param_decls or (), expose_value + ) + self.type: types.ParamType = types.convert_type(type, default) + + # Default nargs to what the type tells us if we have that + # information available. + if nargs is None: + if self.type.is_composite: + nargs = self.type.arity + else: + nargs = 1 + + self.required = required + self.callback = callback + self.nargs = nargs + self.multiple = multiple + self.expose_value = expose_value + self.default = default + self.is_eager = is_eager + self.metavar = metavar + self.envvar = envvar + self._custom_shell_complete = shell_complete + self.deprecated = deprecated + + if __debug__: + if self.type.is_composite and nargs != self.type.arity: + raise ValueError( + f"'nargs' must be {self.type.arity} (or None) for" + f" type {self.type!r}, but it was {nargs}." + ) + + # Skip no default or callable default. + check_default = default if not callable(default) else None + + if check_default is not None: + if multiple: + try: + # Only check the first value against nargs. + check_default = next(_check_iter(check_default), None) + except TypeError: + raise ValueError( + "'default' must be a list when 'multiple' is true." + ) from None + + # Can be None for multiple with empty default. + if nargs != 1 and check_default is not None: + try: + _check_iter(check_default) + except TypeError: + if multiple: + message = ( + "'default' must be a list of lists when 'multiple' is" + " true and 'nargs' != 1." + ) + else: + message = "'default' must be a list when 'nargs' != 1." + + raise ValueError(message) from None + + if nargs > 1 and len(check_default) != nargs: + subject = "item length" if multiple else "length" + raise ValueError( + f"'default' {subject} must match nargs={nargs}." + ) + + if required and deprecated: + raise ValueError( + f"The {self.param_type_name} '{self.human_readable_name}' " + "is deprecated and still required. A deprecated " + f"{self.param_type_name} cannot be required." + ) + + def to_info_dict(self) -> dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. + + Use :meth:`click.Context.to_info_dict` to traverse the entire + CLI structure. + + .. 
versionadded:: 8.0 + """ + return { + "name": self.name, + "param_type_name": self.param_type_name, + "opts": self.opts, + "secondary_opts": self.secondary_opts, + "type": self.type.to_info_dict(), + "required": self.required, + "nargs": self.nargs, + "multiple": self.multiple, + "default": self.default, + "envvar": self.envvar, + } + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.name}>" + + def _parse_decls( + self, decls: cabc.Sequence[str], expose_value: bool + ) -> tuple[str | None, list[str], list[str]]: + raise NotImplementedError() + + @property + def human_readable_name(self) -> str: + """Returns the human readable name of this parameter. This is the + same as the name for options, but the metavar for arguments. + """ + return self.name # type: ignore + + def make_metavar(self, ctx: Context) -> str: + if self.metavar is not None: + return self.metavar + + metavar = self.type.get_metavar(param=self, ctx=ctx) + + if metavar is None: + metavar = self.type.name.upper() + + if self.nargs != 1: + metavar += "..." + + return metavar + + @t.overload + def get_default( + self, ctx: Context, call: t.Literal[True] = True + ) -> t.Any | None: ... + + @t.overload + def get_default( + self, ctx: Context, call: bool = ... + ) -> t.Any | t.Callable[[], t.Any] | None: ... + + def get_default( + self, ctx: Context, call: bool = True + ) -> t.Any | t.Callable[[], t.Any] | None: + """Get the default for the parameter. Tries + :meth:`Context.lookup_default` first, then the local default. + + :param ctx: Current context. + :param call: If the default is a callable, call it. Disable to + return the callable instead. + + .. versionchanged:: 8.0.2 + Type casting is no longer performed when getting a default. + + .. versionchanged:: 8.0.1 + Type casting can fail in resilient parsing mode. Invalid + defaults will not prevent showing help text. + + .. versionchanged:: 8.0 + Looks at ``ctx.default_map`` first. + + .. versionchanged:: 8.0 + Added the ``call`` parameter. + """ + value = ctx.lookup_default(self.name, call=False) # type: ignore + + if value is None: + value = self.default + + if call and callable(value): + value = value() + + return value + + def add_to_parser(self, parser: _OptionParser, ctx: Context) -> None: + raise NotImplementedError() + + def consume_value( + self, ctx: Context, opts: cabc.Mapping[str, t.Any] + ) -> tuple[t.Any, ParameterSource]: + value = opts.get(self.name) # type: ignore + source = ParameterSource.COMMANDLINE + + if value is None: + value = self.value_from_envvar(ctx) + source = ParameterSource.ENVIRONMENT + + if value is None: + value = ctx.lookup_default(self.name) # type: ignore + source = ParameterSource.DEFAULT_MAP + + if value is None: + value = self.get_default(ctx) + source = ParameterSource.DEFAULT + + return value, source + + def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any: + """Convert and validate a value against the option's + :attr:`type`, :attr:`multiple`, and :attr:`nargs`. + """ + if value is None: + return () if self.multiple or self.nargs == -1 else None + + def check_iter(value: t.Any) -> cabc.Iterator[t.Any]: + try: + return _check_iter(value) + except TypeError: + # This should only happen when passing in args manually, + # the parser should construct an iterable when parsing + # the command line. 
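+                # e.g. (illustrative) an nargs=-1 parameter whose default
+                # is the string "abc" lands here, since _check_iter()
+                # above rejects strings.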
+ raise BadParameter( + _("Value must be an iterable."), ctx=ctx, param=self + ) from None + + if self.nargs == 1 or self.type.is_composite: + + def convert(value: t.Any) -> t.Any: + return self.type(value, param=self, ctx=ctx) + + elif self.nargs == -1: + + def convert(value: t.Any) -> t.Any: # tuple[t.Any, ...] + return tuple(self.type(x, self, ctx) for x in check_iter(value)) + + else: # nargs > 1 + + def convert(value: t.Any) -> t.Any: # tuple[t.Any, ...] + value = tuple(check_iter(value)) + + if len(value) != self.nargs: + raise BadParameter( + ngettext( + "Takes {nargs} values but 1 was given.", + "Takes {nargs} values but {len} were given.", + len(value), + ).format(nargs=self.nargs, len=len(value)), + ctx=ctx, + param=self, + ) + + return tuple(self.type(x, self, ctx) for x in value) + + if self.multiple: + return tuple(convert(x) for x in check_iter(value)) + + return convert(value) + + def value_is_missing(self, value: t.Any) -> bool: + if value is None: + return True + + if (self.nargs != 1 or self.multiple) and value == (): + return True + + return False + + def process_value(self, ctx: Context, value: t.Any) -> t.Any: + value = self.type_cast_value(ctx, value) + + if self.required and self.value_is_missing(value): + raise MissingParameter(ctx=ctx, param=self) + + if self.callback is not None: + value = self.callback(ctx, self, value) + + return value + + def resolve_envvar_value(self, ctx: Context) -> str | None: + if self.envvar is None: + return None + + if isinstance(self.envvar, str): + rv = os.environ.get(self.envvar) + + if rv: + return rv + else: + for envvar in self.envvar: + rv = os.environ.get(envvar) + + if rv: + return rv + + return None + + def value_from_envvar(self, ctx: Context) -> t.Any | None: + rv: t.Any | None = self.resolve_envvar_value(ctx) + + if rv is not None and self.nargs != 1: + rv = self.type.split_envvar_value(rv) + + return rv + + def handle_parse_result( + self, ctx: Context, opts: cabc.Mapping[str, t.Any], args: list[str] + ) -> tuple[t.Any, list[str]]: + with augment_usage_errors(ctx, param=self): + value, source = self.consume_value(ctx, opts) + + if ( + self.deprecated + and value is not None + and source + not in ( + ParameterSource.DEFAULT, + ParameterSource.DEFAULT_MAP, + ) + ): + extra_message = ( + f" {self.deprecated}" if isinstance(self.deprecated, str) else "" + ) + message = _( + "DeprecationWarning: The {param_type} {name!r} is deprecated." + "{extra_message}" + ).format( + param_type=self.param_type_name, + name=self.human_readable_name, + extra_message=extra_message, + ) + echo(style(message, fg="red"), err=True) + + ctx.set_parameter_source(self.name, source) # type: ignore + + try: + value = self.process_value(ctx, value) + except Exception: + if not ctx.resilient_parsing: + raise + + value = None + + if self.expose_value: + ctx.params[self.name] = value # type: ignore + + return value, args + + def get_help_record(self, ctx: Context) -> tuple[str, str] | None: + pass + + def get_usage_pieces(self, ctx: Context) -> list[str]: + return [] + + def get_error_hint(self, ctx: Context) -> str: + """Get a stringified version of the param for use in error messages to + indicate which param caused the error. + """ + hint_list = self.opts or [self.human_readable_name] + return " / ".join(f"'{x}'" for x in hint_list) + + def shell_complete(self, ctx: Context, incomplete: str) -> list[CompletionItem]: + """Return a list of completions for the incomplete value. If a + ``shell_complete`` function was given during init, it is used. 
+ Otherwise, the :attr:`type` + :meth:`~click.types.ParamType.shell_complete` function is used. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + if self._custom_shell_complete is not None: + results = self._custom_shell_complete(ctx, self, incomplete) + + if results and isinstance(results[0], str): + from click.shell_completion import CompletionItem + + results = [CompletionItem(c) for c in results] + + return t.cast("list[CompletionItem]", results) + + return self.type.shell_complete(ctx, self, incomplete) + + +class Option(Parameter): + """Options are usually optional values on the command line and + have some extra features that arguments don't have. + + All other parameters are passed onwards to the parameter constructor. + + :param show_default: Show the default value for this option in its + help text. Values are not shown by default, unless + :attr:`Context.show_default` is ``True``. If this value is a + string, it shows that string in parentheses instead of the + actual value. This is particularly useful for dynamic options. + For single option boolean flags, the default remains hidden if + its value is ``False``. + :param show_envvar: Controls if an environment variable should be + shown on the help page and error messages. + Normally, environment variables are not shown. + :param prompt: If set to ``True`` or a non empty string then the + user will be prompted for input. If set to ``True`` the prompt + will be the option name capitalized. A deprecated option cannot be + prompted. + :param confirmation_prompt: Prompt a second time to confirm the + value if it was prompted for. Can be set to a string instead of + ``True`` to customize the message. + :param prompt_required: If set to ``False``, the user will be + prompted for input only when the option was specified as a flag + without a value. + :param hide_input: If this is ``True`` then the input on the prompt + will be hidden from the user. This is useful for password input. + :param is_flag: forces this option to act as a flag. The default is + auto detection. + :param flag_value: which value should be used for this flag if it's + enabled. This is set to a boolean automatically if + the option string contains a slash to mark two options. + :param multiple: if this is set to `True` then the argument is accepted + multiple times and recorded. This is similar to ``nargs`` + in how it works but supports arbitrary number of + arguments. + :param count: this flag makes an option increment an integer. + :param allow_from_autoenv: if this is enabled then the value of this + parameter will be pulled from an environment + variable in case a prefix is defined on the + context. + :param help: the help string. + :param hidden: hide this option from help outputs. + :param attrs: Other command arguments described in :class:`Parameter`. + + .. versionchanged:: 8.2 + ``envvar`` used with ``flag_value`` will always use the ``flag_value``, + previously it would use the value of the environment variable. + + .. versionchanged:: 8.1 + Help text indentation is cleaned here instead of only in the + ``@option`` decorator. + + .. versionchanged:: 8.1 + The ``show_default`` parameter overrides + ``Context.show_default``. + + .. versionchanged:: 8.1 + The default of a single option boolean flag is not shown if the + default value is ``False``. + + .. versionchanged:: 8.0.1 + ``type`` is detected from ``flag_value`` if given. 
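+
+    A few common declarations, as a sketch (illustrative)::
+
+        @click.option("--count", default=1, show_default=True)
+        @click.option("--debug/--no-debug", default=False)
+        @click.option("--password", prompt=True, hide_input=True)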
+ """ + + param_type_name = "option" + + def __init__( + self, + param_decls: cabc.Sequence[str] | None = None, + show_default: bool | str | None = None, + prompt: bool | str = False, + confirmation_prompt: bool | str = False, + prompt_required: bool = True, + hide_input: bool = False, + is_flag: bool | None = None, + flag_value: t.Any | None = None, + multiple: bool = False, + count: bool = False, + allow_from_autoenv: bool = True, + type: types.ParamType | t.Any | None = None, + help: str | None = None, + hidden: bool = False, + show_choices: bool = True, + show_envvar: bool = False, + deprecated: bool | str = False, + **attrs: t.Any, + ) -> None: + if help: + help = inspect.cleandoc(help) + + default_is_missing = "default" not in attrs + super().__init__( + param_decls, type=type, multiple=multiple, deprecated=deprecated, **attrs + ) + + if prompt is True: + if self.name is None: + raise TypeError("'name' is required with 'prompt=True'.") + + prompt_text: str | None = self.name.replace("_", " ").capitalize() + elif prompt is False: + prompt_text = None + else: + prompt_text = prompt + + if deprecated: + deprecated_message = ( + f"(DEPRECATED: {deprecated})" + if isinstance(deprecated, str) + else "(DEPRECATED)" + ) + help = help + deprecated_message if help is not None else deprecated_message + + self.prompt = prompt_text + self.confirmation_prompt = confirmation_prompt + self.prompt_required = prompt_required + self.hide_input = hide_input + self.hidden = hidden + + # If prompt is enabled but not required, then the option can be + # used as a flag to indicate using prompt or flag_value. + self._flag_needs_value = self.prompt is not None and not self.prompt_required + + if is_flag is None: + if flag_value is not None: + # Implicitly a flag because flag_value was set. + is_flag = True + elif self._flag_needs_value: + # Not a flag, but when used as a flag it shows a prompt. + is_flag = False + else: + # Implicitly a flag because flag options were given. + is_flag = bool(self.secondary_opts) + elif is_flag is False and not self._flag_needs_value: + # Not a flag, and prompt is not enabled, can be used as a + # flag if flag_value is set. + self._flag_needs_value = flag_value is not None + + self.default: t.Any | t.Callable[[], t.Any] + + if is_flag and default_is_missing and not self.required: + if multiple: + self.default = () + else: + self.default = False + + self.type: types.ParamType + if is_flag and type is None: + if flag_value is None: + flag_value = not self.default + # Re-guess the type from the flag value instead of the + # default. 
+ self.type = types.convert_type(None, flag_value) + + self.is_flag: bool = is_flag + self.is_bool_flag: bool = is_flag and isinstance(self.type, types.BoolParamType) + self.flag_value: t.Any = flag_value + + # Counting + self.count = count + if count: + if type is None: + self.type = types.IntRange(min=0) + if default_is_missing: + self.default = 0 + + self.allow_from_autoenv = allow_from_autoenv + self.help = help + self.show_default = show_default + self.show_choices = show_choices + self.show_envvar = show_envvar + + if __debug__: + if deprecated and prompt: + raise ValueError("`deprecated` options cannot use `prompt`.") + + if self.nargs == -1: + raise TypeError("nargs=-1 is not supported for options.") + + if self.prompt and self.is_flag and not self.is_bool_flag: + raise TypeError("'prompt' is not valid for non-boolean flag.") + + if not self.is_bool_flag and self.secondary_opts: + raise TypeError("Secondary flag is not valid for non-boolean flag.") + + if self.is_bool_flag and self.hide_input and self.prompt is not None: + raise TypeError( + "'prompt' with 'hide_input' is not valid for boolean flag." + ) + + if self.count: + if self.multiple: + raise TypeError("'count' is not valid with 'multiple'.") + + if self.is_flag: + raise TypeError("'count' is not valid with 'is_flag'.") + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + help=self.help, + prompt=self.prompt, + is_flag=self.is_flag, + flag_value=self.flag_value, + count=self.count, + hidden=self.hidden, + ) + return info_dict + + def get_error_hint(self, ctx: Context) -> str: + result = super().get_error_hint(ctx) + if self.show_envvar: + result += f" (env var: '{self.envvar}')" + return result + + def _parse_decls( + self, decls: cabc.Sequence[str], expose_value: bool + ) -> tuple[str | None, list[str], list[str]]: + opts = [] + secondary_opts = [] + name = None + possible_names = [] + + for decl in decls: + if decl.isidentifier(): + if name is not None: + raise TypeError(f"Name '{name}' defined twice") + name = decl + else: + split_char = ";" if decl[:1] == "/" else "/" + if split_char in decl: + first, second = decl.split(split_char, 1) + first = first.rstrip() + if first: + possible_names.append(_split_opt(first)) + opts.append(first) + second = second.lstrip() + if second: + secondary_opts.append(second.lstrip()) + if first == second: + raise ValueError( + f"Boolean option {decl!r} cannot use the" + " same flag for true/false." + ) + else: + possible_names.append(_split_opt(decl)) + opts.append(decl) + + if name is None and possible_names: + possible_names.sort(key=lambda x: -len(x[0])) # group long options first + name = possible_names[0][1].replace("-", "_").lower() + if not name.isidentifier(): + name = None + + if name is None: + if not expose_value: + return None, opts, secondary_opts + raise TypeError( + f"Could not determine name for option with declarations {decls!r}" + ) + + if not opts and not secondary_opts: + raise TypeError( + f"No options defined but a name was passed ({name})." + " Did you mean to declare an argument instead? Did" + f" you mean to pass '--{name}'?" 
+ ) + + return name, opts, secondary_opts + + def add_to_parser(self, parser: _OptionParser, ctx: Context) -> None: + if self.multiple: + action = "append" + elif self.count: + action = "count" + else: + action = "store" + + if self.is_flag: + action = f"{action}_const" + + if self.is_bool_flag and self.secondary_opts: + parser.add_option( + obj=self, opts=self.opts, dest=self.name, action=action, const=True + ) + parser.add_option( + obj=self, + opts=self.secondary_opts, + dest=self.name, + action=action, + const=False, + ) + else: + parser.add_option( + obj=self, + opts=self.opts, + dest=self.name, + action=action, + const=self.flag_value, + ) + else: + parser.add_option( + obj=self, + opts=self.opts, + dest=self.name, + action=action, + nargs=self.nargs, + ) + + def get_help_record(self, ctx: Context) -> tuple[str, str] | None: + if self.hidden: + return None + + any_prefix_is_slash = False + + def _write_opts(opts: cabc.Sequence[str]) -> str: + nonlocal any_prefix_is_slash + + rv, any_slashes = join_options(opts) + + if any_slashes: + any_prefix_is_slash = True + + if not self.is_flag and not self.count: + rv += f" {self.make_metavar(ctx=ctx)}" + + return rv + + rv = [_write_opts(self.opts)] + + if self.secondary_opts: + rv.append(_write_opts(self.secondary_opts)) + + help = self.help or "" + + extra = self.get_help_extra(ctx) + extra_items = [] + if "envvars" in extra: + extra_items.append( + _("env var: {var}").format(var=", ".join(extra["envvars"])) + ) + if "default" in extra: + extra_items.append(_("default: {default}").format(default=extra["default"])) + if "range" in extra: + extra_items.append(extra["range"]) + if "required" in extra: + extra_items.append(_(extra["required"])) + + if extra_items: + extra_str = "; ".join(extra_items) + help = f"{help} [{extra_str}]" if help else f"[{extra_str}]" + + return ("; " if any_prefix_is_slash else " / ").join(rv), help + + def get_help_extra(self, ctx: Context) -> types.OptionHelpExtra: + extra: types.OptionHelpExtra = {} + + if self.show_envvar: + envvar = self.envvar + + if envvar is None: + if ( + self.allow_from_autoenv + and ctx.auto_envvar_prefix is not None + and self.name is not None + ): + envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" + + if envvar is not None: + if isinstance(envvar, str): + extra["envvars"] = (envvar,) + else: + extra["envvars"] = tuple(str(d) for d in envvar) + + # Temporarily enable resilient parsing to avoid type casting + # failing for the default. Might be possible to extend this to + # help formatting in general. + resilient = ctx.resilient_parsing + ctx.resilient_parsing = True + + try: + default_value = self.get_default(ctx, call=False) + finally: + ctx.resilient_parsing = resilient + + show_default = False + show_default_is_str = False + + if self.show_default is not None: + if isinstance(self.show_default, str): + show_default_is_str = show_default = True + else: + show_default = self.show_default + elif ctx.show_default is not None: + show_default = ctx.show_default + + if show_default_is_str or (show_default and (default_value is not None)): + if show_default_is_str: + default_string = f"({self.show_default})" + elif isinstance(default_value, (list, tuple)): + default_string = ", ".join(str(d) for d in default_value) + elif inspect.isfunction(default_value): + default_string = _("(dynamic)") + elif self.is_bool_flag and self.secondary_opts: + # For boolean flags that have distinct True/False opts, + # use the opt without prefix instead of the value. 
+ default_string = _split_opt( + (self.opts if default_value else self.secondary_opts)[0] + )[1] + elif self.is_bool_flag and not self.secondary_opts and not default_value: + default_string = "" + elif default_value == "": + default_string = '""' + else: + default_string = str(default_value) + + if default_string: + extra["default"] = default_string + + if ( + isinstance(self.type, types._NumberRangeBase) + # skip count with default range type + and not (self.count and self.type.min == 0 and self.type.max is None) + ): + range_str = self.type._describe_range() + + if range_str: + extra["range"] = range_str + + if self.required: + extra["required"] = "required" + + return extra + + @t.overload + def get_default( + self, ctx: Context, call: t.Literal[True] = True + ) -> t.Any | None: ... + + @t.overload + def get_default( + self, ctx: Context, call: bool = ... + ) -> t.Any | t.Callable[[], t.Any] | None: ... + + def get_default( + self, ctx: Context, call: bool = True + ) -> t.Any | t.Callable[[], t.Any] | None: + # If we're a non boolean flag our default is more complex because + # we need to look at all flags in the same group to figure out + # if we're the default one in which case we return the flag + # value as default. + if self.is_flag and not self.is_bool_flag: + for param in ctx.command.params: + if param.name == self.name and param.default: + return t.cast(Option, param).flag_value + + return None + + return super().get_default(ctx, call=call) + + def prompt_for_value(self, ctx: Context) -> t.Any: + """This is an alternative flow that can be activated in the full + value processing if a value does not exist. It will prompt the + user until a valid value exists and then returns the processed + value as result. + """ + assert self.prompt is not None + + # Calculate the default before prompting anything to be stable. + default = self.get_default(ctx) + + # If this is a prompt for a flag we need to handle this + # differently. + if self.is_bool_flag: + return confirm(self.prompt, default) + + # If show_default is set to True/False, provide this to `prompt` as well. 
For + # non-bool values of `show_default`, we use `prompt`'s default behavior + prompt_kwargs: t.Any = {} + if isinstance(self.show_default, bool): + prompt_kwargs["show_default"] = self.show_default + + return prompt( + self.prompt, + default=default, + type=self.type, + hide_input=self.hide_input, + show_choices=self.show_choices, + confirmation_prompt=self.confirmation_prompt, + value_proc=lambda x: self.process_value(ctx, x), + **prompt_kwargs, + ) + + def resolve_envvar_value(self, ctx: Context) -> str | None: + rv = super().resolve_envvar_value(ctx) + + if rv is not None: + if self.is_flag and self.flag_value: + return str(self.flag_value) + return rv + + if ( + self.allow_from_autoenv + and ctx.auto_envvar_prefix is not None + and self.name is not None + ): + envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" + rv = os.environ.get(envvar) + + if rv: + return rv + + return None + + def value_from_envvar(self, ctx: Context) -> t.Any | None: + rv: t.Any | None = self.resolve_envvar_value(ctx) + + if rv is None: + return None + + value_depth = (self.nargs != 1) + bool(self.multiple) + + if value_depth > 0: + rv = self.type.split_envvar_value(rv) + + if self.multiple and self.nargs != 1: + rv = batch(rv, self.nargs) + + return rv + + def consume_value( + self, ctx: Context, opts: cabc.Mapping[str, Parameter] + ) -> tuple[t.Any, ParameterSource]: + value, source = super().consume_value(ctx, opts) + + # The parser will emit a sentinel value if the option can be + # given as a flag without a value. This is different from None + # to distinguish from the flag not being given at all. + if value is _flag_needs_value: + if self.prompt is not None and not ctx.resilient_parsing: + value = self.prompt_for_value(ctx) + source = ParameterSource.PROMPT + else: + value = self.flag_value + source = ParameterSource.COMMANDLINE + + elif ( + self.multiple + and value is not None + and any(v is _flag_needs_value for v in value) + ): + value = [self.flag_value if v is _flag_needs_value else v for v in value] + source = ParameterSource.COMMANDLINE + + # The value wasn't set, or used the param's default, prompt if + # prompting is enabled. + elif ( + source in {None, ParameterSource.DEFAULT} + and self.prompt is not None + and (self.required or self.prompt_required) + and not ctx.resilient_parsing + ): + value = self.prompt_for_value(ctx) + source = ParameterSource.PROMPT + + return value, source + + +class Argument(Parameter): + """Arguments are positional parameters to a command. They generally + provide fewer features than options but can have infinite ``nargs`` + and are required by default. + + All parameters are passed onwards to the constructor of :class:`Parameter`. 
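+
+    A minimal usage sketch (``src`` and ``dst`` are illustrative names;
+    ``src`` shows the variadic ``nargs=-1`` behavior)::
+
+        import click
+
+        @click.command()
+        @click.argument("src", nargs=-1)
+        @click.argument("dst")
+        def copy(src: tuple[str, ...], dst: str) -> None:
+            for fn in src:
+                click.echo(f"copy {fn} -> {dst}")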
+ """ + + param_type_name = "argument" + + def __init__( + self, + param_decls: cabc.Sequence[str], + required: bool | None = None, + **attrs: t.Any, + ) -> None: + if required is None: + if attrs.get("default") is not None: + required = False + else: + required = attrs.get("nargs", 1) > 0 + + if "multiple" in attrs: + raise TypeError("__init__() got an unexpected keyword argument 'multiple'.") + + super().__init__(param_decls, required=required, **attrs) + + if __debug__: + if self.default is not None and self.nargs == -1: + raise TypeError("'default' is not supported for nargs=-1.") + + @property + def human_readable_name(self) -> str: + if self.metavar is not None: + return self.metavar + return self.name.upper() # type: ignore + + def make_metavar(self, ctx: Context) -> str: + if self.metavar is not None: + return self.metavar + var = self.type.get_metavar(param=self, ctx=ctx) + if not var: + var = self.name.upper() # type: ignore + if self.deprecated: + var += "!" + if not self.required: + var = f"[{var}]" + if self.nargs != 1: + var += "..." + return var + + def _parse_decls( + self, decls: cabc.Sequence[str], expose_value: bool + ) -> tuple[str | None, list[str], list[str]]: + if not decls: + if not expose_value: + return None, [], [] + raise TypeError("Argument is marked as exposed, but does not have a name.") + if len(decls) == 1: + name = arg = decls[0] + name = name.replace("-", "_").lower() + else: + raise TypeError( + "Arguments take exactly one parameter declaration, got" + f" {len(decls)}: {decls}." + ) + return name, [arg], [] + + def get_usage_pieces(self, ctx: Context) -> list[str]: + return [self.make_metavar(ctx)] + + def get_error_hint(self, ctx: Context) -> str: + return f"'{self.make_metavar(ctx)}'" + + def add_to_parser(self, parser: _OptionParser, ctx: Context) -> None: + parser.add_argument(dest=self.name, nargs=self.nargs, obj=self) + + +def __getattr__(name: str) -> object: + import warnings + + if name == "BaseCommand": + warnings.warn( + "'BaseCommand' is deprecated and will be removed in Click 9.0. Use" + " 'Command' instead.", + DeprecationWarning, + stacklevel=2, + ) + return _BaseCommand + + if name == "MultiCommand": + warnings.warn( + "'MultiCommand' is deprecated and will be removed in Click 9.0. Use" + " 'Group' instead.", + DeprecationWarning, + stacklevel=2, + ) + return _MultiCommand + + raise AttributeError(name) diff --git a/venv/Lib/site-packages/click/decorators.py b/venv/Lib/site-packages/click/decorators.py new file mode 100644 index 00000000..21f4c342 --- /dev/null +++ b/venv/Lib/site-packages/click/decorators.py @@ -0,0 +1,551 @@ +from __future__ import annotations + +import inspect +import typing as t +from functools import update_wrapper +from gettext import gettext as _ + +from .core import Argument +from .core import Command +from .core import Context +from .core import Group +from .core import Option +from .core import Parameter +from .globals import get_current_context +from .utils import echo + +if t.TYPE_CHECKING: + import typing_extensions as te + + P = te.ParamSpec("P") + +R = t.TypeVar("R") +T = t.TypeVar("T") +_AnyCallable = t.Callable[..., t.Any] +FC = t.TypeVar("FC", bound="_AnyCallable | Command") + + +def pass_context(f: t.Callable[te.Concatenate[Context, P], R]) -> t.Callable[P, R]: + """Marks a callback as wanting to receive the current context + object as first argument. 
+ """ + + def new_func(*args: P.args, **kwargs: P.kwargs) -> R: + return f(get_current_context(), *args, **kwargs) + + return update_wrapper(new_func, f) + + +def pass_obj(f: t.Callable[te.Concatenate[T, P], R]) -> t.Callable[P, R]: + """Similar to :func:`pass_context`, but only pass the object on the + context onwards (:attr:`Context.obj`). This is useful if that object + represents the state of a nested system. + """ + + def new_func(*args: P.args, **kwargs: P.kwargs) -> R: + return f(get_current_context().obj, *args, **kwargs) + + return update_wrapper(new_func, f) + + +def make_pass_decorator( + object_type: type[T], ensure: bool = False +) -> t.Callable[[t.Callable[te.Concatenate[T, P], R]], t.Callable[P, R]]: + """Given an object type this creates a decorator that will work + similar to :func:`pass_obj` but instead of passing the object of the + current context, it will find the innermost context of type + :func:`object_type`. + + This generates a decorator that works roughly like this:: + + from functools import update_wrapper + + def decorator(f): + @pass_context + def new_func(ctx, *args, **kwargs): + obj = ctx.find_object(object_type) + return ctx.invoke(f, obj, *args, **kwargs) + return update_wrapper(new_func, f) + return decorator + + :param object_type: the type of the object to pass. + :param ensure: if set to `True`, a new object will be created and + remembered on the context if it's not there yet. + """ + + def decorator(f: t.Callable[te.Concatenate[T, P], R]) -> t.Callable[P, R]: + def new_func(*args: P.args, **kwargs: P.kwargs) -> R: + ctx = get_current_context() + + obj: T | None + if ensure: + obj = ctx.ensure_object(object_type) + else: + obj = ctx.find_object(object_type) + + if obj is None: + raise RuntimeError( + "Managed to invoke callback without a context" + f" object of type {object_type.__name__!r}" + " existing." + ) + + return ctx.invoke(f, obj, *args, **kwargs) + + return update_wrapper(new_func, f) + + return decorator + + +def pass_meta_key( + key: str, *, doc_description: str | None = None +) -> t.Callable[[t.Callable[te.Concatenate[T, P], R]], t.Callable[P, R]]: + """Create a decorator that passes a key from + :attr:`click.Context.meta` as the first argument to the decorated + function. + + :param key: Key in ``Context.meta`` to pass. + :param doc_description: Description of the object being passed, + inserted into the decorator's docstring. Defaults to "the 'key' + key from Context.meta". + + .. versionadded:: 8.0 + """ + + def decorator(f: t.Callable[te.Concatenate[T, P], R]) -> t.Callable[P, R]: + def new_func(*args: P.args, **kwargs: P.kwargs) -> R: + ctx = get_current_context() + obj = ctx.meta[key] + return ctx.invoke(f, obj, *args, **kwargs) + + return update_wrapper(new_func, f) + + if doc_description is None: + doc_description = f"the {key!r} key from :attr:`click.Context.meta`" + + decorator.__doc__ = ( + f"Decorator that passes {doc_description} as the first argument" + " to the decorated function." + ) + return decorator + + +CmdType = t.TypeVar("CmdType", bound=Command) + + +# variant: no call, directly as decorator for a function. +@t.overload +def command(name: _AnyCallable) -> Command: ... + + +# variant: with positional name and with positional or keyword cls argument: +# @command(namearg, CommandCls, ...) or @command(namearg, cls=CommandCls, ...) +@t.overload +def command( + name: str | None, + cls: type[CmdType], + **attrs: t.Any, +) -> t.Callable[[_AnyCallable], CmdType]: ... 
+ + +# variant: name omitted, cls _must_ be a keyword argument, @command(cls=CommandCls, ...) +@t.overload +def command( + name: None = None, + *, + cls: type[CmdType], + **attrs: t.Any, +) -> t.Callable[[_AnyCallable], CmdType]: ... + + +# variant: with optional string name, no cls argument provided. +@t.overload +def command( + name: str | None = ..., cls: None = None, **attrs: t.Any +) -> t.Callable[[_AnyCallable], Command]: ... + + +def command( + name: str | _AnyCallable | None = None, + cls: type[CmdType] | None = None, + **attrs: t.Any, +) -> Command | t.Callable[[_AnyCallable], Command | CmdType]: + r"""Creates a new :class:`Command` and uses the decorated function as + callback. This will also automatically attach all decorated + :func:`option`\s and :func:`argument`\s as parameters to the command. + + The name of the command defaults to the name of the function, converted to + lowercase, with underscores ``_`` replaced by dashes ``-``, and the suffixes + ``_command``, ``_cmd``, ``_group``, and ``_grp`` are removed. For example, + ``init_data_command`` becomes ``init-data``. + + All keyword arguments are forwarded to the underlying command class. + For the ``params`` argument, any decorated params are appended to + the end of the list. + + Once decorated the function turns into a :class:`Command` instance + that can be invoked as a command line utility or be attached to a + command :class:`Group`. + + :param name: The name of the command. Defaults to modifying the function's + name as described above. + :param cls: The command class to create. Defaults to :class:`Command`. + + .. versionchanged:: 8.2 + The suffixes ``_command``, ``_cmd``, ``_group``, and ``_grp`` are + removed when generating the name. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.1 + The ``params`` argument can be used. Decorated params are + appended to the end of the list. + """ + + func: t.Callable[[_AnyCallable], t.Any] | None = None + + if callable(name): + func = name + name = None + assert cls is None, "Use 'command(cls=cls)(callable)' to specify a class." + assert not attrs, "Use 'command(**kwargs)(callable)' to provide arguments." + + if cls is None: + cls = t.cast("type[CmdType]", Command) + + def decorator(f: _AnyCallable) -> CmdType: + if isinstance(f, Command): + raise TypeError("Attempted to convert a callback into a command twice.") + + attr_params = attrs.pop("params", None) + params = attr_params if attr_params is not None else [] + + try: + decorator_params = f.__click_params__ # type: ignore + except AttributeError: + pass + else: + del f.__click_params__ # type: ignore + params.extend(reversed(decorator_params)) + + if attrs.get("help") is None: + attrs["help"] = f.__doc__ + + if t.TYPE_CHECKING: + assert cls is not None + assert not callable(name) + + if name is not None: + cmd_name = name + else: + cmd_name = f.__name__.lower().replace("_", "-") + cmd_left, sep, suffix = cmd_name.rpartition("-") + + if sep and suffix in {"command", "cmd", "group", "grp"}: + cmd_name = cmd_left + + cmd = cls(name=cmd_name, callback=f, params=params, **attrs) + cmd.__doc__ = f.__doc__ + return cmd + + if func is not None: + return decorator(func) + + return decorator + + +GrpType = t.TypeVar("GrpType", bound=Group) + + +# variant: no call, directly as decorator for a function. +@t.overload +def group(name: _AnyCallable) -> Group: ... + + +# variant: with positional name and with positional or keyword cls argument: +# @group(namearg, GroupCls, ...) 
or @group(namearg, cls=GroupCls, ...) +@t.overload +def group( + name: str | None, + cls: type[GrpType], + **attrs: t.Any, +) -> t.Callable[[_AnyCallable], GrpType]: ... + + +# variant: name omitted, cls _must_ be a keyword argument, @group(cmd=GroupCls, ...) +@t.overload +def group( + name: None = None, + *, + cls: type[GrpType], + **attrs: t.Any, +) -> t.Callable[[_AnyCallable], GrpType]: ... + + +# variant: with optional string name, no cls argument provided. +@t.overload +def group( + name: str | None = ..., cls: None = None, **attrs: t.Any +) -> t.Callable[[_AnyCallable], Group]: ... + + +def group( + name: str | _AnyCallable | None = None, + cls: type[GrpType] | None = None, + **attrs: t.Any, +) -> Group | t.Callable[[_AnyCallable], Group | GrpType]: + """Creates a new :class:`Group` with a function as callback. This + works otherwise the same as :func:`command` just that the `cls` + parameter is set to :class:`Group`. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + """ + if cls is None: + cls = t.cast("type[GrpType]", Group) + + if callable(name): + return command(cls=cls, **attrs)(name) + + return command(name, cls, **attrs) + + +def _param_memo(f: t.Callable[..., t.Any], param: Parameter) -> None: + if isinstance(f, Command): + f.params.append(param) + else: + if not hasattr(f, "__click_params__"): + f.__click_params__ = [] # type: ignore + + f.__click_params__.append(param) # type: ignore + + +def argument( + *param_decls: str, cls: type[Argument] | None = None, **attrs: t.Any +) -> t.Callable[[FC], FC]: + """Attaches an argument to the command. All positional arguments are + passed as parameter declarations to :class:`Argument`; all keyword + arguments are forwarded unchanged (except ``cls``). + This is equivalent to creating an :class:`Argument` instance manually + and attaching it to the :attr:`Command.params` list. + + For the default argument class, refer to :class:`Argument` and + :class:`Parameter` for descriptions of parameters. + + :param cls: the argument class to instantiate. This defaults to + :class:`Argument`. + :param param_decls: Passed as positional arguments to the constructor of + ``cls``. + :param attrs: Passed as keyword arguments to the constructor of ``cls``. + """ + if cls is None: + cls = Argument + + def decorator(f: FC) -> FC: + _param_memo(f, cls(param_decls, **attrs)) + return f + + return decorator + + +def option( + *param_decls: str, cls: type[Option] | None = None, **attrs: t.Any +) -> t.Callable[[FC], FC]: + """Attaches an option to the command. All positional arguments are + passed as parameter declarations to :class:`Option`; all keyword + arguments are forwarded unchanged (except ``cls``). + This is equivalent to creating an :class:`Option` instance manually + and attaching it to the :attr:`Command.params` list. + + For the default option class, refer to :class:`Option` and + :class:`Parameter` for descriptions of parameters. + + :param cls: the option class to instantiate. This defaults to + :class:`Option`. + :param param_decls: Passed as positional arguments to the constructor of + ``cls``. + :param attrs: Passed as keyword arguments to the constructor of ``cls``. + """ + if cls is None: + cls = Option + + def decorator(f: FC) -> FC: + _param_memo(f, cls(param_decls, **attrs)) + return f + + return decorator + + +def confirmation_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: + """Add a ``--yes`` option which shows a prompt before continuing if + not passed. 
If the prompt is declined, the program will exit. + + :param param_decls: One or more option names. Defaults to the single + value ``"--yes"``. + :param kwargs: Extra arguments are passed to :func:`option`. + """ + + def callback(ctx: Context, param: Parameter, value: bool) -> None: + if not value: + ctx.abort() + + if not param_decls: + param_decls = ("--yes",) + + kwargs.setdefault("is_flag", True) + kwargs.setdefault("callback", callback) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("prompt", "Do you want to continue?") + kwargs.setdefault("help", "Confirm the action without prompting.") + return option(*param_decls, **kwargs) + + +def password_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: + """Add a ``--password`` option which prompts for a password, hiding + input and asking to enter the value again for confirmation. + + :param param_decls: One or more option names. Defaults to the single + value ``"--password"``. + :param kwargs: Extra arguments are passed to :func:`option`. + """ + if not param_decls: + param_decls = ("--password",) + + kwargs.setdefault("prompt", True) + kwargs.setdefault("confirmation_prompt", True) + kwargs.setdefault("hide_input", True) + return option(*param_decls, **kwargs) + + +def version_option( + version: str | None = None, + *param_decls: str, + package_name: str | None = None, + prog_name: str | None = None, + message: str | None = None, + **kwargs: t.Any, +) -> t.Callable[[FC], FC]: + """Add a ``--version`` option which immediately prints the version + number and exits the program. + + If ``version`` is not provided, Click will try to detect it using + :func:`importlib.metadata.version` to get the version for the + ``package_name``. + + If ``package_name`` is not provided, Click will try to detect it by + inspecting the stack frames. This will be used to detect the + version, so it must match the name of the installed package. + + :param version: The version number to show. If not provided, Click + will try to detect it. + :param param_decls: One or more option names. Defaults to the single + value ``"--version"``. + :param package_name: The package name to detect the version from. If + not provided, Click will try to detect it. + :param prog_name: The name of the CLI to show in the message. If not + provided, it will be detected from the command. + :param message: The message to show. The values ``%(prog)s``, + ``%(package)s``, and ``%(version)s`` are available. Defaults to + ``"%(prog)s, version %(version)s"``. + :param kwargs: Extra arguments are passed to :func:`option`. + :raise RuntimeError: ``version`` could not be detected. + + .. versionchanged:: 8.0 + Add the ``package_name`` parameter, and the ``%(package)s`` + value for messages. + + .. versionchanged:: 8.0 + Use :mod:`importlib.metadata` instead of ``pkg_resources``. The + version is detected based on the package name, not the entry + point name. The Python package name must match the installed + package name, or be passed with ``package_name=``. 
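+
+    A minimal sketch (assumes a distribution named ``example-cli`` is
+    installed so the version can be detected)::
+
+        import click
+
+        @click.command()
+        @click.version_option(package_name="example-cli")
+        def cli() -> None:
+            pass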
+ """ + if message is None: + message = _("%(prog)s, version %(version)s") + + if version is None and package_name is None: + frame = inspect.currentframe() + f_back = frame.f_back if frame is not None else None + f_globals = f_back.f_globals if f_back is not None else None + # break reference cycle + # https://docs.python.org/3/library/inspect.html#the-interpreter-stack + del frame + + if f_globals is not None: + package_name = f_globals.get("__name__") + + if package_name == "__main__": + package_name = f_globals.get("__package__") + + if package_name: + package_name = package_name.partition(".")[0] + + def callback(ctx: Context, param: Parameter, value: bool) -> None: + if not value or ctx.resilient_parsing: + return + + nonlocal prog_name + nonlocal version + + if prog_name is None: + prog_name = ctx.find_root().info_name + + if version is None and package_name is not None: + import importlib.metadata + + try: + version = importlib.metadata.version(package_name) + except importlib.metadata.PackageNotFoundError: + raise RuntimeError( + f"{package_name!r} is not installed. Try passing" + " 'package_name' instead." + ) from None + + if version is None: + raise RuntimeError( + f"Could not determine the version for {package_name!r} automatically." + ) + + echo( + message % {"prog": prog_name, "package": package_name, "version": version}, + color=ctx.color, + ) + ctx.exit() + + if not param_decls: + param_decls = ("--version",) + + kwargs.setdefault("is_flag", True) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("is_eager", True) + kwargs.setdefault("help", _("Show the version and exit.")) + kwargs["callback"] = callback + return option(*param_decls, **kwargs) + + +def help_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: + """Pre-configured ``--help`` option which immediately prints the help page + and exits the program. + + :param param_decls: One or more option names. Defaults to the single + value ``"--help"``. + :param kwargs: Extra arguments are passed to :func:`option`. 
+ """ + + def show_help(ctx: Context, param: Parameter, value: bool) -> None: + """Callback that print the help page on ```` and exits.""" + if value and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + if not param_decls: + param_decls = ("--help",) + + kwargs.setdefault("is_flag", True) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("is_eager", True) + kwargs.setdefault("help", _("Show this message and exit.")) + kwargs.setdefault("callback", show_help) + + return option(*param_decls, **kwargs) diff --git a/venv/Lib/site-packages/click/exceptions.py b/venv/Lib/site-packages/click/exceptions.py new file mode 100644 index 00000000..f141a832 --- /dev/null +++ b/venv/Lib/site-packages/click/exceptions.py @@ -0,0 +1,308 @@ +from __future__ import annotations + +import collections.abc as cabc +import typing as t +from gettext import gettext as _ +from gettext import ngettext + +from ._compat import get_text_stderr +from .globals import resolve_color_default +from .utils import echo +from .utils import format_filename + +if t.TYPE_CHECKING: + from .core import Command + from .core import Context + from .core import Parameter + + +def _join_param_hints(param_hint: cabc.Sequence[str] | str | None) -> str | None: + if param_hint is not None and not isinstance(param_hint, str): + return " / ".join(repr(x) for x in param_hint) + + return param_hint + + +class ClickException(Exception): + """An exception that Click can handle and show to the user.""" + + #: The exit code for this exception. + exit_code = 1 + + def __init__(self, message: str) -> None: + super().__init__(message) + # The context will be removed by the time we print the message, so cache + # the color settings here to be used later on (in `show`) + self.show_color: bool | None = resolve_color_default() + self.message = message + + def format_message(self) -> str: + return self.message + + def __str__(self) -> str: + return self.message + + def show(self, file: t.IO[t.Any] | None = None) -> None: + if file is None: + file = get_text_stderr() + + echo( + _("Error: {message}").format(message=self.format_message()), + file=file, + color=self.show_color, + ) + + +class UsageError(ClickException): + """An internal exception that signals a usage error. This typically + aborts any further handling. + + :param message: the error message to display. + :param ctx: optionally the context that caused this error. Click will + fill in the context automatically in some situations. + """ + + exit_code = 2 + + def __init__(self, message: str, ctx: Context | None = None) -> None: + super().__init__(message) + self.ctx = ctx + self.cmd: Command | None = self.ctx.command if self.ctx else None + + def show(self, file: t.IO[t.Any] | None = None) -> None: + if file is None: + file = get_text_stderr() + color = None + hint = "" + if ( + self.ctx is not None + and self.ctx.command.get_help_option(self.ctx) is not None + ): + hint = _("Try '{command} {option}' for help.").format( + command=self.ctx.command_path, option=self.ctx.help_option_names[0] + ) + hint = f"{hint}\n" + if self.ctx is not None: + color = self.ctx.color + echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color) + echo( + _("Error: {message}").format(message=self.format_message()), + file=file, + color=color, + ) + + +class BadParameter(UsageError): + """An exception that formats out a standardized error message for a + bad parameter. 
This is useful when thrown from a callback or type as + Click will attach contextual information to it (for instance, which + parameter it is). + + .. versionadded:: 2.0 + + :param param: the parameter object that caused this error. This can + be left out, and Click will attach this info itself + if possible. + :param param_hint: a string that shows up as parameter name. This + can be used as alternative to `param` in cases + where custom validation should happen. If it is + a string it's used as such, if it's a list then + each item is quoted and separated. + """ + + def __init__( + self, + message: str, + ctx: Context | None = None, + param: Parameter | None = None, + param_hint: str | None = None, + ) -> None: + super().__init__(message, ctx) + self.param = param + self.param_hint = param_hint + + def format_message(self) -> str: + if self.param_hint is not None: + param_hint = self.param_hint + elif self.param is not None: + param_hint = self.param.get_error_hint(self.ctx) # type: ignore + else: + return _("Invalid value: {message}").format(message=self.message) + + return _("Invalid value for {param_hint}: {message}").format( + param_hint=_join_param_hints(param_hint), message=self.message + ) + + +class MissingParameter(BadParameter): + """Raised if click required an option or argument but it was not + provided when invoking the script. + + .. versionadded:: 4.0 + + :param param_type: a string that indicates the type of the parameter. + The default is to inherit the parameter type from + the given `param`. Valid values are ``'parameter'``, + ``'option'`` or ``'argument'``. + """ + + def __init__( + self, + message: str | None = None, + ctx: Context | None = None, + param: Parameter | None = None, + param_hint: str | None = None, + param_type: str | None = None, + ) -> None: + super().__init__(message or "", ctx, param, param_hint) + self.param_type = param_type + + def format_message(self) -> str: + if self.param_hint is not None: + param_hint: str | None = self.param_hint + elif self.param is not None: + param_hint = self.param.get_error_hint(self.ctx) # type: ignore + else: + param_hint = None + + param_hint = _join_param_hints(param_hint) + param_hint = f" {param_hint}" if param_hint else "" + + param_type = self.param_type + if param_type is None and self.param is not None: + param_type = self.param.param_type_name + + msg = self.message + if self.param is not None: + msg_extra = self.param.type.get_missing_message( + param=self.param, ctx=self.ctx + ) + if msg_extra: + if msg: + msg += f". {msg_extra}" + else: + msg = msg_extra + + msg = f" {msg}" if msg else "" + + # Translate param_type for known types. + if param_type == "argument": + missing = _("Missing argument") + elif param_type == "option": + missing = _("Missing option") + elif param_type == "parameter": + missing = _("Missing parameter") + else: + missing = _("Missing {param_type}").format(param_type=param_type) + + return f"{missing}{param_hint}.{msg}" + + def __str__(self) -> str: + if not self.message: + param_name = self.param.name if self.param else None + return _("Missing parameter: {param_name}").format(param_name=param_name) + else: + return self.message + + +class NoSuchOption(UsageError): + """Raised if click attempted to handle an option that does not + exist. + + .. 
versionadded:: 4.0 + """ + + def __init__( + self, + option_name: str, + message: str | None = None, + possibilities: cabc.Sequence[str] | None = None, + ctx: Context | None = None, + ) -> None: + if message is None: + message = _("No such option: {name}").format(name=option_name) + + super().__init__(message, ctx) + self.option_name = option_name + self.possibilities = possibilities + + def format_message(self) -> str: + if not self.possibilities: + return self.message + + possibility_str = ", ".join(sorted(self.possibilities)) + suggest = ngettext( + "Did you mean {possibility}?", + "(Possible options: {possibilities})", + len(self.possibilities), + ).format(possibility=possibility_str, possibilities=possibility_str) + return f"{self.message} {suggest}" + + +class BadOptionUsage(UsageError): + """Raised if an option is generally supplied but the use of the option + was incorrect. This is for instance raised if the number of arguments + for an option is not correct. + + .. versionadded:: 4.0 + + :param option_name: the name of the option being used incorrectly. + """ + + def __init__( + self, option_name: str, message: str, ctx: Context | None = None + ) -> None: + super().__init__(message, ctx) + self.option_name = option_name + + +class BadArgumentUsage(UsageError): + """Raised if an argument is generally supplied but the use of the argument + was incorrect. This is for instance raised if the number of values + for an argument is not correct. + + .. versionadded:: 6.0 + """ + + +class NoArgsIsHelpError(UsageError): + def __init__(self, ctx: Context) -> None: + self.ctx: Context + super().__init__(ctx.get_help(), ctx=ctx) + + def show(self, file: t.IO[t.Any] | None = None) -> None: + echo(self.format_message(), file=file, err=True, color=self.ctx.color) + + +class FileError(ClickException): + """Raised if a file cannot be opened.""" + + def __init__(self, filename: str, hint: str | None = None) -> None: + if hint is None: + hint = _("unknown error") + + super().__init__(hint) + self.ui_filename: str = format_filename(filename) + self.filename = filename + + def format_message(self) -> str: + return _("Could not open file {filename!r}: {message}").format( + filename=self.ui_filename, message=self.message + ) + + +class Abort(RuntimeError): + """An internal signalling exception that signals Click to abort.""" + + +class Exit(RuntimeError): + """An exception that indicates that the application should exit with some + status code. + + :param code: the status code to exit with. + """ + + __slots__ = ("exit_code",) + + def __init__(self, code: int = 0) -> None: + self.exit_code: int = code diff --git a/venv/Lib/site-packages/click/formatting.py b/venv/Lib/site-packages/click/formatting.py new file mode 100644 index 00000000..9891f880 --- /dev/null +++ b/venv/Lib/site-packages/click/formatting.py @@ -0,0 +1,301 @@ +from __future__ import annotations + +import collections.abc as cabc +from contextlib import contextmanager +from gettext import gettext as _ + +from ._compat import term_len +from .parser import _split_opt + +# Can force a width. 
This is used by the test system +FORCED_WIDTH: int | None = None + + +def measure_table(rows: cabc.Iterable[tuple[str, str]]) -> tuple[int, ...]: + widths: dict[int, int] = {} + + for row in rows: + for idx, col in enumerate(row): + widths[idx] = max(widths.get(idx, 0), term_len(col)) + + return tuple(y for x, y in sorted(widths.items())) + + +def iter_rows( + rows: cabc.Iterable[tuple[str, str]], col_count: int +) -> cabc.Iterator[tuple[str, ...]]: + for row in rows: + yield row + ("",) * (col_count - len(row)) + + +def wrap_text( + text: str, + width: int = 78, + initial_indent: str = "", + subsequent_indent: str = "", + preserve_paragraphs: bool = False, +) -> str: + """A helper function that intelligently wraps text. By default, it + assumes that it operates on a single paragraph of text but if the + `preserve_paragraphs` parameter is provided it will intelligently + handle paragraphs (defined by two empty lines). + + If paragraphs are handled, a paragraph can be prefixed with an empty + line containing the ``\\b`` character (``\\x08``) to indicate that + no rewrapping should happen in that block. + + :param text: the text that should be rewrapped. + :param width: the maximum width for the text. + :param initial_indent: the initial indent that should be placed on the + first line as a string. + :param subsequent_indent: the indent string that should be placed on + each consecutive line. + :param preserve_paragraphs: if this flag is set then the wrapping will + intelligently handle paragraphs. + """ + from ._textwrap import TextWrapper + + text = text.expandtabs() + wrapper = TextWrapper( + width, + initial_indent=initial_indent, + subsequent_indent=subsequent_indent, + replace_whitespace=False, + ) + if not preserve_paragraphs: + return wrapper.fill(text) + + p: list[tuple[int, bool, str]] = [] + buf: list[str] = [] + indent = None + + def _flush_par() -> None: + if not buf: + return + if buf[0].strip() == "\b": + p.append((indent or 0, True, "\n".join(buf[1:]))) + else: + p.append((indent or 0, False, " ".join(buf))) + del buf[:] + + for line in text.splitlines(): + if not line: + _flush_par() + indent = None + else: + if indent is None: + orig_len = term_len(line) + line = line.lstrip() + indent = orig_len - term_len(line) + buf.append(line) + _flush_par() + + rv = [] + for indent, raw, text in p: + with wrapper.extra_indent(" " * indent): + if raw: + rv.append(wrapper.indent_only(text)) + else: + rv.append(wrapper.fill(text)) + + return "\n\n".join(rv) + + +class HelpFormatter: + """This class helps with formatting text-based help pages. It's + usually just needed for very special internal cases, but it's also + exposed so that developers can write their own fancy outputs. + + At present, it always writes into memory. + + :param indent_increment: the additional increment for each level. + :param width: the width for the text. This defaults to the terminal + width clamped to a maximum of 78. 
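+
+    A minimal sketch of standalone use (the program and option names are
+    illustrative)::
+
+        formatter = HelpFormatter(width=60)
+        formatter.write_usage("tool", "[OPTIONS] SRC DST")
+        with formatter.section("Options"):
+            formatter.write_dl([("--verbose", "Enable verbose output.")])
+        print(formatter.getvalue())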
+ """ + + def __init__( + self, + indent_increment: int = 2, + width: int | None = None, + max_width: int | None = None, + ) -> None: + import shutil + + self.indent_increment = indent_increment + if max_width is None: + max_width = 80 + if width is None: + width = FORCED_WIDTH + if width is None: + width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50) + self.width = width + self.current_indent: int = 0 + self.buffer: list[str] = [] + + def write(self, string: str) -> None: + """Writes a unicode string into the internal buffer.""" + self.buffer.append(string) + + def indent(self) -> None: + """Increases the indentation.""" + self.current_indent += self.indent_increment + + def dedent(self) -> None: + """Decreases the indentation.""" + self.current_indent -= self.indent_increment + + def write_usage(self, prog: str, args: str = "", prefix: str | None = None) -> None: + """Writes a usage line into the buffer. + + :param prog: the program name. + :param args: whitespace separated list of arguments. + :param prefix: The prefix for the first line. Defaults to + ``"Usage: "``. + """ + if prefix is None: + prefix = f"{_('Usage:')} " + + usage_prefix = f"{prefix:>{self.current_indent}}{prog} " + text_width = self.width - self.current_indent + + if text_width >= (term_len(usage_prefix) + 20): + # The arguments will fit to the right of the prefix. + indent = " " * term_len(usage_prefix) + self.write( + wrap_text( + args, + text_width, + initial_indent=usage_prefix, + subsequent_indent=indent, + ) + ) + else: + # The prefix is too long, put the arguments on the next line. + self.write(usage_prefix) + self.write("\n") + indent = " " * (max(self.current_indent, term_len(prefix)) + 4) + self.write( + wrap_text( + args, text_width, initial_indent=indent, subsequent_indent=indent + ) + ) + + self.write("\n") + + def write_heading(self, heading: str) -> None: + """Writes a heading into the buffer.""" + self.write(f"{'':>{self.current_indent}}{heading}:\n") + + def write_paragraph(self) -> None: + """Writes a paragraph into the buffer.""" + if self.buffer: + self.write("\n") + + def write_text(self, text: str) -> None: + """Writes re-indented text into the buffer. This rewraps and + preserves paragraphs. + """ + indent = " " * self.current_indent + self.write( + wrap_text( + text, + self.width, + initial_indent=indent, + subsequent_indent=indent, + preserve_paragraphs=True, + ) + ) + self.write("\n") + + def write_dl( + self, + rows: cabc.Sequence[tuple[str, str]], + col_max: int = 30, + col_spacing: int = 2, + ) -> None: + """Writes a definition list into the buffer. This is how options + and commands are usually formatted. + + :param rows: a list of two item tuples for the terms and values. + :param col_max: the maximum width of the first column. + :param col_spacing: the number of spaces between the first and + second column. 
+ """ + rows = list(rows) + widths = measure_table(rows) + if len(widths) != 2: + raise TypeError("Expected two columns for definition list") + + first_col = min(widths[0], col_max) + col_spacing + + for first, second in iter_rows(rows, len(widths)): + self.write(f"{'':>{self.current_indent}}{first}") + if not second: + self.write("\n") + continue + if term_len(first) <= first_col - col_spacing: + self.write(" " * (first_col - term_len(first))) + else: + self.write("\n") + self.write(" " * (first_col + self.current_indent)) + + text_width = max(self.width - first_col - 2, 10) + wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True) + lines = wrapped_text.splitlines() + + if lines: + self.write(f"{lines[0]}\n") + + for line in lines[1:]: + self.write(f"{'':>{first_col + self.current_indent}}{line}\n") + else: + self.write("\n") + + @contextmanager + def section(self, name: str) -> cabc.Iterator[None]: + """Helpful context manager that writes a paragraph, a heading, + and the indents. + + :param name: the section name that is written as heading. + """ + self.write_paragraph() + self.write_heading(name) + self.indent() + try: + yield + finally: + self.dedent() + + @contextmanager + def indentation(self) -> cabc.Iterator[None]: + """A context manager that increases the indentation.""" + self.indent() + try: + yield + finally: + self.dedent() + + def getvalue(self) -> str: + """Returns the buffer contents.""" + return "".join(self.buffer) + + +def join_options(options: cabc.Sequence[str]) -> tuple[str, bool]: + """Given a list of option strings this joins them in the most appropriate + way and returns them in the form ``(formatted_string, + any_prefix_is_slash)`` where the second item in the tuple is a flag that + indicates if any of the option prefixes was a slash. + """ + rv = [] + any_prefix_is_slash = False + + for opt in options: + prefix = _split_opt(opt)[0] + + if prefix == "/": + any_prefix_is_slash = True + + rv.append((len(prefix), opt)) + + rv.sort(key=lambda x: x[0]) + return ", ".join(x[1] for x in rv), any_prefix_is_slash diff --git a/venv/Lib/site-packages/click/globals.py b/venv/Lib/site-packages/click/globals.py new file mode 100644 index 00000000..a2f91723 --- /dev/null +++ b/venv/Lib/site-packages/click/globals.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +import typing as t +from threading import local + +if t.TYPE_CHECKING: + from .core import Context + +_local = local() + + +@t.overload +def get_current_context(silent: t.Literal[False] = False) -> Context: ... + + +@t.overload +def get_current_context(silent: bool = ...) -> Context | None: ... + + +def get_current_context(silent: bool = False) -> Context | None: + """Returns the current click context. This can be used as a way to + access the current context object from anywhere. This is a more implicit + alternative to the :func:`pass_context` decorator. This function is + primarily useful for helpers such as :func:`echo` which might be + interested in changing its behavior based on the current context. + + To push the current context, :meth:`Context.scope` can be used. + + .. versionadded:: 5.0 + + :param silent: if set to `True` the return value is `None` if no context + is available. The default behavior is to raise a + :exc:`RuntimeError`. 
+ """ + try: + return t.cast("Context", _local.stack[-1]) + except (AttributeError, IndexError) as e: + if not silent: + raise RuntimeError("There is no active click context.") from e + + return None + + +def push_context(ctx: Context) -> None: + """Pushes a new context to the current stack.""" + _local.__dict__.setdefault("stack", []).append(ctx) + + +def pop_context() -> None: + """Removes the top level from the stack.""" + _local.stack.pop() + + +def resolve_color_default(color: bool | None = None) -> bool | None: + """Internal helper to get the default value of the color flag. If a + value is passed it's returned unchanged, otherwise it's looked up from + the current context. + """ + if color is not None: + return color + + ctx = get_current_context(silent=True) + + if ctx is not None: + return ctx.color + + return None diff --git a/venv/Lib/site-packages/click/parser.py b/venv/Lib/site-packages/click/parser.py new file mode 100644 index 00000000..a8b7d263 --- /dev/null +++ b/venv/Lib/site-packages/click/parser.py @@ -0,0 +1,532 @@ +""" +This module started out as largely a copy paste from the stdlib's +optparse module with the features removed that we do not need from +optparse because we implement them in Click on a higher level (for +instance type handling, help formatting and a lot more). + +The plan is to remove more and more from here over time. + +The reason this is a different module and not optparse from the stdlib +is that there are differences in 2.x and 3.x about the error messages +generated and optparse in the stdlib uses gettext for no good reason +and might cause us issues. + +Click uses parts of optparse written by Gregory P. Ward and maintained +by the Python Software Foundation. This is limited to code in parser.py. + +Copyright 2001-2006 Gregory P. Ward. All rights reserved. +Copyright 2002-2006 Python Software Foundation. All rights reserved. +""" + +# This code uses parts of optparse written by Gregory P. Ward and +# maintained by the Python Software Foundation. +# Copyright 2001-2006 Gregory P. Ward +# Copyright 2002-2006 Python Software Foundation +from __future__ import annotations + +import collections.abc as cabc +import typing as t +from collections import deque +from gettext import gettext as _ +from gettext import ngettext + +from .exceptions import BadArgumentUsage +from .exceptions import BadOptionUsage +from .exceptions import NoSuchOption +from .exceptions import UsageError + +if t.TYPE_CHECKING: + from .core import Argument as CoreArgument + from .core import Context + from .core import Option as CoreOption + from .core import Parameter as CoreParameter + +V = t.TypeVar("V") + +# Sentinel value that indicates an option was passed as a flag without a +# value but is not a flag option. Option.consume_value uses this to +# prompt or use the flag_value. +_flag_needs_value = object() + + +def _unpack_args( + args: cabc.Sequence[str], nargs_spec: cabc.Sequence[int] +) -> tuple[cabc.Sequence[str | cabc.Sequence[str | None] | None], list[str]]: + """Given an iterable of arguments and an iterable of nargs specifications, + it returns a tuple with all the unpacked arguments at the first index + and all remaining arguments as the second. + + The nargs specification is the number of arguments that should be consumed + or `-1` to indicate that this position should eat up all the remainders. + + Missing items are filled with `None`. + """ + args = deque(args) + nargs_spec = deque(nargs_spec) + rv: list[str | tuple[str | None, ...] 
| None] = [] + spos: int | None = None + + def _fetch(c: deque[V]) -> V | None: + try: + if spos is None: + return c.popleft() + else: + return c.pop() + except IndexError: + return None + + while nargs_spec: + nargs = _fetch(nargs_spec) + + if nargs is None: + continue + + if nargs == 1: + rv.append(_fetch(args)) + elif nargs > 1: + x = [_fetch(args) for _ in range(nargs)] + + # If we're reversed, we're pulling in the arguments in reverse, + # so we need to turn them around. + if spos is not None: + x.reverse() + + rv.append(tuple(x)) + elif nargs < 0: + if spos is not None: + raise TypeError("Cannot have two nargs < 0") + + spos = len(rv) + rv.append(None) + + # spos is the position of the wildcard (star). If it's not `None`, + # we fill it with the remainder. + if spos is not None: + rv[spos] = tuple(args) + args = [] + rv[spos + 1 :] = reversed(rv[spos + 1 :]) + + return tuple(rv), list(args) + + +def _split_opt(opt: str) -> tuple[str, str]: + first = opt[:1] + if first.isalnum(): + return "", opt + if opt[1:2] == first: + return opt[:2], opt[2:] + return first, opt[1:] + + +def _normalize_opt(opt: str, ctx: Context | None) -> str: + if ctx is None or ctx.token_normalize_func is None: + return opt + prefix, opt = _split_opt(opt) + return f"{prefix}{ctx.token_normalize_func(opt)}" + + +class _Option: + def __init__( + self, + obj: CoreOption, + opts: cabc.Sequence[str], + dest: str | None, + action: str | None = None, + nargs: int = 1, + const: t.Any | None = None, + ): + self._short_opts = [] + self._long_opts = [] + self.prefixes: set[str] = set() + + for opt in opts: + prefix, value = _split_opt(opt) + if not prefix: + raise ValueError(f"Invalid start character for option ({opt})") + self.prefixes.add(prefix[0]) + if len(prefix) == 1 and len(value) == 1: + self._short_opts.append(opt) + else: + self._long_opts.append(opt) + self.prefixes.add(prefix) + + if action is None: + action = "store" + + self.dest = dest + self.action = action + self.nargs = nargs + self.const = const + self.obj = obj + + @property + def takes_value(self) -> bool: + return self.action in ("store", "append") + + def process(self, value: t.Any, state: _ParsingState) -> None: + if self.action == "store": + state.opts[self.dest] = value # type: ignore + elif self.action == "store_const": + state.opts[self.dest] = self.const # type: ignore + elif self.action == "append": + state.opts.setdefault(self.dest, []).append(value) # type: ignore + elif self.action == "append_const": + state.opts.setdefault(self.dest, []).append(self.const) # type: ignore + elif self.action == "count": + state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 # type: ignore + else: + raise ValueError(f"unknown action '{self.action}'") + state.order.append(self.obj) + + +class _Argument: + def __init__(self, obj: CoreArgument, dest: str | None, nargs: int = 1): + self.dest = dest + self.nargs = nargs + self.obj = obj + + def process( + self, + value: str | cabc.Sequence[str | None] | None, + state: _ParsingState, + ) -> None: + if self.nargs > 1: + assert value is not None + holes = sum(1 for x in value if x is None) + if holes == len(value): + value = None + elif holes != 0: + raise BadArgumentUsage( + _("Argument {name!r} takes {nargs} values.").format( + name=self.dest, nargs=self.nargs + ) + ) + + if self.nargs == -1 and self.obj.envvar is not None and value == (): + # Replace empty tuple with None so that a value from the + # environment may be tried. 
+ value = None + + state.opts[self.dest] = value # type: ignore + state.order.append(self.obj) + + +class _ParsingState: + def __init__(self, rargs: list[str]) -> None: + self.opts: dict[str, t.Any] = {} + self.largs: list[str] = [] + self.rargs = rargs + self.order: list[CoreParameter] = [] + + +class _OptionParser: + """The option parser is an internal class that is ultimately used to + parse options and arguments. It's modelled after optparse and brings + a similar but vastly simplified API. It should generally not be used + directly as the high level Click classes wrap it for you. + + It's not nearly as extensible as optparse or argparse as it does not + implement features that are implemented on a higher level (such as + types or defaults). + + :param ctx: optionally the :class:`~click.Context` where this parser + should go with. + + .. deprecated:: 8.2 + Will be removed in Click 9.0. + """ + + def __init__(self, ctx: Context | None = None) -> None: + #: The :class:`~click.Context` for this parser. This might be + #: `None` for some advanced use cases. + self.ctx = ctx + #: This controls how the parser deals with interspersed arguments. + #: If this is set to `False`, the parser will stop on the first + #: non-option. Click uses this to implement nested subcommands + #: safely. + self.allow_interspersed_args: bool = True + #: This tells the parser how to deal with unknown options. By + #: default it will error out (which is sensible), but there is a + #: second mode where it will ignore it and continue processing + #: after shifting all the unknown options into the resulting args. + self.ignore_unknown_options: bool = False + + if ctx is not None: + self.allow_interspersed_args = ctx.allow_interspersed_args + self.ignore_unknown_options = ctx.ignore_unknown_options + + self._short_opt: dict[str, _Option] = {} + self._long_opt: dict[str, _Option] = {} + self._opt_prefixes = {"-", "--"} + self._args: list[_Argument] = [] + + def add_option( + self, + obj: CoreOption, + opts: cabc.Sequence[str], + dest: str | None, + action: str | None = None, + nargs: int = 1, + const: t.Any | None = None, + ) -> None: + """Adds a new option named `dest` to the parser. The destination + is not inferred (unlike with optparse) and needs to be explicitly + provided. Action can be any of ``store``, ``store_const``, + ``append``, ``append_const`` or ``count``. + + The `obj` can be used to identify the option in the order list + that is returned from the parser. + """ + opts = [_normalize_opt(opt, self.ctx) for opt in opts] + option = _Option(obj, opts, dest, action=action, nargs=nargs, const=const) + self._opt_prefixes.update(option.prefixes) + for opt in option._short_opts: + self._short_opt[opt] = option + for opt in option._long_opts: + self._long_opt[opt] = option + + def add_argument(self, obj: CoreArgument, dest: str | None, nargs: int = 1) -> None: + """Adds a positional argument named `dest` to the parser. + + The `obj` can be used to identify the option in the order list + that is returned from the parser. + """ + self._args.append(_Argument(obj, dest=dest, nargs=nargs)) + + def parse_args( + self, args: list[str] + ) -> tuple[dict[str, t.Any], list[str], list[CoreParameter]]: + """Parses positional arguments and returns ``(values, args, order)`` + for the parsed options and arguments as well as the leftover + arguments if there are any. The order is a list of objects as they + appear on the command line. If arguments appear multiple times they + will be memorized multiple times as well. 
+ """ + state = _ParsingState(args) + try: + self._process_args_for_options(state) + self._process_args_for_args(state) + except UsageError: + if self.ctx is None or not self.ctx.resilient_parsing: + raise + return state.opts, state.largs, state.order + + def _process_args_for_args(self, state: _ParsingState) -> None: + pargs, args = _unpack_args( + state.largs + state.rargs, [x.nargs for x in self._args] + ) + + for idx, arg in enumerate(self._args): + arg.process(pargs[idx], state) + + state.largs = args + state.rargs = [] + + def _process_args_for_options(self, state: _ParsingState) -> None: + while state.rargs: + arg = state.rargs.pop(0) + arglen = len(arg) + # Double dashes always handled explicitly regardless of what + # prefixes are valid. + if arg == "--": + return + elif arg[:1] in self._opt_prefixes and arglen > 1: + self._process_opts(arg, state) + elif self.allow_interspersed_args: + state.largs.append(arg) + else: + state.rargs.insert(0, arg) + return + + # Say this is the original argument list: + # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] + # ^ + # (we are about to process arg(i)). + # + # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of + # [arg0, ..., arg(i-1)] (any options and their arguments will have + # been removed from largs). + # + # The while loop will usually consume 1 or more arguments per pass. + # If it consumes 1 (eg. arg is an option that takes no arguments), + # then after _process_arg() is done the situation is: + # + # largs = subset of [arg0, ..., arg(i)] + # rargs = [arg(i+1), ..., arg(N-1)] + # + # If allow_interspersed_args is false, largs will always be + # *empty* -- still a subset of [arg0, ..., arg(i-1)], but + # not a very interesting subset! + + def _match_long_opt( + self, opt: str, explicit_value: str | None, state: _ParsingState + ) -> None: + if opt not in self._long_opt: + from difflib import get_close_matches + + possibilities = get_close_matches(opt, self._long_opt) + raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx) + + option = self._long_opt[opt] + if option.takes_value: + # At this point it's safe to modify rargs by injecting the + # explicit value, because no exception is raised in this + # branch. This means that the inserted value will be fully + # consumed. + if explicit_value is not None: + state.rargs.insert(0, explicit_value) + + value = self._get_value_from_state(opt, option, state) + + elif explicit_value is not None: + raise BadOptionUsage( + opt, _("Option {name!r} does not take a value.").format(name=opt) + ) + + else: + value = None + + option.process(value, state) + + def _match_short_opt(self, arg: str, state: _ParsingState) -> None: + stop = False + i = 1 + prefix = arg[0] + unknown_options = [] + + for ch in arg[1:]: + opt = _normalize_opt(f"{prefix}{ch}", self.ctx) + option = self._short_opt.get(opt) + i += 1 + + if not option: + if self.ignore_unknown_options: + unknown_options.append(ch) + continue + raise NoSuchOption(opt, ctx=self.ctx) + if option.takes_value: + # Any characters left in arg? Pretend they're the + # next arg, and stop consuming characters of arg. + if i < len(arg): + state.rargs.insert(0, arg[i:]) + stop = True + + value = self._get_value_from_state(opt, option, state) + + else: + value = None + + option.process(value, state) + + if stop: + break + + # If we got any unknown options we recombine the string of the + # remaining options and re-attach the prefix, then report that + # to the state as new larg. 
This way there is basic combinatorics + # that can be achieved while still ignoring unknown arguments. + if self.ignore_unknown_options and unknown_options: + state.largs.append(f"{prefix}{''.join(unknown_options)}") + + def _get_value_from_state( + self, option_name: str, option: _Option, state: _ParsingState + ) -> t.Any: + nargs = option.nargs + + if len(state.rargs) < nargs: + if option.obj._flag_needs_value: + # Option allows omitting the value. + value = _flag_needs_value + else: + raise BadOptionUsage( + option_name, + ngettext( + "Option {name!r} requires an argument.", + "Option {name!r} requires {nargs} arguments.", + nargs, + ).format(name=option_name, nargs=nargs), + ) + elif nargs == 1: + next_rarg = state.rargs[0] + + if ( + option.obj._flag_needs_value + and isinstance(next_rarg, str) + and next_rarg[:1] in self._opt_prefixes + and len(next_rarg) > 1 + ): + # The next arg looks like the start of an option, don't + # use it as the value if omitting the value is allowed. + value = _flag_needs_value + else: + value = state.rargs.pop(0) + else: + value = tuple(state.rargs[:nargs]) + del state.rargs[:nargs] + + return value + + def _process_opts(self, arg: str, state: _ParsingState) -> None: + explicit_value = None + # Long option handling happens in two parts. The first part is + # supporting explicitly attached values. In any case, we will try + # to long match the option first. + if "=" in arg: + long_opt, explicit_value = arg.split("=", 1) + else: + long_opt = arg + norm_long_opt = _normalize_opt(long_opt, self.ctx) + + # At this point we will match the (assumed) long option through + # the long option matching code. Note that this allows options + # like "-foo" to be matched as long options. + try: + self._match_long_opt(norm_long_opt, explicit_value, state) + except NoSuchOption: + # At this point the long option matching failed, and we need + # to try with short options. However there is a special rule + # which says, that if we have a two character options prefix + # (applies to "--foo" for instance), we do not dispatch to the + # short option code and will instead raise the no option + # error. + if arg[:2] not in self._opt_prefixes: + self._match_short_opt(arg, state) + return + + if not self.ignore_unknown_options: + raise + + state.largs.append(arg) + + +def __getattr__(name: str) -> object: + import warnings + + if name in { + "OptionParser", + "Argument", + "Option", + "split_opt", + "normalize_opt", + "ParsingState", + }: + warnings.warn( + f"'parser.{name}' is deprecated and will be removed in Click 9.0." 
+ " The old parser is available in 'optparse'.", + DeprecationWarning, + stacklevel=2, + ) + return globals()[f"_{name}"] + + if name == "split_arg_string": + from .shell_completion import split_arg_string + + warnings.warn( + "Importing 'parser.split_arg_string' is deprecated, it will only be" + " available in 'shell_completion' in Click 9.0.", + DeprecationWarning, + stacklevel=2, + ) + return split_arg_string + + raise AttributeError(name) diff --git a/venv/Lib/site-packages/click/py.typed b/venv/Lib/site-packages/click/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/click/shell_completion.py b/venv/Lib/site-packages/click/shell_completion.py new file mode 100644 index 00000000..cdb58222 --- /dev/null +++ b/venv/Lib/site-packages/click/shell_completion.py @@ -0,0 +1,644 @@ +from __future__ import annotations + +import collections.abc as cabc +import os +import re +import typing as t +from gettext import gettext as _ + +from .core import Argument +from .core import Command +from .core import Context +from .core import Group +from .core import Option +from .core import Parameter +from .core import ParameterSource +from .utils import echo + + +def shell_complete( + cli: Command, + ctx_args: cabc.MutableMapping[str, t.Any], + prog_name: str, + complete_var: str, + instruction: str, +) -> int: + """Perform shell completion for the given CLI program. + + :param cli: Command being called. + :param ctx_args: Extra arguments to pass to + ``cli.make_context``. + :param prog_name: Name of the executable in the shell. + :param complete_var: Name of the environment variable that holds + the completion instruction. + :param instruction: Value of ``complete_var`` with the completion + instruction and shell, in the form ``instruction_shell``. + :return: Status code to exit with. + """ + shell, _, instruction = instruction.partition("_") + comp_cls = get_completion_class(shell) + + if comp_cls is None: + return 1 + + comp = comp_cls(cli, ctx_args, prog_name, complete_var) + + if instruction == "source": + echo(comp.source()) + return 0 + + if instruction == "complete": + echo(comp.complete()) + return 0 + + return 1 + + +class CompletionItem: + """Represents a completion value and metadata about the value. The + default metadata is ``type`` to indicate special shell handling, + and ``help`` if a shell supports showing a help string next to the + value. + + Arbitrary parameters can be passed when creating the object, and + accessed using ``item.attr``. If an attribute wasn't passed, + accessing it returns ``None``. + + :param value: The completion suggestion. + :param type: Tells the shell script to provide special completion + support for the type. Click uses ``"dir"`` and ``"file"``. + :param help: String shown next to the value if supported. + :param kwargs: Arbitrary metadata. The built-in implementations + don't use this, but custom type completions paired with custom + shell support could use it. + """ + + __slots__ = ("value", "type", "help", "_info") + + def __init__( + self, + value: t.Any, + type: str = "plain", + help: str | None = None, + **kwargs: t.Any, + ) -> None: + self.value: t.Any = value + self.type: str = type + self.help: str | None = help + self._info = kwargs + + def __getattr__(self, name: str) -> t.Any: + return self._info.get(name) + + +# Only Bash >= 4.4 has the nosort option. 
+_SOURCE_BASH = """\ +%(complete_func)s() { + local IFS=$'\\n' + local response + + response=$(env COMP_WORDS="${COMP_WORDS[*]}" COMP_CWORD=$COMP_CWORD \ +%(complete_var)s=bash_complete $1) + + for completion in $response; do + IFS=',' read type value <<< "$completion" + + if [[ $type == 'dir' ]]; then + COMPREPLY=() + compopt -o dirnames + elif [[ $type == 'file' ]]; then + COMPREPLY=() + compopt -o default + elif [[ $type == 'plain' ]]; then + COMPREPLY+=($value) + fi + done + + return 0 +} + +%(complete_func)s_setup() { + complete -o nosort -F %(complete_func)s %(prog_name)s +} + +%(complete_func)s_setup; +""" + +_SOURCE_ZSH = """\ +#compdef %(prog_name)s + +%(complete_func)s() { + local -a completions + local -a completions_with_descriptions + local -a response + (( ! $+commands[%(prog_name)s] )) && return 1 + + response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) \ +%(complete_var)s=zsh_complete %(prog_name)s)}") + + for type key descr in ${response}; do + if [[ "$type" == "plain" ]]; then + if [[ "$descr" == "_" ]]; then + completions+=("$key") + else + completions_with_descriptions+=("$key":"$descr") + fi + elif [[ "$type" == "dir" ]]; then + _path_files -/ + elif [[ "$type" == "file" ]]; then + _path_files -f + fi + done + + if [ -n "$completions_with_descriptions" ]; then + _describe -V unsorted completions_with_descriptions -U + fi + + if [ -n "$completions" ]; then + compadd -U -V unsorted -a completions + fi +} + +if [[ $zsh_eval_context[-1] == loadautofunc ]]; then + # autoload from fpath, call function directly + %(complete_func)s "$@" +else + # eval/source/. command, register function for later + compdef %(complete_func)s %(prog_name)s +fi +""" + +_SOURCE_FISH = """\ +function %(complete_func)s; + set -l response (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \ +COMP_CWORD=(commandline -t) %(prog_name)s); + + for completion in $response; + set -l metadata (string split "," $completion); + + if test $metadata[1] = "dir"; + __fish_complete_directories $metadata[2]; + else if test $metadata[1] = "file"; + __fish_complete_path $metadata[2]; + else if test $metadata[1] = "plain"; + echo $metadata[2]; + end; + end; +end; + +complete --no-files --command %(prog_name)s --arguments \ +"(%(complete_func)s)"; +""" + + +class ShellComplete: + """Base class for providing shell completion support. A subclass for + a given shell will override attributes and methods to implement the + completion instructions (``source`` and ``complete``). + + :param cli: Command being called. + :param prog_name: Name of the executable in the shell. + :param complete_var: Name of the environment variable that holds + the completion instruction. + + .. versionadded:: 8.0 + """ + + name: t.ClassVar[str] + """Name to register the shell as with :func:`add_completion_class`. + This is used in completion instructions (``{name}_source`` and + ``{name}_complete``). + """ + + source_template: t.ClassVar[str] + """Completion script template formatted by :meth:`source`. This must + be provided by subclasses. + """ + + def __init__( + self, + cli: Command, + ctx_args: cabc.MutableMapping[str, t.Any], + prog_name: str, + complete_var: str, + ) -> None: + self.cli = cli + self.ctx_args = ctx_args + self.prog_name = prog_name + self.complete_var = complete_var + + @property + def func_name(self) -> str: + """The name of the shell function defined by the completion + script. 
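+
+        For example, a ``prog_name`` of ``my-tool`` becomes the shell
+        function ``_my_tool_completion``.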
+ """ + safe_name = re.sub(r"\W*", "", self.prog_name.replace("-", "_"), flags=re.ASCII) + return f"_{safe_name}_completion" + + def source_vars(self) -> dict[str, t.Any]: + """Vars for formatting :attr:`source_template`. + + By default this provides ``complete_func``, ``complete_var``, + and ``prog_name``. + """ + return { + "complete_func": self.func_name, + "complete_var": self.complete_var, + "prog_name": self.prog_name, + } + + def source(self) -> str: + """Produce the shell script that defines the completion + function. By default this ``%``-style formats + :attr:`source_template` with the dict returned by + :meth:`source_vars`. + """ + return self.source_template % self.source_vars() + + def get_completion_args(self) -> tuple[list[str], str]: + """Use the env vars defined by the shell script to return a + tuple of ``args, incomplete``. This must be implemented by + subclasses. + """ + raise NotImplementedError + + def get_completions(self, args: list[str], incomplete: str) -> list[CompletionItem]: + """Determine the context and last complete command or parameter + from the complete args. Call that object's ``shell_complete`` + method to get the completions for the incomplete value. + + :param args: List of complete args before the incomplete value. + :param incomplete: Value being completed. May be empty. + """ + ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args) + obj, incomplete = _resolve_incomplete(ctx, args, incomplete) + return obj.shell_complete(ctx, incomplete) + + def format_completion(self, item: CompletionItem) -> str: + """Format a completion item into the form recognized by the + shell script. This must be implemented by subclasses. + + :param item: Completion item to format. + """ + raise NotImplementedError + + def complete(self) -> str: + """Produce the completion data to send back to the shell. + + By default this calls :meth:`get_completion_args`, gets the + completions, then calls :meth:`format_completion` for each + completion. + """ + args, incomplete = self.get_completion_args() + completions = self.get_completions(args, incomplete) + out = [self.format_completion(item) for item in completions] + return "\n".join(out) + + +class BashComplete(ShellComplete): + """Shell completion for Bash.""" + + name = "bash" + source_template = _SOURCE_BASH + + @staticmethod + def _check_version() -> None: + import shutil + import subprocess + + bash_exe = shutil.which("bash") + + if bash_exe is None: + match = None + else: + output = subprocess.run( + [bash_exe, "--norc", "-c", 'echo "${BASH_VERSION}"'], + stdout=subprocess.PIPE, + ) + match = re.search(r"^(\d+)\.(\d+)\.\d+", output.stdout.decode()) + + if match is not None: + major, minor = match.groups() + + if major < "4" or major == "4" and minor < "4": + echo( + _( + "Shell completion is not supported for Bash" + " versions older than 4.4." 
+ ), + err=True, + ) + else: + echo( + _("Couldn't detect Bash version, shell completion is not supported."), + err=True, + ) + + def source(self) -> str: + self._check_version() + return super().source() + + def get_completion_args(self) -> tuple[list[str], str]: + cwords = split_arg_string(os.environ["COMP_WORDS"]) + cword = int(os.environ["COMP_CWORD"]) + args = cwords[1:cword] + + try: + incomplete = cwords[cword] + except IndexError: + incomplete = "" + + return args, incomplete + + def format_completion(self, item: CompletionItem) -> str: + return f"{item.type},{item.value}" + + +class ZshComplete(ShellComplete): + """Shell completion for Zsh.""" + + name = "zsh" + source_template = _SOURCE_ZSH + + def get_completion_args(self) -> tuple[list[str], str]: + cwords = split_arg_string(os.environ["COMP_WORDS"]) + cword = int(os.environ["COMP_CWORD"]) + args = cwords[1:cword] + + try: + incomplete = cwords[cword] + except IndexError: + incomplete = "" + + return args, incomplete + + def format_completion(self, item: CompletionItem) -> str: + return f"{item.type}\n{item.value}\n{item.help if item.help else '_'}" + + +class FishComplete(ShellComplete): + """Shell completion for Fish.""" + + name = "fish" + source_template = _SOURCE_FISH + + def get_completion_args(self) -> tuple[list[str], str]: + cwords = split_arg_string(os.environ["COMP_WORDS"]) + incomplete = os.environ["COMP_CWORD"] + args = cwords[1:] + + # Fish stores the partial word in both COMP_WORDS and + # COMP_CWORD, remove it from complete args. + if incomplete and args and args[-1] == incomplete: + args.pop() + + return args, incomplete + + def format_completion(self, item: CompletionItem) -> str: + if item.help: + return f"{item.type},{item.value}\t{item.help}" + + return f"{item.type},{item.value}" + + +ShellCompleteType = t.TypeVar("ShellCompleteType", bound="type[ShellComplete]") + + +_available_shells: dict[str, type[ShellComplete]] = { + "bash": BashComplete, + "fish": FishComplete, + "zsh": ZshComplete, +} + + +def add_completion_class( + cls: ShellCompleteType, name: str | None = None +) -> ShellCompleteType: + """Register a :class:`ShellComplete` subclass under the given name. + The name will be provided by the completion instruction environment + variable during completion. + + :param cls: The completion class that will handle completion for the + shell. + :param name: Name to register the class under. Defaults to the + class's ``name`` attribute. + """ + if name is None: + name = cls.name + + _available_shells[name] = cls + + return cls + + +def get_completion_class(shell: str) -> type[ShellComplete] | None: + """Look up a registered :class:`ShellComplete` subclass by the name + provided by the completion instruction environment variable. If the + name isn't registered, returns ``None``. + + :param shell: Name the class is registered under. + """ + return _available_shells.get(shell) + + +def split_arg_string(string: str) -> list[str]: + """Split an argument string as with :func:`shlex.split`, but don't + fail if the string is incomplete. Ignores a missing closing quote or + incomplete escape sequence and uses the partial token as-is. + + .. code-block:: python + + split_arg_string("example 'my file") + ["example", "my file"] + + split_arg_string("example my\\") + ["example", "my"] + + :param string: String to split. + + .. versionchanged:: 8.2 + Moved to ``shell_completion`` from ``parser``. 
+ """ + import shlex + + lex = shlex.shlex(string, posix=True) + lex.whitespace_split = True + lex.commenters = "" + out = [] + + try: + for token in lex: + out.append(token) + except ValueError: + # Raised when end-of-string is reached in an invalid state. Use + # the partial token as-is. The quote or escape character is in + # lex.state, not lex.token. + out.append(lex.token) + + return out + + +def _is_incomplete_argument(ctx: Context, param: Parameter) -> bool: + """Determine if the given parameter is an argument that can still + accept values. + + :param ctx: Invocation context for the command represented by the + parsed complete args. + :param param: Argument object being checked. + """ + if not isinstance(param, Argument): + return False + + assert param.name is not None + # Will be None if expose_value is False. + value = ctx.params.get(param.name) + return ( + param.nargs == -1 + or ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE + or ( + param.nargs > 1 + and isinstance(value, (tuple, list)) + and len(value) < param.nargs + ) + ) + + +def _start_of_option(ctx: Context, value: str) -> bool: + """Check if the value looks like the start of an option.""" + if not value: + return False + + c = value[0] + return c in ctx._opt_prefixes + + +def _is_incomplete_option(ctx: Context, args: list[str], param: Parameter) -> bool: + """Determine if the given parameter is an option that needs a value. + + :param args: List of complete args before the incomplete value. + :param param: Option object being checked. + """ + if not isinstance(param, Option): + return False + + if param.is_flag or param.count: + return False + + last_option = None + + for index, arg in enumerate(reversed(args)): + if index + 1 > param.nargs: + break + + if _start_of_option(ctx, arg): + last_option = arg + + return last_option is not None and last_option in param.opts + + +def _resolve_context( + cli: Command, + ctx_args: cabc.MutableMapping[str, t.Any], + prog_name: str, + args: list[str], +) -> Context: + """Produce the context hierarchy starting with the command and + traversing the complete arguments. This only follows the commands, + it doesn't trigger input prompts or callbacks. + + :param cli: Command being called. + :param prog_name: Name of the executable in the shell. + :param args: List of complete args before the incomplete value. + """ + ctx_args["resilient_parsing"] = True + with cli.make_context(prog_name, args.copy(), **ctx_args) as ctx: + args = ctx._protected_args + ctx.args + + while args: + command = ctx.command + + if isinstance(command, Group): + if not command.chain: + name, cmd, args = command.resolve_command(ctx, args) + + if cmd is None: + return ctx + + with cmd.make_context( + name, args, parent=ctx, resilient_parsing=True + ) as sub_ctx: + args = ctx._protected_args + ctx.args + ctx = sub_ctx + else: + sub_ctx = ctx + + while args: + name, cmd, args = command.resolve_command(ctx, args) + + if cmd is None: + return ctx + + with cmd.make_context( + name, + args, + parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False, + resilient_parsing=True, + ) as sub_sub_ctx: + args = sub_ctx.args + sub_ctx = sub_sub_ctx + + ctx = sub_ctx + args = [*sub_ctx._protected_args, *sub_ctx.args] + else: + break + + return ctx + + +def _resolve_incomplete( + ctx: Context, args: list[str], incomplete: str +) -> tuple[Command | Parameter, str]: + """Find the Click object that will handle the completion of the + incomplete value. Return the object and the incomplete value. 
+ + :param ctx: Invocation context for the command represented by + the parsed complete args. + :param args: List of complete args before the incomplete value. + :param incomplete: Value being completed. May be empty. + """ + # Different shells treat an "=" between a long option name and + # value differently. Might keep the value joined, return the "=" + # as a separate item, or return the split name and value. Always + # split and discard the "=" to make completion easier. + if incomplete == "=": + incomplete = "" + elif "=" in incomplete and _start_of_option(ctx, incomplete): + name, _, incomplete = incomplete.partition("=") + args.append(name) + + # The "--" marker tells Click to stop treating values as options + # even if they start with the option character. If it hasn't been + # given and the incomplete arg looks like an option, the current + # command will provide option name completions. + if "--" not in args and _start_of_option(ctx, incomplete): + return ctx.command, incomplete + + params = ctx.command.get_params(ctx) + + # If the last complete arg is an option name with an incomplete + # value, the option will provide value completions. + for param in params: + if _is_incomplete_option(ctx, args, param): + return param, incomplete + + # It's not an option name or value. The first argument without a + # parsed value will provide value completions. + for param in params: + if _is_incomplete_argument(ctx, param): + return param, incomplete + + # There were no unparsed arguments, the command may be a group that + # will provide command name completions. + return ctx.command, incomplete diff --git a/venv/Lib/site-packages/click/termui.py b/venv/Lib/site-packages/click/termui.py new file mode 100644 index 00000000..dcbb2221 --- /dev/null +++ b/venv/Lib/site-packages/click/termui.py @@ -0,0 +1,877 @@ +from __future__ import annotations + +import collections.abc as cabc +import inspect +import io +import itertools +import sys +import typing as t +from contextlib import AbstractContextManager +from gettext import gettext as _ + +from ._compat import isatty +from ._compat import strip_ansi +from .exceptions import Abort +from .exceptions import UsageError +from .globals import resolve_color_default +from .types import Choice +from .types import convert_type +from .types import ParamType +from .utils import echo +from .utils import LazyFile + +if t.TYPE_CHECKING: + from ._termui_impl import ProgressBar + +V = t.TypeVar("V") + +# The prompt functions to use. The doc tools currently override these +# functions to customize how they work. 
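+# A minimal sketch of such an override (hypothetical, not part of Click
+# itself):
+#
+#     import click.termui
+#     click.termui.visible_prompt_func = lambda text: "canned answer"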
+visible_prompt_func: t.Callable[[str], str] = input + +_ansi_colors = { + "black": 30, + "red": 31, + "green": 32, + "yellow": 33, + "blue": 34, + "magenta": 35, + "cyan": 36, + "white": 37, + "reset": 39, + "bright_black": 90, + "bright_red": 91, + "bright_green": 92, + "bright_yellow": 93, + "bright_blue": 94, + "bright_magenta": 95, + "bright_cyan": 96, + "bright_white": 97, +} +_ansi_reset_all = "\033[0m" + + +def hidden_prompt_func(prompt: str) -> str: + import getpass + + return getpass.getpass(prompt) + + +def _build_prompt( + text: str, + suffix: str, + show_default: bool = False, + default: t.Any | None = None, + show_choices: bool = True, + type: ParamType | None = None, +) -> str: + prompt = text + if type is not None and show_choices and isinstance(type, Choice): + prompt += f" ({', '.join(map(str, type.choices))})" + if default is not None and show_default: + prompt = f"{prompt} [{_format_default(default)}]" + return f"{prompt}{suffix}" + + +def _format_default(default: t.Any) -> t.Any: + if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"): + return default.name + + return default + + +def prompt( + text: str, + default: t.Any | None = None, + hide_input: bool = False, + confirmation_prompt: bool | str = False, + type: ParamType | t.Any | None = None, + value_proc: t.Callable[[str], t.Any] | None = None, + prompt_suffix: str = ": ", + show_default: bool = True, + err: bool = False, + show_choices: bool = True, +) -> t.Any: + """Prompts a user for input. This is a convenience function that can + be used to prompt a user for input later. + + If the user aborts the input by sending an interrupt signal, this + function will catch it and raise a :exc:`Abort` exception. + + :param text: the text to show for the prompt. + :param default: the default value to use if no input happens. If this + is not given it will prompt until it's aborted. + :param hide_input: if this is set to true then the input value will + be hidden. + :param confirmation_prompt: Prompt a second time to confirm the + value. Can be set to a string instead of ``True`` to customize + the message. + :param type: the type to use to check the value against. + :param value_proc: if this parameter is provided it's a function that + is invoked instead of the type conversion to + convert a value. + :param prompt_suffix: a suffix that should be added to the prompt. + :param show_default: shows or hides the default value in the prompt. + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``, the same as with echo. + :param show_choices: Show or hide choices if the passed type is a Choice. + For example if type is a Choice of either day or week, + show_choices is true and text is "Group by" then the + prompt will be "Group by (day, week): ". + + .. versionadded:: 8.0 + ``confirmation_prompt`` can be a custom string. + + .. versionadded:: 7.0 + Added the ``show_choices`` parameter. + + .. versionadded:: 6.0 + Added unicode support for cmd.exe on Windows. + + .. versionadded:: 4.0 + Added the `err` parameter. + + """ + + def prompt_func(text: str) -> str: + f = hidden_prompt_func if hide_input else visible_prompt_func + try: + # Write the prompt separately so that we get nice + # coloring through colorama on Windows + echo(text.rstrip(" "), nl=False, err=err) + # Echo a space to stdout to work around an issue where + # readline causes backspace to clear the whole line. 
+ return f(" ") + except (KeyboardInterrupt, EOFError): + # getpass doesn't print a newline if the user aborts input with ^C. + # Allegedly this behavior is inherited from getpass(3). + # A doc bug has been filed at https://bugs.python.org/issue24711 + if hide_input: + echo(None, err=err) + raise Abort() from None + + if value_proc is None: + value_proc = convert_type(type, default) + + prompt = _build_prompt( + text, prompt_suffix, show_default, default, show_choices, type + ) + + if confirmation_prompt: + if confirmation_prompt is True: + confirmation_prompt = _("Repeat for confirmation") + + confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix) + + while True: + while True: + value = prompt_func(prompt) + if value: + break + elif default is not None: + value = default + break + try: + result = value_proc(value) + except UsageError as e: + if hide_input: + echo(_("Error: The value you entered was invalid."), err=err) + else: + echo(_("Error: {e.message}").format(e=e), err=err) + continue + if not confirmation_prompt: + return result + while True: + value2 = prompt_func(confirmation_prompt) + is_empty = not value and not value2 + if value2 or is_empty: + break + if value == value2: + return result + echo(_("Error: The two entered values do not match."), err=err) + + +def confirm( + text: str, + default: bool | None = False, + abort: bool = False, + prompt_suffix: str = ": ", + show_default: bool = True, + err: bool = False, +) -> bool: + """Prompts for confirmation (yes/no question). + + If the user aborts the input by sending a interrupt signal this + function will catch it and raise a :exc:`Abort` exception. + + :param text: the question to ask. + :param default: The default value to use when no input is given. If + ``None``, repeat until input is given. + :param abort: if this is set to `True` a negative answer aborts the + exception by raising :exc:`Abort`. + :param prompt_suffix: a suffix that should be added to the prompt. + :param show_default: shows or hides the default value in the prompt. + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``, the same as with echo. + + .. versionchanged:: 8.0 + Repeat until input is given if ``default`` is ``None``. + + .. versionadded:: 4.0 + Added the ``err`` parameter. + """ + prompt = _build_prompt( + text, + prompt_suffix, + show_default, + "y/n" if default is None else ("Y/n" if default else "y/N"), + ) + + while True: + try: + # Write the prompt separately so that we get nice + # coloring through colorama on Windows + echo(prompt.rstrip(" "), nl=False, err=err) + # Echo a space to stdout to work around an issue where + # readline causes backspace to clear the whole line. + value = visible_prompt_func(" ").lower().strip() + except (KeyboardInterrupt, EOFError): + raise Abort() from None + if value in ("y", "yes"): + rv = True + elif value in ("n", "no"): + rv = False + elif default is not None and value == "": + rv = default + else: + echo(_("Error: invalid input"), err=err) + continue + break + if abort and not rv: + raise Abort() + return rv + + +def echo_via_pager( + text_or_generator: cabc.Iterable[str] | t.Callable[[], cabc.Iterable[str]] | str, + color: bool | None = None, +) -> None: + """This function takes a text and shows it via an environment specific + pager on stdout. + + .. versionchanged:: 3.0 + Added the `color` flag. + + :param text_or_generator: the text to page, or alternatively, a + generator emitting the text to page. 
+ :param color: controls if the pager supports ANSI colors or not. The + default is autodetection. + """ + color = resolve_color_default(color) + + if inspect.isgeneratorfunction(text_or_generator): + i = t.cast("t.Callable[[], cabc.Iterable[str]]", text_or_generator)() + elif isinstance(text_or_generator, str): + i = [text_or_generator] + else: + i = iter(t.cast("cabc.Iterable[str]", text_or_generator)) + + # convert every element of i to a text type if necessary + text_generator = (el if isinstance(el, str) else str(el) for el in i) + + from ._termui_impl import pager + + return pager(itertools.chain(text_generator, "\n"), color) + + +@t.overload +def progressbar( + *, + length: int, + label: str | None = None, + hidden: bool = False, + show_eta: bool = True, + show_percent: bool | None = None, + show_pos: bool = False, + fill_char: str = "#", + empty_char: str = "-", + bar_template: str = "%(label)s [%(bar)s] %(info)s", + info_sep: str = " ", + width: int = 36, + file: t.TextIO | None = None, + color: bool | None = None, + update_min_steps: int = 1, +) -> ProgressBar[int]: ... + + +@t.overload +def progressbar( + iterable: cabc.Iterable[V] | None = None, + length: int | None = None, + label: str | None = None, + hidden: bool = False, + show_eta: bool = True, + show_percent: bool | None = None, + show_pos: bool = False, + item_show_func: t.Callable[[V | None], str | None] | None = None, + fill_char: str = "#", + empty_char: str = "-", + bar_template: str = "%(label)s [%(bar)s] %(info)s", + info_sep: str = " ", + width: int = 36, + file: t.TextIO | None = None, + color: bool | None = None, + update_min_steps: int = 1, +) -> ProgressBar[V]: ... + + +def progressbar( + iterable: cabc.Iterable[V] | None = None, + length: int | None = None, + label: str | None = None, + hidden: bool = False, + show_eta: bool = True, + show_percent: bool | None = None, + show_pos: bool = False, + item_show_func: t.Callable[[V | None], str | None] | None = None, + fill_char: str = "#", + empty_char: str = "-", + bar_template: str = "%(label)s [%(bar)s] %(info)s", + info_sep: str = " ", + width: int = 36, + file: t.TextIO | None = None, + color: bool | None = None, + update_min_steps: int = 1, +) -> ProgressBar[V]: + """This function creates an iterable context manager that can be used + to iterate over something while showing a progress bar. It will + either iterate over the `iterable` or `length` items (that are counted + up). While iteration happens, this function will print a rendered + progress bar to the given `file` (defaults to stdout) and will attempt + to calculate remaining time and more. By default, this progress bar + will not be rendered if the file is not a terminal. + + The context manager creates the progress bar. When the context + manager is entered the progress bar is already created. With every + iteration over the progress bar, the iterable passed to the bar is + advanced and the bar is updated. When the context manager exits, + a newline is printed and the progress bar is finalized on screen. + + Note: The progress bar is currently designed for use cases where the + total progress can be expected to take at least several seconds. + Because of this, the ProgressBar class object won't display + progress that is considered too fast, and progress where the time + between steps is less than a second. + + No printing must happen or the progress bar will be unintentionally + destroyed. 
+ + Example usage:: + + with progressbar(items) as bar: + for item in bar: + do_something_with(item) + + Alternatively, if no iterable is specified, one can manually update the + progress bar through the `update()` method instead of directly + iterating over the progress bar. The update method accepts the number + of steps to increment the bar with:: + + with progressbar(length=chunks.total_bytes) as bar: + for chunk in chunks: + process_chunk(chunk) + bar.update(chunks.bytes) + + The ``update()`` method also takes an optional value specifying the + ``current_item`` at the new position. This is useful when used + together with ``item_show_func`` to customize the output for each + manual step:: + + with click.progressbar( + length=total_size, + label='Unzipping archive', + item_show_func=lambda a: a.filename + ) as bar: + for archive in zip_file: + archive.extract() + bar.update(archive.size, archive) + + :param iterable: an iterable to iterate over. If not provided the length + is required. + :param length: the number of items to iterate over. By default the + progressbar will attempt to ask the iterator about its + length, which might or might not work. If an iterable is + also provided this parameter can be used to override the + length. If an iterable is not provided the progress bar + will iterate over a range of that length. + :param label: the label to show next to the progress bar. + :param hidden: hide the progressbar. Defaults to ``False``. When no tty is + detected, it will only print the progressbar label. Setting this to + ``False`` also disables that. + :param show_eta: enables or disables the estimated time display. This is + automatically disabled if the length cannot be + determined. + :param show_percent: enables or disables the percentage display. The + default is `True` if the iterable has a length or + `False` if not. + :param show_pos: enables or disables the absolute position display. The + default is `False`. + :param item_show_func: A function called with the current item which + can return a string to show next to the progress bar. If the + function returns ``None`` nothing is shown. The current item can + be ``None``, such as when entering and exiting the bar. + :param fill_char: the character to use to show the filled part of the + progress bar. + :param empty_char: the character to use to show the non-filled part of + the progress bar. + :param bar_template: the format string to use as template for the bar. + The parameters in it are ``label`` for the label, + ``bar`` for the progress bar and ``info`` for the + info section. + :param info_sep: the separator between multiple info items (eta etc.) + :param width: the width of the progress bar in characters, 0 means full + terminal width + :param file: The file to write to. If this is not a terminal then + only the label is printed. + :param color: controls if the terminal supports ANSI colors or not. The + default is autodetection. This is only needed if ANSI + codes are included anywhere in the progress bar output + which is not the case by default. + :param update_min_steps: Render only when this many updates have + completed. This allows tuning for very fast iterators. + + .. versionadded:: 8.2 + The ``hidden`` argument. + + .. versionchanged:: 8.0 + Output is shown even if execution time is less than 0.5 seconds. + + .. versionchanged:: 8.0 + ``item_show_func`` shows the current item, not the previous one. + + .. versionchanged:: 8.0 + Labels are echoed if the output is not a TTY. 
Reverts a change + in 7.0 that removed all output. + + .. versionadded:: 8.0 + The ``update_min_steps`` parameter. + + .. versionadded:: 4.0 + The ``color`` parameter and ``update`` method. + + .. versionadded:: 2.0 + """ + from ._termui_impl import ProgressBar + + color = resolve_color_default(color) + return ProgressBar( + iterable=iterable, + length=length, + hidden=hidden, + show_eta=show_eta, + show_percent=show_percent, + show_pos=show_pos, + item_show_func=item_show_func, + fill_char=fill_char, + empty_char=empty_char, + bar_template=bar_template, + info_sep=info_sep, + file=file, + label=label, + width=width, + color=color, + update_min_steps=update_min_steps, + ) + + +def clear() -> None: + """Clears the terminal screen. This will have the effect of clearing + the whole visible space of the terminal and moving the cursor to the + top left. This does not do anything if not connected to a terminal. + + .. versionadded:: 2.0 + """ + if not isatty(sys.stdout): + return + + # ANSI escape \033[2J clears the screen, \033[1;1H moves the cursor + echo("\033[2J\033[1;1H", nl=False) + + +def _interpret_color(color: int | tuple[int, int, int] | str, offset: int = 0) -> str: + if isinstance(color, int): + return f"{38 + offset};5;{color:d}" + + if isinstance(color, (tuple, list)): + r, g, b = color + return f"{38 + offset};2;{r:d};{g:d};{b:d}" + + return str(_ansi_colors[color] + offset) + + +def style( + text: t.Any, + fg: int | tuple[int, int, int] | str | None = None, + bg: int | tuple[int, int, int] | str | None = None, + bold: bool | None = None, + dim: bool | None = None, + underline: bool | None = None, + overline: bool | None = None, + italic: bool | None = None, + blink: bool | None = None, + reverse: bool | None = None, + strikethrough: bool | None = None, + reset: bool = True, +) -> str: + """Styles a text with ANSI styles and returns the new string. By + default the styling is self contained which means that at the end + of the string a reset code is issued. This can be prevented by + passing ``reset=False``. + + Examples:: + + click.echo(click.style('Hello World!', fg='green')) + click.echo(click.style('ATTENTION!', blink=True)) + click.echo(click.style('Some things', reverse=True, fg='cyan')) + click.echo(click.style('More colors', fg=(255, 12, 128), bg=117)) + + Supported color names: + + * ``black`` (might be a gray) + * ``red`` + * ``green`` + * ``yellow`` (might be an orange) + * ``blue`` + * ``magenta`` + * ``cyan`` + * ``white`` (might be light gray) + * ``bright_black`` + * ``bright_red`` + * ``bright_green`` + * ``bright_yellow`` + * ``bright_blue`` + * ``bright_magenta`` + * ``bright_cyan`` + * ``bright_white`` + * ``reset`` (reset the color code only) + + If the terminal supports it, color may also be specified as: + + - An integer in the interval [0, 255]. The terminal must support + 8-bit/256-color mode. + - An RGB tuple of three integers in [0, 255]. The terminal must + support 24-bit/true-color mode. + + See https://en.wikipedia.org/wiki/ANSI_color and + https://gist.github.com/XVilka/8346728 for more information. + + :param text: the string to style with ansi codes. + :param fg: if provided this will become the foreground color. + :param bg: if provided this will become the background color. + :param bold: if provided this will enable or disable bold mode. + :param dim: if provided this will enable or disable dim mode. This is + badly supported. + :param underline: if provided this will enable or disable underline. 
+ :param overline: if provided this will enable or disable overline. + :param italic: if provided this will enable or disable italic. + :param blink: if provided this will enable or disable blinking. + :param reverse: if provided this will enable or disable inverse + rendering (foreground becomes background and the + other way round). + :param strikethrough: if provided this will enable or disable + striking through text. + :param reset: by default a reset-all code is added at the end of the + string which means that styles do not carry over. This + can be disabled to compose styles. + + .. versionchanged:: 8.0 + A non-string ``message`` is converted to a string. + + .. versionchanged:: 8.0 + Added support for 256 and RGB color codes. + + .. versionchanged:: 8.0 + Added the ``strikethrough``, ``italic``, and ``overline`` + parameters. + + .. versionchanged:: 7.0 + Added support for bright colors. + + .. versionadded:: 2.0 + """ + if not isinstance(text, str): + text = str(text) + + bits = [] + + if fg: + try: + bits.append(f"\033[{_interpret_color(fg)}m") + except KeyError: + raise TypeError(f"Unknown color {fg!r}") from None + + if bg: + try: + bits.append(f"\033[{_interpret_color(bg, 10)}m") + except KeyError: + raise TypeError(f"Unknown color {bg!r}") from None + + if bold is not None: + bits.append(f"\033[{1 if bold else 22}m") + if dim is not None: + bits.append(f"\033[{2 if dim else 22}m") + if underline is not None: + bits.append(f"\033[{4 if underline else 24}m") + if overline is not None: + bits.append(f"\033[{53 if overline else 55}m") + if italic is not None: + bits.append(f"\033[{3 if italic else 23}m") + if blink is not None: + bits.append(f"\033[{5 if blink else 25}m") + if reverse is not None: + bits.append(f"\033[{7 if reverse else 27}m") + if strikethrough is not None: + bits.append(f"\033[{9 if strikethrough else 29}m") + bits.append(text) + if reset: + bits.append(_ansi_reset_all) + return "".join(bits) + + +def unstyle(text: str) -> str: + """Removes ANSI styling information from a string. Usually it's not + necessary to use this function as Click's echo function will + automatically remove styling if necessary. + + .. versionadded:: 2.0 + + :param text: the text to remove style information from. + """ + return strip_ansi(text) + + +def secho( + message: t.Any | None = None, + file: t.IO[t.AnyStr] | None = None, + nl: bool = True, + err: bool = False, + color: bool | None = None, + **styles: t.Any, +) -> None: + """This function combines :func:`echo` and :func:`style` into one + call. As such the following two calls are the same:: + + click.secho('Hello World!', fg='green') + click.echo(click.style('Hello World!', fg='green')) + + All keyword arguments are forwarded to the underlying functions + depending on which one they go with. + + Non-string types will be converted to :class:`str`. However, + :class:`bytes` are passed directly to :meth:`echo` without applying + style. If you want to style bytes that represent text, call + :meth:`bytes.decode` first. + + .. versionchanged:: 8.0 + A non-string ``message`` is converted to a string. Bytes are + passed through without style applied. + + .. 
versionadded:: 2.0 + """ + if message is not None and not isinstance(message, (bytes, bytearray)): + message = style(message, **styles) + + return echo(message, file=file, nl=nl, err=err, color=color) + + +@t.overload +def edit( + text: bytes | bytearray, + editor: str | None = None, + env: cabc.Mapping[str, str] | None = None, + require_save: bool = False, + extension: str = ".txt", +) -> bytes | None: ... + + +@t.overload +def edit( + text: str, + editor: str | None = None, + env: cabc.Mapping[str, str] | None = None, + require_save: bool = True, + extension: str = ".txt", +) -> str | None: ... + + +@t.overload +def edit( + text: None = None, + editor: str | None = None, + env: cabc.Mapping[str, str] | None = None, + require_save: bool = True, + extension: str = ".txt", + filename: str | cabc.Iterable[str] | None = None, +) -> None: ... + + +def edit( + text: str | bytes | bytearray | None = None, + editor: str | None = None, + env: cabc.Mapping[str, str] | None = None, + require_save: bool = True, + extension: str = ".txt", + filename: str | cabc.Iterable[str] | None = None, +) -> str | bytes | bytearray | None: + r"""Edits the given text in the defined editor. If an editor is given + (should be the full path to the executable but the regular operating + system search path is used for finding the executable) it overrides + the detected editor. Optionally, some environment variables can be + used. If the editor is closed without changes, `None` is returned. In + case a file is edited directly the return value is always `None` and + `require_save` and `extension` are ignored. + + If the editor cannot be opened a :exc:`UsageError` is raised. + + Note for Windows: to simplify cross-platform usage, the newlines are + automatically converted from POSIX to Windows and vice versa. As such, + the message here will have ``\n`` as newline markers. + + :param text: the text to edit. + :param editor: optionally the editor to use. Defaults to automatic + detection. + :param env: environment variables to forward to the editor. + :param require_save: if this is true, then not saving in the editor + will make the return value become `None`. + :param extension: the extension to tell the editor about. This defaults + to `.txt` but changing this might change syntax + highlighting. + :param filename: if provided it will edit this file instead of the + provided text contents. It will not use a temporary + file as an indirection in that case. If the editor supports + editing multiple files at once, a sequence of files may be + passed as well. Invoke `click.file` once per file instead + if multiple files cannot be managed at once or editing the + files serially is desired. + + .. versionchanged:: 8.2.0 + ``filename`` now accepts any ``Iterable[str]`` in addition to a ``str`` + if the ``editor`` supports editing multiple files at once. + + """ + from ._termui_impl import Editor + + ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension) + + if filename is None: + return ed.edit(text) + + if isinstance(filename, str): + filename = (filename,) + + ed.edit_files(filenames=filename) + return None + + +def launch(url: str, wait: bool = False, locate: bool = False) -> int: + """This function launches the given URL (or filename) in the default + viewer application for this file type. If this is an executable, it + might launch the executable in a new session. The return value is + the exit code of the launched application. Usually, ``0`` indicates + success. 
+
+    Examples::
+
+        click.launch('https://click.palletsprojects.com/')
+        click.launch('/my/downloaded/file', locate=True)
+
+    .. versionadded:: 2.0
+
+    :param url: URL or filename of the thing to launch.
+    :param wait: Wait for the program to exit before returning. This
+        only works if the launched program blocks. In particular,
+        ``xdg-open`` on Linux does not block.
+    :param locate: if this is set to `True` then instead of launching the
+        application associated with the URL it will attempt to
+        launch a file manager with the file located. This
+        might have weird effects if the URL does not point to
+        the filesystem.
+    """
+    from ._termui_impl import open_url
+
+    return open_url(url, wait=wait, locate=locate)
+
+
+# If this is provided, getchar() calls into this instead. This is used
+# for unittesting purposes.
+_getchar: t.Callable[[bool], str] | None = None
+
+
+def getchar(echo: bool = False) -> str:
+    """Fetches a single character from the terminal and returns it. This
+    will always return a unicode character and under certain rare
+    circumstances this might return more than one character. The
+    situations in which more than one character is returned are when,
+    for whatever reason, multiple characters end up in the terminal
+    buffer or standard input was not actually a terminal.
+
+    Note that this will always read from the terminal, even if something
+    is piped into the standard input.
+
+    Note for Windows: in rare cases when typing non-ASCII characters, this
+    function might wait for a second character and then return both at once.
+    This is because certain Unicode characters look like special-key markers.
+
+    .. versionadded:: 2.0
+
+    :param echo: if set to `True`, the character read will also show up on
+        the terminal. The default is to not show it.
+    """
+    global _getchar
+
+    if _getchar is None:
+        from ._termui_impl import getchar as f
+
+        _getchar = f
+
+    return _getchar(echo)
+
+
+def raw_terminal() -> AbstractContextManager[int]:
+    from ._termui_impl import raw_terminal as f
+
+    return f()
+
+
+def pause(info: str | None = None, err: bool = False) -> None:
+    """This command stops execution and waits for the user to press any
+    key to continue. This is similar to the Windows batch "pause"
+    command. If the program is not run through a terminal, this command
+    will instead do nothing.
+
+    .. versionadded:: 2.0
+
+    .. versionadded:: 4.0
+        Added the `err` parameter.
+
+    :param info: The message to print before pausing. Defaults to
+        ``"Press any key to continue..."``.
+    :param err: if set to true the message goes to ``stderr`` instead of
+        ``stdout``, the same as with echo.
+    """
+    if not isatty(sys.stdin) or not isatty(sys.stdout):
+        return
+
+    if info is None:
+        info = _("Press any key to continue...")
+
+    try:
+        if info:
+            echo(info, nl=False, err=err)
+        try:
+            getchar()
+        except (KeyboardInterrupt, EOFError):
+            pass
+    finally:
+        if info:
+            echo(err=err)
diff --git a/venv/Lib/site-packages/click/testing.py b/venv/Lib/site-packages/click/testing.py
new file mode 100644
index 00000000..d19c103a
--- /dev/null
+++ b/venv/Lib/site-packages/click/testing.py
@@ -0,0 +1,557 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import contextlib
+import io
+import os
+import shlex
+import shutil
+import sys
+import tempfile
+import typing as t
+from types import TracebackType
+
+from . import _compat
+from . import formatting
+from . import termui
+from . import utils
+from ._compat import _find_binary_reader
+
+if t.TYPE_CHECKING:
+    from _typeshed import ReadableBuffer
+
+    from .core import Command
+
+
+class EchoingStdin:
+    def __init__(self, input: t.BinaryIO, output: t.BinaryIO) -> None:
+        self._input = input
+        self._output = output
+        self._paused = False
+
+    def __getattr__(self, x: str) -> t.Any:
+        return getattr(self._input, x)
+
+    def _echo(self, rv: bytes) -> bytes:
+        if not self._paused:
+            self._output.write(rv)
+
+        return rv
+
+    def read(self, n: int = -1) -> bytes:
+        return self._echo(self._input.read(n))
+
+    def read1(self, n: int = -1) -> bytes:
+        return self._echo(self._input.read1(n))  # type: ignore
+
+    def readline(self, n: int = -1) -> bytes:
+        return self._echo(self._input.readline(n))
+
+    def readlines(self) -> list[bytes]:
+        return [self._echo(x) for x in self._input.readlines()]
+
+    def __iter__(self) -> cabc.Iterator[bytes]:
+        return iter(self._echo(x) for x in self._input)
+
+    def __repr__(self) -> str:
+        return repr(self._input)
+
+
+@contextlib.contextmanager
+def _pause_echo(stream: EchoingStdin | None) -> cabc.Iterator[None]:
+    if stream is None:
+        yield
+    else:
+        stream._paused = True
+        yield
+        stream._paused = False
+
+
+class BytesIOCopy(io.BytesIO):
+    """Patch ``io.BytesIO`` to let the written stream be copied to another.
+
+    .. versionadded:: 8.2
+    """
+
+    def __init__(self, copy_to: io.BytesIO) -> None:
+        super().__init__()
+        self.copy_to = copy_to
+
+    def flush(self) -> None:
+        super().flush()
+        self.copy_to.flush()
+
+    def write(self, b: ReadableBuffer) -> int:
+        self.copy_to.write(b)
+        return super().write(b)
+
+
+class StreamMixer:
+    """Mixes `<stdout>` and `<stderr>` streams.
+
+    The result is available in the ``output`` attribute.
+
+    .. versionadded:: 8.2
+    """
+
+    def __init__(self) -> None:
+        self.output: io.BytesIO = io.BytesIO()
+        self.stdout: io.BytesIO = BytesIOCopy(copy_to=self.output)
+        self.stderr: io.BytesIO = BytesIOCopy(copy_to=self.output)
+
+
+class _NamedTextIOWrapper(io.TextIOWrapper):
+    def __init__(
+        self, buffer: t.BinaryIO, name: str, mode: str, **kwargs: t.Any
+    ) -> None:
+        super().__init__(buffer, **kwargs)
+        self._name = name
+        self._mode = mode
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def mode(self) -> str:
+        return self._mode
+
+
+def make_input_stream(
+    input: str | bytes | t.IO[t.Any] | None, charset: str
+) -> t.BinaryIO:
+    # Is already an input stream.
+    if hasattr(input, "read"):
+        rv = _find_binary_reader(t.cast("t.IO[t.Any]", input))
+
+        if rv is not None:
+            return rv
+
+        raise TypeError("Could not find binary reader for input stream.")
+
+    if input is None:
+        input = b""
+    elif isinstance(input, str):
+        input = input.encode(charset)
+
+    return io.BytesIO(input)
+
+
+class Result:
+    """Holds the captured result of an invoked CLI script.
+
+    :param runner: The runner that created the result
+    :param stdout_bytes: The standard output as bytes.
+    :param stderr_bytes: The standard error as bytes.
+    :param output_bytes: A mix of ``stdout_bytes`` and ``stderr_bytes``, as the
+        user would see it in its terminal.
+    :param return_value: The value returned from the invoked command.
+    :param exit_code: The exit code as integer.
+    :param exception: The exception that happened if one did.
+    :param exc_info: Exception information (exception type, exception instance,
+        traceback type).
+
+    .. versionchanged:: 8.2
+        ``stderr_bytes`` no longer optional, ``output_bytes`` introduced and
+        ``mix_stderr`` has been removed.
+
+    .. versionadded:: 8.0
+        Added ``return_value``.
+    """
+
+    def __init__(
+        self,
+        runner: CliRunner,
+        stdout_bytes: bytes,
+        stderr_bytes: bytes,
+        output_bytes: bytes,
+        return_value: t.Any,
+        exit_code: int,
+        exception: BaseException | None,
+        exc_info: tuple[type[BaseException], BaseException, TracebackType]
+        | None = None,
+    ):
+        self.runner = runner
+        self.stdout_bytes = stdout_bytes
+        self.stderr_bytes = stderr_bytes
+        self.output_bytes = output_bytes
+        self.return_value = return_value
+        self.exit_code = exit_code
+        self.exception = exception
+        self.exc_info = exc_info
+
+    @property
+    def output(self) -> str:
+        """The terminal output as unicode string, as the user would see it.
+
+        .. versionchanged:: 8.2
+            No longer a proxy for ``self.stdout``. Now has its own independent
+            stream that is mixing ``<stdout>`` and ``<stderr>``, in the order
+            they were written.
+        """
+        return self.output_bytes.decode(self.runner.charset, "replace").replace(
+            "\r\n", "\n"
+        )
+
+    @property
+    def stdout(self) -> str:
+        """The standard output as unicode string."""
+        return self.stdout_bytes.decode(self.runner.charset, "replace").replace(
+            "\r\n", "\n"
+        )
+
+    @property
+    def stderr(self) -> str:
+        """The standard error as unicode string.
+
+        .. versionchanged:: 8.2
+            No longer raises an exception; always returns the ``<stderr>``
+            string.
+        """
+        return self.stderr_bytes.decode(self.runner.charset, "replace").replace(
+            "\r\n", "\n"
+        )
+
+    def __repr__(self) -> str:
+        exc_str = repr(self.exception) if self.exception else "okay"
+        return f"<{type(self).__name__} {exc_str}>"
+
+
+class CliRunner:
+    """The CLI runner provides functionality to invoke a Click command line
+    script for unittesting purposes in an isolated environment. This only
+    works in single-threaded systems without any concurrency as it changes the
+    global interpreter state.
+
+    :param charset: the character set for the input and output data.
+    :param env: a dictionary with environment variables for overriding.
+    :param echo_stdin: if this is set to `True`, then reading from ``<stdin>``
+                       writes to ``<stdout>``. This is useful for showing
+                       examples in some circumstances. Note that regular
+                       prompts will automatically echo the input.
+    :param catch_exceptions: Whether to catch any exceptions other than
+        ``SystemExit`` when running :meth:`~CliRunner.invoke`.
+
+    .. versionchanged:: 8.2
+        Added the ``catch_exceptions`` parameter.
+
+    .. versionchanged:: 8.2
+        ``mix_stderr`` parameter has been removed.
+    """
+
+    def __init__(
+        self,
+        charset: str = "utf-8",
+        env: cabc.Mapping[str, str | None] | None = None,
+        echo_stdin: bool = False,
+        catch_exceptions: bool = True,
+    ) -> None:
+        self.charset = charset
+        self.env: cabc.Mapping[str, str | None] = env or {}
+        self.echo_stdin = echo_stdin
+        self.catch_exceptions = catch_exceptions
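For orientation, here is a minimal sketch of how ``CliRunner`` and ``Result`` fit together in a test. The ``hello`` command and its assertions are invented for illustration and are not part of this file:

```python
# A minimal sketch, assuming click 8.2 semantics; `hello` is a
# hypothetical command defined only for this example.
import click
from click.testing import CliRunner

@click.command()
@click.option("--count", default=1, help="Number of greetings.")
def hello(count):
    for _ in range(count):
        click.echo("Hello!")

runner = CliRunner()
result = runner.invoke(hello, ["--count", "2"])

assert result.exit_code == 0
assert result.stdout == "Hello!\nHello!\n"  # stdout alone
assert result.output == result.stdout      # nothing was written to stderr
```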
+ """ + return cli.name or "root" + + def make_env( + self, overrides: cabc.Mapping[str, str | None] | None = None + ) -> cabc.Mapping[str, str | None]: + """Returns the environment overrides for invoking a script.""" + rv = dict(self.env) + if overrides: + rv.update(overrides) + return rv + + @contextlib.contextmanager + def isolation( + self, + input: str | bytes | t.IO[t.Any] | None = None, + env: cabc.Mapping[str, str | None] | None = None, + color: bool = False, + ) -> cabc.Iterator[tuple[io.BytesIO, io.BytesIO, io.BytesIO]]: + """A context manager that sets up the isolation for invoking of a + command line tool. This sets up `` with the given input data + and `os.environ` with the overrides from the given dictionary. + This also rebinds some internals in Click to be mocked (like the + prompt functionality). + + This is automatically done in the :meth:`invoke` method. + + :param input: the input stream to put into `sys.stdin`. + :param env: the environment overrides as dictionary. + :param color: whether the output should contain color codes. The + application can still override this explicitly. + + .. versionadded:: 8.2 + An additional output stream is returned, which is a mix of + `` and `` streams. + + .. versionchanged:: 8.2 + Always returns the `` stream. + + .. versionchanged:: 8.0 + `` is opened with ``errors="backslashreplace"`` + instead of the default ``"strict"``. + + .. versionchanged:: 4.0 + Added the ``color`` parameter. + """ + bytes_input = make_input_stream(input, self.charset) + echo_input = None + + old_stdin = sys.stdin + old_stdout = sys.stdout + old_stderr = sys.stderr + old_forced_width = formatting.FORCED_WIDTH + formatting.FORCED_WIDTH = 80 + + env = self.make_env(env) + + stream_mixer = StreamMixer() + + if self.echo_stdin: + bytes_input = echo_input = t.cast( + t.BinaryIO, EchoingStdin(bytes_input, stream_mixer.stdout) + ) + + sys.stdin = text_input = _NamedTextIOWrapper( + bytes_input, encoding=self.charset, name="", mode="r" + ) + + if self.echo_stdin: + # Force unbuffered reads, otherwise TextIOWrapper reads a + # large chunk which is echoed early. 
+
+        if self.echo_stdin:
+            # Force unbuffered reads, otherwise TextIOWrapper reads a
+            # large chunk which is echoed early.
+            text_input._CHUNK_SIZE = 1  # type: ignore
+
+        sys.stdout = _NamedTextIOWrapper(
+            stream_mixer.stdout, encoding=self.charset, name="<stdout>", mode="w"
+        )
+
+        sys.stderr = _NamedTextIOWrapper(
+            stream_mixer.stderr,
+            encoding=self.charset,
+            name="<stderr>",
+            mode="w",
+            errors="backslashreplace",
+        )
+
+        @_pause_echo(echo_input)  # type: ignore
+        def visible_input(prompt: str | None = None) -> str:
+            sys.stdout.write(prompt or "")
+            val = text_input.readline().rstrip("\r\n")
+            sys.stdout.write(f"{val}\n")
+            sys.stdout.flush()
+            return val
+
+        @_pause_echo(echo_input)  # type: ignore
+        def hidden_input(prompt: str | None = None) -> str:
+            sys.stdout.write(f"{prompt or ''}\n")
+            sys.stdout.flush()
+            return text_input.readline().rstrip("\r\n")
+
+        @_pause_echo(echo_input)  # type: ignore
+        def _getchar(echo: bool) -> str:
+            char = sys.stdin.read(1)
+
+            if echo:
+                sys.stdout.write(char)
+
+            sys.stdout.flush()
+            return char
+
+        default_color = color
+
+        def should_strip_ansi(
+            stream: t.IO[t.Any] | None = None, color: bool | None = None
+        ) -> bool:
+            if color is None:
+                return not default_color
+            return not color
+
+        old_visible_prompt_func = termui.visible_prompt_func
+        old_hidden_prompt_func = termui.hidden_prompt_func
+        old__getchar_func = termui._getchar
+        old_should_strip_ansi = utils.should_strip_ansi  # type: ignore
+        old__compat_should_strip_ansi = _compat.should_strip_ansi
+        termui.visible_prompt_func = visible_input
+        termui.hidden_prompt_func = hidden_input
+        termui._getchar = _getchar
+        utils.should_strip_ansi = should_strip_ansi  # type: ignore
+        _compat.should_strip_ansi = should_strip_ansi
+
+        old_env = {}
+        try:
+            for key, value in env.items():
+                old_env[key] = os.environ.get(key)
+                if value is None:
+                    try:
+                        del os.environ[key]
+                    except Exception:
+                        pass
+                else:
+                    os.environ[key] = value
+            yield (stream_mixer.stdout, stream_mixer.stderr, stream_mixer.output)
+        finally:
+            for key, value in old_env.items():
+                if value is None:
+                    try:
+                        del os.environ[key]
+                    except Exception:
+                        pass
+                else:
+                    os.environ[key] = value
+            sys.stdout = old_stdout
+            sys.stderr = old_stderr
+            sys.stdin = old_stdin
+            termui.visible_prompt_func = old_visible_prompt_func
+            termui.hidden_prompt_func = old_hidden_prompt_func
+            termui._getchar = old__getchar_func
+            utils.should_strip_ansi = old_should_strip_ansi  # type: ignore
+            _compat.should_strip_ansi = old__compat_should_strip_ansi
+            formatting.FORCED_WIDTH = old_forced_width
+
+    def invoke(
+        self,
+        cli: Command,
+        args: str | cabc.Sequence[str] | None = None,
+        input: str | bytes | t.IO[t.Any] | None = None,
+        env: cabc.Mapping[str, str | None] | None = None,
+        catch_exceptions: bool | None = None,
+        color: bool = False,
+        **extra: t.Any,
+    ) -> Result:
+        """Invokes a command in an isolated environment. The arguments are
+        forwarded directly to the command line script, the `extra` keyword
+        arguments are passed to the :meth:`~clickpkg.Command.main` function of
+        the command.
+
+        This returns a :class:`Result` object.
+
+        :param cli: the command to invoke
+        :param args: the arguments to invoke. It may be given as an iterable
+            or a string. When given as string it will be interpreted
+            as a Unix shell command. More details at
+            :func:`shlex.split`.
+        :param input: the input data for `sys.stdin`.
+        :param env: the environment overrides.
+        :param catch_exceptions: Whether to catch any other exceptions than
+                                 ``SystemExit``. If :data:`None`, the value
+                                 from :class:`CliRunner` is used.
+        :param extra: the keyword arguments to pass to :meth:`main`.
+ :param color: whether the output should contain color codes. The + application can still override this explicitly. + + .. versionadded:: 8.2 + The result object has the ``output_bytes`` attribute with + the mix of ``stdout_bytes`` and ``stderr_bytes``, as the user would + see it in its terminal. + + .. versionchanged:: 8.2 + The result object always returns the ``stderr_bytes`` stream. + + .. versionchanged:: 8.0 + The result object has the ``return_value`` attribute with + the value returned from the invoked command. + + .. versionchanged:: 4.0 + Added the ``color`` parameter. + + .. versionchanged:: 3.0 + Added the ``catch_exceptions`` parameter. + + .. versionchanged:: 3.0 + The result object has the ``exc_info`` attribute with the + traceback if available. + """ + exc_info = None + if catch_exceptions is None: + catch_exceptions = self.catch_exceptions + + with self.isolation(input=input, env=env, color=color) as outstreams: + return_value = None + exception: BaseException | None = None + exit_code = 0 + + if isinstance(args, str): + args = shlex.split(args) + + try: + prog_name = extra.pop("prog_name") + except KeyError: + prog_name = self.get_default_prog_name(cli) + + try: + return_value = cli.main(args=args or (), prog_name=prog_name, **extra) + except SystemExit as e: + exc_info = sys.exc_info() + e_code = t.cast("int | t.Any | None", e.code) + + if e_code is None: + e_code = 0 + + if e_code != 0: + exception = e + + if not isinstance(e_code, int): + sys.stdout.write(str(e_code)) + sys.stdout.write("\n") + e_code = 1 + + exit_code = e_code + + except Exception as e: + if not catch_exceptions: + raise + exception = e + exit_code = 1 + exc_info = sys.exc_info() + finally: + sys.stdout.flush() + stdout = outstreams[0].getvalue() + stderr = outstreams[1].getvalue() + output = outstreams[2].getvalue() + + return Result( + runner=self, + stdout_bytes=stdout, + stderr_bytes=stderr, + output_bytes=output, + return_value=return_value, + exit_code=exit_code, + exception=exception, + exc_info=exc_info, # type: ignore + ) + + @contextlib.contextmanager + def isolated_filesystem( + self, temp_dir: str | os.PathLike[str] | None = None + ) -> cabc.Iterator[str]: + """A context manager that creates a temporary directory and + changes the current working directory to it. This isolates tests + that affect the contents of the CWD to prevent them from + interfering with each other. + + :param temp_dir: Create the temporary directory under this + directory. If given, the created directory is not removed + when exiting. + + .. versionchanged:: 8.0 + Added the ``temp_dir`` parameter. 
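To make the ``invoke()`` contract above concrete, here is a small sketch; the ``register`` command is hypothetical:

```python
# Sketch: prompts read from `input=`, and exceptions are captured on the
# Result unless catch_exceptions=False. `register` is made up for this demo.
import click
from click.testing import CliRunner

@click.command()
@click.option("--name", prompt="Name")
def register(name):
    if name == "boom":
        raise RuntimeError("kaboom")
    click.echo(f"registered {name}")

runner = CliRunner()

result = runner.invoke(register, input="Ada\n")
assert result.exit_code == 0
assert "registered Ada" in result.output    # the visible prompt echoes "Name: Ada"

result = runner.invoke(register, input="boom\n")   # exception is captured
assert result.exit_code == 1
assert isinstance(result.exception, RuntimeError)
```

With ``catch_exceptions=False`` the same ``RuntimeError`` would propagate out of ``invoke()`` instead of being recorded on the result.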
+ """ + cwd = os.getcwd() + dt = tempfile.mkdtemp(dir=temp_dir) + os.chdir(dt) + + try: + yield dt + finally: + os.chdir(cwd) + + if temp_dir is None: + try: + shutil.rmtree(dt) + except OSError: + pass diff --git a/venv/Lib/site-packages/click/types.py b/venv/Lib/site-packages/click/types.py new file mode 100644 index 00000000..684cb3b1 --- /dev/null +++ b/venv/Lib/site-packages/click/types.py @@ -0,0 +1,1165 @@ +from __future__ import annotations + +import collections.abc as cabc +import enum +import os +import stat +import sys +import typing as t +from datetime import datetime +from gettext import gettext as _ +from gettext import ngettext + +from ._compat import _get_argv_encoding +from ._compat import open_stream +from .exceptions import BadParameter +from .utils import format_filename +from .utils import LazyFile +from .utils import safecall + +if t.TYPE_CHECKING: + import typing_extensions as te + + from .core import Context + from .core import Parameter + from .shell_completion import CompletionItem + +ParamTypeValue = t.TypeVar("ParamTypeValue") + + +class ParamType: + """Represents the type of a parameter. Validates and converts values + from the command line or Python into the correct type. + + To implement a custom type, subclass and implement at least the + following: + + - The :attr:`name` class attribute must be set. + - Calling an instance of the type with ``None`` must return + ``None``. This is already implemented by default. + - :meth:`convert` must convert string values to the correct type. + - :meth:`convert` must accept values that are already the correct + type. + - It must be able to convert a value if the ``ctx`` and ``param`` + arguments are ``None``. This can occur when converting prompt + input. + """ + + is_composite: t.ClassVar[bool] = False + arity: t.ClassVar[int] = 1 + + #: the descriptive name of this type + name: str + + #: if a list of this type is expected and the value is pulled from a + #: string environment variable, this is what splits it up. `None` + #: means any whitespace. For all parameters the general rule is that + #: whitespace splits them up. The exception are paths and files which + #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on + #: Windows). + envvar_list_splitter: t.ClassVar[str | None] = None + + def to_info_dict(self) -> dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. + + Use :meth:`click.Context.to_info_dict` to traverse the entire + CLI structure. + + .. versionadded:: 8.0 + """ + # The class name without the "ParamType" suffix. + param_type = type(self).__name__.partition("ParamType")[0] + param_type = param_type.partition("ParameterType")[0] + + # Custom subclasses might not remember to set a name. + if hasattr(self, "name"): + name = self.name + else: + name = param_type + + return {"param_type": param_type, "name": name} + + def __call__( + self, + value: t.Any, + param: Parameter | None = None, + ctx: Context | None = None, + ) -> t.Any: + if value is not None: + return self.convert(value, param, ctx) + + def get_metavar(self, param: Parameter, ctx: Context) -> str | None: + """Returns the metavar default for this param if it provides one.""" + + def get_missing_message(self, param: Parameter, ctx: Context | None) -> str | None: + """Optionally might return extra information about a missing + parameter. + + .. 
versionadded:: 2.0 + """ + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + """Convert the value to the correct type. This is not called if + the value is ``None`` (the missing value). + + This must accept string values from the command line, as well as + values that are already the correct type. It may also convert + other compatible types. + + The ``param`` and ``ctx`` arguments may be ``None`` in certain + situations, such as when converting prompt input. + + If the value cannot be converted, call :meth:`fail` with a + descriptive message. + + :param value: The value to convert. + :param param: The parameter that is using this type to convert + its value. May be ``None``. + :param ctx: The current context that arrived at this value. May + be ``None``. + """ + return value + + def split_envvar_value(self, rv: str) -> cabc.Sequence[str]: + """Given a value from an environment variable this splits it up + into small chunks depending on the defined envvar list splitter. + + If the splitter is set to `None`, which means that whitespace splits, + then leading and trailing whitespace is ignored. Otherwise, leading + and trailing splitters usually lead to empty items being included. + """ + return (rv or "").split(self.envvar_list_splitter) + + def fail( + self, + message: str, + param: Parameter | None = None, + ctx: Context | None = None, + ) -> t.NoReturn: + """Helper method to fail with an invalid value message.""" + raise BadParameter(message, ctx=ctx, param=param) + + def shell_complete( + self, ctx: Context, param: Parameter, incomplete: str + ) -> list[CompletionItem]: + """Return a list of + :class:`~click.shell_completion.CompletionItem` objects for the + incomplete value. Most types do not provide completions, but + some do, and this allows custom types to provide custom + completions as well. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. 
versionadded:: 8.0 + """ + return [] + + +class CompositeParamType(ParamType): + is_composite = True + + @property + def arity(self) -> int: # type: ignore + raise NotImplementedError() + + +class FuncParamType(ParamType): + def __init__(self, func: t.Callable[[t.Any], t.Any]) -> None: + self.name: str = func.__name__ + self.func = func + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["func"] = self.func + return info_dict + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + try: + return self.func(value) + except ValueError: + try: + value = str(value) + except UnicodeError: + value = value.decode("utf-8", "replace") + + self.fail(value, param, ctx) + + +class UnprocessedParamType(ParamType): + name = "text" + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + return value + + def __repr__(self) -> str: + return "UNPROCESSED" + + +class StringParamType(ParamType): + name = "text" + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + if isinstance(value, bytes): + enc = _get_argv_encoding() + try: + value = value.decode(enc) + except UnicodeError: + fs_enc = sys.getfilesystemencoding() + if fs_enc != enc: + try: + value = value.decode(fs_enc) + except UnicodeError: + value = value.decode("utf-8", "replace") + else: + value = value.decode("utf-8", "replace") + return value + return str(value) + + def __repr__(self) -> str: + return "STRING" + + +class Choice(ParamType, t.Generic[ParamTypeValue]): + """The choice type allows a value to be checked against a fixed set + of supported values. + + You may pass any iterable value which will be converted to a tuple + and thus will only be iterated once. + + The resulting value will always be one of the originally passed choices. + See :meth:`normalize_choice` for more info on the mapping of strings + to choices. See :ref:`choice-opts` for an example. + + :param case_sensitive: Set to false to make choices case + insensitive. Defaults to true. + + .. versionchanged:: 8.2.0 + Non-``str`` ``choices`` are now supported. It can additionally be any + iterable. Before you were not recommended to pass anything but a list or + tuple. + + .. versionadded:: 8.2.0 + Choice normalization can be overridden via :meth:`normalize_choice`. + """ + + name = "choice" + + def __init__( + self, choices: cabc.Iterable[ParamTypeValue], case_sensitive: bool = True + ) -> None: + self.choices: cabc.Sequence[ParamTypeValue] = tuple(choices) + self.case_sensitive = case_sensitive + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["choices"] = self.choices + info_dict["case_sensitive"] = self.case_sensitive + return info_dict + + def _normalized_mapping( + self, ctx: Context | None = None + ) -> cabc.Mapping[ParamTypeValue, str]: + """ + Returns mapping where keys are the original choices and the values are + the normalized values that are accepted via the command line. + + This is a simple wrapper around :meth:`normalize_choice`, use that + instead which is supported. + """ + return { + choice: self.normalize_choice( + choice=choice, + ctx=ctx, + ) + for choice in self.choices + } + + def normalize_choice(self, choice: ParamTypeValue, ctx: Context | None) -> str: + """ + Normalize a choice value, used to map a passed string to a choice. + Each choice must have a unique normalized value. 
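As a concrete sketch of the normalization described here (the ``Shade`` enum is invented for illustration; enum choices are supported per the 8.2 note above):

```python
# Sketch: enum-backed Choice with case-insensitive normalization.
import enum
import click

class Shade(enum.Enum):
    LIGHT = "light"
    DARK = "dark"

@click.command()
@click.option("--shade", type=click.Choice(Shade, case_sensitive=False), required=True)
def paint(shade):
    # convert() maps the normalized token back to the original enum member.
    click.echo(shade.name)

# e.g. `paint --shade DaRk` prints DARK
```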
+ + By default uses :meth:`Context.token_normalize_func` and if not case + sensitive, convert it to a casefolded value. + + .. versionadded:: 8.2.0 + """ + normed_value = choice.name if isinstance(choice, enum.Enum) else str(choice) + + if ctx is not None and ctx.token_normalize_func is not None: + normed_value = ctx.token_normalize_func(normed_value) + + if not self.case_sensitive: + normed_value = normed_value.casefold() + + return normed_value + + def get_metavar(self, param: Parameter, ctx: Context) -> str | None: + if param.param_type_name == "option" and not param.show_choices: # type: ignore + choice_metavars = [ + convert_type(type(choice)).name.upper() for choice in self.choices + ] + choices_str = "|".join([*dict.fromkeys(choice_metavars)]) + else: + choices_str = "|".join( + [str(i) for i in self._normalized_mapping(ctx=ctx).values()] + ) + + # Use curly braces to indicate a required argument. + if param.required and param.param_type_name == "argument": + return f"{{{choices_str}}}" + + # Use square braces to indicate an option or optional argument. + return f"[{choices_str}]" + + def get_missing_message(self, param: Parameter, ctx: Context | None) -> str: + """ + Message shown when no choice is passed. + + .. versionchanged:: 8.2.0 Added ``ctx`` argument. + """ + return _("Choose from:\n\t{choices}").format( + choices=",\n\t".join(self._normalized_mapping(ctx=ctx).values()) + ) + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> ParamTypeValue: + """ + For a given value from the parser, normalize it and find its + matching normalized value in the list of choices. Then return the + matched "original" choice. + """ + normed_value = self.normalize_choice(choice=value, ctx=ctx) + normalized_mapping = self._normalized_mapping(ctx=ctx) + + try: + return next( + original + for original, normalized in normalized_mapping.items() + if normalized == normed_value + ) + except StopIteration: + self.fail( + self.get_invalid_choice_message(value=value, ctx=ctx), + param=param, + ctx=ctx, + ) + + def get_invalid_choice_message(self, value: t.Any, ctx: Context | None) -> str: + """Get the error message when the given choice is invalid. + + :param value: The invalid value. + + .. versionadded:: 8.2 + """ + choices_str = ", ".join(map(repr, self._normalized_mapping(ctx=ctx).values())) + return ngettext( + "{value!r} is not {choice}.", + "{value!r} is not one of {choices}.", + len(self.choices), + ).format(value=value, choice=choices_str, choices=choices_str) + + def __repr__(self) -> str: + return f"Choice({list(self.choices)})" + + def shell_complete( + self, ctx: Context, param: Parameter, incomplete: str + ) -> list[CompletionItem]: + """Complete choices that start with the incomplete value. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + str_choices = map(str, self.choices) + + if self.case_sensitive: + matched = (c for c in str_choices if c.startswith(incomplete)) + else: + incomplete = incomplete.lower() + matched = (c for c in str_choices if c.lower().startswith(incomplete)) + + return [CompletionItem(c) for c in matched] + + +class DateTime(ParamType): + """The DateTime type converts date strings into `datetime` objects. 
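A short sketch of custom formats, tried in order as described below; the ``schedule`` command is hypothetical:

```python
# Sketch: DateTime tries each format in order and yields a datetime object.
import click

@click.command()
@click.option(
    "--when",
    type=click.DateTime(formats=["%Y-%m-%d", "%d.%m.%Y"]),  # custom formats
    required=True,
)
def schedule(when):
    click.echo(when.date().isoformat())

# `schedule --when 31.12.2024` and `schedule --when 2024-12-31`
# both parse to datetime(2024, 12, 31, 0, 0).
```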
+ + The format strings which are checked are configurable, but default to some + common (non-timezone aware) ISO 8601 formats. + + When specifying *DateTime* formats, you should only pass a list or a tuple. + Other iterables, like generators, may lead to surprising results. + + The format strings are processed using ``datetime.strptime``, and this + consequently defines the format strings which are allowed. + + Parsing is tried using each format, in order, and the first format which + parses successfully is used. + + :param formats: A list or tuple of date format strings, in the order in + which they should be tried. Defaults to + ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``, + ``'%Y-%m-%d %H:%M:%S'``. + """ + + name = "datetime" + + def __init__(self, formats: cabc.Sequence[str] | None = None): + self.formats: cabc.Sequence[str] = formats or [ + "%Y-%m-%d", + "%Y-%m-%dT%H:%M:%S", + "%Y-%m-%d %H:%M:%S", + ] + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["formats"] = self.formats + return info_dict + + def get_metavar(self, param: Parameter, ctx: Context) -> str | None: + return f"[{'|'.join(self.formats)}]" + + def _try_to_convert_date(self, value: t.Any, format: str) -> datetime | None: + try: + return datetime.strptime(value, format) + except ValueError: + return None + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + if isinstance(value, datetime): + return value + + for format in self.formats: + converted = self._try_to_convert_date(value, format) + + if converted is not None: + return converted + + formats_str = ", ".join(map(repr, self.formats)) + self.fail( + ngettext( + "{value!r} does not match the format {format}.", + "{value!r} does not match the formats {formats}.", + len(self.formats), + ).format(value=value, format=formats_str, formats=formats_str), + param, + ctx, + ) + + def __repr__(self) -> str: + return "DateTime" + + +class _NumberParamTypeBase(ParamType): + _number_class: t.ClassVar[type[t.Any]] + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + try: + return self._number_class(value) + except ValueError: + self.fail( + _("{value!r} is not a valid {number_type}.").format( + value=value, number_type=self.name + ), + param, + ctx, + ) + + +class _NumberRangeBase(_NumberParamTypeBase): + def __init__( + self, + min: float | None = None, + max: float | None = None, + min_open: bool = False, + max_open: bool = False, + clamp: bool = False, + ) -> None: + self.min = min + self.max = max + self.min_open = min_open + self.max_open = max_open + self.clamp = clamp + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + min=self.min, + max=self.max, + min_open=self.min_open, + max_open=self.max_open, + clamp=self.clamp, + ) + return info_dict + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + import operator + + rv = super().convert(value, param, ctx) + lt_min: bool = self.min is not None and ( + operator.le if self.min_open else operator.lt + )(rv, self.min) + gt_max: bool = self.max is not None and ( + operator.ge if self.max_open else operator.gt + )(rv, self.max) + + if self.clamp: + if lt_min: + return self._clamp(self.min, 1, self.min_open) # type: ignore + + if gt_max: + return self._clamp(self.max, -1, self.max_open) # type: ignore + + if lt_min or gt_max: + self.fail( + _("{value} is not in the range {range}.").format( + value=rv, 
range=self._describe_range() + ), + param, + ctx, + ) + + return rv + + def _clamp(self, bound: float, dir: t.Literal[1, -1], open: bool) -> float: + """Find the valid value to clamp to bound in the given + direction. + + :param bound: The boundary value. + :param dir: 1 or -1 indicating the direction to move. + :param open: If true, the range does not include the bound. + """ + raise NotImplementedError + + def _describe_range(self) -> str: + """Describe the range for use in help text.""" + if self.min is None: + op = "<" if self.max_open else "<=" + return f"x{op}{self.max}" + + if self.max is None: + op = ">" if self.min_open else ">=" + return f"x{op}{self.min}" + + lop = "<" if self.min_open else "<=" + rop = "<" if self.max_open else "<=" + return f"{self.min}{lop}x{rop}{self.max}" + + def __repr__(self) -> str: + clamp = " clamped" if self.clamp else "" + return f"<{type(self).__name__} {self._describe_range()}{clamp}>" + + +class IntParamType(_NumberParamTypeBase): + name = "integer" + _number_class = int + + def __repr__(self) -> str: + return "INT" + + +class IntRange(_NumberRangeBase, IntParamType): + """Restrict an :data:`click.INT` value to a range of accepted + values. See :ref:`ranges`. + + If ``min`` or ``max`` are not passed, any value is accepted in that + direction. If ``min_open`` or ``max_open`` are enabled, the + corresponding boundary is not included in the range. + + If ``clamp`` is enabled, a value outside the range is clamped to the + boundary instead of failing. + + .. versionchanged:: 8.0 + Added the ``min_open`` and ``max_open`` parameters. + """ + + name = "integer range" + + def _clamp( # type: ignore + self, bound: int, dir: t.Literal[1, -1], open: bool + ) -> int: + if not open: + return bound + + return bound + dir + + +class FloatParamType(_NumberParamTypeBase): + name = "float" + _number_class = float + + def __repr__(self) -> str: + return "FLOAT" + + +class FloatRange(_NumberRangeBase, FloatParamType): + """Restrict a :data:`click.FLOAT` value to a range of accepted + values. See :ref:`ranges`. + + If ``min`` or ``max`` are not passed, any value is accepted in that + direction. If ``min_open`` or ``max_open`` are enabled, the + corresponding boundary is not included in the range. + + If ``clamp`` is enabled, a value outside the range is clamped to the + boundary instead of failing. This is not supported if either + boundary is marked ``open``. + + .. versionchanged:: 8.0 + Added the ``min_open`` and ``max_open`` parameters. + """ + + name = "float range" + + def __init__( + self, + min: float | None = None, + max: float | None = None, + min_open: bool = False, + max_open: bool = False, + clamp: bool = False, + ) -> None: + super().__init__( + min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp + ) + + if (min_open or max_open) and clamp: + raise TypeError("Clamping is not supported for open bounds.") + + def _clamp(self, bound: float, dir: t.Literal[1, -1], open: bool) -> float: + if not open: + return bound + + # Could use math.nextafter here, but clamping an + # open float range doesn't seem to be particularly useful. It's + # left up to the user to write a callback to do it if needed. 
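A sketch of clamping versus failing for the range types defined in this file; the ``mix`` command is invented for the example:

```python
# Sketch: IntRange clamps when asked; FloatRange fails with a UsageError.
import click
from click.testing import CliRunner

@click.command()
@click.option("--volume", type=click.IntRange(0, 11, clamp=True))
@click.option("--ratio", type=click.FloatRange(0.0, 1.0))
def mix(volume, ratio):
    click.echo(f"{volume} {ratio}")

runner = CliRunner()
assert "11" in runner.invoke(mix, ["--volume", "99", "--ratio", "0.5"]).output

result = runner.invoke(mix, ["--volume", "5", "--ratio", "2.0"])
assert result.exit_code == 2   # "2.0 is not in the range 0.0<=x<=1.0."
```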
+ raise RuntimeError("Clamping is not supported for open bounds.") + + +class BoolParamType(ParamType): + name = "boolean" + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + if value in {False, True}: + return bool(value) + + norm = value.strip().lower() + + if norm in {"1", "true", "t", "yes", "y", "on"}: + return True + + if norm in {"0", "false", "f", "no", "n", "off"}: + return False + + self.fail( + _("{value!r} is not a valid boolean.").format(value=value), param, ctx + ) + + def __repr__(self) -> str: + return "BOOL" + + +class UUIDParameterType(ParamType): + name = "uuid" + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + import uuid + + if isinstance(value, uuid.UUID): + return value + + value = value.strip() + + try: + return uuid.UUID(value) + except ValueError: + self.fail( + _("{value!r} is not a valid UUID.").format(value=value), param, ctx + ) + + def __repr__(self) -> str: + return "UUID" + + +class File(ParamType): + """Declares a parameter to be a file for reading or writing. The file + is automatically closed once the context tears down (after the command + finished working). + + Files can be opened for reading or writing. The special value ``-`` + indicates stdin or stdout depending on the mode. + + By default, the file is opened for reading text data, but it can also be + opened in binary mode or for writing. The encoding parameter can be used + to force a specific encoding. + + The `lazy` flag controls if the file should be opened immediately or upon + first IO. The default is to be non-lazy for standard input and output + streams as well as files opened for reading, `lazy` otherwise. When opening a + file lazily for reading, it is still opened temporarily for validation, but + will not be held open until first IO. lazy is mainly useful when opening + for writing to avoid creating the file until it is needed. + + Files can also be opened atomically in which case all writes go into a + separate file in the same folder and upon completion the file will + be moved over to the original location. This is useful if a file + regularly read by other users is modified. + + See :ref:`file-args` for more information. + + .. versionchanged:: 2.0 + Added the ``atomic`` parameter. 
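A minimal sketch of the lazy and atomic behavior described in this docstring; the ``copy`` command and its file names are illustrative only:

```python
# Sketch: lazy + atomic writing, and `-` for the standard streams.
import click

@click.command()
@click.argument("src", type=click.File("r"))
@click.argument("dst", type=click.File("w", lazy=True, atomic=True))
def copy(src, dst):
    # `dst` is a LazyFile: nothing is created until the first write, and
    # the atomic flag writes to a temp file moved into place on close.
    dst.write(src.read())

# `copy notes.txt backup.txt` copies the file; `copy notes.txt -`
# writes to stdout instead, since `-` selects the standard stream.
```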
+ """ + + name = "filename" + envvar_list_splitter: t.ClassVar[str] = os.path.pathsep + + def __init__( + self, + mode: str = "r", + encoding: str | None = None, + errors: str | None = "strict", + lazy: bool | None = None, + atomic: bool = False, + ) -> None: + self.mode = mode + self.encoding = encoding + self.errors = errors + self.lazy = lazy + self.atomic = atomic + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update(mode=self.mode, encoding=self.encoding) + return info_dict + + def resolve_lazy_flag(self, value: str | os.PathLike[str]) -> bool: + if self.lazy is not None: + return self.lazy + if os.fspath(value) == "-": + return False + elif "w" in self.mode: + return True + return False + + def convert( + self, + value: str | os.PathLike[str] | t.IO[t.Any], + param: Parameter | None, + ctx: Context | None, + ) -> t.IO[t.Any]: + if _is_file_like(value): + return value + + value = t.cast("str | os.PathLike[str]", value) + + try: + lazy = self.resolve_lazy_flag(value) + + if lazy: + lf = LazyFile( + value, self.mode, self.encoding, self.errors, atomic=self.atomic + ) + + if ctx is not None: + ctx.call_on_close(lf.close_intelligently) + + return t.cast("t.IO[t.Any]", lf) + + f, should_close = open_stream( + value, self.mode, self.encoding, self.errors, atomic=self.atomic + ) + + # If a context is provided, we automatically close the file + # at the end of the context execution (or flush out). If a + # context does not exist, it's the caller's responsibility to + # properly close the file. This for instance happens when the + # type is used with prompts. + if ctx is not None: + if should_close: + ctx.call_on_close(safecall(f.close)) + else: + ctx.call_on_close(safecall(f.flush)) + + return f + except OSError as e: + self.fail(f"'{format_filename(value)}': {e.strerror}", param, ctx) + + def shell_complete( + self, ctx: Context, param: Parameter, incomplete: str + ) -> list[CompletionItem]: + """Return a special completion marker that tells the completion + system to use the shell to provide file path completions. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + return [CompletionItem(incomplete, type="file")] + + +def _is_file_like(value: t.Any) -> te.TypeGuard[t.IO[t.Any]]: + return hasattr(value, "read") or hasattr(value, "write") + + +class Path(ParamType): + """The ``Path`` type is similar to the :class:`File` type, but + returns the filename instead of an open file. Various checks can be + enabled to validate the type of file and permissions. + + :param exists: The file or directory needs to exist for the value to + be valid. If this is not set to ``True``, and the file does not + exist, then all further checks are silently skipped. + :param file_okay: Allow a file as a value. + :param dir_okay: Allow a directory as a value. + :param readable: if true, a readable check is performed. + :param writable: if true, a writable check is performed. + :param executable: if true, an executable check is performed. + :param resolve_path: Make the value absolute and resolve any + symlinks. A ``~`` is not expanded, as this is supposed to be + done by the shell only. + :param allow_dash: Allow a single dash as a value, which indicates + a standard stream (but does not open it). Use + :func:`~click.open_file` to handle opening this value. 
+ :param path_type: Convert the incoming path value to this type. If + ``None``, keep Python's default, which is ``str``. Useful to + convert to :class:`pathlib.Path`. + + .. versionchanged:: 8.1 + Added the ``executable`` parameter. + + .. versionchanged:: 8.0 + Allow passing ``path_type=pathlib.Path``. + + .. versionchanged:: 6.0 + Added the ``allow_dash`` parameter. + """ + + envvar_list_splitter: t.ClassVar[str] = os.path.pathsep + + def __init__( + self, + exists: bool = False, + file_okay: bool = True, + dir_okay: bool = True, + writable: bool = False, + readable: bool = True, + resolve_path: bool = False, + allow_dash: bool = False, + path_type: type[t.Any] | None = None, + executable: bool = False, + ): + self.exists = exists + self.file_okay = file_okay + self.dir_okay = dir_okay + self.readable = readable + self.writable = writable + self.executable = executable + self.resolve_path = resolve_path + self.allow_dash = allow_dash + self.type = path_type + + if self.file_okay and not self.dir_okay: + self.name: str = _("file") + elif self.dir_okay and not self.file_okay: + self.name = _("directory") + else: + self.name = _("path") + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + exists=self.exists, + file_okay=self.file_okay, + dir_okay=self.dir_okay, + writable=self.writable, + readable=self.readable, + allow_dash=self.allow_dash, + ) + return info_dict + + def coerce_path_result( + self, value: str | os.PathLike[str] + ) -> str | bytes | os.PathLike[str]: + if self.type is not None and not isinstance(value, self.type): + if self.type is str: + return os.fsdecode(value) + elif self.type is bytes: + return os.fsencode(value) + else: + return t.cast("os.PathLike[str]", self.type(value)) + + return value + + def convert( + self, + value: str | os.PathLike[str], + param: Parameter | None, + ctx: Context | None, + ) -> str | bytes | os.PathLike[str]: + rv = value + + is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-") + + if not is_dash: + if self.resolve_path: + rv = os.path.realpath(rv) + + try: + st = os.stat(rv) + except OSError: + if not self.exists: + return self.coerce_path_result(rv) + self.fail( + _("{name} {filename!r} does not exist.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + + if not self.file_okay and stat.S_ISREG(st.st_mode): + self.fail( + _("{name} {filename!r} is a file.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + if not self.dir_okay and stat.S_ISDIR(st.st_mode): + self.fail( + _("{name} {filename!r} is a directory.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + + if self.readable and not os.access(rv, os.R_OK): + self.fail( + _("{name} {filename!r} is not readable.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + + if self.writable and not os.access(rv, os.W_OK): + self.fail( + _("{name} {filename!r} is not writable.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + + if self.executable and not os.access(value, os.X_OK): + self.fail( + _("{name} {filename!r} is not executable.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + + return self.coerce_path_result(rv) + + def shell_complete( + self, ctx: Context, param: Parameter, incomplete: str + ) -> list[CompletionItem]: + """Return a special completion marker that 
tells the completion + system to use the shell to provide path completions for only + directories or any paths. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + type = "dir" if self.dir_okay and not self.file_okay else "file" + return [CompletionItem(incomplete, type=type)] + + +class Tuple(CompositeParamType): + """The default behavior of Click is to apply a type on a value directly. + This works well in most cases, except for when `nargs` is set to a fixed + count and different types should be used for different items. In this + case the :class:`Tuple` type can be used. This type can only be used + if `nargs` is set to a fixed number. + + For more information see :ref:`tuple-type`. + + This can be selected by using a Python tuple literal as a type. + + :param types: a list of types that should be used for the tuple items. + """ + + def __init__(self, types: cabc.Sequence[type[t.Any] | ParamType]) -> None: + self.types: cabc.Sequence[ParamType] = [convert_type(ty) for ty in types] + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["types"] = [t.to_info_dict() for t in self.types] + return info_dict + + @property + def name(self) -> str: # type: ignore + return f"<{' '.join(ty.name for ty in self.types)}>" + + @property + def arity(self) -> int: # type: ignore + return len(self.types) + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + len_type = len(self.types) + len_value = len(value) + + if len_value != len_type: + self.fail( + ngettext( + "{len_type} values are required, but {len_value} was given.", + "{len_type} values are required, but {len_value} were given.", + len_value, + ).format(len_type=len_type, len_value=len_value), + param=param, + ctx=ctx, + ) + + return tuple( + ty(x, param, ctx) for ty, x in zip(self.types, value, strict=False) + ) + + +def convert_type(ty: t.Any | None, default: t.Any | None = None) -> ParamType: + """Find the most appropriate :class:`ParamType` for the given Python + type. If the type isn't provided, it can be inferred from a default + value. + """ + guessed_type = False + + if ty is None and default is not None: + if isinstance(default, (tuple, list)): + # If the default is empty, ty will remain None and will + # return STRING. + if default: + item = default[0] + + # A tuple of tuples needs to detect the inner types. + # Can't call convert recursively because that would + # incorrectly unwind the tuple to a single type. + if isinstance(item, (tuple, list)): + ty = tuple(map(type, item)) + else: + ty = type(item) + else: + ty = type(default) + + guessed_type = True + + if isinstance(ty, tuple): + return Tuple(ty) + + if isinstance(ty, ParamType): + return ty + + if ty is str or ty is None: + return STRING + + if ty is int: + return INT + + if ty is float: + return FLOAT + + if ty is bool: + return BOOL + + if guessed_type: + return STRING + + if __debug__: + try: + if issubclass(ty, ParamType): + raise AssertionError( + f"Attempted to use an uninstantiated parameter type ({ty})." + ) + except TypeError: + # ty is an instance (correct), so issubclass fails. + pass + + return FuncParamType(ty) + + +#: A dummy parameter type that just does nothing. 
From a user's
+#: perspective this appears to just be the same as `STRING` but
+#: internally no string conversion takes place if the input was bytes.
+#: This is usually useful when working with file paths as they can
+#: appear in bytes and unicode.
+#:
+#: For path related uses the :class:`Path` type is a better choice but
+#: there are situations where an unprocessed type is useful which is why
+#: it is provided.
+#:
+#: .. versionadded:: 4.0
+UNPROCESSED = UnprocessedParamType()
+
+#: A unicode string parameter type which is the implicit default. This
+#: can also be selected by using ``str`` as type.
+STRING = StringParamType()
+
+#: An integer parameter. This can also be selected by using ``int`` as
+#: type.
+INT = IntParamType()
+
+#: A floating point value parameter. This can also be selected by using
+#: ``float`` as type.
+FLOAT = FloatParamType()
+
+#: A boolean parameter. This is the default for boolean flags. This can
+#: also be selected by using ``bool`` as a type.
+BOOL = BoolParamType()
+
+#: A UUID parameter.
+UUID = UUIDParameterType()
+
+
+class OptionHelpExtra(t.TypedDict, total=False):
+    envvars: tuple[str, ...]
+    default: str
+    range: str
+    required: str
diff --git a/venv/Lib/site-packages/click/utils.py b/venv/Lib/site-packages/click/utils.py
new file mode 100644
index 00000000..ab2fe588
--- /dev/null
+++ b/venv/Lib/site-packages/click/utils.py
@@ -0,0 +1,627 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import os
+import re
+import sys
+import typing as t
+from functools import update_wrapper
+from types import ModuleType
+from types import TracebackType
+
+from ._compat import _default_text_stderr
+from ._compat import _default_text_stdout
+from ._compat import _find_binary_writer
+from ._compat import auto_wrap_for_ansi
+from ._compat import binary_streams
+from ._compat import open_stream
+from ._compat import should_strip_ansi
+from ._compat import strip_ansi
+from ._compat import text_streams
+from ._compat import WIN
+from .globals import resolve_color_default
+
+if t.TYPE_CHECKING:
+    import typing_extensions as te
+
+    P = te.ParamSpec("P")
+
+R = t.TypeVar("R")
+
+
+def _posixify(name: str) -> str:
+    return "-".join(name.split()).lower()
+
+
+def safecall(func: t.Callable[P, R]) -> t.Callable[P, R | None]:
+    """Wraps a function so that it swallows exceptions."""
+
+    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R | None:
+        try:
+            return func(*args, **kwargs)
+        except Exception:
+            pass
+        return None
+
+    return update_wrapper(wrapper, func)
+
+
+def make_str(value: t.Any) -> str:
+    """Converts a value into a valid string."""
+    if isinstance(value, bytes):
+        try:
+            return value.decode(sys.getfilesystemencoding())
+        except UnicodeError:
+            return value.decode("utf-8", "replace")
+    return str(value)
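A short sketch of the truncation rules implemented by ``make_default_short_help``, defined just below (the help strings are invented for the example):

```python
# Sketch: truncation prefers a sentence boundary; otherwise it trims
# whole words and appends "..." to stay within max_length.
from click.utils import make_default_short_help

assert make_default_short_help("Sync files. Then do more.") == "Sync files."
assert make_default_short_help("word " * 30, max_length=20) == "word word word..."
```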
+ return " ".join(words[: i + 1]) + + if total_length == max_length and i != last_index: + break # not at sentence end, truncate with "..." + else: + return " ".join(words) # no truncation needed + + # Account for the length of the suffix. + total_length += len("...") + + # remove words until the length is short enough + while i > 0: + total_length -= len(words[i]) + (i > 0) + + if total_length <= max_length: + break + + i -= 1 + + return " ".join(words[:i]) + "..." + + +class LazyFile: + """A lazy file works like a regular file but it does not fully open + the file but it does perform some basic checks early to see if the + filename parameter does make sense. This is useful for safely opening + files for writing. + """ + + def __init__( + self, + filename: str | os.PathLike[str], + mode: str = "r", + encoding: str | None = None, + errors: str | None = "strict", + atomic: bool = False, + ): + self.name: str = os.fspath(filename) + self.mode = mode + self.encoding = encoding + self.errors = errors + self.atomic = atomic + self._f: t.IO[t.Any] | None + self.should_close: bool + + if self.name == "-": + self._f, self.should_close = open_stream(filename, mode, encoding, errors) + else: + if "r" in mode: + # Open and close the file in case we're opening it for + # reading so that we can catch at least some errors in + # some cases early. + open(filename, mode).close() + self._f = None + self.should_close = True + + def __getattr__(self, name: str) -> t.Any: + return getattr(self.open(), name) + + def __repr__(self) -> str: + if self._f is not None: + return repr(self._f) + return f"" + + def open(self) -> t.IO[t.Any]: + """Opens the file if it's not yet open. This call might fail with + a :exc:`FileError`. Not handling this error will produce an error + that Click shows. + """ + if self._f is not None: + return self._f + try: + rv, self.should_close = open_stream( + self.name, self.mode, self.encoding, self.errors, atomic=self.atomic + ) + except OSError as e: + from .exceptions import FileError + + raise FileError(self.name, hint=e.strerror) from e + self._f = rv + return rv + + def close(self) -> None: + """Closes the underlying file, no matter what.""" + if self._f is not None: + self._f.close() + + def close_intelligently(self) -> None: + """This function only closes the file if it was opened by the lazy + file wrapper. For instance this will never close stdin. + """ + if self.should_close: + self.close() + + def __enter__(self) -> LazyFile: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> None: + self.close_intelligently() + + def __iter__(self) -> cabc.Iterator[t.AnyStr]: + self.open() + return iter(self._f) # type: ignore + + +class KeepOpenFile: + def __init__(self, file: t.IO[t.Any]) -> None: + self._file: t.IO[t.Any] = file + + def __getattr__(self, name: str) -> t.Any: + return getattr(self._file, name) + + def __enter__(self) -> KeepOpenFile: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> None: + pass + + def __repr__(self) -> str: + return repr(self._file) + + def __iter__(self) -> cabc.Iterator[t.AnyStr]: + return iter(self._file) + + +def echo( + message: t.Any | None = None, + file: t.IO[t.Any] | None = None, + nl: bool = True, + err: bool = False, + color: bool | None = None, +) -> None: + """Print a message and newline to stdout or a file. 
This should be + used instead of :func:`print` because it provides better support + for different data, files, and environments. + + Compared to :func:`print`, this does the following: + + - Ensures that the output encoding is not misconfigured on Linux. + - Supports Unicode in the Windows console. + - Supports writing to binary outputs, and supports writing bytes + to text outputs. + - Supports colors and styles on Windows. + - Removes ANSI color and style codes if the output does not look + like an interactive terminal. + - Always flushes the output. + + :param message: The string or bytes to output. Other objects are + converted to strings. + :param file: The file to write to. Defaults to ``stdout``. + :param err: Write to ``stderr`` instead of ``stdout``. + :param nl: Print a newline after the message. Enabled by default. + :param color: Force showing or hiding colors and other styles. By + default Click will remove color if the output does not look like + an interactive terminal. + + .. versionchanged:: 6.0 + Support Unicode output on the Windows console. Click does not + modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()`` + will still not support Unicode. + + .. versionchanged:: 4.0 + Added the ``color`` parameter. + + .. versionadded:: 3.0 + Added the ``err`` parameter. + + .. versionchanged:: 2.0 + Support colors on Windows if colorama is installed. + """ + if file is None: + if err: + file = _default_text_stderr() + else: + file = _default_text_stdout() + + # There are no standard streams attached to write to. For example, + # pythonw on Windows. + if file is None: + return + + # Convert non bytes/text into the native string type. + if message is not None and not isinstance(message, (str, bytes, bytearray)): + out: str | bytes | None = str(message) + else: + out = message + + if nl: + out = out or "" + if isinstance(out, str): + out += "\n" + else: + out += b"\n" + + if not out: + file.flush() + return + + # If there is a message and the value looks like bytes, we manually + # need to find the binary stream and write the message in there. + # This is done separately so that most stream types will work as you + # would expect. Eg: you can write to StringIO for other cases. + if isinstance(out, (bytes, bytearray)): + binary_file = _find_binary_writer(file) + + if binary_file is not None: + file.flush() + binary_file.write(out) + binary_file.flush() + return + + # ANSI style code support. For no message or bytes, nothing happens. + # When outputting to a file instead of a terminal, strip codes. + else: + color = resolve_color_default(color) + + if should_strip_ansi(file, color): + out = strip_ansi(out) + elif WIN: + if auto_wrap_for_ansi is not None: + file = auto_wrap_for_ansi(file, color) # type: ignore + elif not color: + out = strip_ansi(out) + + file.write(out) # type: ignore + file.flush() + + +def get_binary_stream(name: t.Literal["stdin", "stdout", "stderr"]) -> t.BinaryIO: + """Returns a system stream for byte processing. + + :param name: the name of the stream to open. Valid names are ``'stdin'``, + ``'stdout'`` and ``'stderr'`` + """ + opener = binary_streams.get(name) + if opener is None: + raise TypeError(f"Unknown standard stream '{name}'") + return opener() + + +def get_text_stream( + name: t.Literal["stdin", "stdout", "stderr"], + encoding: str | None = None, + errors: str | None = "strict", +) -> t.TextIO: + """Returns a system stream for text processing. 
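A short sketch of ``echo()``'s dispatch rules described above; all calls use the public click API:

```python
# Sketch: echo() adapts to the payload and destination.
import click

click.echo("styled " + click.style("text", fg="green"))  # ANSI stripped if piped
click.echo(b"\xffraw-bytes")        # bytes are routed to the binary stream
click.echo("to stderr", err=True)   # same as writing to the stderr text stream
click.echo("no newline", nl=False)
```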
This usually returns + a wrapped stream around a binary stream returned from + :func:`get_binary_stream` but it also can take shortcuts for already + correctly configured streams. + + :param name: the name of the stream to open. Valid names are ``'stdin'``, + ``'stdout'`` and ``'stderr'`` + :param encoding: overrides the detected default encoding. + :param errors: overrides the default error mode. + """ + opener = text_streams.get(name) + if opener is None: + raise TypeError(f"Unknown standard stream '{name}'") + return opener(encoding, errors) + + +def open_file( + filename: str | os.PathLike[str], + mode: str = "r", + encoding: str | None = None, + errors: str | None = "strict", + lazy: bool = False, + atomic: bool = False, +) -> t.IO[t.Any]: + """Open a file, with extra behavior to handle ``'-'`` to indicate + a standard stream, lazy open on write, and atomic write. Similar to + the behavior of the :class:`~click.File` param type. + + If ``'-'`` is given to open ``stdout`` or ``stdin``, the stream is + wrapped so that using it in a context manager will not close it. + This makes it possible to use the function without accidentally + closing a standard stream: + + .. code-block:: python + + with open_file(filename) as f: + ... + + :param filename: The name or Path of the file to open, or ``'-'`` for + ``stdin``/``stdout``. + :param mode: The mode in which to open the file. + :param encoding: The encoding to decode or encode a file opened in + text mode. + :param errors: The error handling mode. + :param lazy: Wait to open the file until it is accessed. For read + mode, the file is temporarily opened to raise access errors + early, then closed until it is read again. + :param atomic: Write to a temporary file and replace the given file + on close. + + .. versionadded:: 3.0 + """ + if lazy: + return t.cast( + "t.IO[t.Any]", LazyFile(filename, mode, encoding, errors, atomic=atomic) + ) + + f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic) + + if not should_close: + f = t.cast("t.IO[t.Any]", KeepOpenFile(f)) + + return f + + +def format_filename( + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes], + shorten: bool = False, +) -> str: + """Format a filename as a string for display. Ensures the filename can be + displayed by replacing any invalid bytes or surrogate escapes in the name + with the replacement character ``�``. + + Invalid bytes or surrogate escapes will raise an error when written to a + stream with ``errors="strict"``. This will typically happen with ``stdout`` + when the locale is something like ``en_GB.UTF-8``. + + Many scenarios *are* safe to write surrogates though, due to PEP 538 and + PEP 540, including: + + - Writing to ``stderr``, which uses ``errors="backslashreplace"``. + - The system has ``LANG=C.UTF-8``, ``C``, or ``POSIX``. Python opens + stdout and stderr with ``errors="surrogateescape"``. + - None of ``LANG/LC_*`` are set. Python assumes ``LANG=C.UTF-8``. + - Python is started in UTF-8 mode with ``PYTHONUTF8=1`` or ``-X utf8``. + Python opens stdout and stderr with ``errors="surrogateescape"``. + + :param filename: formats a filename for UI display. This will also convert + the filename into unicode without failing. + :param shorten: this optionally shortens the filename to strip of the + path that leads up to it. 
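A minimal sketch of the ``open_file`` behavior just described; the file names are invented for the example:

```python
# Sketch: '-' maps to a standard stream that the context manager will
# not close; lazy + atomic defers creation and renames into place.
import click

with click.open_file("-", "w") as f:        # stdout, wrapped in KeepOpenFile
    f.write("stdout stays usable after the with-block\n")

with click.open_file("out.log", "w", lazy=True, atomic=True) as f:
    f.write("created on first write, moved into place on close\n")
```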
+    """
+    if shorten:
+        filename = os.path.basename(filename)
+    else:
+        filename = os.fspath(filename)
+
+    if isinstance(filename, bytes):
+        filename = filename.decode(sys.getfilesystemencoding(), "replace")
+    else:
+        filename = filename.encode("utf-8", "surrogateescape").decode(
+            "utf-8", "replace"
+        )
+
+    return filename
+
+
+def get_app_dir(app_name: str, roaming: bool = True, force_posix: bool = False) -> str:
+    r"""Returns the config folder for the application.  The default behavior
+    is to return whatever is most appropriate for the operating system.
+
+    To give you an idea, for an app called ``"Foo Bar"``, something like
+    the following folders could be returned:
+
+    Mac OS X:
+      ``~/Library/Application Support/Foo Bar``
+    Mac OS X (POSIX):
+      ``~/.foo-bar``
+    Unix:
+      ``~/.config/foo-bar``
+    Unix (POSIX):
+      ``~/.foo-bar``
+    Windows (roaming):
+      ``C:\Users\<user>\AppData\Roaming\Foo Bar``
+    Windows (not roaming):
+      ``C:\Users\<user>\AppData\Local\Foo Bar``
+
+    .. versionadded:: 2.0
+
+    :param app_name: the application name.  This should be properly capitalized
+        and can contain whitespace.
+    :param roaming: controls if the folder should be roaming or not on Windows.
+        Has no effect otherwise.
+    :param force_posix: if this is set to `True` then on any POSIX system the
+        folder will be stored in the home folder with a leading
+        dot instead of the XDG config home or darwin's
+        application support folder.
+    """
+    if WIN:
+        key = "APPDATA" if roaming else "LOCALAPPDATA"
+        folder = os.environ.get(key)
+        if folder is None:
+            folder = os.path.expanduser("~")
+        return os.path.join(folder, app_name)
+    if force_posix:
+        return os.path.join(os.path.expanduser(f"~/.{_posixify(app_name)}"))
+    if sys.platform == "darwin":
+        return os.path.join(
+            os.path.expanduser("~/Library/Application Support"), app_name
+        )
+    return os.path.join(
+        os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")),
+        _posixify(app_name),
+    )
+
+
+class PacifyFlushWrapper:
+    """This wrapper is used to catch and suppress BrokenPipeErrors resulting
+    from ``.flush()`` being called on broken pipe during the shutdown/final-GC
+    of the Python interpreter. Notably ``.flush()`` is always called on
+    ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any
+    other cleanup code, and the case where the underlying file is not a broken
+    pipe, all calls and attributes are proxied.
+    """
+
+    def __init__(self, wrapped: t.IO[t.Any]) -> None:
+        self.wrapped = wrapped
+
+    def flush(self) -> None:
+        try:
+            self.wrapped.flush()
+        except OSError as e:
+            import errno
+
+            if e.errno != errno.EPIPE:
+                raise
+
+    def __getattr__(self, attr: str) -> t.Any:
+        return getattr(self.wrapped, attr)
+
+
+def _detect_program_name(
+    path: str | None = None, _main: ModuleType | None = None
+) -> str:
+    """Determine the command used to run the program, for use in help
+    text. If a file or entry point was executed, the file name is
+    returned. If ``python -m`` was used to execute a module or package,
+    ``python -m name`` is returned.
+
+    This doesn't try to be too precise, the goal is to give a concise
+    name for help text. Files are only shown as their name without the
+    path. ``python`` is only shown for modules, and the full path to
+    ``sys.executable`` is not shown.
+
+    :param path: The Python file being executed. Python puts this in
+        ``sys.argv[0]``, which is used by default.
+    :param _main: The ``__main__`` module. This should only be passed
+        during internal testing.
+
+    ..
versionadded:: 8.0 + Based on command args detection in the Werkzeug reloader. + + :meta private: + """ + if _main is None: + _main = sys.modules["__main__"] + + if not path: + path = sys.argv[0] + + # The value of __package__ indicates how Python was called. It may + # not exist if a setuptools script is installed as an egg. It may be + # set incorrectly for entry points created with pip on Windows. + # It is set to "" inside a Shiv or PEX zipapp. + if getattr(_main, "__package__", None) in {None, ""} or ( + os.name == "nt" + and _main.__package__ == "" + and not os.path.exists(path) + and os.path.exists(f"{path}.exe") + ): + # Executed a file, like "python app.py". + return os.path.basename(path) + + # Executed a module, like "python -m example". + # Rewritten by Python from "-m script" to "/path/to/script.py". + # Need to look at main module to determine how it was executed. + py_module = t.cast(str, _main.__package__) + name = os.path.splitext(os.path.basename(path))[0] + + # A submodule like "example.cli". + if name != "__main__": + py_module = f"{py_module}.{name}" + + return f"python -m {py_module.lstrip('.')}" + + +def _expand_args( + args: cabc.Iterable[str], + *, + user: bool = True, + env: bool = True, + glob_recursive: bool = True, +) -> list[str]: + """Simulate Unix shell expansion with Python functions. + + See :func:`glob.glob`, :func:`os.path.expanduser`, and + :func:`os.path.expandvars`. + + This is intended for use on Windows, where the shell does not do any + expansion. It may not exactly match what a Unix shell would do. + + :param args: List of command line arguments to expand. + :param user: Expand user home directory. + :param env: Expand environment variables. + :param glob_recursive: ``**`` matches directories recursively. + + .. versionchanged:: 8.1 + Invalid glob patterns are treated as empty expansions rather + than raising an error. + + .. versionadded:: 8.0 + + :meta private: + """ + from glob import glob + + out = [] + + for arg in args: + if user: + arg = os.path.expanduser(arg) + + if env: + arg = os.path.expandvars(arg) + + try: + matches = glob(arg, recursive=glob_recursive) + except re.error: + matches = [] + + if not matches: + out.append(arg) + else: + out.extend(matches) + + return out diff --git a/venv/Lib/site-packages/colorama-0.4.6.dist-info/INSTALLER b/venv/Lib/site-packages/colorama-0.4.6.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/colorama-0.4.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/colorama-0.4.6.dist-info/METADATA b/venv/Lib/site-packages/colorama-0.4.6.dist-info/METADATA new file mode 100644 index 00000000..a1b5c575 --- /dev/null +++ b/venv/Lib/site-packages/colorama-0.4.6.dist-info/METADATA @@ -0,0 +1,441 @@ +Metadata-Version: 2.1 +Name: colorama +Version: 0.4.6 +Summary: Cross-platform colored terminal text. 
+Project-URL: Homepage, https://github.com/tartley/colorama
+Author-email: Jonathan Hartley <tartley@tartley.com>
+License-File: LICENSE.txt
+Keywords: ansi,color,colour,crossplatform,terminal,text,windows,xplatform
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Terminals
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7
+Description-Content-Type: text/x-rst
+
+.. image:: https://img.shields.io/pypi/v/colorama.svg
+    :target: https://pypi.org/project/colorama/
+    :alt: Latest Version
+
+.. image:: https://img.shields.io/pypi/pyversions/colorama.svg
+    :target: https://pypi.org/project/colorama/
+    :alt: Supported Python versions
+
+.. image:: https://github.com/tartley/colorama/actions/workflows/test.yml/badge.svg
+    :target: https://github.com/tartley/colorama/actions/workflows/test.yml
+    :alt: Build Status
+
+Colorama
+========
+
+Makes ANSI escape character sequences (for producing colored terminal text and
+cursor positioning) work under MS Windows.
+
+.. |donate| image:: https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif
+  :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=2MZ9D2GMLYCUJ&item_name=Colorama&currency_code=USD
+  :alt: Donate with Paypal
+
+`PyPI for releases <https://pypi.org/project/colorama/>`_ |
+`Github for source <https://github.com/tartley/colorama>`_ |
+`Colorama for enterprise on Tidelift <https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme>`_
+
+If you find Colorama useful, please |donate| to the authors. Thank you!
+
+Installation
+------------
+
+Tested on CPython 2.7, 3.7, 3.8, 3.9 and 3.10 and Pypy 2.7 and 3.8.
+
+No requirements other than the standard library.
+
+.. code-block:: bash
+
+    pip install colorama
+    # or
+    conda install -c anaconda colorama
+
+Description
+-----------
+
+ANSI escape character sequences have long been used to produce colored terminal
+text and cursor positioning on Unix and Macs. Colorama makes this work on
+Windows, too, by wrapping ``stdout``, stripping ANSI sequences it finds (which
+would appear as gobbledygook in the output), and converting them into the
+appropriate win32 calls to modify the state of the terminal. On other platforms,
+Colorama does nothing.
+
+This has the upshot of providing a simple cross-platform API for printing
+colored terminal text from Python, and has the happy side-effect that existing
+applications or libraries which use ANSI sequences to produce colored output on
+Linux or Macs can now also work on Windows, simply by calling
+``colorama.just_fix_windows_console()`` (since v0.4.6) or ``colorama.init()``
+(all versions, but may have other side-effects – see below).
+
+An alternative approach is to install ``ansi.sys`` on Windows machines, which
+provides the same behaviour for all applications running in terminals. Colorama
+is intended for situations where that isn't easy (e.g., maybe your app doesn't
+have an installer.)
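+
+For example, an application that already emits ANSI sequences needs only one
+extra call to render correctly on Windows (a minimal sketch of the behaviour
+described above; all other output code stays unchanged):
+
+.. code-block:: python
+
+    import colorama
+
+    # flips Windows' native ANSI switch, or wraps the streams on old
+    # consoles; does nothing at all on other platforms
+    colorama.just_fix_windows_console()
+
+    print('\033[31m' + 'red text' + '\033[39m')  # renders in red on Windows too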
+ +Demo scripts in the source code repository print some colored text using +ANSI sequences. Compare their output under Gnome-terminal's built in ANSI +handling, versus on Windows Command-Prompt using Colorama: + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/ubuntu-demo.png + :width: 661 + :height: 357 + :alt: ANSI sequences on Ubuntu under gnome-terminal. + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/windows-demo.png + :width: 668 + :height: 325 + :alt: Same ANSI sequences on Windows, using Colorama. + +These screenshots show that, on Windows, Colorama does not support ANSI 'dim +text'; it looks the same as 'normal text'. + +Usage +----- + +Initialisation +.............. + +If the only thing you want from Colorama is to get ANSI escapes to work on +Windows, then run: + +.. code-block:: python + + from colorama import just_fix_windows_console + just_fix_windows_console() + +If you're on a recent version of Windows 10 or better, and your stdout/stderr +are pointing to a Windows console, then this will flip the magic configuration +switch to enable Windows' built-in ANSI support. + +If you're on an older version of Windows, and your stdout/stderr are pointing to +a Windows console, then this will wrap ``sys.stdout`` and/or ``sys.stderr`` in a +magic file object that intercepts ANSI escape sequences and issues the +appropriate Win32 calls to emulate them. + +In all other circumstances, it does nothing whatsoever. Basically the idea is +that this makes Windows act like Unix with respect to ANSI escape handling. + +It's safe to call this function multiple times. It's safe to call this function +on non-Windows platforms, but it won't do anything. It's safe to call this +function when one or both of your stdout/stderr are redirected to a file – it +won't do anything to those streams. + +Alternatively, you can use the older interface with more features (but also more +potential footguns): + +.. code-block:: python + + from colorama import init + init() + +This does the same thing as ``just_fix_windows_console``, except for the +following differences: + +- It's not safe to call ``init`` multiple times; you can end up with multiple + layers of wrapping and broken ANSI support. + +- Colorama will apply a heuristic to guess whether stdout/stderr support ANSI, + and if it thinks they don't, then it will wrap ``sys.stdout`` and + ``sys.stderr`` in a magic file object that strips out ANSI escape sequences + before printing them. This happens on all platforms, and can be convenient if + you want to write your code to emit ANSI escape sequences unconditionally, and + let Colorama decide whether they should actually be output. But note that + Colorama's heuristic is not particularly clever. + +- ``init`` also accepts explicit keyword args to enable/disable various + functionality – see below. + +To stop using Colorama before your program exits, simply call ``deinit()``. +This will restore ``stdout`` and ``stderr`` to their original values, so that +Colorama is disabled. To resume using Colorama again, call ``reinit()``; it is +cheaper than calling ``init()`` again (but does the same thing). + +Most users should depend on ``colorama >= 0.4.6``, and use +``just_fix_windows_console``. The old ``init`` interface will be supported +indefinitely for backwards compatibility, but we don't plan to fix any issues +with it, also for backwards compatibility. + +Colored Output +.............. 
+
+Cross-platform printing of colored text can then be done using Colorama's
+constant shorthand for ANSI escape sequences. These are deliberately
+rudimentary, see below.
+
+.. code-block:: python
+
+    from colorama import Fore, Back, Style
+    print(Fore.RED + 'some red text')
+    print(Back.GREEN + 'and with a green background')
+    print(Style.DIM + 'and in dim text')
+    print(Style.RESET_ALL)
+    print('back to normal now')
+
+...or simply by manually printing ANSI sequences from your own code:
+
+.. code-block:: python
+
+    print('\033[31m' + 'some red text')
+    print('\033[39m') # and reset to default color
+
+...or, Colorama can be used in conjunction with existing ANSI libraries
+such as the venerable `Termcolor <https://pypi.org/project/termcolor/>`_,
+the fabulous `Blessings <https://pypi.org/project/blessings/>`_,
+or the incredible `Rich <https://pypi.org/project/rich/>`_.
+
+If you wish Colorama's Fore, Back and Style constants were more capable,
+then consider using one of the above highly capable libraries to generate
+colors, etc, and use Colorama just for its primary purpose: to convert
+those ANSI sequences to also work on Windows:
+
+SIMILARLY, do not send PRs adding the generation of new ANSI types to Colorama.
+We are only interested in converting ANSI codes to win32 API calls, not
+shortcuts like the above to generate ANSI characters.
+
+.. code-block:: python
+
+    from colorama import just_fix_windows_console
+    from termcolor import colored
+
+    # use Colorama to make Termcolor work on Windows too
+    just_fix_windows_console()
+
+    # then use Termcolor for all colored text output
+    print(colored('Hello, World!', 'green', 'on_red'))
+
+Available formatting constants are::
+
+    Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
+    Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
+    Style: DIM, NORMAL, BRIGHT, RESET_ALL
+
+``Style.RESET_ALL`` resets foreground, background, and brightness. Colorama will
+perform this reset automatically on program exit.
+
+These are fairly well supported, but not part of the standard::
+
+    Fore: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX
+    Back: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX
+
+Cursor Positioning
+..................
+
+ANSI codes to reposition the cursor are supported. See ``demos/demo06.py`` for
+an example of how to generate them.
+
+Init Keyword Args
+.................
+
+``init()`` accepts some ``**kwargs`` to override default behaviour.
+
+init(autoreset=False):
+    If you find yourself repeatedly sending reset sequences to turn off color
+    changes at the end of every print, then ``init(autoreset=True)`` will
+    automate that:
+
+    .. code-block:: python
+
+        from colorama import init
+        init(autoreset=True)
+        print(Fore.RED + 'some red text')
+        print('automatically back to default color again')
+
+init(strip=None):
+    Pass ``True`` or ``False`` to override whether ANSI codes should be
+    stripped from the output. The default behaviour is to strip if on Windows
+    or if output is redirected (not a tty).
+
+init(convert=None):
+    Pass ``True`` or ``False`` to override whether to convert ANSI codes in the
+    output into win32 calls. The default behaviour is to convert if on Windows
+    and output is to a tty (terminal).
+
+init(wrap=True):
+    On Windows, Colorama works by replacing ``sys.stdout`` and ``sys.stderr``
+    with proxy objects, which override the ``.write()`` method to do their work.
+    If this wrapping causes you problems, then this can be disabled by passing
+    ``init(wrap=False)``. The default behaviour is to wrap if ``autoreset`` or
+    ``strip`` or ``convert`` are True.
+
+    When wrapping is disabled, colored printing on non-Windows platforms will
+    continue to work as normal. To do cross-platform colored output, you can
+    use Colorama's ``AnsiToWin32`` proxy directly:
+
+    .. code-block:: python
+
+        import sys
+        from colorama import init, AnsiToWin32
+        init(wrap=False)
+        stream = AnsiToWin32(sys.stderr).stream
+
+        # Python 2
+        print >>stream, Fore.BLUE + 'blue text on stderr'
+
+        # Python 3
+        print(Fore.BLUE + 'blue text on stderr', file=stream)
+
+Recognised ANSI Sequences
+.........................
+
+ANSI sequences generally take the form::
+
+    ESC [ <param> ; <param> ... <command>
+
+Where ``<param>`` is an integer, and ``<command>`` is a single letter. Zero or
+more params are passed to a ``<command>``. If no params are passed, it is
+generally synonymous with passing a single zero. No spaces exist in the
+sequence; they have been inserted here simply to read more easily.
+
+The only ANSI sequences that Colorama converts into win32 calls are::
+
+    ESC [ 0 m       # reset all (colors and brightness)
+    ESC [ 1 m       # bright
+    ESC [ 2 m       # dim (looks same as normal brightness)
+    ESC [ 22 m      # normal brightness
+
+    # FOREGROUND:
+    ESC [ 30 m      # black
+    ESC [ 31 m      # red
+    ESC [ 32 m      # green
+    ESC [ 33 m      # yellow
+    ESC [ 34 m      # blue
+    ESC [ 35 m      # magenta
+    ESC [ 36 m      # cyan
+    ESC [ 37 m      # white
+    ESC [ 39 m      # reset
+
+    # BACKGROUND
+    ESC [ 40 m      # black
+    ESC [ 41 m      # red
+    ESC [ 42 m      # green
+    ESC [ 43 m      # yellow
+    ESC [ 44 m      # blue
+    ESC [ 45 m      # magenta
+    ESC [ 46 m      # cyan
+    ESC [ 47 m      # white
+    ESC [ 49 m      # reset
+
+    # cursor positioning
+    ESC [ y;x H     # position cursor at x across, y down
+    ESC [ y;x f     # position cursor at x across, y down
+    ESC [ n A       # move cursor n lines up
+    ESC [ n B       # move cursor n lines down
+    ESC [ n C       # move cursor n characters forward
+    ESC [ n D       # move cursor n characters backward
+
+    # clear the screen
+    ESC [ mode J    # clear the screen
+
+    # clear the line
+    ESC [ mode K    # clear the line
+
+Multiple numeric params to the ``'m'`` command can be combined into a single
+sequence::
+
+    ESC [ 36 ; 45 ; 1 m     # bright cyan text on magenta background
+
+All other ANSI sequences of the form ``ESC [ <param> ; <param> ... <command>``
+are silently stripped from the output on Windows.
+
+Any other form of ANSI sequence, such as single-character codes or alternative
+initial characters, are not recognised or stripped. It would be cool to add
+them though. Let me know if it would be useful for you, via the Issues on
+GitHub.
+
+Status & Known Problems
+-----------------------
+
+I've personally only tested it on Windows XP (CMD, Console2), Ubuntu
+(gnome-terminal, xterm), and OS X.
+
+Some valid ANSI sequences aren't recognised.
+
+If you're hacking on the code, see `README-hacking.md`_. ESPECIALLY, see the
+explanation there of why we do not want PRs that allow Colorama to generate new
+types of ANSI codes.
+
+See outstanding issues and wish-list:
+https://github.com/tartley/colorama/issues
+
+If anything doesn't work for you, or doesn't do what you expected or hoped for,
+I'd love to hear about it on that issues list, would be delighted by patches,
+and would be happy to grant commit access to anyone who submits a working patch
+or two.
+
+.. _README-hacking.md: README-hacking.md
+
+License
+-------
+
+Copyright Jonathan Hartley & Arnon Yaari, 2013-2020. BSD 3-Clause license; see
+LICENSE file.
+ +Professional support +-------------------- + +.. |tideliftlogo| image:: https://cdn2.hubspot.net/hubfs/4008838/website/logos/logos_for_download/Tidelift_primary-shorthand-logo.png + :alt: Tidelift + :target: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme + +.. list-table:: + :widths: 10 100 + + * - |tideliftlogo| + - Professional support for colorama is available as part of the + `Tidelift Subscription`_. + Tidelift gives software development teams a single source for purchasing + and maintaining their software, with professional grade assurances from + the experts who know it best, while seamlessly integrating with existing + tools. + +.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme + +Thanks +------ + +See the CHANGELOG for more thanks! + +* Marc Schlaich (schlamar) for a ``setup.py`` fix for Python2.5. +* Marc Abramowitz, reported & fixed a crash on exit with closed ``stdout``, + providing a solution to issue #7's setuptools/distutils debate, + and other fixes. +* User 'eryksun', for guidance on correctly instantiating ``ctypes.windll``. +* Matthew McCormick for politely pointing out a longstanding crash on non-Win. +* Ben Hoyt, for a magnificent fix under 64-bit Windows. +* Jesse at Empty Square for submitting a fix for examples in the README. +* User 'jamessp', an observant documentation fix for cursor positioning. +* User 'vaal1239', Dave Mckee & Lackner Kristof for a tiny but much-needed Win7 + fix. +* Julien Stuyck, for wisely suggesting Python3 compatible updates to README. +* Daniel Griffith for multiple fabulous patches. +* Oscar Lesta for a valuable fix to stop ANSI chars being sent to non-tty + output. +* Roger Binns, for many suggestions, valuable feedback, & bug reports. +* Tim Golden for thought and much appreciated feedback on the initial idea. +* User 'Zearin' for updates to the README file. +* John Szakmeister for adding support for light colors +* Charles Merriam for adding documentation to demos +* Jurko for a fix on 64-bit Windows CPython2.5 w/o ctypes +* Florian Bruhin for a fix when stdout or stderr are None +* Thomas Weininger for fixing ValueError on Windows +* Remi Rampin for better Github integration and fixes to the README file +* Simeon Visser for closing a file handle using 'with' and updating classifiers + to include Python 3.3 and 3.4 +* Andy Neff for fixing RESET of LIGHT_EX colors. +* Jonathan Hartley for the initial idea and implementation. 
diff --git a/venv/Lib/site-packages/colorama-0.4.6.dist-info/RECORD b/venv/Lib/site-packages/colorama-0.4.6.dist-info/RECORD new file mode 100644 index 00000000..cd6b130d --- /dev/null +++ b/venv/Lib/site-packages/colorama-0.4.6.dist-info/RECORD @@ -0,0 +1,31 @@ +colorama-0.4.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +colorama-0.4.6.dist-info/METADATA,sha256=e67SnrUMOym9sz_4TjF3vxvAV4T3aF7NyqRHHH3YEMw,17158 +colorama-0.4.6.dist-info/RECORD,, +colorama-0.4.6.dist-info/WHEEL,sha256=cdcF4Fbd0FPtw2EMIOwH-3rSOTUdTCeOSXRMD1iLUb8,105 +colorama-0.4.6.dist-info/licenses/LICENSE.txt,sha256=ysNcAmhuXQSlpxQL-zs25zrtSWZW6JEQLkKIhteTAxg,1491 +colorama/__init__.py,sha256=wePQA4U20tKgYARySLEC047ucNX-g8pRLpYBuiHlLb8,266 +colorama/__pycache__/__init__.cpython-312.pyc,, +colorama/__pycache__/ansi.cpython-312.pyc,, +colorama/__pycache__/ansitowin32.cpython-312.pyc,, +colorama/__pycache__/initialise.cpython-312.pyc,, +colorama/__pycache__/win32.cpython-312.pyc,, +colorama/__pycache__/winterm.cpython-312.pyc,, +colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522 +colorama/ansitowin32.py,sha256=vPNYa3OZbxjbuFyaVo0Tmhmy1FZ1lKMWCnT7odXpItk,11128 +colorama/initialise.py,sha256=-hIny86ClXo39ixh5iSCfUIa2f_h_bgKRDW7gqs-KLU,3325 +colorama/tests/__init__.py,sha256=MkgPAEzGQd-Rq0w0PZXSX2LadRWhUECcisJY8lSrm4Q,75 +colorama/tests/__pycache__/__init__.cpython-312.pyc,, +colorama/tests/__pycache__/ansi_test.cpython-312.pyc,, +colorama/tests/__pycache__/ansitowin32_test.cpython-312.pyc,, +colorama/tests/__pycache__/initialise_test.cpython-312.pyc,, +colorama/tests/__pycache__/isatty_test.cpython-312.pyc,, +colorama/tests/__pycache__/utils.cpython-312.pyc,, +colorama/tests/__pycache__/winterm_test.cpython-312.pyc,, +colorama/tests/ansi_test.py,sha256=FeViDrUINIZcr505PAxvU4AjXz1asEiALs9GXMhwRaE,2839 +colorama/tests/ansitowin32_test.py,sha256=RN7AIhMJ5EqDsYaCjVo-o4u8JzDD4ukJbmevWKS70rY,10678 +colorama/tests/initialise_test.py,sha256=BbPy-XfyHwJ6zKozuQOvNvQZzsx9vdb_0bYXn7hsBTc,6741 +colorama/tests/isatty_test.py,sha256=Pg26LRpv0yQDB5Ac-sxgVXG7hsA1NYvapFgApZfYzZg,1866 +colorama/tests/utils.py,sha256=1IIRylG39z5-dzq09R_ngufxyPZxgldNbrxKxUGwGKE,1079 +colorama/tests/winterm_test.py,sha256=qoWFPEjym5gm2RuMwpf3pOis3a5r_PJZFCzK254JL8A,3709 +colorama/win32.py,sha256=YQOKwMTwtGBbsY4dL5HYTvwTeP9wIQra5MvPNddpxZs,6181 +colorama/winterm.py,sha256=XCQFDHjPi6AHYNdZwy0tA02H-Jh48Jp-HvCjeLeLp3U,7134 diff --git a/venv/Lib/site-packages/colorama-0.4.6.dist-info/WHEEL b/venv/Lib/site-packages/colorama-0.4.6.dist-info/WHEEL new file mode 100644 index 00000000..d79189fd --- /dev/null +++ b/venv/Lib/site-packages/colorama-0.4.6.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.11.1 +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any diff --git a/venv/Lib/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt b/venv/Lib/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt new file mode 100644 index 00000000..3105888e --- /dev/null +++ b/venv/Lib/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2010 Jonathan Hartley +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holders, nor those of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/Lib/site-packages/colorama/__init__.py b/venv/Lib/site-packages/colorama/__init__.py new file mode 100644 index 00000000..383101cd --- /dev/null +++ b/venv/Lib/site-packages/colorama/__init__.py @@ -0,0 +1,7 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from .initialise import init, deinit, reinit, colorama_text, just_fix_windows_console +from .ansi import Fore, Back, Style, Cursor +from .ansitowin32 import AnsiToWin32 + +__version__ = '0.4.6' + diff --git a/venv/Lib/site-packages/colorama/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/colorama/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..109241fd Binary files /dev/null and b/venv/Lib/site-packages/colorama/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/__pycache__/ansi.cpython-312.pyc b/venv/Lib/site-packages/colorama/__pycache__/ansi.cpython-312.pyc new file mode 100644 index 00000000..5c713d62 Binary files /dev/null and b/venv/Lib/site-packages/colorama/__pycache__/ansi.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/__pycache__/ansitowin32.cpython-312.pyc b/venv/Lib/site-packages/colorama/__pycache__/ansitowin32.cpython-312.pyc new file mode 100644 index 00000000..9da7b718 Binary files /dev/null and b/venv/Lib/site-packages/colorama/__pycache__/ansitowin32.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/__pycache__/initialise.cpython-312.pyc b/venv/Lib/site-packages/colorama/__pycache__/initialise.cpython-312.pyc new file mode 100644 index 00000000..f05ce99e Binary files /dev/null and b/venv/Lib/site-packages/colorama/__pycache__/initialise.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/__pycache__/win32.cpython-312.pyc b/venv/Lib/site-packages/colorama/__pycache__/win32.cpython-312.pyc new file mode 100644 index 00000000..168fc948 Binary files /dev/null and b/venv/Lib/site-packages/colorama/__pycache__/win32.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/__pycache__/winterm.cpython-312.pyc b/venv/Lib/site-packages/colorama/__pycache__/winterm.cpython-312.pyc new file mode 100644 index 00000000..143e3d67 Binary files /dev/null and b/venv/Lib/site-packages/colorama/__pycache__/winterm.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/ansi.py 
b/venv/Lib/site-packages/colorama/ansi.py
new file mode 100644
index 00000000..11ec695f
--- /dev/null
+++ b/venv/Lib/site-packages/colorama/ansi.py
@@ -0,0 +1,102 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+'''
+This module generates ANSI character codes for printing colors to terminals.
+See: http://en.wikipedia.org/wiki/ANSI_escape_code
+'''
+
+CSI = '\033['
+OSC = '\033]'
+BEL = '\a'
+
+
+def code_to_chars(code):
+    return CSI + str(code) + 'm'
+
+def set_title(title):
+    return OSC + '2;' + title + BEL
+
+def clear_screen(mode=2):
+    return CSI + str(mode) + 'J'
+
+def clear_line(mode=2):
+    return CSI + str(mode) + 'K'
+
+
+class AnsiCodes(object):
+    def __init__(self):
+        # the subclasses declare class attributes which are numbers.
+        # Upon instantiation we define instance attributes, which are the same
+        # as the class attributes but wrapped with the ANSI escape sequence
+        for name in dir(self):
+            if not name.startswith('_'):
+                value = getattr(self, name)
+                setattr(self, name, code_to_chars(value))
+
+
+class AnsiCursor(object):
+    def UP(self, n=1):
+        return CSI + str(n) + 'A'
+    def DOWN(self, n=1):
+        return CSI + str(n) + 'B'
+    def FORWARD(self, n=1):
+        return CSI + str(n) + 'C'
+    def BACK(self, n=1):
+        return CSI + str(n) + 'D'
+    def POS(self, x=1, y=1):
+        return CSI + str(y) + ';' + str(x) + 'H'
+
+
+class AnsiFore(AnsiCodes):
+    BLACK = 30
+    RED = 31
+    GREEN = 32
+    YELLOW = 33
+    BLUE = 34
+    MAGENTA = 35
+    CYAN = 36
+    WHITE = 37
+    RESET = 39
+
+    # These are fairly well supported, but not part of the standard.
+    LIGHTBLACK_EX = 90
+    LIGHTRED_EX = 91
+    LIGHTGREEN_EX = 92
+    LIGHTYELLOW_EX = 93
+    LIGHTBLUE_EX = 94
+    LIGHTMAGENTA_EX = 95
+    LIGHTCYAN_EX = 96
+    LIGHTWHITE_EX = 97
+
+
+class AnsiBack(AnsiCodes):
+    BLACK = 40
+    RED = 41
+    GREEN = 42
+    YELLOW = 43
+    BLUE = 44
+    MAGENTA = 45
+    CYAN = 46
+    WHITE = 47
+    RESET = 49
+
+    # These are fairly well supported, but not part of the standard.
+    LIGHTBLACK_EX = 100
+    LIGHTRED_EX = 101
+    LIGHTGREEN_EX = 102
+    LIGHTYELLOW_EX = 103
+    LIGHTBLUE_EX = 104
+    LIGHTMAGENTA_EX = 105
+    LIGHTCYAN_EX = 106
+    LIGHTWHITE_EX = 107
+
+
+class AnsiStyle(AnsiCodes):
+    BRIGHT = 1
+    DIM = 2
+    NORMAL = 22
+    RESET_ALL = 0
+
+Fore = AnsiFore()
+Back = AnsiBack()
+Style = AnsiStyle()
+Cursor = AnsiCursor()
diff --git a/venv/Lib/site-packages/colorama/ansitowin32.py b/venv/Lib/site-packages/colorama/ansitowin32.py
new file mode 100644
index 00000000..abf209e6
--- /dev/null
+++ b/venv/Lib/site-packages/colorama/ansitowin32.py
@@ -0,0 +1,277 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import re
+import sys
+import os
+
+from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL
+from .winterm import enable_vt_processing, WinTerm, WinColor, WinStyle
+from .win32 import windll, winapi_test
+
+
+winterm = None
+if windll is not None:
+    winterm = WinTerm()
+
+
+class StreamWrapper(object):
+    '''
+    Wraps a stream (such as stdout), acting as a transparent proxy for all
+    attribute access apart from method 'write()', which is delegated to our
+    Converter instance.
+    '''
+    def __init__(self, wrapped, converter):
+        # double-underscore everything to prevent clashes with names of
+        # attributes on the wrapped stream object.
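+        # (The double leading underscores below trigger Python name mangling,
+        # so the attributes are stored as _StreamWrapper__wrapped and
+        # _StreamWrapper__convertor and cannot collide with names looked up
+        # on the underlying stream via __getattr__.)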
+        self.__wrapped = wrapped
+        self.__convertor = converter
+
+    def __getattr__(self, name):
+        return getattr(self.__wrapped, name)
+
+    def __enter__(self, *args, **kwargs):
+        # special method lookup bypasses __getattr__/__getattribute__, see
+        # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
+        # thus, contextlib magic methods are not proxied via __getattr__
+        return self.__wrapped.__enter__(*args, **kwargs)
+
+    def __exit__(self, *args, **kwargs):
+        return self.__wrapped.__exit__(*args, **kwargs)
+
+    def __setstate__(self, state):
+        self.__dict__ = state
+
+    def __getstate__(self):
+        return self.__dict__
+
+    def write(self, text):
+        self.__convertor.write(text)
+
+    def isatty(self):
+        stream = self.__wrapped
+        if 'PYCHARM_HOSTED' in os.environ:
+            if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
+                return True
+        try:
+            stream_isatty = stream.isatty
+        except AttributeError:
+            return False
+        else:
+            return stream_isatty()
+
+    @property
+    def closed(self):
+        stream = self.__wrapped
+        try:
+            return stream.closed
+        # AttributeError in the case that the stream doesn't support being closed
+        # ValueError for the case that the stream has already been detached when atexit runs
+        except (AttributeError, ValueError):
+            return True
+
+
+class AnsiToWin32(object):
+    '''
+    Implements a 'write()' method which, on Windows, will strip ANSI character
+    sequences from the text, and if outputting to a tty, will convert them into
+    win32 function calls.
+    '''
+    ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?')   # Control Sequence Introducer
+    ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?')             # Operating System Command
+
+    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
+        # The wrapped stream (normally sys.stdout or sys.stderr)
+        self.wrapped = wrapped
+
+        # should we reset colors to defaults after every .write()
+        self.autoreset = autoreset
+
+        # create the proxy wrapping our output stream
+        self.stream = StreamWrapper(wrapped, self)
+
+        on_windows = os.name == 'nt'
+        # We test if the WinAPI works, because even if we are on Windows
+        # we may be using a terminal that doesn't support the WinAPI
+        # (e.g. Cygwin Terminal). In this case it's up to the terminal
+        # to support the ANSI codes.
+        conversion_supported = on_windows and winapi_test()
+        try:
+            fd = wrapped.fileno()
+        except Exception:
+            fd = -1
+        system_has_native_ansi = not on_windows or enable_vt_processing(fd)
+        have_tty = not self.stream.closed and self.stream.isatty()
+        need_conversion = conversion_supported and not system_has_native_ansi
+
+        # should we strip ANSI sequences from our output?
+        if strip is None:
+            strip = need_conversion or not have_tty
+        self.strip = strip
+
+        # should we convert ANSI sequences into win32 calls?
+        if convert is None:
+            convert = need_conversion and have_tty
+        self.convert = convert
+
+        # dict of ansi codes to win32 functions and parameters
+        self.win32_calls = self.get_win32_calls()
+
+        # are we wrapping stderr?
+        self.on_stderr = self.wrapped is sys.stderr
+
+    def should_wrap(self):
+        '''
+        True if this class is actually needed. If false, then the output
+        stream will not be affected, nor will win32 calls be issued, so
+        wrapping stdout is not actually required.
This will generally be + False on non-Windows platforms, unless optional functionality like + autoreset has been requested using kwargs to init() + ''' + return self.convert or self.strip or self.autoreset + + def get_win32_calls(self): + if self.convert and winterm: + return { + AnsiStyle.RESET_ALL: (winterm.reset_all, ), + AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), + AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), + AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), + AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), + AnsiFore.RED: (winterm.fore, WinColor.RED), + AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), + AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), + AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), + AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), + AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), + AnsiFore.WHITE: (winterm.fore, WinColor.GREY), + AnsiFore.RESET: (winterm.fore, ), + AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), + AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), + AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), + AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), + AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), + AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), + AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), + AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), + AnsiBack.BLACK: (winterm.back, WinColor.BLACK), + AnsiBack.RED: (winterm.back, WinColor.RED), + AnsiBack.GREEN: (winterm.back, WinColor.GREEN), + AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), + AnsiBack.BLUE: (winterm.back, WinColor.BLUE), + AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), + AnsiBack.CYAN: (winterm.back, WinColor.CYAN), + AnsiBack.WHITE: (winterm.back, WinColor.GREY), + AnsiBack.RESET: (winterm.back, ), + AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), + AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), + AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), + AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), + AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), + AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), + AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), + AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), + } + return dict() + + def write(self, text): + if self.strip or self.convert: + self.write_and_convert(text) + else: + self.wrapped.write(text) + self.wrapped.flush() + if self.autoreset: + self.reset_all() + + + def reset_all(self): + if self.convert: + self.call_win32('m', (0,)) + elif not self.strip and not self.stream.closed: + self.wrapped.write(Style.RESET_ALL) + + + def write_and_convert(self, text): + ''' + Write the given text to our wrapped stream, stripping any ANSI + sequences from the text, and optionally converting them into win32 + calls. 
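+        (Roughly: OSC sequences are handled first by convert_osc, then each
+        CSI match is dispatched to convert_ansi while the plain text between
+        matches is written straight through.)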
+ ''' + cursor = 0 + text = self.convert_osc(text) + for match in self.ANSI_CSI_RE.finditer(text): + start, end = match.span() + self.write_plain_text(text, cursor, start) + self.convert_ansi(*match.groups()) + cursor = end + self.write_plain_text(text, cursor, len(text)) + + + def write_plain_text(self, text, start, end): + if start < end: + self.wrapped.write(text[start:end]) + self.wrapped.flush() + + + def convert_ansi(self, paramstring, command): + if self.convert: + params = self.extract_params(command, paramstring) + self.call_win32(command, params) + + + def extract_params(self, command, paramstring): + if command in 'Hf': + params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) + while len(params) < 2: + # defaults: + params = params + (1,) + else: + params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) + if len(params) == 0: + # defaults: + if command in 'JKm': + params = (0,) + elif command in 'ABCD': + params = (1,) + + return params + + + def call_win32(self, command, params): + if command == 'm': + for param in params: + if param in self.win32_calls: + func_args = self.win32_calls[param] + func = func_args[0] + args = func_args[1:] + kwargs = dict(on_stderr=self.on_stderr) + func(*args, **kwargs) + elif command in 'J': + winterm.erase_screen(params[0], on_stderr=self.on_stderr) + elif command in 'K': + winterm.erase_line(params[0], on_stderr=self.on_stderr) + elif command in 'Hf': # cursor position - absolute + winterm.set_cursor_position(params, on_stderr=self.on_stderr) + elif command in 'ABCD': # cursor position - relative + n = params[0] + # A - up, B - down, C - forward, D - back + x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] + winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) + + + def convert_osc(self, text): + for match in self.ANSI_OSC_RE.finditer(text): + start, end = match.span() + text = text[:start] + text[end:] + paramstring, command = match.groups() + if command == BEL: + if paramstring.count(";") == 1: + params = paramstring.split(";") + # 0 - change title and icon (we will only change title) + # 1 - change icon (we don't support this) + # 2 - change title + if params[0] in '02': + winterm.set_title(params[1]) + return text + + + def flush(self): + self.wrapped.flush() diff --git a/venv/Lib/site-packages/colorama/initialise.py b/venv/Lib/site-packages/colorama/initialise.py new file mode 100644 index 00000000..d5fd4b71 --- /dev/null +++ b/venv/Lib/site-packages/colorama/initialise.py @@ -0,0 +1,121 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import atexit +import contextlib +import sys + +from .ansitowin32 import AnsiToWin32 + + +def _wipe_internal_state_for_tests(): + global orig_stdout, orig_stderr + orig_stdout = None + orig_stderr = None + + global wrapped_stdout, wrapped_stderr + wrapped_stdout = None + wrapped_stderr = None + + global atexit_done + atexit_done = False + + global fixed_windows_console + fixed_windows_console = False + + try: + # no-op if it wasn't registered + atexit.unregister(reset_all) + except AttributeError: + # python 2: no atexit.unregister. Oh well, we did our best. 
+ pass + + +def reset_all(): + if AnsiToWin32 is not None: # Issue #74: objects might become None at exit + AnsiToWin32(orig_stdout).reset_all() + + +def init(autoreset=False, convert=None, strip=None, wrap=True): + + if not wrap and any([autoreset, convert, strip]): + raise ValueError('wrap=False conflicts with any other arg=True') + + global wrapped_stdout, wrapped_stderr + global orig_stdout, orig_stderr + + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + if sys.stdout is None: + wrapped_stdout = None + else: + sys.stdout = wrapped_stdout = \ + wrap_stream(orig_stdout, convert, strip, autoreset, wrap) + if sys.stderr is None: + wrapped_stderr = None + else: + sys.stderr = wrapped_stderr = \ + wrap_stream(orig_stderr, convert, strip, autoreset, wrap) + + global atexit_done + if not atexit_done: + atexit.register(reset_all) + atexit_done = True + + +def deinit(): + if orig_stdout is not None: + sys.stdout = orig_stdout + if orig_stderr is not None: + sys.stderr = orig_stderr + + +def just_fix_windows_console(): + global fixed_windows_console + + if sys.platform != "win32": + return + if fixed_windows_console: + return + if wrapped_stdout is not None or wrapped_stderr is not None: + # Someone already ran init() and it did stuff, so we won't second-guess them + return + + # On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the + # native ANSI support in the console as a side-effect. We only need to actually + # replace sys.stdout/stderr if we're in the old-style conversion mode. + new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False) + if new_stdout.convert: + sys.stdout = new_stdout + new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False) + if new_stderr.convert: + sys.stderr = new_stderr + + fixed_windows_console = True + +@contextlib.contextmanager +def colorama_text(*args, **kwargs): + init(*args, **kwargs) + try: + yield + finally: + deinit() + + +def reinit(): + if wrapped_stdout is not None: + sys.stdout = wrapped_stdout + if wrapped_stderr is not None: + sys.stderr = wrapped_stderr + + +def wrap_stream(stream, convert, strip, autoreset, wrap): + if wrap: + wrapper = AnsiToWin32(stream, + convert=convert, strip=strip, autoreset=autoreset) + if wrapper.should_wrap(): + stream = wrapper.stream + return stream + + +# Use this for initial setup as well, to reduce code duplication +_wipe_internal_state_for_tests() diff --git a/venv/Lib/site-packages/colorama/tests/__init__.py b/venv/Lib/site-packages/colorama/tests/__init__.py new file mode 100644 index 00000000..8c5661e9 --- /dev/null +++ b/venv/Lib/site-packages/colorama/tests/__init__.py @@ -0,0 +1 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
diff --git a/venv/Lib/site-packages/colorama/tests/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/colorama/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..f521c0d9 Binary files /dev/null and b/venv/Lib/site-packages/colorama/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/tests/__pycache__/ansi_test.cpython-312.pyc b/venv/Lib/site-packages/colorama/tests/__pycache__/ansi_test.cpython-312.pyc new file mode 100644 index 00000000..5c350a85 Binary files /dev/null and b/venv/Lib/site-packages/colorama/tests/__pycache__/ansi_test.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/tests/__pycache__/ansitowin32_test.cpython-312.pyc b/venv/Lib/site-packages/colorama/tests/__pycache__/ansitowin32_test.cpython-312.pyc new file mode 100644 index 00000000..5aecb348 Binary files /dev/null and b/venv/Lib/site-packages/colorama/tests/__pycache__/ansitowin32_test.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/tests/__pycache__/initialise_test.cpython-312.pyc b/venv/Lib/site-packages/colorama/tests/__pycache__/initialise_test.cpython-312.pyc new file mode 100644 index 00000000..cd7a3dea Binary files /dev/null and b/venv/Lib/site-packages/colorama/tests/__pycache__/initialise_test.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/tests/__pycache__/isatty_test.cpython-312.pyc b/venv/Lib/site-packages/colorama/tests/__pycache__/isatty_test.cpython-312.pyc new file mode 100644 index 00000000..45dd79ad Binary files /dev/null and b/venv/Lib/site-packages/colorama/tests/__pycache__/isatty_test.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/tests/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/colorama/tests/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..0109bb22 Binary files /dev/null and b/venv/Lib/site-packages/colorama/tests/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/tests/__pycache__/winterm_test.cpython-312.pyc b/venv/Lib/site-packages/colorama/tests/__pycache__/winterm_test.cpython-312.pyc new file mode 100644 index 00000000..b593e07e Binary files /dev/null and b/venv/Lib/site-packages/colorama/tests/__pycache__/winterm_test.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/colorama/tests/ansi_test.py b/venv/Lib/site-packages/colorama/tests/ansi_test.py new file mode 100644 index 00000000..0a20c80f --- /dev/null +++ b/venv/Lib/site-packages/colorama/tests/ansi_test.py @@ -0,0 +1,76 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import sys +from unittest import TestCase, main + +from ..ansi import Back, Fore, Style +from ..ansitowin32 import AnsiToWin32 + +stdout_orig = sys.stdout +stderr_orig = sys.stderr + + +class AnsiTest(TestCase): + + def setUp(self): + # sanity check: stdout should be a file or StringIO object. 
+ # It will only be AnsiToWin32 if init() has previously wrapped it + self.assertNotEqual(type(sys.stdout), AnsiToWin32) + self.assertNotEqual(type(sys.stderr), AnsiToWin32) + + def tearDown(self): + sys.stdout = stdout_orig + sys.stderr = stderr_orig + + + def testForeAttributes(self): + self.assertEqual(Fore.BLACK, '\033[30m') + self.assertEqual(Fore.RED, '\033[31m') + self.assertEqual(Fore.GREEN, '\033[32m') + self.assertEqual(Fore.YELLOW, '\033[33m') + self.assertEqual(Fore.BLUE, '\033[34m') + self.assertEqual(Fore.MAGENTA, '\033[35m') + self.assertEqual(Fore.CYAN, '\033[36m') + self.assertEqual(Fore.WHITE, '\033[37m') + self.assertEqual(Fore.RESET, '\033[39m') + + # Check the light, extended versions. + self.assertEqual(Fore.LIGHTBLACK_EX, '\033[90m') + self.assertEqual(Fore.LIGHTRED_EX, '\033[91m') + self.assertEqual(Fore.LIGHTGREEN_EX, '\033[92m') + self.assertEqual(Fore.LIGHTYELLOW_EX, '\033[93m') + self.assertEqual(Fore.LIGHTBLUE_EX, '\033[94m') + self.assertEqual(Fore.LIGHTMAGENTA_EX, '\033[95m') + self.assertEqual(Fore.LIGHTCYAN_EX, '\033[96m') + self.assertEqual(Fore.LIGHTWHITE_EX, '\033[97m') + + + def testBackAttributes(self): + self.assertEqual(Back.BLACK, '\033[40m') + self.assertEqual(Back.RED, '\033[41m') + self.assertEqual(Back.GREEN, '\033[42m') + self.assertEqual(Back.YELLOW, '\033[43m') + self.assertEqual(Back.BLUE, '\033[44m') + self.assertEqual(Back.MAGENTA, '\033[45m') + self.assertEqual(Back.CYAN, '\033[46m') + self.assertEqual(Back.WHITE, '\033[47m') + self.assertEqual(Back.RESET, '\033[49m') + + # Check the light, extended versions. + self.assertEqual(Back.LIGHTBLACK_EX, '\033[100m') + self.assertEqual(Back.LIGHTRED_EX, '\033[101m') + self.assertEqual(Back.LIGHTGREEN_EX, '\033[102m') + self.assertEqual(Back.LIGHTYELLOW_EX, '\033[103m') + self.assertEqual(Back.LIGHTBLUE_EX, '\033[104m') + self.assertEqual(Back.LIGHTMAGENTA_EX, '\033[105m') + self.assertEqual(Back.LIGHTCYAN_EX, '\033[106m') + self.assertEqual(Back.LIGHTWHITE_EX, '\033[107m') + + + def testStyleAttributes(self): + self.assertEqual(Style.DIM, '\033[2m') + self.assertEqual(Style.NORMAL, '\033[22m') + self.assertEqual(Style.BRIGHT, '\033[1m') + + +if __name__ == '__main__': + main() diff --git a/venv/Lib/site-packages/colorama/tests/ansitowin32_test.py b/venv/Lib/site-packages/colorama/tests/ansitowin32_test.py new file mode 100644 index 00000000..91ca551f --- /dev/null +++ b/venv/Lib/site-packages/colorama/tests/ansitowin32_test.py @@ -0,0 +1,294 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+from io import StringIO, TextIOWrapper +from unittest import TestCase, main +try: + from contextlib import ExitStack +except ImportError: + # python 2 + from contextlib2 import ExitStack + +try: + from unittest.mock import MagicMock, Mock, patch +except ImportError: + from mock import MagicMock, Mock, patch + +from ..ansitowin32 import AnsiToWin32, StreamWrapper +from ..win32 import ENABLE_VIRTUAL_TERMINAL_PROCESSING +from .utils import osname + + +class StreamWrapperTest(TestCase): + + def testIsAProxy(self): + mockStream = Mock() + wrapper = StreamWrapper(mockStream, None) + self.assertTrue( wrapper.random_attr is mockStream.random_attr ) + + def testDelegatesWrite(self): + mockStream = Mock() + mockConverter = Mock() + wrapper = StreamWrapper(mockStream, mockConverter) + wrapper.write('hello') + self.assertTrue(mockConverter.write.call_args, (('hello',), {})) + + def testDelegatesContext(self): + mockConverter = Mock() + s = StringIO() + with StreamWrapper(s, mockConverter) as fp: + fp.write(u'hello') + self.assertTrue(s.closed) + + def testProxyNoContextManager(self): + mockStream = MagicMock() + mockStream.__enter__.side_effect = AttributeError() + mockConverter = Mock() + with self.assertRaises(AttributeError) as excinfo: + with StreamWrapper(mockStream, mockConverter) as wrapper: + wrapper.write('hello') + + def test_closed_shouldnt_raise_on_closed_stream(self): + stream = StringIO() + stream.close() + wrapper = StreamWrapper(stream, None) + self.assertEqual(wrapper.closed, True) + + def test_closed_shouldnt_raise_on_detached_stream(self): + stream = TextIOWrapper(StringIO()) + stream.detach() + wrapper = StreamWrapper(stream, None) + self.assertEqual(wrapper.closed, True) + +class AnsiToWin32Test(TestCase): + + def testInit(self): + mockStdout = Mock() + auto = Mock() + stream = AnsiToWin32(mockStdout, autoreset=auto) + self.assertEqual(stream.wrapped, mockStdout) + self.assertEqual(stream.autoreset, auto) + + @patch('colorama.ansitowin32.winterm', None) + @patch('colorama.ansitowin32.winapi_test', lambda *_: True) + def testStripIsTrueOnWindows(self): + with osname('nt'): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout) + self.assertTrue(stream.strip) + + def testStripIsFalseOffWindows(self): + with osname('posix'): + mockStdout = Mock(closed=False) + stream = AnsiToWin32(mockStdout) + self.assertFalse(stream.strip) + + def testWriteStripsAnsi(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout) + stream.wrapped = Mock() + stream.write_and_convert = Mock() + stream.strip = True + + stream.write('abc') + + self.assertFalse(stream.wrapped.write.called) + self.assertEqual(stream.write_and_convert.call_args, (('abc',), {})) + + def testWriteDoesNotStripAnsi(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout) + stream.wrapped = Mock() + stream.write_and_convert = Mock() + stream.strip = False + stream.convert = False + + stream.write('abc') + + self.assertFalse(stream.write_and_convert.called) + self.assertEqual(stream.wrapped.write.call_args, (('abc',), {})) + + def assert_autoresets(self, convert, autoreset=True): + stream = AnsiToWin32(Mock()) + stream.convert = convert + stream.reset_all = Mock() + stream.autoreset = autoreset + stream.winterm = Mock() + + stream.write('abc') + + self.assertEqual(stream.reset_all.called, autoreset) + + def testWriteAutoresets(self): + self.assert_autoresets(convert=True) + self.assert_autoresets(convert=False) + self.assert_autoresets(convert=True, autoreset=False) + self.assert_autoresets(convert=False, 
autoreset=False) + + def testWriteAndConvertWritesPlainText(self): + stream = AnsiToWin32(Mock()) + stream.write_and_convert( 'abc' ) + self.assertEqual( stream.wrapped.write.call_args, (('abc',), {}) ) + + def testWriteAndConvertStripsAllValidAnsi(self): + stream = AnsiToWin32(Mock()) + stream.call_win32 = Mock() + data = [ + 'abc\033[mdef', + 'abc\033[0mdef', + 'abc\033[2mdef', + 'abc\033[02mdef', + 'abc\033[002mdef', + 'abc\033[40mdef', + 'abc\033[040mdef', + 'abc\033[0;1mdef', + 'abc\033[40;50mdef', + 'abc\033[50;30;40mdef', + 'abc\033[Adef', + 'abc\033[0Gdef', + 'abc\033[1;20;128Hdef', + ] + for datum in data: + stream.wrapped.write.reset_mock() + stream.write_and_convert( datum ) + self.assertEqual( + [args[0] for args in stream.wrapped.write.call_args_list], + [ ('abc',), ('def',) ] + ) + + def testWriteAndConvertSkipsEmptySnippets(self): + stream = AnsiToWin32(Mock()) + stream.call_win32 = Mock() + stream.write_and_convert( '\033[40m\033[41m' ) + self.assertFalse( stream.wrapped.write.called ) + + def testWriteAndConvertCallsWin32WithParamsAndCommand(self): + stream = AnsiToWin32(Mock()) + stream.convert = True + stream.call_win32 = Mock() + stream.extract_params = Mock(return_value='params') + data = { + 'abc\033[adef': ('a', 'params'), + 'abc\033[;;bdef': ('b', 'params'), + 'abc\033[0cdef': ('c', 'params'), + 'abc\033[;;0;;Gdef': ('G', 'params'), + 'abc\033[1;20;128Hdef': ('H', 'params'), + } + for datum, expected in data.items(): + stream.call_win32.reset_mock() + stream.write_and_convert( datum ) + self.assertEqual( stream.call_win32.call_args[0], expected ) + + def test_reset_all_shouldnt_raise_on_closed_orig_stdout(self): + stream = StringIO() + converter = AnsiToWin32(stream) + stream.close() + + converter.reset_all() + + def test_wrap_shouldnt_raise_on_closed_orig_stdout(self): + stream = StringIO() + stream.close() + with \ + patch("colorama.ansitowin32.os.name", "nt"), \ + patch("colorama.ansitowin32.winapi_test", lambda: True): + converter = AnsiToWin32(stream) + self.assertTrue(converter.strip) + self.assertFalse(converter.convert) + + def test_wrap_shouldnt_raise_on_missing_closed_attr(self): + with \ + patch("colorama.ansitowin32.os.name", "nt"), \ + patch("colorama.ansitowin32.winapi_test", lambda: True): + converter = AnsiToWin32(object()) + self.assertTrue(converter.strip) + self.assertFalse(converter.convert) + + def testExtractParams(self): + stream = AnsiToWin32(Mock()) + data = { + '': (0,), + ';;': (0,), + '2': (2,), + ';;002;;': (2,), + '0;1': (0, 1), + ';;003;;456;;': (3, 456), + '11;22;33;44;55': (11, 22, 33, 44, 55), + } + for datum, expected in data.items(): + self.assertEqual(stream.extract_params('m', datum), expected) + + def testCallWin32UsesLookup(self): + listener = Mock() + stream = AnsiToWin32(listener) + stream.win32_calls = { + 1: (lambda *_, **__: listener(11),), + 2: (lambda *_, **__: listener(22),), + 3: (lambda *_, **__: listener(33),), + } + stream.call_win32('m', (3, 1, 99, 2)) + self.assertEqual( + [a[0][0] for a in listener.call_args_list], + [33, 11, 22] ) + + def test_osc_codes(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout, convert=True) + with patch('colorama.ansitowin32.winterm') as winterm: + data = [ + '\033]0\x07', # missing arguments + '\033]0;foo\x08', # wrong OSC command + '\033]0;colorama_test_title\x07', # should work + '\033]1;colorama_test_title\x07', # wrong set command + '\033]2;colorama_test_title\x07', # should work + '\033]' + ';' * 64 + '\x08', # see issue #247 + ] + for code in data: + 
stream.write(code) + self.assertEqual(winterm.set_title.call_count, 2) + + def test_native_windows_ansi(self): + with ExitStack() as stack: + def p(a, b): + stack.enter_context(patch(a, b, create=True)) + # Pretend to be on Windows + p("colorama.ansitowin32.os.name", "nt") + p("colorama.ansitowin32.winapi_test", lambda: True) + p("colorama.win32.winapi_test", lambda: True) + p("colorama.winterm.win32.windll", "non-None") + p("colorama.winterm.get_osfhandle", lambda _: 1234) + + # Pretend that our mock stream has native ANSI support + p( + "colorama.winterm.win32.GetConsoleMode", + lambda _: ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + SetConsoleMode = Mock() + p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) + + stdout = Mock() + stdout.closed = False + stdout.isatty.return_value = True + stdout.fileno.return_value = 1 + + # Our fake console says it has native vt support, so AnsiToWin32 should + # enable that support and do nothing else. + stream = AnsiToWin32(stdout) + SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) + self.assertFalse(stream.strip) + self.assertFalse(stream.convert) + self.assertFalse(stream.should_wrap()) + + # Now let's pretend we're on an old Windows console, that doesn't have + # native ANSI support. + p("colorama.winterm.win32.GetConsoleMode", lambda _: 0) + SetConsoleMode = Mock() + p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) + + stream = AnsiToWin32(stdout) + SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) + self.assertTrue(stream.strip) + self.assertTrue(stream.convert) + self.assertTrue(stream.should_wrap()) + + +if __name__ == '__main__': + main() diff --git a/venv/Lib/site-packages/colorama/tests/initialise_test.py b/venv/Lib/site-packages/colorama/tests/initialise_test.py new file mode 100644 index 00000000..89f9b075 --- /dev/null +++ b/venv/Lib/site-packages/colorama/tests/initialise_test.py @@ -0,0 +1,189 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+import sys +from unittest import TestCase, main, skipUnless + +try: + from unittest.mock import patch, Mock +except ImportError: + from mock import patch, Mock + +from ..ansitowin32 import StreamWrapper +from ..initialise import init, just_fix_windows_console, _wipe_internal_state_for_tests +from .utils import osname, replace_by + +orig_stdout = sys.stdout +orig_stderr = sys.stderr + + +class InitTest(TestCase): + + @skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty") + def setUp(self): + # sanity check + self.assertNotWrapped() + + def tearDown(self): + _wipe_internal_state_for_tests() + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + def assertWrapped(self): + self.assertIsNot(sys.stdout, orig_stdout, 'stdout should be wrapped') + self.assertIsNot(sys.stderr, orig_stderr, 'stderr should be wrapped') + self.assertTrue(isinstance(sys.stdout, StreamWrapper), + 'bad stdout wrapper') + self.assertTrue(isinstance(sys.stderr, StreamWrapper), + 'bad stderr wrapper') + + def assertNotWrapped(self): + self.assertIs(sys.stdout, orig_stdout, 'stdout should not be wrapped') + self.assertIs(sys.stderr, orig_stderr, 'stderr should not be wrapped') + + @patch('colorama.initialise.reset_all') + @patch('colorama.ansitowin32.winapi_test', lambda *_: True) + @patch('colorama.ansitowin32.enable_vt_processing', lambda *_: False) + def testInitWrapsOnWindows(self, _): + with osname("nt"): + init() + self.assertWrapped() + + @patch('colorama.initialise.reset_all') + @patch('colorama.ansitowin32.winapi_test', lambda *_: False) + def testInitDoesntWrapOnEmulatedWindows(self, _): + with osname("nt"): + init() + self.assertNotWrapped() + + def testInitDoesntWrapOnNonWindows(self): + with osname("posix"): + init() + self.assertNotWrapped() + + def testInitDoesntWrapIfNone(self): + with replace_by(None): + init() + # We can't use assertNotWrapped here because replace_by(None) + # changes stdout/stderr already. 
+ self.assertIsNone(sys.stdout) + self.assertIsNone(sys.stderr) + + def testInitAutoresetOnWrapsOnAllPlatforms(self): + with osname("posix"): + init(autoreset=True) + self.assertWrapped() + + def testInitWrapOffDoesntWrapOnWindows(self): + with osname("nt"): + init(wrap=False) + self.assertNotWrapped() + + def testInitWrapOffIncompatibleWithAutoresetOn(self): + self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False)) + + @patch('colorama.win32.SetConsoleTextAttribute') + @patch('colorama.initialise.AnsiToWin32') + def testAutoResetPassedOn(self, mockATW32, _): + with osname("nt"): + init(autoreset=True) + self.assertEqual(len(mockATW32.call_args_list), 2) + self.assertEqual(mockATW32.call_args_list[1][1]['autoreset'], True) + self.assertEqual(mockATW32.call_args_list[0][1]['autoreset'], True) + + @patch('colorama.initialise.AnsiToWin32') + def testAutoResetChangeable(self, mockATW32): + with osname("nt"): + init() + + init(autoreset=True) + self.assertEqual(len(mockATW32.call_args_list), 4) + self.assertEqual(mockATW32.call_args_list[2][1]['autoreset'], True) + self.assertEqual(mockATW32.call_args_list[3][1]['autoreset'], True) + + init() + self.assertEqual(len(mockATW32.call_args_list), 6) + self.assertEqual( + mockATW32.call_args_list[4][1]['autoreset'], False) + self.assertEqual( + mockATW32.call_args_list[5][1]['autoreset'], False) + + + @patch('colorama.initialise.atexit.register') + def testAtexitRegisteredOnlyOnce(self, mockRegister): + init() + self.assertTrue(mockRegister.called) + mockRegister.reset_mock() + init() + self.assertFalse(mockRegister.called) + + +class JustFixWindowsConsoleTest(TestCase): + def _reset(self): + _wipe_internal_state_for_tests() + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + def tearDown(self): + self._reset() + + @patch("colorama.ansitowin32.winapi_test", lambda: True) + def testJustFixWindowsConsole(self): + if sys.platform != "win32": + # just_fix_windows_console should be a no-op + just_fix_windows_console() + self.assertIs(sys.stdout, orig_stdout) + self.assertIs(sys.stderr, orig_stderr) + else: + def fake_std(): + # Emulate stdout=not a tty, stderr=tty + # to check that we handle both cases correctly + stdout = Mock() + stdout.closed = False + stdout.isatty.return_value = False + stdout.fileno.return_value = 1 + sys.stdout = stdout + + stderr = Mock() + stderr.closed = False + stderr.isatty.return_value = True + stderr.fileno.return_value = 2 + sys.stderr = stderr + + for native_ansi in [False, True]: + with patch( + 'colorama.ansitowin32.enable_vt_processing', + lambda *_: native_ansi + ): + self._reset() + fake_std() + + # Regular single-call test + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(sys.stdout, prev_stdout) + if native_ansi: + self.assertIs(sys.stderr, prev_stderr) + else: + self.assertIsNot(sys.stderr, prev_stderr) + + # second call without resetting is always a no-op + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(sys.stdout, prev_stdout) + self.assertIs(sys.stderr, prev_stderr) + + self._reset() + fake_std() + + # If init() runs first, just_fix_windows_console should be a no-op + init() + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(prev_stdout, sys.stdout) + self.assertIs(prev_stderr, sys.stderr) + + +if __name__ == '__main__': + main() diff --git a/venv/Lib/site-packages/colorama/tests/isatty_test.py 
b/venv/Lib/site-packages/colorama/tests/isatty_test.py new file mode 100644 index 00000000..0f84e4be --- /dev/null +++ b/venv/Lib/site-packages/colorama/tests/isatty_test.py @@ -0,0 +1,57 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import sys +from unittest import TestCase, main + +from ..ansitowin32 import StreamWrapper, AnsiToWin32 +from .utils import pycharm, replace_by, replace_original_by, StreamTTY, StreamNonTTY + + +def is_a_tty(stream): + return StreamWrapper(stream, None).isatty() + +class IsattyTest(TestCase): + + def test_TTY(self): + tty = StreamTTY() + self.assertTrue(is_a_tty(tty)) + with pycharm(): + self.assertTrue(is_a_tty(tty)) + + def test_nonTTY(self): + non_tty = StreamNonTTY() + self.assertFalse(is_a_tty(non_tty)) + with pycharm(): + self.assertFalse(is_a_tty(non_tty)) + + def test_withPycharm(self): + with pycharm(): + self.assertTrue(is_a_tty(sys.stderr)) + self.assertTrue(is_a_tty(sys.stdout)) + + def test_withPycharmTTYOverride(self): + tty = StreamTTY() + with pycharm(), replace_by(tty): + self.assertTrue(is_a_tty(tty)) + + def test_withPycharmNonTTYOverride(self): + non_tty = StreamNonTTY() + with pycharm(), replace_by(non_tty): + self.assertFalse(is_a_tty(non_tty)) + + def test_withPycharmNoneOverride(self): + with pycharm(): + with replace_by(None), replace_original_by(None): + self.assertFalse(is_a_tty(None)) + self.assertFalse(is_a_tty(StreamNonTTY())) + self.assertTrue(is_a_tty(StreamTTY())) + + def test_withPycharmStreamWrapped(self): + with pycharm(): + self.assertTrue(AnsiToWin32(StreamTTY()).stream.isatty()) + self.assertFalse(AnsiToWin32(StreamNonTTY()).stream.isatty()) + self.assertTrue(AnsiToWin32(sys.stdout).stream.isatty()) + self.assertTrue(AnsiToWin32(sys.stderr).stream.isatty()) + + +if __name__ == '__main__': + main() diff --git a/venv/Lib/site-packages/colorama/tests/utils.py b/venv/Lib/site-packages/colorama/tests/utils.py new file mode 100644 index 00000000..472fafb4 --- /dev/null +++ b/venv/Lib/site-packages/colorama/tests/utils.py @@ -0,0 +1,49 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from contextlib import contextmanager +from io import StringIO +import sys +import os + + +class StreamTTY(StringIO): + def isatty(self): + return True + +class StreamNonTTY(StringIO): + def isatty(self): + return False + +@contextmanager +def osname(name): + orig = os.name + os.name = name + yield + os.name = orig + +@contextmanager +def replace_by(stream): + orig_stdout = sys.stdout + orig_stderr = sys.stderr + sys.stdout = stream + sys.stderr = stream + yield + sys.stdout = orig_stdout + sys.stderr = orig_stderr + +@contextmanager +def replace_original_by(stream): + orig_stdout = sys.__stdout__ + orig_stderr = sys.__stderr__ + sys.__stdout__ = stream + sys.__stderr__ = stream + yield + sys.__stdout__ = orig_stdout + sys.__stderr__ = orig_stderr + +@contextmanager +def pycharm(): + os.environ["PYCHARM_HOSTED"] = "1" + non_tty = StreamNonTTY() + with replace_by(non_tty), replace_original_by(non_tty): + yield + del os.environ["PYCHARM_HOSTED"] diff --git a/venv/Lib/site-packages/colorama/tests/winterm_test.py b/venv/Lib/site-packages/colorama/tests/winterm_test.py new file mode 100644 index 00000000..d0955f9e --- /dev/null +++ b/venv/Lib/site-packages/colorama/tests/winterm_test.py @@ -0,0 +1,131 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+import sys +from unittest import TestCase, main, skipUnless + +try: + from unittest.mock import Mock, patch +except ImportError: + from mock import Mock, patch + +from ..winterm import WinColor, WinStyle, WinTerm + + +class WinTermTest(TestCase): + + @patch('colorama.winterm.win32') + def testInit(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 7 + 6 * 16 + 8 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + self.assertEqual(term._fore, 7) + self.assertEqual(term._back, 6) + self.assertEqual(term._style, 8) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testGetAttrs(self): + term = WinTerm() + + term._fore = 0 + term._back = 0 + term._style = 0 + self.assertEqual(term.get_attrs(), 0) + + term._fore = WinColor.YELLOW + self.assertEqual(term.get_attrs(), WinColor.YELLOW) + + term._back = WinColor.MAGENTA + self.assertEqual( + term.get_attrs(), + WinColor.YELLOW + WinColor.MAGENTA * 16) + + term._style = WinStyle.BRIGHT + self.assertEqual( + term.get_attrs(), + WinColor.YELLOW + WinColor.MAGENTA * 16 + WinStyle.BRIGHT) + + @patch('colorama.winterm.win32') + def testResetAll(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 1 + 2 * 16 + 8 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + + term.set_console = Mock() + term._fore = -1 + term._back = -1 + term._style = -1 + + term.reset_all() + + self.assertEqual(term._fore, 1) + self.assertEqual(term._back, 2) + self.assertEqual(term._style, 8) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testFore(self): + term = WinTerm() + term.set_console = Mock() + term._fore = 0 + + term.fore(5) + + self.assertEqual(term._fore, 5) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testBack(self): + term = WinTerm() + term.set_console = Mock() + term._back = 0 + + term.back(5) + + self.assertEqual(term._back, 5) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testStyle(self): + term = WinTerm() + term.set_console = Mock() + term._style = 0 + + term.style(22) + + self.assertEqual(term._style, 22) + self.assertEqual(term.set_console.called, True) + + @patch('colorama.winterm.win32') + def testSetConsole(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 0 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + term.windll = Mock() + + term.set_console() + + self.assertEqual( + mockWin32.SetConsoleTextAttribute.call_args, + ((mockWin32.STDOUT, term.get_attrs()), {}) + ) + + @patch('colorama.winterm.win32') + def testSetConsoleOnStderr(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 0 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + term.windll = Mock() + + term.set_console(on_stderr=True) + + self.assertEqual( + mockWin32.SetConsoleTextAttribute.call_args, + ((mockWin32.STDERR, term.get_attrs()), {}) + ) + + +if __name__ == '__main__': + main() diff --git a/venv/Lib/site-packages/colorama/win32.py b/venv/Lib/site-packages/colorama/win32.py new file mode 100644 index 00000000..841b0e27 --- /dev/null +++ b/venv/Lib/site-packages/colorama/win32.py @@ -0,0 +1,180 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+ +# from winbase.h +STDOUT = -11 +STDERR = -12 + +ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + +try: + import ctypes + from ctypes import LibraryLoader + windll = LibraryLoader(ctypes.WinDLL) + from ctypes import wintypes +except (AttributeError, ImportError): + windll = None + SetConsoleTextAttribute = lambda *_: None + winapi_test = lambda *_: None +else: + from ctypes import byref, Structure, c_char, POINTER + + COORD = wintypes._COORD + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + """struct in wincon.h.""" + _fields_ = [ + ("dwSize", COORD), + ("dwCursorPosition", COORD), + ("wAttributes", wintypes.WORD), + ("srWindow", wintypes.SMALL_RECT), + ("dwMaximumWindowSize", COORD), + ] + def __str__(self): + return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( + self.dwSize.Y, self.dwSize.X + , self.dwCursorPosition.Y, self.dwCursorPosition.X + , self.wAttributes + , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right + , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X + ) + + _GetStdHandle = windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [ + wintypes.DWORD, + ] + _GetStdHandle.restype = wintypes.HANDLE + + _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [ + wintypes.HANDLE, + POINTER(CONSOLE_SCREEN_BUFFER_INFO), + ] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + + _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute + _SetConsoleTextAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + ] + _SetConsoleTextAttribute.restype = wintypes.BOOL + + _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition + _SetConsoleCursorPosition.argtypes = [ + wintypes.HANDLE, + COORD, + ] + _SetConsoleCursorPosition.restype = wintypes.BOOL + + _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA + _FillConsoleOutputCharacterA.argtypes = [ + wintypes.HANDLE, + c_char, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputCharacterA.restype = wintypes.BOOL + + _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute + _FillConsoleOutputAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputAttribute.restype = wintypes.BOOL + + _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW + _SetConsoleTitleW.argtypes = [ + wintypes.LPCWSTR + ] + _SetConsoleTitleW.restype = wintypes.BOOL + + _GetConsoleMode = windll.kernel32.GetConsoleMode + _GetConsoleMode.argtypes = [ + wintypes.HANDLE, + POINTER(wintypes.DWORD) + ] + _GetConsoleMode.restype = wintypes.BOOL + + _SetConsoleMode = windll.kernel32.SetConsoleMode + _SetConsoleMode.argtypes = [ + wintypes.HANDLE, + wintypes.DWORD + ] + _SetConsoleMode.restype = wintypes.BOOL + + def _winapi_test(handle): + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return bool(success) + + def winapi_test(): + return any(_winapi_test(h) for h in + (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) + + def GetConsoleScreenBufferInfo(stream_id=STDOUT): + handle = _GetStdHandle(stream_id) + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return csbi + + def SetConsoleTextAttribute(stream_id, attrs): + handle = _GetStdHandle(stream_id) + return _SetConsoleTextAttribute(handle, attrs) + + def SetConsoleCursorPosition(stream_id, position, adjust=True): + position = COORD(*position) + # If the 
position is out of range, do nothing. + if position.Y <= 0 or position.X <= 0: + return + # Adjust for Windows' SetConsoleCursorPosition: + # 1. being 0-based, while ANSI is 1-based. + # 2. expecting (x,y), while ANSI uses (y,x). + adjusted_position = COORD(position.Y - 1, position.X - 1) + if adjust: + # Adjust for viewport's scroll position + sr = GetConsoleScreenBufferInfo(STDOUT).srWindow + adjusted_position.Y += sr.Top + adjusted_position.X += sr.Left + # Resume normal processing + handle = _GetStdHandle(stream_id) + return _SetConsoleCursorPosition(handle, adjusted_position) + + def FillConsoleOutputCharacter(stream_id, char, length, start): + handle = _GetStdHandle(stream_id) + char = c_char(char.encode()) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + success = _FillConsoleOutputCharacterA( + handle, char, length, start, byref(num_written)) + return num_written.value + + def FillConsoleOutputAttribute(stream_id, attr, length, start): + ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' + handle = _GetStdHandle(stream_id) + attribute = wintypes.WORD(attr) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + return _FillConsoleOutputAttribute( + handle, attribute, length, start, byref(num_written)) + + def SetConsoleTitle(title): + return _SetConsoleTitleW(title) + + def GetConsoleMode(handle): + mode = wintypes.DWORD() + success = _GetConsoleMode(handle, byref(mode)) + if not success: + raise ctypes.WinError() + return mode.value + + def SetConsoleMode(handle, mode): + success = _SetConsoleMode(handle, mode) + if not success: + raise ctypes.WinError() diff --git a/venv/Lib/site-packages/colorama/winterm.py b/venv/Lib/site-packages/colorama/winterm.py new file mode 100644 index 00000000..aad867e8 --- /dev/null +++ b/venv/Lib/site-packages/colorama/winterm.py @@ -0,0 +1,195 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +try: + from msvcrt import get_osfhandle +except ImportError: + def get_osfhandle(_): + raise OSError("This isn't windows!") + + +from . import win32 + +# from wincon.h +class WinColor(object): + BLACK = 0 + BLUE = 1 + GREEN = 2 + CYAN = 3 + RED = 4 + MAGENTA = 5 + YELLOW = 6 + GREY = 7 + +# from wincon.h +class WinStyle(object): + NORMAL = 0x00 # dim text, dim background + BRIGHT = 0x08 # bright text, dim background + BRIGHT_BACKGROUND = 0x80 # dim text, bright background + +class WinTerm(object): + + def __init__(self): + self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes + self.set_attrs(self._default) + self._default_fore = self._fore + self._default_back = self._back + self._default_style = self._style + # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. + # So that LIGHT_EX colors and BRIGHT style do not clobber each other, + # we track them separately, since LIGHT_EX is overwritten by Fore/Back + # and BRIGHT is overwritten by Style codes. 
+ self._light = 0 + + def get_attrs(self): + return self._fore + self._back * 16 + (self._style | self._light) + + def set_attrs(self, value): + self._fore = value & 7 + self._back = (value >> 4) & 7 + self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) + + def reset_all(self, on_stderr=None): + self.set_attrs(self._default) + self.set_console(attrs=self._default) + self._light = 0 + + def fore(self, fore=None, light=False, on_stderr=False): + if fore is None: + fore = self._default_fore + self._fore = fore + # Emulate LIGHT_EX with BRIGHT Style + if light: + self._light |= WinStyle.BRIGHT + else: + self._light &= ~WinStyle.BRIGHT + self.set_console(on_stderr=on_stderr) + + def back(self, back=None, light=False, on_stderr=False): + if back is None: + back = self._default_back + self._back = back + # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style + if light: + self._light |= WinStyle.BRIGHT_BACKGROUND + else: + self._light &= ~WinStyle.BRIGHT_BACKGROUND + self.set_console(on_stderr=on_stderr) + + def style(self, style=None, on_stderr=False): + if style is None: + style = self._default_style + self._style = style + self.set_console(on_stderr=on_stderr) + + def set_console(self, attrs=None, on_stderr=False): + if attrs is None: + attrs = self.get_attrs() + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleTextAttribute(handle, attrs) + + def get_position(self, handle): + position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition + # Because Windows coordinates are 0-based, + # and win32.SetConsoleCursorPosition expects 1-based. + position.X += 1 + position.Y += 1 + return position + + def set_cursor_position(self, position=None, on_stderr=False): + if position is None: + # I'm not currently tracking the position, so there is no default. + # position = self.get_position() + return + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleCursorPosition(handle, position) + + def cursor_adjust(self, x, y, on_stderr=False): + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + position = self.get_position(handle) + adjusted_position = (position.Y + y, position.X + x) + win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) + + def erase_screen(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the screen. + # 1 should clear from the cursor to the beginning of the screen. 
+ # 2 should clear the entire screen, and move cursor to (1,1) + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + # get the number of character cells in the current buffer + cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y + # get number of character cells before current cursor position + cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = cells_in_screen - cells_before_cursor + elif mode == 1: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_before_cursor + elif mode == 2: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_in_screen + else: + # invalid mode + return + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + if mode == 2: + # put the cursor where needed + win32.SetConsoleCursorPosition(handle, (1, 1)) + + def erase_line(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the line. + # 1 should clear from the cursor to the beginning of the line. + # 2 should clear the entire line. + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X + elif mode == 1: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwCursorPosition.X + elif mode == 2: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwSize.X + else: + # invalid mode + return + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + + def set_title(self, title): + win32.SetConsoleTitle(title) + + +def enable_vt_processing(fd): + if win32.windll is None or not win32.winapi_test(): + return False + + try: + handle = get_osfhandle(fd) + mode = win32.GetConsoleMode(handle) + win32.SetConsoleMode( + handle, + mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + + mode = win32.GetConsoleMode(handle) + if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING: + return True + # Can get TypeError in testsuite where 'fd' is a Mock() + except (OSError, TypeError): + return False diff --git a/venv/Lib/site-packages/distro-1.9.0.dist-info/INSTALLER b/venv/Lib/site-packages/distro-1.9.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/distro-1.9.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/distro-1.9.0.dist-info/LICENSE b/venv/Lib/site-packages/distro-1.9.0.dist-info/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/venv/Lib/site-packages/distro-1.9.0.dist-info/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/venv/Lib/site-packages/distro-1.9.0.dist-info/METADATA b/venv/Lib/site-packages/distro-1.9.0.dist-info/METADATA new file mode 100644 index 00000000..9312e8e4 --- /dev/null +++ b/venv/Lib/site-packages/distro-1.9.0.dist-info/METADATA @@ -0,0 +1,184 @@ +Metadata-Version: 2.1 +Name: distro +Version: 1.9.0 +Summary: Distro - an OS platform information API +Home-page: https://github.com/python-distro/distro +Author: Nir Cohen +Author-email: nir36g@gmail.com +License: Apache License, Version 2.0 +Platform: All +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: POSIX :: BSD +Classifier: Operating System :: POSIX :: BSD :: FreeBSD +Classifier: Operating System :: POSIX :: BSD :: NetBSD +Classifier: Operating System :: POSIX :: BSD :: OpenBSD +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Operating System +Requires-Python: >=3.6 +Description-Content-Type: text/markdown +License-File: LICENSE + +Distro - an OS platform information API +======================================= + +[![CI Status](https://github.com/python-distro/distro/workflows/CI/badge.svg)](https://github.com/python-distro/distro/actions/workflows/ci.yaml) +[![PyPI version](http://img.shields.io/pypi/v/distro.svg)](https://pypi.python.org/pypi/distro) +[![Supported Python Versions](https://img.shields.io/pypi/pyversions/distro.svg)](https://img.shields.io/pypi/pyversions/distro.svg) +[![Code Coverage](https://codecov.io/github/python-distro/distro/coverage.svg?branch=master)](https://codecov.io/github/python-distro/distro?branch=master) +[![Is Wheel](https://img.shields.io/pypi/wheel/distro.svg?style=flat)](https://pypi.python.org/pypi/distro) +[![Latest Github Release](https://readthedocs.org/projects/distro/badge/?version=stable)](http://distro.readthedocs.io/en/latest/) + +`distro` provides information about the +OS distribution it runs on, such as a reliable machine-readable ID, or +version information. + +It is the recommended replacement for Python's original +[`platform.linux_distribution`](https://docs.python.org/3.7/library/platform.html#platform.linux_distribution) +function (removed in Python 3.8). It also provides much more functionality +which isn't necessarily Python bound, like a command-line interface. + +Distro currently supports Linux and BSD based systems but [Windows and OS X support](https://github.com/python-distro/distro/issues/177) is also planned. 
+ +For Python 2.6 support, see https://github.com/python-distro/distro/tree/python2.6-support + +## Installation + +Installation of the latest released version from PyPI: + +```shell +pip install distro +``` + +Installation of the latest development version: + +```shell +pip install https://github.com/python-distro/distro/archive/master.tar.gz +``` + +To use as a standalone script, download `distro.py` directly: + +```shell +curl -O https://raw.githubusercontent.com/python-distro/distro/master/src/distro/distro.py +python distro.py +``` + +``distro`` is safe to vendor within projects that do not wish to add +dependencies. + +```shell +cd myproject +curl -O https://raw.githubusercontent.com/python-distro/distro/master/src/distro/distro.py +``` + +## Usage + +```bash +$ distro +Name: Antergos Linux +Version: 2015.10 (ISO-Rolling) +Codename: ISO-Rolling + +$ distro -j +{ + "codename": "ISO-Rolling", + "id": "antergos", + "like": "arch", + "version": "16.9", + "version_parts": { + "build_number": "", + "major": "16", + "minor": "9" + } +} + + +$ python +>>> import distro +>>> distro.name(pretty=True) +'CentOS Linux 8' +>>> distro.id() +'centos' +>>> distro.version(best=True) +'8.4.2105' +``` + + +## Documentation + +On top of the aforementioned API, several more functions are available. For a complete description of the +API, see the [latest API documentation](http://distro.readthedocs.org/en/latest/). + +## Background + +An alternative implementation became necessary because Python 3.5 deprecated +this function, and Python 3.8 removed it altogether. Its predecessor function +[`platform.dist`](https://docs.python.org/3.7/library/platform.html#platform.dist) +was already deprecated since Python 2.6 and removed in Python 3.8. Still, there +are many cases in which access to that information is needed. See [Python issue +1322](https://bugs.python.org/issue1322) for more information. + +The `distro` package implements a robust and inclusive way of retrieving the +information about a distribution based on new standards and old methods, +namely from these data sources (from high to low precedence): + +* The os-release file `/etc/os-release` if present, with a fall-back on `/usr/lib/os-release` if needed. +* The output of the `lsb_release` command, if available. +* The distro release file (`/etc/*(-|_)(release|version)`), if present. +* The `uname` command for BSD based distributions. + + +## Python and Distribution Support + +`distro` is supported and tested on Python 3.6+ and PyPy and on any +distribution that provides one or more of the data sources covered. + +This package is tested with test data that mimics the exact behavior of the data sources of [a number of Linux distributions](https://github.com/python-distro/distro/tree/master/tests/resources/distros). + +## Testing + +```shell +git clone git@github.com:python-distro/distro.git +cd distro +pip install tox +tox +``` + + +## Contributions + +Pull requests are always welcome to deal with specific distributions or just +for general merriment. + +See [CONTRIBUTIONS](https://github.com/python-distro/distro/blob/master/CONTRIBUTING.md) for contribution info.
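The Background section of the README above lists `distro`'s data sources in precedence order. As a rough illustration of the highest-precedence source only, a minimal, hypothetical os-release parser might look like the sketch below. This is not the package's actual API; the real logic lives in `distro/distro.py` later in this diff, which likewise imports `shlex` to cope with the file's shell-style quoting.

```python
# Minimal sketch (hypothetical helper, not part of distro's public API):
# parse /etc/os-release KEY=VALUE pairs into a dict.
import shlex

def read_os_release(path="/etc/os-release"):
    info = {}
    with open(path, encoding="utf-8") as f:
        # os-release is a series of KEY=VALUE lines; shlex strips the
        # optional shell-style quoting around VALUE and skips comments.
        for token in shlex.split(f.read(), comments=True):
            if "=" in token:
                key, value = token.split("=", 1)
                info[key.lower()] = value
    return info

# Example: read_os_release().get("id") might return 'ubuntu' on an Ubuntu host.
```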
+ +Reference implementations for supporting additional distributions and file +formats can be found here: + +* https://github.com/saltstack/salt/blob/develop/salt/grains/core.py#L1172 +* https://github.com/chef/ohai/blob/master/lib/ohai/plugins/linux/platform.rb +* https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/facts/system/distribution.py +* https://github.com/puppetlabs/facter/blob/master/lib/src/facts/linux/os_linux.cc + +## Package manager distributions + +* https://src.fedoraproject.org/rpms/python-distro +* https://www.archlinux.org/packages/community/any/python-distro/ +* https://launchpad.net/ubuntu/+source/python-distro +* https://packages.debian.org/stable/python3-distro +* https://packages.gentoo.org/packages/dev-python/distro +* https://pkgs.org/download/python3-distro +* https://slackbuilds.org/repository/14.2/python/python-distro/ diff --git a/venv/Lib/site-packages/distro-1.9.0.dist-info/RECORD b/venv/Lib/site-packages/distro-1.9.0.dist-info/RECORD new file mode 100644 index 00000000..efcb7573 --- /dev/null +++ b/venv/Lib/site-packages/distro-1.9.0.dist-info/RECORD @@ -0,0 +1,15 @@ +../../Scripts/distro.exe,sha256=khBuSzWz62kQLI0MDhSURS38G5kPyewnq5Fkaor2V90,108400 +distro-1.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +distro-1.9.0.dist-info/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325 +distro-1.9.0.dist-info/METADATA,sha256=MWMqst5VkRMQkbM5e9zfeXcYV52Fp1GG8Gg53QwJ6B0,6791 +distro-1.9.0.dist-info/RECORD,, +distro-1.9.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92 +distro-1.9.0.dist-info/entry_points.txt,sha256=3ObjqQMbh1xeQQwsWtgbfDNDMDD-EbggR1Oj_z8s9hc,46 +distro-1.9.0.dist-info/top_level.txt,sha256=ikde_V_XEdSBqaGd5tEriN_wzYHLgTX_zVtlsGLHvwQ,7 +distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981 +distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64 +distro/__pycache__/__init__.cpython-312.pyc,, +distro/__pycache__/__main__.cpython-312.pyc,, +distro/__pycache__/distro.cpython-312.pyc,, +distro/distro.py,sha256=XqbefacAhDT4zr_trnbA15eY8vdK4GTghgmvUGrEM_4,49430 +distro/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/Lib/site-packages/distro-1.9.0.dist-info/WHEEL b/venv/Lib/site-packages/distro-1.9.0.dist-info/WHEEL new file mode 100644 index 00000000..98c0d20b --- /dev/null +++ b/venv/Lib/site-packages/distro-1.9.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/distro-1.9.0.dist-info/entry_points.txt b/venv/Lib/site-packages/distro-1.9.0.dist-info/entry_points.txt new file mode 100644 index 00000000..08d29c55 --- /dev/null +++ b/venv/Lib/site-packages/distro-1.9.0.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +distro = distro.distro:main diff --git a/venv/Lib/site-packages/distro-1.9.0.dist-info/top_level.txt b/venv/Lib/site-packages/distro-1.9.0.dist-info/top_level.txt new file mode 100644 index 00000000..0e093317 --- /dev/null +++ b/venv/Lib/site-packages/distro-1.9.0.dist-info/top_level.txt @@ -0,0 +1 @@ +distro diff --git a/venv/Lib/site-packages/distro/__init__.py b/venv/Lib/site-packages/distro/__init__.py new file mode 100644 index 00000000..7686fe85 --- /dev/null +++ b/venv/Lib/site-packages/distro/__init__.py @@ -0,0 +1,54 @@ +from .distro import ( + NORMALIZED_DISTRO_ID, + NORMALIZED_LSB_ID, + NORMALIZED_OS_ID, + LinuxDistribution, + __version__, + build_number, 
+ codename, + distro_release_attr, + distro_release_info, + id, + info, + like, + linux_distribution, + lsb_release_attr, + lsb_release_info, + major_version, + minor_version, + name, + os_release_attr, + os_release_info, + uname_attr, + uname_info, + version, + version_parts, +) + +__all__ = [ + "NORMALIZED_DISTRO_ID", + "NORMALIZED_LSB_ID", + "NORMALIZED_OS_ID", + "LinuxDistribution", + "build_number", + "codename", + "distro_release_attr", + "distro_release_info", + "id", + "info", + "like", + "linux_distribution", + "lsb_release_attr", + "lsb_release_info", + "major_version", + "minor_version", + "name", + "os_release_attr", + "os_release_info", + "uname_attr", + "uname_info", + "version", + "version_parts", +] + +__version__ = __version__ diff --git a/venv/Lib/site-packages/distro/__main__.py b/venv/Lib/site-packages/distro/__main__.py new file mode 100644 index 00000000..0c01d5b0 --- /dev/null +++ b/venv/Lib/site-packages/distro/__main__.py @@ -0,0 +1,4 @@ +from .distro import main + +if __name__ == "__main__": + main() diff --git a/venv/Lib/site-packages/distro/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/distro/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..eddb55b9 Binary files /dev/null and b/venv/Lib/site-packages/distro/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/distro/__pycache__/__main__.cpython-312.pyc b/venv/Lib/site-packages/distro/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 00000000..cf736659 Binary files /dev/null and b/venv/Lib/site-packages/distro/__pycache__/__main__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/distro/__pycache__/distro.cpython-312.pyc b/venv/Lib/site-packages/distro/__pycache__/distro.cpython-312.pyc new file mode 100644 index 00000000..90ff7b2f Binary files /dev/null and b/venv/Lib/site-packages/distro/__pycache__/distro.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/distro/distro.py b/venv/Lib/site-packages/distro/distro.py new file mode 100644 index 00000000..78ccdfa4 --- /dev/null +++ b/venv/Lib/site-packages/distro/distro.py @@ -0,0 +1,1403 @@ +#!/usr/bin/env python +# Copyright 2015-2021 Nir Cohen +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The ``distro`` package (``distro`` stands for Linux Distribution) provides +information about the Linux distribution it runs on, such as a reliable +machine-readable distro ID, or version information. + +It is the recommended replacement for Python's original +:py:func:`platform.linux_distribution` function, but it provides much more +functionality. An alternative implementation became necessary because Python +3.5 deprecated this function, and Python 3.8 removed it altogether. Its +predecessor function :py:func:`platform.dist` was already deprecated since +Python 2.6 and removed in Python 3.8. Still, there are many cases in which +access to OS distribution information is needed. See `Python issue 1322 +`_ for more information. 
+""" + +import argparse +import json +import logging +import os +import re +import shlex +import subprocess +import sys +import warnings +from typing import ( + Any, + Callable, + Dict, + Iterable, + Optional, + Sequence, + TextIO, + Tuple, + Type, +) + +try: + from typing import TypedDict +except ImportError: + # Python 3.7 + TypedDict = dict + +__version__ = "1.9.0" + + +class VersionDict(TypedDict): + major: str + minor: str + build_number: str + + +class InfoDict(TypedDict): + id: str + version: str + version_parts: VersionDict + like: str + codename: str + + +_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc") +_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib") +_OS_RELEASE_BASENAME = "os-release" + +#: Translation table for normalizing the "ID" attribute defined in os-release +#: files, for use by the :func:`distro.id` method. +#: +#: * Key: Value as defined in the os-release file, translated to lower case, +#: with blanks translated to underscores. +#: +#: * Value: Normalized value. +NORMALIZED_OS_ID = { + "ol": "oracle", # Oracle Linux + "opensuse-leap": "opensuse", # Newer versions of OpenSuSE report as opensuse-leap +} + +#: Translation table for normalizing the "Distributor ID" attribute returned by +#: the lsb_release command, for use by the :func:`distro.id` method. +#: +#: * Key: Value as returned by the lsb_release command, translated to lower +#: case, with blanks translated to underscores. +#: +#: * Value: Normalized value. +NORMALIZED_LSB_ID = { + "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4 + "enterpriseenterpriseserver": "oracle", # Oracle Linux 5 + "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation + "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server + "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode +} + +#: Translation table for normalizing the distro ID derived from the file name +#: of distro release files, for use by the :func:`distro.id` method. +#: +#: * Key: Value as derived from the file name of a distro release file, +#: translated to lower case, with blanks translated to underscores. +#: +#: * Value: Normalized value. +NORMALIZED_DISTRO_ID = { + "redhat": "rhel", # RHEL 6.x, 7.x +} + +# Pattern for content of distro release file (reversed) +_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( + r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)" +) + +# Pattern for base file name of distro release file +_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$") + +# Base file names to be looked up for if _UNIXCONFDIR is not readable. +_DISTRO_RELEASE_BASENAMES = [ + "SuSE-release", + "altlinux-release", + "arch-release", + "base-release", + "centos-release", + "fedora-release", + "gentoo-release", + "mageia-release", + "mandrake-release", + "mandriva-release", + "mandrivalinux-release", + "manjaro-release", + "oracle-release", + "redhat-release", + "rocky-release", + "sl-release", + "slackware-version", +] + +# Base file names to be ignored when searching for distro release file +_DISTRO_RELEASE_IGNORE_BASENAMES = ( + "debian_version", + "lsb-release", + "oem-release", + _OS_RELEASE_BASENAME, + "system-release", + "plesk-release", + "iredmail-release", + "board-release", + "ec2_version", +) + + +def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]: + """ + .. deprecated:: 1.6.0 + + :func:`distro.linux_distribution()` is deprecated. It should only be + used as a compatibility shim with Python's + :py:func:`platform.linux_distribution()`. 
Please use :func:`distro.id`, + :func:`distro.version` and :func:`distro.name` instead. + + Return information about the current OS distribution as a tuple + ``(id_name, version, codename)`` with items as follows: + + * ``id_name``: If *full_distribution_name* is false, the result of + :func:`distro.id`. Otherwise, the result of :func:`distro.name`. + + * ``version``: The result of :func:`distro.version`. + + * ``codename``: The extra item (usually in parentheses) after the + os-release version number, or the result of :func:`distro.codename`. + + The interface of this function is compatible with the original + :py:func:`platform.linux_distribution` function, supporting a subset of + its parameters. + + The data it returns may not exactly be the same, because it uses more data + sources than the original function, and that may lead to different data if + the OS distribution is not consistent across multiple data sources it + provides (there are indeed such distributions ...). + + Another reason for differences is the fact that the :func:`distro.id` + method normalizes the distro ID string to a reliable machine-readable value + for a number of popular OS distributions. + """ + warnings.warn( + "distro.linux_distribution() is deprecated. It should only be used as a " + "compatibility shim with Python's platform.linux_distribution(). Please use " + "distro.id(), distro.version() and distro.name() instead.", + DeprecationWarning, + stacklevel=2, + ) + return _distro.linux_distribution(full_distribution_name) + + +def id() -> str: + """ + Return the distro ID of the current distribution, as a + machine-readable string. + + For a number of OS distributions, the returned distro ID value is + *reliable*, in the sense that it is documented and that it does not change + across releases of the distribution. + + This package maintains the following reliable distro ID values: + + ============== ========================================= + Distro ID Distribution + ============== ========================================= + "ubuntu" Ubuntu + "debian" Debian + "rhel" RedHat Enterprise Linux + "centos" CentOS + "fedora" Fedora + "sles" SUSE Linux Enterprise Server + "opensuse" openSUSE + "amzn" Amazon Linux + "arch" Arch Linux + "buildroot" Buildroot + "cloudlinux" CloudLinux OS + "exherbo" Exherbo Linux + "gentoo" GenToo Linux + "ibm_powerkvm" IBM PowerKVM + "kvmibm" KVM for IBM z Systems + "linuxmint" Linux Mint + "mageia" Mageia + "mandriva" Mandriva Linux + "parallels" Parallels + "pidora" Pidora + "raspbian" Raspbian + "oracle" Oracle Linux (and Oracle Enterprise Linux) + "scientific" Scientific Linux + "slackware" Slackware + "xenserver" XenServer + "openbsd" OpenBSD + "netbsd" NetBSD + "freebsd" FreeBSD + "midnightbsd" MidnightBSD + "rocky" Rocky Linux + "aix" AIX + "guix" Guix System + "altlinux" ALT Linux + ============== ========================================= + + If you have a need to get distros for reliable IDs added into this set, + or if you find that the :func:`distro.id` function returns a different + distro ID for one of the listed distros, please create an issue in the + `distro issue tracker`_. + + **Lookup hierarchy and transformations:** + + First, the ID is obtained from the following sources, in the specified + order. 
+
+    * the value of the "ID" attribute of the os-release file,
+
+    * the value of the "Distributor ID" attribute returned by the lsb_release
+      command,
+
+    * the first part of the file name of the distro release file,
+
+    The ID value determined this way then passes the following
+    transformations, before it is returned by this method:
+
+    * it is translated to lower case,
+
+    * blanks (which should not be there anyway) are translated to underscores,
+
+    * a normalization of the ID is performed, based upon
+      `normalization tables`_. The purpose of this normalization is to ensure
+      that the ID is as reliable as possible, even across incompatible changes
+      in the OS distributions. A common reason for an incompatible change is
+      the addition of an os-release file, or the addition of the lsb_release
+      command, with ID values that differ from what was previously determined
+      from the distro release file name.
+    """
+    return _distro.id()
+
+
+def name(pretty: bool = False) -> str:
+    """
+    Return the name of the current OS distribution, as a human-readable
+    string.
+
+    If *pretty* is false, the name is returned without version or codename.
+    (e.g. "CentOS Linux")
+
+    If *pretty* is true, the version and codename are appended.
+    (e.g. "CentOS Linux 7.1.1503 (Core)")
+
+    **Lookup hierarchy:**
+
+    The name is obtained from the following sources, in the specified order.
+    The first available and non-empty value is used:
+
+    * If *pretty* is false:
+
+      - the value of the "NAME" attribute of the os-release file,
+
+      - the value of the "Distributor ID" attribute returned by the
+        lsb_release command,
+
+      - the value of the "<name>" field of the distro release file.
+
+    * If *pretty* is true:
+
+      - the value of the "PRETTY_NAME" attribute of the os-release file,
+
+      - the value of the "Description" attribute returned by the lsb_release
+        command,
+
+      - the value of the "<name>" field of the distro release file, appended
+        with the value of the pretty version ("<version_id>" and "<codename>"
+        fields) of the distro release file, if available.
+    """
+    return _distro.name(pretty)
+
+
+def version(pretty: bool = False, best: bool = False) -> str:
+    """
+    Return the version of the current OS distribution, as a human-readable
+    string.
+
+    If *pretty* is false, the version is returned without codename (e.g.
+    "7.0").
+
+    If *pretty* is true, the codename in parenthesis is appended, if the
+    codename is non-empty (e.g. "7.0 (Maipo)").
+
+    Some distributions provide version numbers with different precisions in
+    the different sources of distribution information. Examining the different
+    sources in a fixed priority order does not always yield the most precise
+    version (e.g. for Debian 8.2, or CentOS 7.1).
+
+    Some other distributions may not provide this kind of information. In
+    these cases, an empty string would be returned. This behavior can be
+    observed with rolling releases distributions (e.g. Arch Linux).
+
+    The *best* parameter can be used to control the approach for the returned
+    version:
+
+    If *best* is false, the first non-empty version number in priority order
+    of the examined sources is returned.
+
+    If *best* is true, the most precise version number out of all examined
+    sources is returned.
+
+    **Lookup hierarchy:**
+
+    In all cases, the version number is obtained from the following sources.
+    If *best* is false, this order represents the priority order:
+
+    * the value of the "VERSION_ID" attribute of the os-release file,
+    * the value of the "Release" attribute returned by the lsb_release
+      command,
+    * the version number parsed from the "<version_id>" field of the first
+      line of the distro release file,
+    * the version number parsed from the "PRETTY_NAME" attribute of the
+      os-release file, if it follows the format of the distro release files.
+    * the version number parsed from the "Description" attribute returned by
+      the lsb_release command, if it follows the format of the distro release
+      files.
+    """
+    return _distro.version(pretty, best)
+
+
+def version_parts(best: bool = False) -> Tuple[str, str, str]:
+    """
+    Return the version of the current OS distribution as a tuple
+    ``(major, minor, build_number)`` with items as follows:
+
+    * ``major``: The result of :func:`distro.major_version`.
+
+    * ``minor``: The result of :func:`distro.minor_version`.
+
+    * ``build_number``: The result of :func:`distro.build_number`.
+
+    For a description of the *best* parameter, see the :func:`distro.version`
+    method.
+    """
+    return _distro.version_parts(best)
+
+
+def major_version(best: bool = False) -> str:
+    """
+    Return the major version of the current OS distribution, as a string,
+    if provided.
+    Otherwise, the empty string is returned. The major version is the first
+    part of the dot-separated version string.
+
+    For a description of the *best* parameter, see the :func:`distro.version`
+    method.
+    """
+    return _distro.major_version(best)
+
+
+def minor_version(best: bool = False) -> str:
+    """
+    Return the minor version of the current OS distribution, as a string,
+    if provided.
+    Otherwise, the empty string is returned. The minor version is the second
+    part of the dot-separated version string.
+
+    For a description of the *best* parameter, see the :func:`distro.version`
+    method.
+    """
+    return _distro.minor_version(best)
+
+
+def build_number(best: bool = False) -> str:
+    """
+    Return the build number of the current OS distribution, as a string,
+    if provided.
+    Otherwise, the empty string is returned. The build number is the third
+    part of the dot-separated version string.
+
+    For a description of the *best* parameter, see the :func:`distro.version`
+    method.
+    """
+    return _distro.build_number(best)
+
+
+def like() -> str:
+    """
+    Return a space-separated list of distro IDs of distributions that are
+    closely related to the current OS distribution in regards to packaging
+    and programming interfaces, for example distributions the current
+    distribution is a derivative from.
+
+    **Lookup hierarchy:**
+
+    This information item is only provided by the os-release file.
+    For details, see the description of the "ID_LIKE" attribute in the
+    `os-release man page
+    <https://www.freedesktop.org/software/systemd/man/os-release.html>`_.
+    """
+    return _distro.like()
+
+
+def codename() -> str:
+    """
+    Return the codename for the release of the current OS distribution,
+    as a string.
+
+    If the distribution does not have a codename, an empty string is returned.
+
+    Note that the returned codename is not always really a codename. For
+    example, openSUSE returns "x86_64". This function does not handle such
+    cases in any special way and just returns the string it finds, if any.
+
+    **Lookup hierarchy:**
+
+    * the codename within the "VERSION" attribute of the os-release file, if
+      provided,
+
+    * the value of the "Codename" attribute returned by the lsb_release
+      command,
+
+    * the value of the "<codename>" field of the distro release file.
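+
+    For illustration only ("jammy" is just an example value; the actual
+    result depends on the host system):
+
+    .. sourcecode:: python
+
+        >>> import distro
+        >>> distro.codename()
+        'jammy'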
+    """
+    return _distro.codename()
+
+
+def info(pretty: bool = False, best: bool = False) -> InfoDict:
+    """
+    Return certain machine-readable information items about the current OS
+    distribution in a dictionary, as shown in the following example:
+
+    .. sourcecode:: python
+
+        {
+            'id': 'rhel',
+            'version': '7.0',
+            'version_parts': {
+                'major': '7',
+                'minor': '0',
+                'build_number': ''
+            },
+            'like': 'fedora',
+            'codename': 'Maipo'
+        }
+
+    The dictionary structure and keys are always the same, regardless of which
+    information items are available in the underlying data sources. The values
+    for the various keys are as follows:
+
+    * ``id``: The result of :func:`distro.id`.
+
+    * ``version``: The result of :func:`distro.version`.
+
+    * ``version_parts -> major``: The result of :func:`distro.major_version`.
+
+    * ``version_parts -> minor``: The result of :func:`distro.minor_version`.
+
+    * ``version_parts -> build_number``: The result of
+      :func:`distro.build_number`.
+
+    * ``like``: The result of :func:`distro.like`.
+
+    * ``codename``: The result of :func:`distro.codename`.
+
+    For a description of the *pretty* and *best* parameters, see the
+    :func:`distro.version` method.
+    """
+    return _distro.info(pretty, best)
+
+
+def os_release_info() -> Dict[str, str]:
+    """
+    Return a dictionary containing key-value pairs for the information items
+    from the os-release file data source of the current OS distribution.
+
+    See `os-release file`_ for details about these information items.
+    """
+    return _distro.os_release_info()
+
+
+def lsb_release_info() -> Dict[str, str]:
+    """
+    Return a dictionary containing key-value pairs for the information items
+    from the lsb_release command data source of the current OS distribution.
+
+    See `lsb_release command output`_ for details about these information
+    items.
+    """
+    return _distro.lsb_release_info()
+
+
+def distro_release_info() -> Dict[str, str]:
+    """
+    Return a dictionary containing key-value pairs for the information items
+    from the distro release file data source of the current OS distribution.
+
+    See `distro release file`_ for details about these information items.
+    """
+    return _distro.distro_release_info()
+
+
+def uname_info() -> Dict[str, str]:
+    """
+    Return a dictionary containing key-value pairs for the information items
+    from the uname command data source of the current OS distribution.
+    """
+    return _distro.uname_info()
+
+
+def os_release_attr(attribute: str) -> str:
+    """
+    Return a single named information item from the os-release file data source
+    of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+
+    See `os-release file`_ for details about these information items.
+    """
+    return _distro.os_release_attr(attribute)
+
+
+def lsb_release_attr(attribute: str) -> str:
+    """
+    Return a single named information item from the lsb_release command output
+    data source of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+
+    See `lsb_release command output`_ for details about these information
+    items.
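+
+    For illustration only (requires the lsb_release command; the output is
+    host-dependent):
+
+    .. sourcecode:: python
+
+        >>> import distro
+        >>> distro.lsb_release_attr("distributor_id")
+        'Ubuntu'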
+    """
+    return _distro.lsb_release_attr(attribute)
+
+
+def distro_release_attr(attribute: str) -> str:
+    """
+    Return a single named information item from the distro release file
+    data source of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+
+    See `distro release file`_ for details about these information items.
+    """
+    return _distro.distro_release_attr(attribute)
+
+
+def uname_attr(attribute: str) -> str:
+    """
+    Return a single named information item from the uname command output
+    data source of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+    """
+    return _distro.uname_attr(attribute)
+
+
+try:
+    from functools import cached_property
+except ImportError:
+    # Python < 3.8
+    class cached_property:  # type: ignore
+        """A version of @property which caches the value. On access, it calls
+        the underlying function and sets the value in `__dict__` so future
+        accesses will not re-call the property.
+        """
+
+        def __init__(self, f: Callable[[Any], Any]) -> None:
+            self._fname = f.__name__
+            self._f = f
+
+        def __get__(self, obj: Any, owner: Type[Any]) -> Any:
+            assert obj is not None, f"call {self._fname} on an instance"
+            ret = obj.__dict__[self._fname] = self._f(obj)
+            return ret
+
+
+class LinuxDistribution:
+    """
+    Provides information about an OS distribution.
+
+    This package creates a private module-global instance of this class with
+    default initialization arguments, that is used by the
+    `consolidated accessor functions`_ and `single source accessor functions`_.
+    By using default initialization arguments, that module-global instance
+    returns data about the current OS distribution (i.e. the distro this
+    package runs on).
+
+    Normally, it is not necessary to create additional instances of this class.
+    However, in situations where control is needed over the exact data sources
+    that are used, instances of this class can be created with a specific
+    distro release file, or a specific os-release file, or without invoking the
+    lsb_release command.
+    """
+
+    def __init__(
+        self,
+        include_lsb: Optional[bool] = None,
+        os_release_file: str = "",
+        distro_release_file: str = "",
+        include_uname: Optional[bool] = None,
+        root_dir: Optional[str] = None,
+        include_oslevel: Optional[bool] = None,
+    ) -> None:
+        """
+        The initialization method of this class gathers information from the
+        available data sources, and stores that in private instance attributes.
+        Subsequent access to the information items uses these private instance
+        attributes, so that the data sources are read only once.
+
+        Parameters:
+
+        * ``include_lsb`` (bool): Controls whether the
+          `lsb_release command output`_ is included as a data source.
+
+          If the lsb_release command is not available in the program execution
+          path, the data source for the lsb_release command will be empty.
+
+        * ``os_release_file`` (string): The path name of the
+          `os-release file`_ that is to be used as a data source.
+
+          An empty string (the default) will cause the default path name to
+          be used (see `os-release file`_ for details).
+
+          If the specified or defaulted os-release file does not exist, the
+          data source for the os-release file will be empty.
+ + * ``distro_release_file`` (string): The path name of the + `distro release file`_ that is to be used as a data source. + + An empty string (the default) will cause a default search algorithm + to be used (see `distro release file`_ for details). + + If the specified distro release file does not exist, or if no default + distro release file can be found, the data source for the distro + release file will be empty. + + * ``include_uname`` (bool): Controls whether uname command output is + included as a data source. If the uname command is not available in + the program execution path the data source for the uname command will + be empty. + + * ``root_dir`` (string): The absolute path to the root directory to use + to find distro-related information files. Note that ``include_*`` + parameters must not be enabled in combination with ``root_dir``. + + * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command + output is included as a data source. If the oslevel command is not + available in the program execution path the data source will be + empty. + + Public instance attributes: + + * ``os_release_file`` (string): The path name of the + `os-release file`_ that is actually used as a data source. The + empty string if no distro release file is used as a data source. + + * ``distro_release_file`` (string): The path name of the + `distro release file`_ that is actually used as a data source. The + empty string if no distro release file is used as a data source. + + * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter. + This controls whether the lsb information will be loaded. + + * ``include_uname`` (bool): The result of the ``include_uname`` + parameter. This controls whether the uname information will + be loaded. + + * ``include_oslevel`` (bool): The result of the ``include_oslevel`` + parameter. This controls whether (AIX) oslevel information will be + loaded. + + * ``root_dir`` (string): The result of the ``root_dir`` parameter. + The absolute path to the root directory to use to find distro-related + information files. + + Raises: + + * :py:exc:`ValueError`: Initialization parameters combination is not + supported. + + * :py:exc:`OSError`: Some I/O issue with an os-release file or distro + release file. + + * :py:exc:`UnicodeError`: A data source has unexpected characters or + uses an unexpected encoding. + """ + self.root_dir = root_dir + self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR + self.usr_lib_dir = ( + os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR + ) + + if os_release_file: + self.os_release_file = os_release_file + else: + etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME) + usr_lib_os_release_file = os.path.join( + self.usr_lib_dir, _OS_RELEASE_BASENAME + ) + + # NOTE: The idea is to respect order **and** have it set + # at all times for API backwards compatibility. 
+ if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile( + usr_lib_os_release_file + ): + self.os_release_file = etc_dir_os_release_file + else: + self.os_release_file = usr_lib_os_release_file + + self.distro_release_file = distro_release_file or "" # updated later + + is_root_dir_defined = root_dir is not None + if is_root_dir_defined and (include_lsb or include_uname or include_oslevel): + raise ValueError( + "Including subprocess data sources from specific root_dir is disallowed" + " to prevent false information" + ) + self.include_lsb = ( + include_lsb if include_lsb is not None else not is_root_dir_defined + ) + self.include_uname = ( + include_uname if include_uname is not None else not is_root_dir_defined + ) + self.include_oslevel = ( + include_oslevel if include_oslevel is not None else not is_root_dir_defined + ) + + def __repr__(self) -> str: + """Return repr of all info""" + return ( + "LinuxDistribution(" + "os_release_file={self.os_release_file!r}, " + "distro_release_file={self.distro_release_file!r}, " + "include_lsb={self.include_lsb!r}, " + "include_uname={self.include_uname!r}, " + "include_oslevel={self.include_oslevel!r}, " + "root_dir={self.root_dir!r}, " + "_os_release_info={self._os_release_info!r}, " + "_lsb_release_info={self._lsb_release_info!r}, " + "_distro_release_info={self._distro_release_info!r}, " + "_uname_info={self._uname_info!r}, " + "_oslevel_info={self._oslevel_info!r})".format(self=self) + ) + + def linux_distribution( + self, full_distribution_name: bool = True + ) -> Tuple[str, str, str]: + """ + Return information about the OS distribution that is compatible + with Python's :func:`platform.linux_distribution`, supporting a subset + of its parameters. + + For details, see :func:`distro.linux_distribution`. + """ + return ( + self.name() if full_distribution_name else self.id(), + self.version(), + self._os_release_info.get("release_codename") or self.codename(), + ) + + def id(self) -> str: + """Return the distro ID of the OS distribution, as a string. + + For details, see :func:`distro.id`. + """ + + def normalize(distro_id: str, table: Dict[str, str]) -> str: + distro_id = distro_id.lower().replace(" ", "_") + return table.get(distro_id, distro_id) + + distro_id = self.os_release_attr("id") + if distro_id: + return normalize(distro_id, NORMALIZED_OS_ID) + + distro_id = self.lsb_release_attr("distributor_id") + if distro_id: + return normalize(distro_id, NORMALIZED_LSB_ID) + + distro_id = self.distro_release_attr("id") + if distro_id: + return normalize(distro_id, NORMALIZED_DISTRO_ID) + + distro_id = self.uname_attr("id") + if distro_id: + return normalize(distro_id, NORMALIZED_DISTRO_ID) + + return "" + + def name(self, pretty: bool = False) -> str: + """ + Return the name of the OS distribution, as a string. + + For details, see :func:`distro.name`. + """ + name = ( + self.os_release_attr("name") + or self.lsb_release_attr("distributor_id") + or self.distro_release_attr("name") + or self.uname_attr("name") + ) + if pretty: + name = self.os_release_attr("pretty_name") or self.lsb_release_attr( + "description" + ) + if not name: + name = self.distro_release_attr("name") or self.uname_attr("name") + version = self.version(pretty=True) + if version: + name = f"{name} {version}" + return name or "" + + def version(self, pretty: bool = False, best: bool = False) -> str: + """ + Return the version of the OS distribution, as a string. + + For details, see :func:`distro.version`. 
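+
+        For illustration only (the output is host-dependent; the value shown
+        assumes a RHEL 7 host):
+
+        .. sourcecode:: python
+
+            >>> import distro
+            >>> distro.LinuxDistribution().version(pretty=True, best=True)
+            '7.0 (Maipo)'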
+ """ + versions = [ + self.os_release_attr("version_id"), + self.lsb_release_attr("release"), + self.distro_release_attr("version_id"), + self._parse_distro_release_content(self.os_release_attr("pretty_name")).get( + "version_id", "" + ), + self._parse_distro_release_content( + self.lsb_release_attr("description") + ).get("version_id", ""), + self.uname_attr("release"), + ] + if self.uname_attr("id").startswith("aix"): + # On AIX platforms, prefer oslevel command output. + versions.insert(0, self.oslevel_info()) + elif self.id() == "debian" or "debian" in self.like().split(): + # On Debian-like, add debian_version file content to candidates list. + versions.append(self._debian_version) + version = "" + if best: + # This algorithm uses the last version in priority order that has + # the best precision. If the versions are not in conflict, that + # does not matter; otherwise, using the last one instead of the + # first one might be considered a surprise. + for v in versions: + if v.count(".") > version.count(".") or version == "": + version = v + else: + for v in versions: + if v != "": + version = v + break + if pretty and version and self.codename(): + version = f"{version} ({self.codename()})" + return version + + def version_parts(self, best: bool = False) -> Tuple[str, str, str]: + """ + Return the version of the OS distribution, as a tuple of version + numbers. + + For details, see :func:`distro.version_parts`. + """ + version_str = self.version(best=best) + if version_str: + version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?") + matches = version_regex.match(version_str) + if matches: + major, minor, build_number = matches.groups() + return major, minor or "", build_number or "" + return "", "", "" + + def major_version(self, best: bool = False) -> str: + """ + Return the major version number of the current distribution. + + For details, see :func:`distro.major_version`. + """ + return self.version_parts(best)[0] + + def minor_version(self, best: bool = False) -> str: + """ + Return the minor version number of the current distribution. + + For details, see :func:`distro.minor_version`. + """ + return self.version_parts(best)[1] + + def build_number(self, best: bool = False) -> str: + """ + Return the build number of the current distribution. + + For details, see :func:`distro.build_number`. + """ + return self.version_parts(best)[2] + + def like(self) -> str: + """ + Return the IDs of distributions that are like the OS distribution. + + For details, see :func:`distro.like`. + """ + return self.os_release_attr("id_like") or "" + + def codename(self) -> str: + """ + Return the codename of the OS distribution. + + For details, see :func:`distro.codename`. + """ + try: + # Handle os_release specially since distros might purposefully set + # this to empty string to have no codename + return self._os_release_info["codename"] + except KeyError: + return ( + self.lsb_release_attr("codename") + or self.distro_release_attr("codename") + or "" + ) + + def info(self, pretty: bool = False, best: bool = False) -> InfoDict: + """ + Return certain machine-readable information about the OS + distribution. + + For details, see :func:`distro.info`. 
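+
+        For illustration only (host-dependent output), the returned dictionary
+        can be serialized directly:
+
+        .. sourcecode:: python
+
+            >>> import json
+            >>> import distro
+            >>> print(json.dumps(distro.LinuxDistribution().info(), indent=4))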
+ """ + return InfoDict( + id=self.id(), + version=self.version(pretty, best), + version_parts=VersionDict( + major=self.major_version(best), + minor=self.minor_version(best), + build_number=self.build_number(best), + ), + like=self.like(), + codename=self.codename(), + ) + + def os_release_info(self) -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information + items from the os-release file data source of the OS distribution. + + For details, see :func:`distro.os_release_info`. + """ + return self._os_release_info + + def lsb_release_info(self) -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information + items from the lsb_release command data source of the OS + distribution. + + For details, see :func:`distro.lsb_release_info`. + """ + return self._lsb_release_info + + def distro_release_info(self) -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information + items from the distro release file data source of the OS + distribution. + + For details, see :func:`distro.distro_release_info`. + """ + return self._distro_release_info + + def uname_info(self) -> Dict[str, str]: + """ + Return a dictionary containing key-value pairs for the information + items from the uname command data source of the OS distribution. + + For details, see :func:`distro.uname_info`. + """ + return self._uname_info + + def oslevel_info(self) -> str: + """ + Return AIX' oslevel command output. + """ + return self._oslevel_info + + def os_release_attr(self, attribute: str) -> str: + """ + Return a single named information item from the os-release file data + source of the OS distribution. + + For details, see :func:`distro.os_release_attr`. + """ + return self._os_release_info.get(attribute, "") + + def lsb_release_attr(self, attribute: str) -> str: + """ + Return a single named information item from the lsb_release command + output data source of the OS distribution. + + For details, see :func:`distro.lsb_release_attr`. + """ + return self._lsb_release_info.get(attribute, "") + + def distro_release_attr(self, attribute: str) -> str: + """ + Return a single named information item from the distro release file + data source of the OS distribution. + + For details, see :func:`distro.distro_release_attr`. + """ + return self._distro_release_info.get(attribute, "") + + def uname_attr(self, attribute: str) -> str: + """ + Return a single named information item from the uname command + output data source of the OS distribution. + + For details, see :func:`distro.uname_attr`. + """ + return self._uname_info.get(attribute, "") + + @cached_property + def _os_release_info(self) -> Dict[str, str]: + """ + Get the information items from the specified os-release file. + + Returns: + A dictionary containing all information items. + """ + if os.path.isfile(self.os_release_file): + with open(self.os_release_file, encoding="utf-8") as release_file: + return self._parse_os_release_content(release_file) + return {} + + @staticmethod + def _parse_os_release_content(lines: TextIO) -> Dict[str, str]: + """ + Parse the lines of an os-release file. + + Parameters: + + * lines: Iterable through the lines in the os-release file. + Each line must be a unicode string or a UTF-8 encoded byte + string. + + Returns: + A dictionary containing all information items. 
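+
+        For illustration only (hypothetical file content):
+
+        .. sourcecode:: python
+
+            >>> import io
+            >>> content = io.StringIO('VERSION="22.04 LTS (Jammy Jellyfish)"')
+            >>> LinuxDistribution._parse_os_release_content(content)["codename"]
+            'Jammy Jellyfish'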
+        """
+        props = {}
+        lexer = shlex.shlex(lines, posix=True)
+        lexer.whitespace_split = True
+
+        tokens = list(lexer)
+        for token in tokens:
+            # At this point, all shell-like parsing has been done (i.e.
+            # comments processed, quotes and backslash escape sequences
+            # processed, multi-line values assembled, trailing newlines
+            # stripped, etc.), so the tokens are now either:
+            # * variable assignments: var=value
+            # * commands or their arguments (not allowed in os-release)
+            # Ignore any tokens that are not variable assignments
+            if "=" in token:
+                k, v = token.split("=", 1)
+                props[k.lower()] = v
+
+        if "version" in props:
+            # extract release codename (if any) from version attribute
+            match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"])
+            if match:
+                release_codename = match.group(1) or match.group(2)
+                props["codename"] = props["release_codename"] = release_codename
+
+        if "version_codename" in props:
+            # os-release added a version_codename field. Use that in
+            # preference to anything else. Note that some distros purposefully
+            # do not have code names. They should be setting
+            # version_codename=""
+            props["codename"] = props["version_codename"]
+        elif "ubuntu_codename" in props:
+            # Same as above but a non-standard field name used on older Ubuntus
+            props["codename"] = props["ubuntu_codename"]
+
+        return props
+
+    @cached_property
+    def _lsb_release_info(self) -> Dict[str, str]:
+        """
+        Get the information items from the lsb_release command output.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        if not self.include_lsb:
+            return {}
+        try:
+            cmd = ("lsb_release", "-a")
+            stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
+        # Command not found or lsb_release returned error
+        except (OSError, subprocess.CalledProcessError):
+            return {}
+        content = self._to_str(stdout).splitlines()
+        return self._parse_lsb_release_content(content)
+
+    @staticmethod
+    def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]:
+        """
+        Parse the output of the lsb_release command.
+
+        Parameters:
+
+        * lines: Iterable through the lines of the lsb_release output.
+          Each line must be a unicode string or a UTF-8 encoded byte
+          string.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        props = {}
+        for line in lines:
+            kv = line.strip("\n").split(":", 1)
+            if len(kv) != 2:
+                # Ignore lines without colon.
+ continue + k, v = kv + props.update({k.replace(" ", "_").lower(): v.strip()}) + return props + + @cached_property + def _uname_info(self) -> Dict[str, str]: + if not self.include_uname: + return {} + try: + cmd = ("uname", "-rs") + stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) + except OSError: + return {} + content = self._to_str(stdout).splitlines() + return self._parse_uname_content(content) + + @cached_property + def _oslevel_info(self) -> str: + if not self.include_oslevel: + return "" + try: + stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL) + except (OSError, subprocess.CalledProcessError): + return "" + return self._to_str(stdout).strip() + + @cached_property + def _debian_version(self) -> str: + try: + with open( + os.path.join(self.etc_dir, "debian_version"), encoding="ascii" + ) as fp: + return fp.readline().rstrip() + except FileNotFoundError: + return "" + + @staticmethod + def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]: + if not lines: + return {} + props = {} + match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip()) + if match: + name, version = match.groups() + + # This is to prevent the Linux kernel version from + # appearing as the 'best' version on otherwise + # identifiable distributions. + if name == "Linux": + return {} + props["id"] = name.lower() + props["name"] = name + props["release"] = version + return props + + @staticmethod + def _to_str(bytestring: bytes) -> str: + encoding = sys.getfilesystemencoding() + return bytestring.decode(encoding) + + @cached_property + def _distro_release_info(self) -> Dict[str, str]: + """ + Get the information items from the specified distro release file. + + Returns: + A dictionary containing all information items. + """ + if self.distro_release_file: + # If it was specified, we use it and parse what we can, even if + # its file name or content does not match the expected pattern. + distro_info = self._parse_distro_release_file(self.distro_release_file) + basename = os.path.basename(self.distro_release_file) + # The file name pattern for user-specified distro release files + # is somewhat more tolerant (compared to when searching for the + # file), because we want to use what was specified as best as + # possible. + match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) + else: + try: + basenames = [ + basename + for basename in os.listdir(self.etc_dir) + if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES + and os.path.isfile(os.path.join(self.etc_dir, basename)) + ] + # We sort for repeatability in cases where there are multiple + # distro specific files; e.g. CentOS, Oracle, Enterprise all + # containing `redhat-release` on top of their own. + basenames.sort() + except OSError: + # This may occur when /etc is not readable but we can't be + # sure about the *-release files. Check common entries of + # /etc for information. If they turn out to not be there the + # error is handled in `_parse_distro_release_file()`. + basenames = _DISTRO_RELEASE_BASENAMES + for basename in basenames: + match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) + if match is None: + continue + filepath = os.path.join(self.etc_dir, basename) + distro_info = self._parse_distro_release_file(filepath) + # The name is always present if the pattern matches. + if "name" not in distro_info: + continue + self.distro_release_file = filepath + break + else: # the loop didn't "break": no candidate. 
+ return {} + + if match is not None: + distro_info["id"] = match.group(1) + + # CloudLinux < 7: manually enrich info with proper id. + if "cloudlinux" in distro_info.get("name", "").lower(): + distro_info["id"] = "cloudlinux" + + return distro_info + + def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]: + """ + Parse a distro release file. + + Parameters: + + * filepath: Path name of the distro release file. + + Returns: + A dictionary containing all information items. + """ + try: + with open(filepath, encoding="utf-8") as fp: + # Only parse the first line. For instance, on SLES there + # are multiple lines. We don't want them... + return self._parse_distro_release_content(fp.readline()) + except OSError: + # Ignore not being able to read a specific, seemingly version + # related file. + # See https://github.com/python-distro/distro/issues/162 + return {} + + @staticmethod + def _parse_distro_release_content(line: str) -> Dict[str, str]: + """ + Parse a line from a distro release file. + + Parameters: + * line: Line from the distro release file. Must be a unicode string + or a UTF-8 encoded byte string. + + Returns: + A dictionary containing all information items. + """ + matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1]) + distro_info = {} + if matches: + # regexp ensures non-None + distro_info["name"] = matches.group(3)[::-1] + if matches.group(2): + distro_info["version_id"] = matches.group(2)[::-1] + if matches.group(1): + distro_info["codename"] = matches.group(1)[::-1] + elif line: + distro_info["name"] = line.strip() + return distro_info + + +_distro = LinuxDistribution() + + +def main() -> None: + logger = logging.getLogger(__name__) + logger.setLevel(logging.DEBUG) + logger.addHandler(logging.StreamHandler(sys.stdout)) + + parser = argparse.ArgumentParser(description="OS distro info tool") + parser.add_argument( + "--json", "-j", help="Output in machine readable format", action="store_true" + ) + + parser.add_argument( + "--root-dir", + "-r", + type=str, + dest="root_dir", + help="Path to the root filesystem directory (defaults to /)", + ) + + args = parser.parse_args() + + if args.root_dir: + dist = LinuxDistribution( + include_lsb=False, + include_uname=False, + include_oslevel=False, + root_dir=args.root_dir, + ) + else: + dist = _distro + + if args.json: + logger.info(json.dumps(dist.info(), indent=4, sort_keys=True)) + else: + logger.info("Name: %s", dist.name(pretty=True)) + distribution_version = dist.version(pretty=True) + logger.info("Version: %s", distribution_version) + distribution_codename = dist.codename() + logger.info("Codename: %s", distribution_codename) + + +if __name__ == "__main__": + main() diff --git a/venv/Lib/site-packages/distro/py.typed b/venv/Lib/site-packages/distro/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/fastapi-0.115.12.dist-info/INSTALLER b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/fastapi-0.115.12.dist-info/METADATA b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/METADATA new file mode 100644 index 00000000..0d845023 --- /dev/null +++ b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/METADATA @@ -0,0 +1,565 @@ +Metadata-Version: 2.1 +Name: fastapi +Version: 0.115.12 +Summary: FastAPI framework, high performance, easy to learn, fast to code, ready for 
production +Author-Email: =?utf-8?q?Sebasti=C3=A1n_Ram=C3=ADrez?= +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: System Administrators +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python +Classifier: Topic :: Internet +Classifier: Topic :: Software Development :: Libraries :: Application Frameworks +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development +Classifier: Typing :: Typed +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Web Environment +Classifier: Framework :: AsyncIO +Classifier: Framework :: FastAPI +Classifier: Framework :: Pydantic +Classifier: Framework :: Pydantic :: 1 +Classifier: Framework :: Pydantic :: 2 +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Topic :: Internet :: WWW/HTTP :: HTTP Servers +Classifier: Topic :: Internet :: WWW/HTTP +Project-URL: Homepage, https://github.com/fastapi/fastapi +Project-URL: Documentation, https://fastapi.tiangolo.com/ +Project-URL: Repository, https://github.com/fastapi/fastapi +Project-URL: Issues, https://github.com/fastapi/fastapi/issues +Project-URL: Changelog, https://fastapi.tiangolo.com/release-notes/ +Requires-Python: >=3.8 +Requires-Dist: starlette<0.47.0,>=0.40.0 +Requires-Dist: pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0,>=1.7.4 +Requires-Dist: typing-extensions>=4.8.0 +Provides-Extra: standard +Requires-Dist: fastapi-cli[standard]>=0.0.5; extra == "standard" +Requires-Dist: httpx>=0.23.0; extra == "standard" +Requires-Dist: jinja2>=3.1.5; extra == "standard" +Requires-Dist: python-multipart>=0.0.18; extra == "standard" +Requires-Dist: email-validator>=2.0.0; extra == "standard" +Requires-Dist: uvicorn[standard]>=0.12.0; extra == "standard" +Provides-Extra: all +Requires-Dist: fastapi-cli[standard]>=0.0.5; extra == "all" +Requires-Dist: httpx>=0.23.0; extra == "all" +Requires-Dist: jinja2>=3.1.5; extra == "all" +Requires-Dist: python-multipart>=0.0.18; extra == "all" +Requires-Dist: itsdangerous>=1.1.0; extra == "all" +Requires-Dist: pyyaml>=5.3.1; extra == "all" +Requires-Dist: ujson!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0,>=4.0.1; extra == "all" +Requires-Dist: orjson>=3.2.1; extra == "all" +Requires-Dist: email-validator>=2.0.0; extra == "all" +Requires-Dist: uvicorn[standard]>=0.12.0; extra == "all" +Requires-Dist: pydantic-settings>=2.0.0; extra == "all" +Requires-Dist: pydantic-extra-types>=2.0.0; extra == "all" +Description-Content-Type: text/markdown + +

+[logo: FastAPI]
+
+*FastAPI framework, high performance, easy to learn, fast to code, ready for production*
+
+[badges: Test, Coverage, Package version, Supported Python versions]
+ +--- + +**Documentation**: https://fastapi.tiangolo.com + +**Source Code**: https://github.com/fastapi/fastapi + +--- + +FastAPI is a modern, fast (high-performance), web framework for building APIs with Python based on standard Python type hints. + +The key features are: + +* **Fast**: Very high performance, on par with **NodeJS** and **Go** (thanks to Starlette and Pydantic). [One of the fastest Python frameworks available](#performance). +* **Fast to code**: Increase the speed to develop features by about 200% to 300%. * +* **Fewer bugs**: Reduce about 40% of human (developer) induced errors. * +* **Intuitive**: Great editor support. Completion everywhere. Less time debugging. +* **Easy**: Designed to be easy to use and learn. Less time reading docs. +* **Short**: Minimize code duplication. Multiple features from each parameter declaration. Fewer bugs. +* **Robust**: Get production-ready code. With automatic interactive documentation. +* **Standards-based**: Based on (and fully compatible with) the open standards for APIs: OpenAPI (previously known as Swagger) and JSON Schema. + +* estimation based on tests on an internal development team, building production applications. + +## Sponsors + + + + + + + + + + + + + + + + + + + + + + + + +Other sponsors + +## Opinions + +"_[...] I'm using **FastAPI** a ton these days. [...] I'm actually planning to use it for all of my team's **ML services at Microsoft**. Some of them are getting integrated into the core **Windows** product and some **Office** products._" + +
+Kabir Khan - Microsoft (ref)
+ +--- + +"_We adopted the **FastAPI** library to spawn a **REST** server that can be queried to obtain **predictions**. [for Ludwig]_" + +
+Piero Molino, Yaroslav Dudin, and Sai Sumanth Miryala - Uber (ref)
+ +--- + +"_**Netflix** is pleased to announce the open-source release of our **crisis management** orchestration framework: **Dispatch**! [built with **FastAPI**]_" + +
+Kevin Glisson, Marc Vilanova, Forest Monsen - Netflix (ref)
+ +--- + +"_I’m over the moon excited about **FastAPI**. It’s so fun!_" + +
+Brian Okken - Python Bytes podcast host (ref)
+ +--- + +"_Honestly, what you've built looks super solid and polished. In many ways, it's what I wanted **Hug** to be - it's really inspiring to see someone build that._" + +
+Timothy Crosley - Hug creator (ref)
+ +--- + +"_If you're looking to learn one **modern framework** for building REST APIs, check out **FastAPI** [...] It's fast, easy to use and easy to learn [...]_" + +"_We've switched over to **FastAPI** for our **APIs** [...] I think you'll like it [...]_" + +
+Ines Montani - Matthew Honnibal - Explosion AI founders - spaCy creators (ref) - (ref)
+ +--- + +"_If anyone is looking to build a production Python API, I would highly recommend **FastAPI**. It is **beautifully designed**, **simple to use** and **highly scalable**, it has become a **key component** in our API first development strategy and is driving many automations and services such as our Virtual TAC Engineer._" + +
+Deon Pillsbury - Cisco (ref)
+ +--- + +## **Typer**, the FastAPI of CLIs + + + +If you are building a CLI app to be used in the terminal instead of a web API, check out **Typer**. + +**Typer** is FastAPI's little sibling. And it's intended to be the **FastAPI of CLIs**. ⌨️ 🚀 + +## Requirements + +FastAPI stands on the shoulders of giants: + +* Starlette for the web parts. +* Pydantic for the data parts. + +## Installation + +Create and activate a virtual environment and then install FastAPI: + +
+ +```console +$ pip install "fastapi[standard]" + +---> 100% +``` + +
+ +**Note**: Make sure you put `"fastapi[standard]"` in quotes to ensure it works in all terminals. + +## Example + +### Create it + +* Create a file `main.py` with: + +```Python +from typing import Union + +from fastapi import FastAPI + +app = FastAPI() + + +@app.get("/") +def read_root(): + return {"Hello": "World"} + + +@app.get("/items/{item_id}") +def read_item(item_id: int, q: Union[str, None] = None): + return {"item_id": item_id, "q": q} +``` + +
+Or use async def... + +If your code uses `async` / `await`, use `async def`: + +```Python hl_lines="9 14" +from typing import Union + +from fastapi import FastAPI + +app = FastAPI() + + +@app.get("/") +async def read_root(): + return {"Hello": "World"} + + +@app.get("/items/{item_id}") +async def read_item(item_id: int, q: Union[str, None] = None): + return {"item_id": item_id, "q": q} +``` + +**Note**: + +If you don't know, check the _"In a hurry?"_ section about `async` and `await` in the docs. + +
+ +### Run it + +Run the server with: + +
+ +```console +$ fastapi dev main.py + + ╭────────── FastAPI CLI - Development mode ───────────╮ + │ │ + │ Serving at: http://127.0.0.1:8000 │ + │ │ + │ API docs: http://127.0.0.1:8000/docs │ + │ │ + │ Running in development mode, for production use: │ + │ │ + │ fastapi run │ + │ │ + ╰─────────────────────────────────────────────────────╯ + +INFO: Will watch for changes in these directories: ['/home/user/code/awesomeapp'] +INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit) +INFO: Started reloader process [2248755] using WatchFiles +INFO: Started server process [2248757] +INFO: Waiting for application startup. +INFO: Application startup complete. +``` + +
+ +
+About the command fastapi dev main.py... + +The command `fastapi dev` reads your `main.py` file, detects the **FastAPI** app in it, and starts a server using Uvicorn. + +By default, `fastapi dev` will start with auto-reload enabled for local development. + +You can read more about it in the FastAPI CLI docs. + +
+ +### Check it + +Open your browser at http://127.0.0.1:8000/items/5?q=somequery. + +You will see the JSON response as: + +```JSON +{"item_id": 5, "q": "somequery"} +``` + +You already created an API that: + +* Receives HTTP requests in the _paths_ `/` and `/items/{item_id}`. +* Both _paths_ take `GET` operations (also known as HTTP _methods_). +* The _path_ `/items/{item_id}` has a _path parameter_ `item_id` that should be an `int`. +* The _path_ `/items/{item_id}` has an optional `str` _query parameter_ `q`. + +### Interactive API docs + +Now go to http://127.0.0.1:8000/docs. + +You will see the automatic interactive API documentation (provided by Swagger UI): + +![Swagger UI](https://fastapi.tiangolo.com/img/index/index-01-swagger-ui-simple.png) + +### Alternative API docs + +And now, go to http://127.0.0.1:8000/redoc. + +You will see the alternative automatic documentation (provided by ReDoc): + +![ReDoc](https://fastapi.tiangolo.com/img/index/index-02-redoc-simple.png) + +## Example upgrade + +Now modify the file `main.py` to receive a body from a `PUT` request. + +Declare the body using standard Python types, thanks to Pydantic. + +```Python hl_lines="4 9-12 25-27" +from typing import Union + +from fastapi import FastAPI +from pydantic import BaseModel + +app = FastAPI() + + +class Item(BaseModel): + name: str + price: float + is_offer: Union[bool, None] = None + + +@app.get("/") +def read_root(): + return {"Hello": "World"} + + +@app.get("/items/{item_id}") +def read_item(item_id: int, q: Union[str, None] = None): + return {"item_id": item_id, "q": q} + + +@app.put("/items/{item_id}") +def update_item(item_id: int, item: Item): + return {"item_name": item.name, "item_id": item_id} +``` + +The `fastapi dev` server should reload automatically. + +### Interactive API docs upgrade + +Now go to http://127.0.0.1:8000/docs. + +* The interactive API documentation will be automatically updated, including the new body: + +![Swagger UI](https://fastapi.tiangolo.com/img/index/index-03-swagger-02.png) + +* Click on the button "Try it out", it allows you to fill the parameters and directly interact with the API: + +![Swagger UI interaction](https://fastapi.tiangolo.com/img/index/index-04-swagger-03.png) + +* Then click on the "Execute" button, the user interface will communicate with your API, send the parameters, get the results and show them on the screen: + +![Swagger UI interaction](https://fastapi.tiangolo.com/img/index/index-05-swagger-04.png) + +### Alternative API docs upgrade + +And now, go to http://127.0.0.1:8000/redoc. + +* The alternative documentation will also reflect the new query parameter and body: + +![ReDoc](https://fastapi.tiangolo.com/img/index/index-06-redoc-02.png) + +### Recap + +In summary, you declare **once** the types of parameters, body, etc. as function parameters. + +You do that with standard modern Python types. + +You don't have to learn a new syntax, the methods or classes of a specific library, etc. + +Just standard **Python**. + +For example, for an `int`: + +```Python +item_id: int +``` + +or for a more complex `Item` model: + +```Python +item: Item +``` + +...and with that single declaration you get: + +* Editor support, including: + * Completion. + * Type checks. +* Validation of data: + * Automatic and clear errors when the data is invalid. + * Validation even for deeply nested JSON objects. +* Conversion of input data: coming from the network to Python data and types. Reading from: + * JSON. + * Path parameters. + * Query parameters. + * Cookies. 
+ * Headers. + * Forms. + * Files. +* Conversion of output data: converting from Python data and types to network data (as JSON): + * Convert Python types (`str`, `int`, `float`, `bool`, `list`, etc). + * `datetime` objects. + * `UUID` objects. + * Database models. + * ...and many more. +* Automatic interactive API documentation, including 2 alternative user interfaces: + * Swagger UI. + * ReDoc. + +--- + +Coming back to the previous code example, **FastAPI** will: + +* Validate that there is an `item_id` in the path for `GET` and `PUT` requests. +* Validate that the `item_id` is of type `int` for `GET` and `PUT` requests. + * If it is not, the client will see a useful, clear error. +* Check if there is an optional query parameter named `q` (as in `http://127.0.0.1:8000/items/foo?q=somequery`) for `GET` requests. + * As the `q` parameter is declared with `= None`, it is optional. + * Without the `None` it would be required (as is the body in the case with `PUT`). +* For `PUT` requests to `/items/{item_id}`, read the body as JSON: + * Check that it has a required attribute `name` that should be a `str`. + * Check that it has a required attribute `price` that has to be a `float`. + * Check that it has an optional attribute `is_offer`, that should be a `bool`, if present. + * All this would also work for deeply nested JSON objects. +* Convert from and to JSON automatically. +* Document everything with OpenAPI, that can be used by: + * Interactive documentation systems. + * Automatic client code generation systems, for many languages. +* Provide 2 interactive documentation web interfaces directly. + +--- + +We just scratched the surface, but you already get the idea of how it all works. + +Try changing the line with: + +```Python + return {"item_name": item.name, "item_id": item_id} +``` + +...from: + +```Python + ... "item_name": item.name ... +``` + +...to: + +```Python + ... "item_price": item.price ... +``` + +...and see how your editor will auto-complete the attributes and know their types: + +![editor support](https://fastapi.tiangolo.com/img/vscode-completion.png) + +For a more complete example including more features, see the Tutorial - User Guide. + +**Spoiler alert**: the tutorial - user guide includes: + +* Declaration of **parameters** from other different places as: **headers**, **cookies**, **form fields** and **files**. +* How to set **validation constraints** as `maximum_length` or `regex`. +* A very powerful and easy to use **Dependency Injection** system. +* Security and authentication, including support for **OAuth2** with **JWT tokens** and **HTTP Basic** auth. +* More advanced (but equally easy) techniques for declaring **deeply nested JSON models** (thanks to Pydantic). +* **GraphQL** integration with Strawberry and other libraries. +* Many extra features (thanks to Starlette) as: + * **WebSockets** + * extremely easy tests based on HTTPX and `pytest` + * **CORS** + * **Cookie Sessions** + * ...and more. + +## Performance + +Independent TechEmpower benchmarks show **FastAPI** applications running under Uvicorn as one of the fastest Python frameworks available, only below Starlette and Uvicorn themselves (used internally by FastAPI). (*) + +To understand more about it, see the section Benchmarks. + +## Dependencies + +FastAPI depends on Pydantic and Starlette. 
+ +### `standard` Dependencies + +When you install FastAPI with `pip install "fastapi[standard]"` it comes with the `standard` group of optional dependencies: + +Used by Pydantic: + +* email-validator - for email validation. + +Used by Starlette: + +* httpx - Required if you want to use the `TestClient`. +* jinja2 - Required if you want to use the default template configuration. +* python-multipart - Required if you want to support form "parsing", with `request.form()`. + +Used by FastAPI / Starlette: + +* uvicorn - for the server that loads and serves your application. This includes `uvicorn[standard]`, which includes some dependencies (e.g. `uvloop`) needed for high performance serving. +* `fastapi-cli` - to provide the `fastapi` command. + +### Without `standard` Dependencies + +If you don't want to include the `standard` optional dependencies, you can install with `pip install fastapi` instead of `pip install "fastapi[standard]"`. + +### Additional Optional Dependencies + +There are some additional dependencies you might want to install. + +Additional optional Pydantic dependencies: + +* pydantic-settings - for settings management. +* pydantic-extra-types - for extra types to be used with Pydantic. + +Additional optional FastAPI dependencies: + +* orjson - Required if you want to use `ORJSONResponse`. +* ujson - Required if you want to use `UJSONResponse`. + +## License + +This project is licensed under the terms of the MIT license. diff --git a/venv/Lib/site-packages/fastapi-0.115.12.dist-info/RECORD b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/RECORD new file mode 100644 index 00000000..aef05115 --- /dev/null +++ b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/RECORD @@ -0,0 +1,97 @@ +../../Scripts/fastapi.exe,sha256=mJPFHcvhB9nxnJ68q9K1jOOIf0qqu-MPtpvDo8CKt4E,108398 +fastapi-0.115.12.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +fastapi-0.115.12.dist-info/METADATA,sha256=mV7D-DjELmGAedtcxayASLcleQv41h4mqHQhlqtLu1s,27671 +fastapi-0.115.12.dist-info/RECORD,, +fastapi-0.115.12.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fastapi-0.115.12.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90 +fastapi-0.115.12.dist-info/entry_points.txt,sha256=GCf-WbIZxyGT4MUmrPGj1cOHYZoGsNPHAvNkT6hnGeA,61 +fastapi-0.115.12.dist-info/licenses/LICENSE,sha256=Tsif_IFIW5f-xYSy1KlhAy7v_oNEU4lP2cEnSQbMdE4,1086 +fastapi/__init__.py,sha256=sgu-sth5uRYC--mOXSta-IPYjnDKUrpRSz6LuvKHf9o,1082 +fastapi/__main__.py,sha256=bKePXLdO4SsVSM6r9SVoLickJDcR2c0cTOxZRKq26YQ,37 +fastapi/__pycache__/__init__.cpython-312.pyc,, +fastapi/__pycache__/__main__.cpython-312.pyc,, +fastapi/__pycache__/_compat.cpython-312.pyc,, +fastapi/__pycache__/applications.cpython-312.pyc,, +fastapi/__pycache__/background.cpython-312.pyc,, +fastapi/__pycache__/cli.cpython-312.pyc,, +fastapi/__pycache__/concurrency.cpython-312.pyc,, +fastapi/__pycache__/datastructures.cpython-312.pyc,, +fastapi/__pycache__/encoders.cpython-312.pyc,, +fastapi/__pycache__/exception_handlers.cpython-312.pyc,, +fastapi/__pycache__/exceptions.cpython-312.pyc,, +fastapi/__pycache__/logger.cpython-312.pyc,, +fastapi/__pycache__/param_functions.cpython-312.pyc,, +fastapi/__pycache__/params.cpython-312.pyc,, +fastapi/__pycache__/requests.cpython-312.pyc,, +fastapi/__pycache__/responses.cpython-312.pyc,, +fastapi/__pycache__/routing.cpython-312.pyc,, +fastapi/__pycache__/staticfiles.cpython-312.pyc,, +fastapi/__pycache__/templating.cpython-312.pyc,, 
+fastapi/__pycache__/testclient.cpython-312.pyc,, +fastapi/__pycache__/types.cpython-312.pyc,, +fastapi/__pycache__/utils.cpython-312.pyc,, +fastapi/__pycache__/websockets.cpython-312.pyc,, +fastapi/_compat.py,sha256=Rg7kA7uue4Z6yr8T7hf8b7G6PeC_06mK004Nnykijfk,23953 +fastapi/applications.py,sha256=Ix-o9pQAWhEDf9J0Q1hZ0nBB1uP72c-Y3oiYzvrwqiM,176316 +fastapi/background.py,sha256=rouLirxUANrcYC824MSMypXL_Qb2HYg2YZqaiEqbEKI,1768 +fastapi/cli.py,sha256=OYhZb0NR_deuT5ofyPF2NoNBzZDNOP8Salef2nk-HqA,418 +fastapi/concurrency.py,sha256=MirfowoSpkMQZ8j_g0ZxaQKpV6eB3G-dB5TgcXCrgEA,1424 +fastapi/datastructures.py,sha256=b2PEz77XGq-u3Ur1Inwk0AGjOsQZO49yF9C7IPJ15cY,5766 +fastapi/dependencies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fastapi/dependencies/__pycache__/__init__.cpython-312.pyc,, +fastapi/dependencies/__pycache__/models.cpython-312.pyc,, +fastapi/dependencies/__pycache__/utils.cpython-312.pyc,, +fastapi/dependencies/models.py,sha256=Pjl6vx-4nZ5Tta9kJa3-RfQKkXtCpS09-FhMgs9eWNs,1507 +fastapi/dependencies/utils.py,sha256=SwOOdQYOC0E4thJ-n1a0ohslWN9duyasNY_JBdWPTsY,35971 +fastapi/encoders.py,sha256=LvwYmFeOz4tVwvgBoC5rvZnbr7hZr73KGrU8O7zSptU,11068 +fastapi/exception_handlers.py,sha256=MBrIOA-ugjJDivIi4rSsUJBdTsjuzN76q4yh0q1COKw,1332 +fastapi/exceptions.py,sha256=taNixuFEXb67lI1bnX1ubq8y8TseJ4yoPlWjyP0fTzk,4969 +fastapi/logger.py,sha256=I9NNi3ov8AcqbsbC9wl1X-hdItKgYt2XTrx1f99Zpl4,54 +fastapi/middleware/__init__.py,sha256=oQDxiFVcc1fYJUOIFvphnK7pTT5kktmfL32QXpBFvvo,58 +fastapi/middleware/__pycache__/__init__.cpython-312.pyc,, +fastapi/middleware/__pycache__/cors.cpython-312.pyc,, +fastapi/middleware/__pycache__/gzip.cpython-312.pyc,, +fastapi/middleware/__pycache__/httpsredirect.cpython-312.pyc,, +fastapi/middleware/__pycache__/trustedhost.cpython-312.pyc,, +fastapi/middleware/__pycache__/wsgi.cpython-312.pyc,, +fastapi/middleware/cors.py,sha256=ynwjWQZoc_vbhzZ3_ZXceoaSrslHFHPdoM52rXr0WUU,79 +fastapi/middleware/gzip.py,sha256=xM5PcsH8QlAimZw4VDvcmTnqQamslThsfe3CVN2voa0,79 +fastapi/middleware/httpsredirect.py,sha256=rL8eXMnmLijwVkH7_400zHri1AekfeBd6D6qs8ix950,115 +fastapi/middleware/trustedhost.py,sha256=eE5XGRxGa7c5zPnMJDGp3BxaL25k5iVQlhnv-Pk0Pss,109 +fastapi/middleware/wsgi.py,sha256=Z3Ue-7wni4lUZMvH3G9ek__acgYdJstbnpZX_HQAboY,79 +fastapi/openapi/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fastapi/openapi/__pycache__/__init__.cpython-312.pyc,, +fastapi/openapi/__pycache__/constants.cpython-312.pyc,, +fastapi/openapi/__pycache__/docs.cpython-312.pyc,, +fastapi/openapi/__pycache__/models.cpython-312.pyc,, +fastapi/openapi/__pycache__/utils.cpython-312.pyc,, +fastapi/openapi/constants.py,sha256=adGzmis1L1HJRTE3kJ5fmHS_Noq6tIY6pWv_SFzoFDU,153 +fastapi/openapi/docs.py,sha256=XcQq-ZbQdC5sI0gIGu5MoHK1q-OFaqws7-ORTo6sjY4,10348 +fastapi/openapi/models.py,sha256=PqkxQiqcEgjKuhfUIWPZPQcyTcubtUCB3vcObLsB7VE,15397 +fastapi/openapi/utils.py,sha256=e00G_p0IdpiffBUaq31BUyiloXbpld8RryKYnYKisdY,23964 +fastapi/param_functions.py,sha256=JHNPLIYvoAwdnZZavIVsxOat8x23fX_Kl33reh7HKl8,64019 +fastapi/params.py,sha256=g450axUBQgQJODdtM7WBxZbQj9Z64inFvadrgHikBbU,28237 +fastapi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fastapi/requests.py,sha256=zayepKFcienBllv3snmWI20Gk0oHNVLU4DDhqXBb4LU,142 +fastapi/responses.py,sha256=QNQQlwpKhQoIPZTTWkpc9d_QGeGZ_aVQPaDV3nQ8m7c,1761 +fastapi/routing.py,sha256=qwk_Vm1k4vDGdeEskNCVYTK01fDgYikqfOS_lf0DqAc,176216 +fastapi/security/__init__.py,sha256=bO8pNmxqVRXUjfl2mOKiVZLn0FpBQ61VUYVjmppnbJw,881 
+fastapi/security/__pycache__/__init__.cpython-312.pyc,, +fastapi/security/__pycache__/api_key.cpython-312.pyc,, +fastapi/security/__pycache__/base.cpython-312.pyc,, +fastapi/security/__pycache__/http.cpython-312.pyc,, +fastapi/security/__pycache__/oauth2.cpython-312.pyc,, +fastapi/security/__pycache__/open_id_connect_url.cpython-312.pyc,, +fastapi/security/__pycache__/utils.cpython-312.pyc,, +fastapi/security/api_key.py,sha256=cBI5Z4zWVjL1uJrsjTeLy7MafHPAO2HQPzTrpyoIYWA,9094 +fastapi/security/base.py,sha256=dl4pvbC-RxjfbWgPtCWd8MVU-7CB2SZ22rJDXVCXO6c,141 +fastapi/security/http.py,sha256=rWR2x-5CUsjWmRucYthwRig6MG1o-boyrr4Xo-PuuxU,13606 +fastapi/security/oauth2.py,sha256=xCo5j1qpze6CvEuJHIneOI0v2fodGVMpHHVnHpiLfoM,21589 +fastapi/security/open_id_connect_url.py,sha256=8vizZ2tGqEp1ur8SwtVgyHJhGAJ5AqahgcvSpaIioDI,2722 +fastapi/security/utils.py,sha256=bd8T0YM7UQD5ATKucr1bNtAvz_Y3__dVNAv5UebiPvc,293 +fastapi/staticfiles.py,sha256=iirGIt3sdY2QZXd36ijs3Cj-T0FuGFda3cd90kM9Ikw,69 +fastapi/templating.py,sha256=4zsuTWgcjcEainMJFAlW6-gnslm6AgOS1SiiDWfmQxk,76 +fastapi/testclient.py,sha256=nBvaAmX66YldReJNZXPOk1sfuo2Q6hs8bOvIaCep6LQ,66 +fastapi/types.py,sha256=nFb36sK3DSoqoyo7Miwy3meKK5UdFBgkAgLSzQlUVyI,383 +fastapi/utils.py,sha256=y8Bj5ttMaI9tS4D60OUgXqKnktBr99NdYUnHHV9LgoY,7948 +fastapi/websockets.py,sha256=419uncYObEKZG0YcrXscfQQYLSWoE10jqxVMetGdR98,222 diff --git a/venv/Lib/site-packages/fastapi-0.115.12.dist-info/REQUESTED b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/REQUESTED new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/fastapi-0.115.12.dist-info/WHEEL b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/WHEEL new file mode 100644 index 00000000..64b991e8 --- /dev/null +++ b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: pdm-backend (2.4.3) +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/Lib/site-packages/fastapi-0.115.12.dist-info/entry_points.txt b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/entry_points.txt new file mode 100644 index 00000000..b81849e1 --- /dev/null +++ b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +fastapi = fastapi.cli:main + +[gui_scripts] + diff --git a/venv/Lib/site-packages/fastapi-0.115.12.dist-info/licenses/LICENSE b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/licenses/LICENSE new file mode 100644 index 00000000..3e92463e --- /dev/null +++ b/venv/Lib/site-packages/fastapi-0.115.12.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 Sebastián Ramírez + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/venv/Lib/site-packages/fastapi/__init__.py b/venv/Lib/site-packages/fastapi/__init__.py new file mode 100644 index 00000000..80eb783d --- /dev/null +++ b/venv/Lib/site-packages/fastapi/__init__.py @@ -0,0 +1,25 @@ +"""FastAPI framework, high performance, easy to learn, fast to code, ready for production""" + +__version__ = "0.115.12" + +from starlette import status as status + +from .applications import FastAPI as FastAPI +from .background import BackgroundTasks as BackgroundTasks +from .datastructures import UploadFile as UploadFile +from .exceptions import HTTPException as HTTPException +from .exceptions import WebSocketException as WebSocketException +from .param_functions import Body as Body +from .param_functions import Cookie as Cookie +from .param_functions import Depends as Depends +from .param_functions import File as File +from .param_functions import Form as Form +from .param_functions import Header as Header +from .param_functions import Path as Path +from .param_functions import Query as Query +from .param_functions import Security as Security +from .requests import Request as Request +from .responses import Response as Response +from .routing import APIRouter as APIRouter +from .websockets import WebSocket as WebSocket +from .websockets import WebSocketDisconnect as WebSocketDisconnect diff --git a/venv/Lib/site-packages/fastapi/__main__.py b/venv/Lib/site-packages/fastapi/__main__.py new file mode 100644 index 00000000..fc36465f --- /dev/null +++ b/venv/Lib/site-packages/fastapi/__main__.py @@ -0,0 +1,3 @@ +from fastapi.cli import main + +main() diff --git a/venv/Lib/site-packages/fastapi/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..4c2f37e7 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/__main__.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/__main__.cpython-312.pyc new file mode 100644 index 00000000..e78df7e6 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/__main__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/_compat.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/_compat.cpython-312.pyc new file mode 100644 index 00000000..c7a47a0b Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/_compat.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/applications.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/applications.cpython-312.pyc new file mode 100644 index 00000000..b89d15fc Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/applications.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/background.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/background.cpython-312.pyc new file mode 100644 index 00000000..9d123315 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/background.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/cli.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/cli.cpython-312.pyc new file mode 100644 index 
00000000..86ef1430 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/cli.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/concurrency.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/concurrency.cpython-312.pyc new file mode 100644 index 00000000..dee52d00 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/concurrency.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/datastructures.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/datastructures.cpython-312.pyc new file mode 100644 index 00000000..2636f813 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/datastructures.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/encoders.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/encoders.cpython-312.pyc new file mode 100644 index 00000000..fe274363 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/encoders.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/exception_handlers.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/exception_handlers.cpython-312.pyc new file mode 100644 index 00000000..c8917600 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/exception_handlers.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/exceptions.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 00000000..c4f98d8d Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/exceptions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/logger.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/logger.cpython-312.pyc new file mode 100644 index 00000000..960701ef Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/logger.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/param_functions.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/param_functions.cpython-312.pyc new file mode 100644 index 00000000..9065e9c9 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/param_functions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/params.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/params.cpython-312.pyc new file mode 100644 index 00000000..df225237 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/params.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/requests.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/requests.cpython-312.pyc new file mode 100644 index 00000000..539322aa Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/requests.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/responses.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/responses.cpython-312.pyc new file mode 100644 index 00000000..f6f07d2b Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/responses.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/routing.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/routing.cpython-312.pyc new file mode 100644 index 00000000..2d713b24 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/routing.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/fastapi/__pycache__/staticfiles.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/staticfiles.cpython-312.pyc new file mode 100644 index 00000000..72342edf Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/staticfiles.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/templating.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/templating.cpython-312.pyc new file mode 100644 index 00000000..564a2d63 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/templating.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/testclient.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/testclient.cpython-312.pyc new file mode 100644 index 00000000..c6617971 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/testclient.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/types.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/types.cpython-312.pyc new file mode 100644 index 00000000..87ca57f6 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/types.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..4a1c4d03 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/__pycache__/websockets.cpython-312.pyc b/venv/Lib/site-packages/fastapi/__pycache__/websockets.cpython-312.pyc new file mode 100644 index 00000000..bd3e34d2 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/__pycache__/websockets.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/_compat.py b/venv/Lib/site-packages/fastapi/_compat.py new file mode 100644 index 00000000..c07e4a3b --- /dev/null +++ b/venv/Lib/site-packages/fastapi/_compat.py @@ -0,0 +1,659 @@ +from collections import deque +from copy import copy +from dataclasses import dataclass, is_dataclass +from enum import Enum +from functools import lru_cache +from typing import ( + Any, + Callable, + Deque, + Dict, + FrozenSet, + List, + Mapping, + Sequence, + Set, + Tuple, + Type, + Union, +) + +from fastapi.exceptions import RequestErrorModel +from fastapi.types import IncEx, ModelNameMap, UnionType +from pydantic import BaseModel, create_model +from pydantic.version import VERSION as PYDANTIC_VERSION +from starlette.datastructures import UploadFile +from typing_extensions import Annotated, Literal, get_args, get_origin + +PYDANTIC_VERSION_MINOR_TUPLE = tuple(int(x) for x in PYDANTIC_VERSION.split(".")[:2]) +PYDANTIC_V2 = PYDANTIC_VERSION_MINOR_TUPLE[0] == 2 + + +sequence_annotation_to_type = { + Sequence: list, + List: list, + list: list, + Tuple: tuple, + tuple: tuple, + Set: set, + set: set, + FrozenSet: frozenset, + frozenset: frozenset, + Deque: deque, + deque: deque, +} + +sequence_types = tuple(sequence_annotation_to_type.keys()) + +Url: Type[Any] + +if PYDANTIC_V2: + from pydantic import PydanticSchemaGenerationError as PydanticSchemaGenerationError + from pydantic import TypeAdapter + from pydantic import ValidationError as ValidationError + from pydantic._internal._schema_generation_shared import ( # type: ignore[attr-defined] + GetJsonSchemaHandler as GetJsonSchemaHandler, + ) + from pydantic._internal._typing_extra import eval_type_lenient + from pydantic._internal._utils import 
lenient_issubclass as lenient_issubclass + from pydantic.fields import FieldInfo + from pydantic.json_schema import GenerateJsonSchema as GenerateJsonSchema + from pydantic.json_schema import JsonSchemaValue as JsonSchemaValue + from pydantic_core import CoreSchema as CoreSchema + from pydantic_core import PydanticUndefined, PydanticUndefinedType + from pydantic_core import Url as Url + + try: + from pydantic_core.core_schema import ( + with_info_plain_validator_function as with_info_plain_validator_function, + ) + except ImportError: # pragma: no cover + from pydantic_core.core_schema import ( + general_plain_validator_function as with_info_plain_validator_function, # noqa: F401 + ) + + RequiredParam = PydanticUndefined + Undefined = PydanticUndefined + UndefinedType = PydanticUndefinedType + evaluate_forwardref = eval_type_lenient + Validator = Any + + class BaseConfig: + pass + + class ErrorWrapper(Exception): + pass + + @dataclass + class ModelField: + field_info: FieldInfo + name: str + mode: Literal["validation", "serialization"] = "validation" + + @property + def alias(self) -> str: + a = self.field_info.alias + return a if a is not None else self.name + + @property + def required(self) -> bool: + return self.field_info.is_required() + + @property + def default(self) -> Any: + return self.get_default() + + @property + def type_(self) -> Any: + return self.field_info.annotation + + def __post_init__(self) -> None: + self._type_adapter: TypeAdapter[Any] = TypeAdapter( + Annotated[self.field_info.annotation, self.field_info] + ) + + def get_default(self) -> Any: + if self.field_info.is_required(): + return Undefined + return self.field_info.get_default(call_default_factory=True) + + def validate( + self, + value: Any, + values: Dict[str, Any] = {}, # noqa: B006 + *, + loc: Tuple[Union[int, str], ...] = (), + ) -> Tuple[Any, Union[List[Dict[str, Any]], None]]: + try: + return ( + self._type_adapter.validate_python(value, from_attributes=True), + None, + ) + except ValidationError as exc: + return None, _regenerate_error_with_loc( + errors=exc.errors(include_url=False), loc_prefix=loc + ) + + def serialize( + self, + value: Any, + *, + mode: Literal["json", "python"] = "json", + include: Union[IncEx, None] = None, + exclude: Union[IncEx, None] = None, + by_alias: bool = True, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + ) -> Any: + # What calls this code passes a value that already called + # self._type_adapter.validate_python(value) + return self._type_adapter.dump_python( + value, + mode=mode, + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + + def __hash__(self) -> int: + # Each ModelField is unique for our purposes, to allow making a dict from + # ModelField to its JSON Schema. 
+ return id(self) + + def get_annotation_from_field_info( + annotation: Any, field_info: FieldInfo, field_name: str + ) -> Any: + return annotation + + def _normalize_errors(errors: Sequence[Any]) -> List[Dict[str, Any]]: + return errors # type: ignore[return-value] + + def _model_rebuild(model: Type[BaseModel]) -> None: + model.model_rebuild() + + def _model_dump( + model: BaseModel, mode: Literal["json", "python"] = "json", **kwargs: Any + ) -> Any: + return model.model_dump(mode=mode, **kwargs) + + def _get_model_config(model: BaseModel) -> Any: + return model.model_config + + def get_schema_from_model_field( + *, + field: ModelField, + schema_generator: GenerateJsonSchema, + model_name_map: ModelNameMap, + field_mapping: Dict[ + Tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue + ], + separate_input_output_schemas: bool = True, + ) -> Dict[str, Any]: + override_mode: Union[Literal["validation"], None] = ( + None if separate_input_output_schemas else "validation" + ) + # This expects that GenerateJsonSchema was already used to generate the definitions + json_schema = field_mapping[(field, override_mode or field.mode)] + if "$ref" not in json_schema: + # TODO remove when deprecating Pydantic v1 + # Ref: https://github.com/pydantic/pydantic/blob/d61792cc42c80b13b23e3ffa74bc37ec7c77f7d1/pydantic/schema.py#L207 + json_schema["title"] = ( + field.field_info.title or field.alias.title().replace("_", " ") + ) + return json_schema + + def get_compat_model_name_map(fields: List[ModelField]) -> ModelNameMap: + return {} + + def get_definitions( + *, + fields: List[ModelField], + schema_generator: GenerateJsonSchema, + model_name_map: ModelNameMap, + separate_input_output_schemas: bool = True, + ) -> Tuple[ + Dict[ + Tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue + ], + Dict[str, Dict[str, Any]], + ]: + override_mode: Union[Literal["validation"], None] = ( + None if separate_input_output_schemas else "validation" + ) + inputs = [ + (field, override_mode or field.mode, field._type_adapter.core_schema) + for field in fields + ] + field_mapping, definitions = schema_generator.generate_definitions( + inputs=inputs + ) + return field_mapping, definitions # type: ignore[return-value] + + def is_scalar_field(field: ModelField) -> bool: + from fastapi import params + + return field_annotation_is_scalar( + field.field_info.annotation + ) and not isinstance(field.field_info, params.Body) + + def is_sequence_field(field: ModelField) -> bool: + return field_annotation_is_sequence(field.field_info.annotation) + + def is_scalar_sequence_field(field: ModelField) -> bool: + return field_annotation_is_scalar_sequence(field.field_info.annotation) + + def is_bytes_field(field: ModelField) -> bool: + return is_bytes_or_nonable_bytes_annotation(field.type_) + + def is_bytes_sequence_field(field: ModelField) -> bool: + return is_bytes_sequence_annotation(field.type_) + + def copy_field_info(*, field_info: FieldInfo, annotation: Any) -> FieldInfo: + cls = type(field_info) + merged_field_info = cls.from_annotation(annotation) + new_field_info = copy(field_info) + new_field_info.metadata = merged_field_info.metadata + new_field_info.annotation = merged_field_info.annotation + return new_field_info + + def serialize_sequence_value(*, field: ModelField, value: Any) -> Sequence[Any]: + origin_type = ( + get_origin(field.field_info.annotation) or field.field_info.annotation + ) + assert issubclass(origin_type, sequence_types) # type: ignore[arg-type] + return 
sequence_annotation_to_type[origin_type](value) # type: ignore[no-any-return] + + def get_missing_field_error(loc: Tuple[str, ...]) -> Dict[str, Any]: + error = ValidationError.from_exception_data( + "Field required", [{"type": "missing", "loc": loc, "input": {}}] + ).errors(include_url=False)[0] + error["input"] = None + return error # type: ignore[return-value] + + def create_body_model( + *, fields: Sequence[ModelField], model_name: str + ) -> Type[BaseModel]: + field_params = {f.name: (f.field_info.annotation, f.field_info) for f in fields} + BodyModel: Type[BaseModel] = create_model(model_name, **field_params) # type: ignore[call-overload] + return BodyModel + + def get_model_fields(model: Type[BaseModel]) -> List[ModelField]: + return [ + ModelField(field_info=field_info, name=name) + for name, field_info in model.model_fields.items() + ] + +else: + from fastapi.openapi.constants import REF_PREFIX as REF_PREFIX + from pydantic import AnyUrl as Url # noqa: F401 + from pydantic import ( # type: ignore[assignment] + BaseConfig as BaseConfig, # noqa: F401 + ) + from pydantic import ValidationError as ValidationError # noqa: F401 + from pydantic.class_validators import ( # type: ignore[no-redef] + Validator as Validator, # noqa: F401 + ) + from pydantic.error_wrappers import ( # type: ignore[no-redef] + ErrorWrapper as ErrorWrapper, # noqa: F401 + ) + from pydantic.errors import MissingError + from pydantic.fields import ( # type: ignore[attr-defined] + SHAPE_FROZENSET, + SHAPE_LIST, + SHAPE_SEQUENCE, + SHAPE_SET, + SHAPE_SINGLETON, + SHAPE_TUPLE, + SHAPE_TUPLE_ELLIPSIS, + ) + from pydantic.fields import FieldInfo as FieldInfo + from pydantic.fields import ( # type: ignore[no-redef,attr-defined] + ModelField as ModelField, # noqa: F401 + ) + + # Keeping old "Required" functionality from Pydantic V1, without + # shadowing typing.Required. 
+ RequiredParam: Any = Ellipsis # type: ignore[no-redef] + from pydantic.fields import ( # type: ignore[no-redef,attr-defined] + Undefined as Undefined, + ) + from pydantic.fields import ( # type: ignore[no-redef, attr-defined] + UndefinedType as UndefinedType, # noqa: F401 + ) + from pydantic.schema import ( + field_schema, + get_flat_models_from_fields, + get_model_name_map, + model_process_schema, + ) + from pydantic.schema import ( # type: ignore[no-redef] # noqa: F401 + get_annotation_from_field_info as get_annotation_from_field_info, + ) + from pydantic.typing import ( # type: ignore[no-redef] + evaluate_forwardref as evaluate_forwardref, # noqa: F401 + ) + from pydantic.utils import ( # type: ignore[no-redef] + lenient_issubclass as lenient_issubclass, # noqa: F401 + ) + + GetJsonSchemaHandler = Any # type: ignore[assignment,misc] + JsonSchemaValue = Dict[str, Any] # type: ignore[misc] + CoreSchema = Any # type: ignore[assignment,misc] + + sequence_shapes = { + SHAPE_LIST, + SHAPE_SET, + SHAPE_FROZENSET, + SHAPE_TUPLE, + SHAPE_SEQUENCE, + SHAPE_TUPLE_ELLIPSIS, + } + sequence_shape_to_type = { + SHAPE_LIST: list, + SHAPE_SET: set, + SHAPE_TUPLE: tuple, + SHAPE_SEQUENCE: list, + SHAPE_TUPLE_ELLIPSIS: list, + } + + @dataclass + class GenerateJsonSchema: # type: ignore[no-redef] + ref_template: str + + class PydanticSchemaGenerationError(Exception): # type: ignore[no-redef] + pass + + def with_info_plain_validator_function( # type: ignore[misc] + function: Callable[..., Any], + *, + ref: Union[str, None] = None, + metadata: Any = None, + serialization: Any = None, + ) -> Any: + return {} + + def get_model_definitions( + *, + flat_models: Set[Union[Type[BaseModel], Type[Enum]]], + model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str], + ) -> Dict[str, Any]: + definitions: Dict[str, Dict[str, Any]] = {} + for model in flat_models: + m_schema, m_definitions, m_nested_models = model_process_schema( + model, model_name_map=model_name_map, ref_prefix=REF_PREFIX + ) + definitions.update(m_definitions) + model_name = model_name_map[model] + if "description" in m_schema: + m_schema["description"] = m_schema["description"].split("\f")[0] + definitions[model_name] = m_schema + return definitions + + def is_pv1_scalar_field(field: ModelField) -> bool: + from fastapi import params + + field_info = field.field_info + if not ( + field.shape == SHAPE_SINGLETON # type: ignore[attr-defined] + and not lenient_issubclass(field.type_, BaseModel) + and not lenient_issubclass(field.type_, dict) + and not field_annotation_is_sequence(field.type_) + and not is_dataclass(field.type_) + and not isinstance(field_info, params.Body) + ): + return False + if field.sub_fields: # type: ignore[attr-defined] + if not all( + is_pv1_scalar_field(f) + for f in field.sub_fields # type: ignore[attr-defined] + ): + return False + return True + + def is_pv1_scalar_sequence_field(field: ModelField) -> bool: + if (field.shape in sequence_shapes) and not lenient_issubclass( # type: ignore[attr-defined] + field.type_, BaseModel + ): + if field.sub_fields is not None: # type: ignore[attr-defined] + for sub_field in field.sub_fields: # type: ignore[attr-defined] + if not is_pv1_scalar_field(sub_field): + return False + return True + if _annotation_is_sequence(field.type_): + return True + return False + + def _normalize_errors(errors: Sequence[Any]) -> List[Dict[str, Any]]: + use_errors: List[Any] = [] + for error in errors: + if isinstance(error, ErrorWrapper): + new_errors = ValidationError( # type: ignore[call-arg] + 
errors=[error], model=RequestErrorModel + ).errors() + use_errors.extend(new_errors) + elif isinstance(error, list): + use_errors.extend(_normalize_errors(error)) + else: + use_errors.append(error) + return use_errors + + def _model_rebuild(model: Type[BaseModel]) -> None: + model.update_forward_refs() + + def _model_dump( + model: BaseModel, mode: Literal["json", "python"] = "json", **kwargs: Any + ) -> Any: + return model.dict(**kwargs) + + def _get_model_config(model: BaseModel) -> Any: + return model.__config__ # type: ignore[attr-defined] + + def get_schema_from_model_field( + *, + field: ModelField, + schema_generator: GenerateJsonSchema, + model_name_map: ModelNameMap, + field_mapping: Dict[ + Tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue + ], + separate_input_output_schemas: bool = True, + ) -> Dict[str, Any]: + # This expects that GenerateJsonSchema was already used to generate the definitions + return field_schema( # type: ignore[no-any-return] + field, model_name_map=model_name_map, ref_prefix=REF_PREFIX + )[0] + + def get_compat_model_name_map(fields: List[ModelField]) -> ModelNameMap: + models = get_flat_models_from_fields(fields, known_models=set()) + return get_model_name_map(models) # type: ignore[no-any-return] + + def get_definitions( + *, + fields: List[ModelField], + schema_generator: GenerateJsonSchema, + model_name_map: ModelNameMap, + separate_input_output_schemas: bool = True, + ) -> Tuple[ + Dict[ + Tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue + ], + Dict[str, Dict[str, Any]], + ]: + models = get_flat_models_from_fields(fields, known_models=set()) + return {}, get_model_definitions( + flat_models=models, model_name_map=model_name_map + ) + + def is_scalar_field(field: ModelField) -> bool: + return is_pv1_scalar_field(field) + + def is_sequence_field(field: ModelField) -> bool: + return field.shape in sequence_shapes or _annotation_is_sequence(field.type_) # type: ignore[attr-defined] + + def is_scalar_sequence_field(field: ModelField) -> bool: + return is_pv1_scalar_sequence_field(field) + + def is_bytes_field(field: ModelField) -> bool: + return lenient_issubclass(field.type_, bytes) + + def is_bytes_sequence_field(field: ModelField) -> bool: + return field.shape in sequence_shapes and lenient_issubclass(field.type_, bytes) # type: ignore[attr-defined] + + def copy_field_info(*, field_info: FieldInfo, annotation: Any) -> FieldInfo: + return copy(field_info) + + def serialize_sequence_value(*, field: ModelField, value: Any) -> Sequence[Any]: + return sequence_shape_to_type[field.shape](value) # type: ignore[no-any-return,attr-defined] + + def get_missing_field_error(loc: Tuple[str, ...]) -> Dict[str, Any]: + missing_field_error = ErrorWrapper(MissingError(), loc=loc) # type: ignore[call-arg] + new_error = ValidationError([missing_field_error], RequestErrorModel) + return new_error.errors()[0] # type: ignore[return-value] + + def create_body_model( + *, fields: Sequence[ModelField], model_name: str + ) -> Type[BaseModel]: + BodyModel = create_model(model_name) + for f in fields: + BodyModel.__fields__[f.name] = f # type: ignore[index] + return BodyModel + + def get_model_fields(model: Type[BaseModel]) -> List[ModelField]: + return list(model.__fields__.values()) # type: ignore[attr-defined] + + +def _regenerate_error_with_loc( + *, errors: Sequence[Any], loc_prefix: Tuple[Union[str, int], ...] 
+) -> List[Dict[str, Any]]: + updated_loc_errors: List[Any] = [ + {**err, "loc": loc_prefix + err.get("loc", ())} + for err in _normalize_errors(errors) + ] + + return updated_loc_errors + + +def _annotation_is_sequence(annotation: Union[Type[Any], None]) -> bool: + if lenient_issubclass(annotation, (str, bytes)): + return False + return lenient_issubclass(annotation, sequence_types) + + +def field_annotation_is_sequence(annotation: Union[Type[Any], None]) -> bool: + origin = get_origin(annotation) + if origin is Union or origin is UnionType: + for arg in get_args(annotation): + if field_annotation_is_sequence(arg): + return True + return False + return _annotation_is_sequence(annotation) or _annotation_is_sequence( + get_origin(annotation) + ) + + +def value_is_sequence(value: Any) -> bool: + return isinstance(value, sequence_types) and not isinstance(value, (str, bytes)) # type: ignore[arg-type] + + +def _annotation_is_complex(annotation: Union[Type[Any], None]) -> bool: + return ( + lenient_issubclass(annotation, (BaseModel, Mapping, UploadFile)) + or _annotation_is_sequence(annotation) + or is_dataclass(annotation) + ) + + +def field_annotation_is_complex(annotation: Union[Type[Any], None]) -> bool: + origin = get_origin(annotation) + if origin is Union or origin is UnionType: + return any(field_annotation_is_complex(arg) for arg in get_args(annotation)) + + return ( + _annotation_is_complex(annotation) + or _annotation_is_complex(origin) + or hasattr(origin, "__pydantic_core_schema__") + or hasattr(origin, "__get_pydantic_core_schema__") + ) + + +def field_annotation_is_scalar(annotation: Any) -> bool: + # handle Ellipsis here to make tuple[int, ...] work nicely + return annotation is Ellipsis or not field_annotation_is_complex(annotation) + + +def field_annotation_is_scalar_sequence(annotation: Union[Type[Any], None]) -> bool: + origin = get_origin(annotation) + if origin is Union or origin is UnionType: + at_least_one_scalar_sequence = False + for arg in get_args(annotation): + if field_annotation_is_scalar_sequence(arg): + at_least_one_scalar_sequence = True + continue + elif not field_annotation_is_scalar(arg): + return False + return at_least_one_scalar_sequence + return field_annotation_is_sequence(annotation) and all( + field_annotation_is_scalar(sub_annotation) + for sub_annotation in get_args(annotation) + ) + + +def is_bytes_or_nonable_bytes_annotation(annotation: Any) -> bool: + if lenient_issubclass(annotation, bytes): + return True + origin = get_origin(annotation) + if origin is Union or origin is UnionType: + for arg in get_args(annotation): + if lenient_issubclass(arg, bytes): + return True + return False + + +def is_uploadfile_or_nonable_uploadfile_annotation(annotation: Any) -> bool: + if lenient_issubclass(annotation, UploadFile): + return True + origin = get_origin(annotation) + if origin is Union or origin is UnionType: + for arg in get_args(annotation): + if lenient_issubclass(arg, UploadFile): + return True + return False + + +def is_bytes_sequence_annotation(annotation: Any) -> bool: + origin = get_origin(annotation) + if origin is Union or origin is UnionType: + at_least_one = False + for arg in get_args(annotation): + if is_bytes_sequence_annotation(arg): + at_least_one = True + continue + return at_least_one + return field_annotation_is_sequence(annotation) and all( + is_bytes_or_nonable_bytes_annotation(sub_annotation) + for sub_annotation in get_args(annotation) + ) + + +def is_uploadfile_sequence_annotation(annotation: Any) -> bool: + origin = 
get_origin(annotation) + if origin is Union or origin is UnionType: + at_least_one = False + for arg in get_args(annotation): + if is_uploadfile_sequence_annotation(arg): + at_least_one = True + continue + return at_least_one + return field_annotation_is_sequence(annotation) and all( + is_uploadfile_or_nonable_uploadfile_annotation(sub_annotation) + for sub_annotation in get_args(annotation) + ) + + +@lru_cache +def get_cached_model_fields(model: Type[BaseModel]) -> List[ModelField]: + return get_model_fields(model) diff --git a/venv/Lib/site-packages/fastapi/applications.py b/venv/Lib/site-packages/fastapi/applications.py new file mode 100644 index 00000000..6d427cdc --- /dev/null +++ b/venv/Lib/site-packages/fastapi/applications.py @@ -0,0 +1,4585 @@ +from enum import Enum +from typing import ( + Any, + Awaitable, + Callable, + Coroutine, + Dict, + List, + Optional, + Sequence, + Type, + TypeVar, + Union, +) + +from fastapi import routing +from fastapi.datastructures import Default, DefaultPlaceholder +from fastapi.exception_handlers import ( + http_exception_handler, + request_validation_exception_handler, + websocket_request_validation_exception_handler, +) +from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError +from fastapi.logger import logger +from fastapi.openapi.docs import ( + get_redoc_html, + get_swagger_ui_html, + get_swagger_ui_oauth2_redirect_html, +) +from fastapi.openapi.utils import get_openapi +from fastapi.params import Depends +from fastapi.types import DecoratedCallable, IncEx +from fastapi.utils import generate_unique_id +from starlette.applications import Starlette +from starlette.datastructures import State +from starlette.exceptions import HTTPException +from starlette.middleware import Middleware +from starlette.middleware.base import BaseHTTPMiddleware +from starlette.requests import Request +from starlette.responses import HTMLResponse, JSONResponse, Response +from starlette.routing import BaseRoute +from starlette.types import ASGIApp, Lifespan, Receive, Scope, Send +from typing_extensions import Annotated, Doc, deprecated + +AppType = TypeVar("AppType", bound="FastAPI") + + +class FastAPI(Starlette): + """ + `FastAPI` app class, the main entrypoint to use FastAPI. + + Read more in the + [FastAPI docs for First Steps](https://fastapi.tiangolo.com/tutorial/first-steps/). + + ## Example + + ```python + from fastapi import FastAPI + + app = FastAPI() + ``` + """ + + def __init__( + self: AppType, + *, + debug: Annotated[ + bool, + Doc( + """ + Boolean indicating if debug tracebacks should be returned on server + errors. + + Read more in the + [Starlette docs for Applications](https://www.starlette.io/applications/#instantiating-the-application). + """ + ), + ] = False, + routes: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + **Note**: you probably shouldn't use this parameter, it is inherited + from Starlette and supported for compatibility. + + --- + + A list of routes to serve incoming HTTP and WebSocket requests. + """ + ), + deprecated( + """ + You normally wouldn't use this parameter with FastAPI, it is inherited + from Starlette and supported for compatibility. + + In FastAPI, you normally would use the *path operation methods*, + like `app.get()`, `app.post()`, etc. + """ + ), + ] = None, + title: Annotated[ + str, + Doc( + """ + The title of the API. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). 
+ + Read more in the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api). + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI(title="ChimichangApp") + ``` + """ + ), + ] = "FastAPI", + summary: Annotated[ + Optional[str], + Doc( + """ + A short summary of the API. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more in the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api). + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI(summary="Deadpond's favorite app. Nuff said.") + ``` + """ + ), + ] = None, + description: Annotated[ + str, + Doc( + ''' + A description of the API. Supports Markdown (using + [CommonMark syntax](https://commonmark.org/)). + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more in the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api). + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI( + description=""" + ChimichangApp API helps you do awesome stuff. 🚀 + + ## Items + + You can **read items**. + + ## Users + + You will be able to: + + * **Create users** (_not implemented_). + * **Read users** (_not implemented_). + + """ + ) + ``` + ''' + ), + ] = "", + version: Annotated[ + str, + Doc( + """ + The version of the API. + + **Note** This is the version of your application, not the version of + the OpenAPI specification nor the version of FastAPI being used. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more in the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api). + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI(version="0.0.1") + ``` + """ + ), + ] = "0.1.0", + openapi_url: Annotated[ + Optional[str], + Doc( + """ + The URL where the OpenAPI schema will be served from. + + If you set it to `None`, no OpenAPI schema will be served publicly, and + the default automatic endpoints `/docs` and `/redoc` will also be + disabled. + + Read more in the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#openapi-url). + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI(openapi_url="/api/v1/openapi.json") + ``` + """ + ), + ] = "/openapi.json", + openapi_tags: Annotated[ + Optional[List[Dict[str, Any]]], + Doc( + """ + A list of tags used by OpenAPI, these are the same `tags` you can set + in the *path operations*, like: + + * `@app.get("/users/", tags=["users"])` + * `@app.get("/items/", tags=["items"])` + + The order of the tags can be used to specify the order shown in + tools like Swagger UI, used in the automatic path `/docs`. + + It's not required to specify all the tags used. + + The tags that are not declared MAY be organized randomly or based + on the tools' logic. Each tag name in the list MUST be unique. + + The value of each item is a `dict` containing: + + * `name`: The name of the tag. + * `description`: A short description of the tag. + [CommonMark syntax](https://commonmark.org/) MAY be used for rich + text representation. + * `externalDocs`: Additional external documentation for this tag. If + provided, it would contain a `dict` with: + * `description`: A short description of the target documentation. 
+ [CommonMark syntax](https://commonmark.org/) MAY be used for + rich text representation. + * `url`: The URL for the target documentation. Value MUST be in + the form of a URL. + + Read more in the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-tags). + + **Example** + + ```python + from fastapi import FastAPI + + tags_metadata = [ + { + "name": "users", + "description": "Operations with users. The **login** logic is also here.", + }, + { + "name": "items", + "description": "Manage items. So _fancy_ they have their own docs.", + "externalDocs": { + "description": "Items external docs", + "url": "https://fastapi.tiangolo.com/", + }, + }, + ] + + app = FastAPI(openapi_tags=tags_metadata) + ``` + """ + ), + ] = None, + servers: Annotated[ + Optional[List[Dict[str, Union[str, Any]]]], + Doc( + """ + A `list` of `dict`s with connectivity information to a target server. + + You would use it, for example, if your application is served from + different domains and you want to use the same Swagger UI in the + browser to interact with each of them (instead of having multiple + browser tabs open). Or if you want to leave fixed the possible URLs. + + If the servers `list` is not provided, or is an empty `list`, the + default value would be a `dict` with a `url` value of `/`. + + Each item in the `list` is a `dict` containing: + + * `url`: A URL to the target host. This URL supports Server Variables + and MAY be relative, to indicate that the host location is relative + to the location where the OpenAPI document is being served. Variable + substitutions will be made when a variable is named in `{`brackets`}`. + * `description`: An optional string describing the host designated by + the URL. [CommonMark syntax](https://commonmark.org/) MAY be used for + rich text representation. + * `variables`: A `dict` between a variable name and its value. The value + is used for substitution in the server's URL template. + + Read more in the + [FastAPI docs for Behind a Proxy](https://fastapi.tiangolo.com/advanced/behind-a-proxy/#additional-servers). + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI( + servers=[ + {"url": "https://stag.example.com", "description": "Staging environment"}, + {"url": "https://prod.example.com", "description": "Production environment"}, + ] + ) + ``` + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of global dependencies, they will be applied to each + *path operation*, including in sub-routers. + + Read more about it in the + [FastAPI docs for Global Dependencies](https://fastapi.tiangolo.com/tutorial/dependencies/global-dependencies/). + + **Example** + + ```python + from fastapi import Depends, FastAPI + + from .dependencies import func_dep_1, func_dep_2 + + app = FastAPI(dependencies=[Depends(func_dep_1), Depends(func_dep_2)]) + ``` + """ + ), + ] = None, + default_response_class: Annotated[ + Type[Response], + Doc( + """ + The default response class to be used. + + Read more in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#default-response-class). 
+ + **Example** + + ```python + from fastapi import FastAPI + from fastapi.responses import ORJSONResponse + + app = FastAPI(default_response_class=ORJSONResponse) + ``` + """ + ), + ] = Default(JSONResponse), + redirect_slashes: Annotated[ + bool, + Doc( + """ + Whether to detect and redirect slashes in URLs when the client doesn't + use the same format. + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI(redirect_slashes=True) # the default + + @app.get("/items/") + async def read_items(): + return [{"item_id": "Foo"}] + ``` + + With this app, if a client goes to `/items` (without a trailing slash), + they will be automatically redirected with an HTTP status code of 307 + to `/items/`. + """ + ), + ] = True, + docs_url: Annotated[ + Optional[str], + Doc( + """ + The path to the automatic interactive API documentation. + It is handled in the browser by Swagger UI. + + The default URL is `/docs`. You can disable it by setting it to `None`. + + If `openapi_url` is set to `None`, this will be automatically disabled. + + Read more in the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#docs-urls). + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI(docs_url="/documentation", redoc_url=None) + ``` + """ + ), + ] = "/docs", + redoc_url: Annotated[ + Optional[str], + Doc( + """ + The path to the alternative automatic interactive API documentation + provided by ReDoc. + + The default URL is `/redoc`. You can disable it by setting it to `None`. + + If `openapi_url` is set to `None`, this will be automatically disabled. + + Read more in the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#docs-urls). + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI(docs_url="/documentation", redoc_url="redocumentation") + ``` + """ + ), + ] = "/redoc", + swagger_ui_oauth2_redirect_url: Annotated[ + Optional[str], + Doc( + """ + The OAuth2 redirect endpoint for the Swagger UI. + + By default it is `/docs/oauth2-redirect`. + + This is only used if you use OAuth2 (with the "Authorize" button) + with Swagger UI. + """ + ), + ] = "/docs/oauth2-redirect", + swagger_ui_init_oauth: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + OAuth2 configuration for the Swagger UI, by default shown at `/docs`. + + Read more about the available configuration options in the + [Swagger UI docs](https://swagger.io/docs/open-source-tools/swagger-ui/usage/oauth2/). + """ + ), + ] = None, + middleware: Annotated[ + Optional[Sequence[Middleware]], + Doc( + """ + List of middleware to be added when creating the application. + + In FastAPI you would normally do this with `app.add_middleware()` + instead. + + Read more in the + [FastAPI docs for Middleware](https://fastapi.tiangolo.com/tutorial/middleware/). + """ + ), + ] = None, + exception_handlers: Annotated[ + Optional[ + Dict[ + Union[int, Type[Exception]], + Callable[[Request, Any], Coroutine[Any, Any, Response]], + ] + ], + Doc( + """ + A dictionary with handlers for exceptions. + + In FastAPI, you would normally use the decorator + `@app.exception_handler()`. + + Read more in the + [FastAPI docs for Handling Errors](https://fastapi.tiangolo.com/tutorial/handling-errors/). + """ + ), + ] = None, + on_startup: Annotated[ + Optional[Sequence[Callable[[], Any]]], + Doc( + """ + A list of startup event handler functions. + + You should instead use the `lifespan` handlers. 
+ + Read more in the [FastAPI docs for `lifespan`](https://fastapi.tiangolo.com/advanced/events/). + """ + ), + ] = None, + on_shutdown: Annotated[ + Optional[Sequence[Callable[[], Any]]], + Doc( + """ + A list of shutdown event handler functions. + + You should instead use the `lifespan` handlers. + + Read more in the + [FastAPI docs for `lifespan`](https://fastapi.tiangolo.com/advanced/events/). + """ + ), + ] = None, + lifespan: Annotated[ + Optional[Lifespan[AppType]], + Doc( + """ + A `Lifespan` context manager handler. This replaces `startup` and + `shutdown` functions with a single context manager. + + Read more in the + [FastAPI docs for `lifespan`](https://fastapi.tiangolo.com/advanced/events/). + """ + ), + ] = None, + terms_of_service: Annotated[ + Optional[str], + Doc( + """ + A URL to the Terms of Service for your API. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more at the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api). + + **Example** + + ```python + app = FastAPI(terms_of_service="http://example.com/terms/") + ``` + """ + ), + ] = None, + contact: Annotated[ + Optional[Dict[str, Union[str, Any]]], + Doc( + """ + A dictionary with the contact information for the exposed API. + + It can contain several fields. + + * `name`: (`str`) The name of the contact person/organization. + * `url`: (`str`) A URL pointing to the contact information. MUST be in + the format of a URL. + * `email`: (`str`) The email address of the contact person/organization. + MUST be in the format of an email address. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more at the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api). + + **Example** + + ```python + app = FastAPI( + contact={ + "name": "Deadpoolio the Amazing", + "url": "http://x-force.example.com/contact/", + "email": "dp@x-force.example.com", + } + ) + ``` + """ + ), + ] = None, + license_info: Annotated[ + Optional[Dict[str, Union[str, Any]]], + Doc( + """ + A dictionary with the license information for the exposed API. + + It can contain several fields. + + * `name`: (`str`) **REQUIRED** (if a `license_info` is set). The + license name used for the API. + * `identifier`: (`str`) An [SPDX](https://spdx.dev/) license expression + for the API. The `identifier` field is mutually exclusive of the `url` + field. Available since OpenAPI 3.1.0, FastAPI 0.99.0. + * `url`: (`str`) A URL to the license used for the API. This MUST be + the format of a URL. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more at the + [FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api). + + **Example** + + ```python + app = FastAPI( + license_info={ + "name": "Apache 2.0", + "url": "https://www.apache.org/licenses/LICENSE-2.0.html", + } + ) + ``` + """ + ), + ] = None, + openapi_prefix: Annotated[ + str, + Doc( + """ + A URL prefix for the OpenAPI URL. + """ + ), + deprecated( + """ + "openapi_prefix" has been deprecated in favor of "root_path", which + follows more closely the ASGI standard, is simpler, and more + automatic. + """ + ), + ] = "", + root_path: Annotated[ + str, + Doc( + """ + A path prefix handled by a proxy that is not seen by the application + but is seen by external clients, which affects things like Swagger UI. 
+ + Read more about it at the + [FastAPI docs for Behind a Proxy](https://fastapi.tiangolo.com/advanced/behind-a-proxy/). + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI(root_path="/api/v1") + ``` + """ + ), + ] = "", + root_path_in_servers: Annotated[ + bool, + Doc( + """ + To disable automatically generating the URLs in the `servers` field + in the autogenerated OpenAPI using the `root_path`. + + Read more about it in the + [FastAPI docs for Behind a Proxy](https://fastapi.tiangolo.com/advanced/behind-a-proxy/#disable-automatic-server-from-root_path). + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI(root_path_in_servers=False) + ``` + """ + ), + ] = True, + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses to be shown in OpenAPI. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Additional Responses in OpenAPI](https://fastapi.tiangolo.com/advanced/additional-responses/). + + And in the + [FastAPI docs for Bigger Applications](https://fastapi.tiangolo.com/tutorial/bigger-applications/#include-an-apirouter-with-a-custom-prefix-tags-responses-and-dependencies). + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + OpenAPI callbacks that should apply to all *path operations*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + webhooks: Annotated[ + Optional[routing.APIRouter], + Doc( + """ + Add OpenAPI webhooks. This is similar to `callbacks` but it doesn't + depend on specific *path operations*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + **Note**: This is available since OpenAPI 3.1.0, FastAPI 0.99.0. + + Read more about it in the + [FastAPI docs for OpenAPI Webhooks](https://fastapi.tiangolo.com/advanced/openapi-webhooks/). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark all *path operations* as deprecated. You probably don't need it, + but it's available. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + To include (or not) all the *path operations* in the generated OpenAPI. + You probably don't need it, but it's available. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + swagger_ui_parameters: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Parameters to configure Swagger UI, the autogenerated interactive API + documentation (by default at `/docs`). + + Read more about it in the + [FastAPI docs about how to Configure Swagger UI](https://fastapi.tiangolo.com/how-to/configure-swagger-ui/). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[routing.APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. 
+ + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + separate_input_output_schemas: Annotated[ + bool, + Doc( + """ + Whether to generate separate OpenAPI schemas for request body and + response body when the results would be more precise. + + This is particularly useful when automatically generating clients. + + For example, if you have a model like: + + ```python + from pydantic import BaseModel + + class Item(BaseModel): + name: str + tags: list[str] = [] + ``` + + When `Item` is used for input, a request body, `tags` is not required, + the client doesn't have to provide it. + + But when using `Item` for output, for a response body, `tags` is always + available because it has a default value, even if it's just an empty + list. So, the client should be able to always expect it. + + In this case, there would be two different schemas, one for input and + another one for output. + """ + ), + ] = True, + **extra: Annotated[ + Any, + Doc( + """ + Extra keyword arguments to be stored in the app, not used by FastAPI + anywhere. + """ + ), + ], + ) -> None: + self.debug = debug + self.title = title + self.summary = summary + self.description = description + self.version = version + self.terms_of_service = terms_of_service + self.contact = contact + self.license_info = license_info + self.openapi_url = openapi_url + self.openapi_tags = openapi_tags + self.root_path_in_servers = root_path_in_servers + self.docs_url = docs_url + self.redoc_url = redoc_url + self.swagger_ui_oauth2_redirect_url = swagger_ui_oauth2_redirect_url + self.swagger_ui_init_oauth = swagger_ui_init_oauth + self.swagger_ui_parameters = swagger_ui_parameters + self.servers = servers or [] + self.separate_input_output_schemas = separate_input_output_schemas + self.extra = extra + self.openapi_version: Annotated[ + str, + Doc( + """ + The version string of OpenAPI. + + FastAPI will generate OpenAPI version 3.1.0, and will output that as + the OpenAPI version. But some tools, even though they might be + compatible with OpenAPI 3.1.0, might not recognize it as a valid. + + So you could override this value to trick those tools into using + the generated OpenAPI. Have in mind that this is a hack. But if you + avoid using features added in OpenAPI 3.1.0, it might work for your + use case. + + This is not passed as a parameter to the `FastAPI` class to avoid + giving the false idea that FastAPI would generate a different OpenAPI + schema. It is only available as an attribute. + + **Example** + + ```python + from fastapi import FastAPI + + app = FastAPI() + + app.openapi_version = "3.0.2" + ``` + """ + ), + ] = "3.1.0" + self.openapi_schema: Optional[Dict[str, Any]] = None + if self.openapi_url: + assert self.title, "A title must be provided for OpenAPI, e.g.: 'My API'" + assert self.version, "A version must be provided for OpenAPI, e.g.: '2.1.0'" + # TODO: remove when discarding the openapi_prefix parameter + if openapi_prefix: + logger.warning( + '"openapi_prefix" has been deprecated in favor of "root_path", which ' + "follows more closely the ASGI standard, is simpler, and more " + "automatic. 
Check the docs at " + "https://fastapi.tiangolo.com/advanced/sub-applications/" + ) + self.webhooks: Annotated[ + routing.APIRouter, + Doc( + """ + The `app.webhooks` attribute is an `APIRouter` with the *path + operations* that will be used just for documentation of webhooks. + + Read more about it in the + [FastAPI docs for OpenAPI Webhooks](https://fastapi.tiangolo.com/advanced/openapi-webhooks/). + """ + ), + ] = webhooks or routing.APIRouter() + self.root_path = root_path or openapi_prefix + self.state: Annotated[ + State, + Doc( + """ + A state object for the application. This is the same object for the + entire application, it doesn't change from request to request. + + You normally wouldn't use this in FastAPI, for most of the cases you + would instead use FastAPI dependencies. + + This is simply inherited from Starlette. + + Read more about it in the + [Starlette docs for Applications](https://www.starlette.io/applications/#storing-state-on-the-app-instance). + """ + ), + ] = State() + self.dependency_overrides: Annotated[ + Dict[Callable[..., Any], Callable[..., Any]], + Doc( + """ + A dictionary with overrides for the dependencies. + + Each key is the original dependency callable, and the value is the + actual dependency that should be called. + + This is for testing, to replace expensive dependencies with testing + versions. + + Read more about it in the + [FastAPI docs for Testing Dependencies with Overrides](https://fastapi.tiangolo.com/advanced/testing-dependencies/). + """ + ), + ] = {} + self.router: routing.APIRouter = routing.APIRouter( + routes=routes, + redirect_slashes=redirect_slashes, + dependency_overrides_provider=self, + on_startup=on_startup, + on_shutdown=on_shutdown, + lifespan=lifespan, + default_response_class=default_response_class, + dependencies=dependencies, + callbacks=callbacks, + deprecated=deprecated, + include_in_schema=include_in_schema, + responses=responses, + generate_unique_id_function=generate_unique_id_function, + ) + self.exception_handlers: Dict[ + Any, Callable[[Request, Any], Union[Response, Awaitable[Response]]] + ] = {} if exception_handlers is None else dict(exception_handlers) + self.exception_handlers.setdefault(HTTPException, http_exception_handler) + self.exception_handlers.setdefault( + RequestValidationError, request_validation_exception_handler + ) + self.exception_handlers.setdefault( + WebSocketRequestValidationError, + # Starlette still has incorrect type specification for the handlers + websocket_request_validation_exception_handler, # type: ignore + ) + + self.user_middleware: List[Middleware] = ( + [] if middleware is None else list(middleware) + ) + self.middleware_stack: Union[ASGIApp, None] = None + self.setup() + + def openapi(self) -> Dict[str, Any]: + """ + Generate the OpenAPI schema of the application. This is called by FastAPI + internally. + + The first time it is called it stores the result in the attribute + `app.openapi_schema`, and next times it is called, it just returns that same + result. To avoid the cost of generating the schema every time. + + If you need to modify the generated OpenAPI schema, you could modify it. + + Read more in the + [FastAPI docs for OpenAPI](https://fastapi.tiangolo.com/how-to/extending-openapi/). 
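+
+        **Example** (a minimal sketch of adjusting the cached schema; the
+        `x-logo` extension field is only an illustration):
+
+        ```python
+        from fastapi import FastAPI
+
+        app = FastAPI()
+
+        schema = app.openapi()
+        schema["info"]["x-logo"] = {"url": "https://example.com/logo.png"}
+        app.openapi_schema = schema
+        ```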
+ """ + if not self.openapi_schema: + self.openapi_schema = get_openapi( + title=self.title, + version=self.version, + openapi_version=self.openapi_version, + summary=self.summary, + description=self.description, + terms_of_service=self.terms_of_service, + contact=self.contact, + license_info=self.license_info, + routes=self.routes, + webhooks=self.webhooks.routes, + tags=self.openapi_tags, + servers=self.servers, + separate_input_output_schemas=self.separate_input_output_schemas, + ) + return self.openapi_schema + + def setup(self) -> None: + if self.openapi_url: + urls = (server_data.get("url") for server_data in self.servers) + server_urls = {url for url in urls if url} + + async def openapi(req: Request) -> JSONResponse: + root_path = req.scope.get("root_path", "").rstrip("/") + if root_path not in server_urls: + if root_path and self.root_path_in_servers: + self.servers.insert(0, {"url": root_path}) + server_urls.add(root_path) + return JSONResponse(self.openapi()) + + self.add_route(self.openapi_url, openapi, include_in_schema=False) + if self.openapi_url and self.docs_url: + + async def swagger_ui_html(req: Request) -> HTMLResponse: + root_path = req.scope.get("root_path", "").rstrip("/") + openapi_url = root_path + self.openapi_url + oauth2_redirect_url = self.swagger_ui_oauth2_redirect_url + if oauth2_redirect_url: + oauth2_redirect_url = root_path + oauth2_redirect_url + return get_swagger_ui_html( + openapi_url=openapi_url, + title=f"{self.title} - Swagger UI", + oauth2_redirect_url=oauth2_redirect_url, + init_oauth=self.swagger_ui_init_oauth, + swagger_ui_parameters=self.swagger_ui_parameters, + ) + + self.add_route(self.docs_url, swagger_ui_html, include_in_schema=False) + + if self.swagger_ui_oauth2_redirect_url: + + async def swagger_ui_redirect(req: Request) -> HTMLResponse: + return get_swagger_ui_oauth2_redirect_html() + + self.add_route( + self.swagger_ui_oauth2_redirect_url, + swagger_ui_redirect, + include_in_schema=False, + ) + if self.openapi_url and self.redoc_url: + + async def redoc_html(req: Request) -> HTMLResponse: + root_path = req.scope.get("root_path", "").rstrip("/") + openapi_url = root_path + self.openapi_url + return get_redoc_html( + openapi_url=openapi_url, title=f"{self.title} - ReDoc" + ) + + self.add_route(self.redoc_url, redoc_html, include_in_schema=False) + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if self.root_path: + scope["root_path"] = self.root_path + await super().__call__(scope, receive, send) + + def add_api_route( + self, + path: str, + endpoint: Callable[..., Any], + *, + response_model: Any = Default(None), + status_code: Optional[int] = None, + tags: Optional[List[Union[str, Enum]]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + methods: Optional[List[str]] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[IncEx] = None, + response_model_exclude: Optional[IncEx] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Union[Type[Response], DefaultPlaceholder] = Default( + JSONResponse + ), + name: Optional[str] = None, + openapi_extra: 
Optional[Dict[str, Any]] = None, + generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( + generate_unique_id + ), + ) -> None: + self.router.add_api_route( + path, + endpoint=endpoint, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=methods, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def api_route( + self, + path: str, + *, + response_model: Any = Default(None), + status_code: Optional[int] = None, + tags: Optional[List[Union[str, Enum]]] = None, + dependencies: Optional[Sequence[Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + methods: Optional[List[str]] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[IncEx] = None, + response_model_exclude: Optional[IncEx] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + generate_unique_id_function: Callable[[routing.APIRoute], str] = Default( + generate_unique_id + ), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.router.add_api_route( + path, + func, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=methods, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + return func + + return decorator + + def add_api_websocket_route( + self, + path: str, + endpoint: Callable[..., Any], + name: Optional[str] = None, + *, + dependencies: Optional[Sequence[Depends]] = None, + ) -> None: + self.router.add_api_websocket_route( + path, + endpoint, + name=name, + dependencies=dependencies, + ) + + def websocket( + self, + path: Annotated[ + str, + Doc( + """ + WebSocket path. 
+ """ + ), + ], + name: Annotated[ + Optional[str], + Doc( + """ + A name for the WebSocket. Only used internally. + """ + ), + ] = None, + *, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be used for this + WebSocket. + + Read more about it in the + [FastAPI docs for WebSockets](https://fastapi.tiangolo.com/advanced/websockets/). + """ + ), + ] = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Decorate a WebSocket function. + + Read more about it in the + [FastAPI docs for WebSockets](https://fastapi.tiangolo.com/advanced/websockets/). + + **Example** + + ```python + from fastapi import FastAPI, WebSocket + + app = FastAPI() + + @app.websocket("/ws") + async def websocket_endpoint(websocket: WebSocket): + await websocket.accept() + while True: + data = await websocket.receive_text() + await websocket.send_text(f"Message text was: {data}") + ``` + """ + + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.add_api_websocket_route( + path, + func, + name=name, + dependencies=dependencies, + ) + return func + + return decorator + + def include_router( + self, + router: Annotated[routing.APIRouter, Doc("The `APIRouter` to include.")], + *, + prefix: Annotated[str, Doc("An optional path prefix for the router.")] = "", + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to all the *path operations* in this + router. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to all the + *path operations* in this router. + + Read more about it in the + [FastAPI docs for Bigger Applications - Multiple Files](https://fastapi.tiangolo.com/tutorial/bigger-applications/#include-an-apirouter-with-a-custom-prefix-tags-responses-and-dependencies). + + **Example** + + ```python + from fastapi import Depends, FastAPI + + from .dependencies import get_token_header + from .internal import admin + + app = FastAPI() + + app.include_router( + admin.router, + dependencies=[Depends(get_token_header)], + ) + ``` + """ + ), + ] = None, + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses to be shown in OpenAPI. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Additional Responses in OpenAPI](https://fastapi.tiangolo.com/advanced/additional-responses/). + + And in the + [FastAPI docs for Bigger Applications](https://fastapi.tiangolo.com/tutorial/bigger-applications/#include-an-apirouter-with-a-custom-prefix-tags-responses-and-dependencies). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark all the *path operations* in this router as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + **Example** + + ```python + from fastapi import FastAPI + + from .internal import old_api + + app = FastAPI() + + app.include_router( + old_api.router, + deprecated=True, + ) + ``` + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include (or not) all the *path operations* in this router in the + generated OpenAPI schema. 
+ + This affects the generated OpenAPI (e.g. visible at `/docs`). + + **Example** + + ```python + from fastapi import FastAPI + + from .internal import old_api + + app = FastAPI() + + app.include_router( + old_api.router, + include_in_schema=False, + ) + ``` + """ + ), + ] = True, + default_response_class: Annotated[ + Type[Response], + Doc( + """ + Default response class to be used for the *path operations* in this + router. + + Read more in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#default-response-class). + + **Example** + + ```python + from fastapi import FastAPI + from fastapi.responses import ORJSONResponse + + from .internal import old_api + + app = FastAPI() + + app.include_router( + old_api.router, + default_response_class=ORJSONResponse, + ) + ``` + """ + ), + ] = Default(JSONResponse), + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[routing.APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> None: + """ + Include an `APIRouter` in the same app. + + Read more about it in the + [FastAPI docs for Bigger Applications](https://fastapi.tiangolo.com/tutorial/bigger-applications/). + + ## Example + + ```python + from fastapi import FastAPI + + from .users import users_router + + app = FastAPI() + + app.include_router(users_router) + ``` + """ + self.router.include_router( + router, + prefix=prefix, + tags=tags, + dependencies=dependencies, + responses=responses, + deprecated=deprecated, + include_in_schema=include_in_schema, + default_response_class=default_response_class, + callbacks=callbacks, + generate_unique_id_function=generate_unique_id_function, + ) + + def get( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. 
+ * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. 
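+
+                **Example** (the operation ID shown is only an illustration):
+
+                ```python
+                from fastapi import FastAPI
+
+                app = FastAPI()
+
+                @app.get("/items/", operation_id="list_items")
+                def read_items():
+                    return []
+                ```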
+ + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). 
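+
+                **Example** (a minimal sketch; the path is illustrative):
+
+                ```python
+                from fastapi import FastAPI
+
+                app = FastAPI()
+
+                @app.get("/internal-status/", include_in_schema=False)
+                def internal_status():
+                    return {"ok": True}
+                ```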
+ + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[routing.APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP GET operation. + + ## Example + + ```python + from fastapi import FastAPI + + app = FastAPI() + + @app.get("/items/") + def read_items(): + return [{"name": "Empanada"}, {"name": "Arepa"}] + ``` + """ + return self.router.get( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def put( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. 
So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. 
+ + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. 
+ + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[routing.APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP PUT operation. 
+ + ## Example + + ```python + from fastapi import FastAPI + from pydantic import BaseModel + + class Item(BaseModel): + name: str + description: str | None = None + + app = FastAPI() + + @app.put("/items/{item_id}") + def replace_item(item_id: str, item: Item): + return {"message": "Item replaced", "id": item_id} + ``` + """ + return self.router.put( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def post( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). 
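+
+                **Example** (a minimal sketch):
+
+                ```python
+                from fastapi import FastAPI
+
+                app = FastAPI()
+
+                @app.post("/items/", tags=["items"])
+                def create_item():
+                    return {"message": "Item created"}
+                ```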
+ """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). 
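+
+                For instance (a minimal sketch; the `Item` model and its alias
+                are hypothetical):
+
+                ```python
+                from fastapi import FastAPI
+                from pydantic import BaseModel, Field
+
+                class Item(BaseModel):
+                    item_name: str = Field(alias="itemName")
+
+                app = FastAPI()
+
+                @app.post("/items/", response_model=Item, response_model_by_alias=False)
+                def create_item(item: Item):
+                    return item
+                ```
+
+                With the default `response_model_by_alias=True` the response JSON
+                would use `itemName`; with `False`, as above, it uses `item_name`.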
+ """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. 
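+
+                **Example** (the extension field is only an illustration):
+
+                ```python
+                from fastapi import FastAPI
+
+                app = FastAPI()
+
+                @app.post("/items/", openapi_extra={"x-aperture-labs-portal": "blue"})
+                def create_item():
+                    return {"message": "Item created"}
+                ```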
+ + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[routing.APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP POST operation. + + ## Example + + ```python + from fastapi import FastAPI + from pydantic import BaseModel + + class Item(BaseModel): + name: str + description: str | None = None + + app = FastAPI() + + @app.post("/items/") + def create_item(item: Item): + return {"message": "Item created"} + ``` + """ + return self.router.post( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def delete( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). 
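+
+                **Example** (a minimal sketch; `DeleteStatus` is a hypothetical
+                model):
+
+                ```python
+                from fastapi import FastAPI
+                from pydantic import BaseModel
+
+                class DeleteStatus(BaseModel):
+                    ok: bool
+
+                app = FastAPI()
+
+                @app.delete("/items/{item_id}", response_model=DeleteStatus)
+                def delete_item(item_id: str):
+                    return {"ok": True}
+                ```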
+ """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). 
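+
+                **Example** (a minimal sketch; `Item` and its fields are
+                hypothetical):
+
+                ```python
+                from fastapi import FastAPI
+                from pydantic import BaseModel
+
+                class Item(BaseModel):
+                    name: str
+                    description: str = ""
+
+                app = FastAPI()
+
+                @app.delete(
+                    "/items/{item_id}",
+                    response_model=Item,
+                    response_model_include={"name"},
+                )
+                def delete_item(item_id: str):
+                    # `description` is dropped from the response by the include set.
+                    return Item(name="Foo", description="not in the response")
+                ```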
+ """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). 
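+
+                **Example** (a minimal sketch using one of the bundled response
+                classes):
+
+                ```python
+                from fastapi import FastAPI
+                from fastapi.responses import PlainTextResponse
+
+                app = FastAPI()
+
+                @app.delete("/items/{item_id}", response_class=PlainTextResponse)
+                def delete_item(item_id: str):
+                    return "deleted"
+                ```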
+ """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[routing.APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP DELETE operation. + + ## Example + + ```python + from fastapi import FastAPI + + app = FastAPI() + + @app.delete("/items/{item_id}") + def delete_item(item_id: str): + return {"message": "Item deleted"} + ``` + """ + return self.router.delete( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def options( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. 
If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. 
+ + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). 
+ """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[routing.APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP OPTIONS operation. + + ## Example + + ```python + from fastapi import FastAPI + + app = FastAPI() + + @app.options("/items/") + def get_item_options(): + return {"additions": ["Aji", "Guacamole"]} + ``` + """ + return self.router.options( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def head( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. 
+ + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). 
+ """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). 
+ """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[routing.APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP HEAD operation. 
+ + ## Example + + ```python + from fastapi import FastAPI, Response + + app = FastAPI() + + @app.head("/items/", status_code=204) + def get_items_headers(response: Response): + response.headers["X-Cat-Dog"] = "Alone in the world" + ``` + """ + return self.router.head( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def patch( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. 
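+
+                For example, `dependencies=[Depends(verify_token)]` would run a
+                hypothetical `verify_token` dependency before the *path operation
+                function* executes.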
+ + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). 
+ """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. 
+ + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[routing.APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP PATCH operation. + + ## Example + + ```python + from fastapi import FastAPI + from pydantic import BaseModel + + class Item(BaseModel): + name: str + description: str | None = None + + app = FastAPI() + + @app.patch("/items/") + def update_item(item: Item): + return {"message": "Item updated in place"} + ``` + """ + return self.router.patch( + path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def trace( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). 
+ + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. 
+ + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. 
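+
+                For example, `response_class=PlainTextResponse` (from
+                `fastapi.responses`) would send the return value as plain text.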
+
+                Read more about it in the
+                [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse).
+                """
+            ),
+        ] = Default(JSONResponse),
+        name: Annotated[
+            Optional[str],
+            Doc(
+                """
+                Name for this *path operation*. Only used internally.
+                """
+            ),
+        ] = None,
+        callbacks: Annotated[
+            Optional[List[BaseRoute]],
+            Doc(
+                """
+                List of *path operations* that will be used as OpenAPI callbacks.
+
+                This is only for OpenAPI documentation, the callbacks won't be used
+                directly.
+
+                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
+
+                Read more about it in the
+                [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
+                """
+            ),
+        ] = None,
+        openapi_extra: Annotated[
+            Optional[Dict[str, Any]],
+            Doc(
+                """
+                Extra metadata to be included in the OpenAPI schema for this *path
+                operation*.
+
+                Read more about it in the
+                [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema).
+                """
+            ),
+        ] = None,
+        generate_unique_id_function: Annotated[
+            Callable[[routing.APIRoute], str],
+            Doc(
+                """
+                Customize the function used to generate unique IDs for the *path
+                operations* shown in the generated OpenAPI.
+
+                This is particularly useful when automatically generating clients or
+                SDKs for your API.
+
+                Read more about it in the
+                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
+                """
+            ),
+        ] = Default(generate_unique_id),
+    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
+        """
+        Add a *path operation* using an HTTP TRACE operation.
+
+        ## Example
+
+        ```python
+        from fastapi import FastAPI
+
+        app = FastAPI()
+
+        @app.trace("/items/{item_id}")
+        def trace_item(item_id: str):
+            return None
+        ```
+        """
+        return self.router.trace(
+            path,
+            response_model=response_model,
+            status_code=status_code,
+            tags=tags,
+            dependencies=dependencies,
+            summary=summary,
+            description=description,
+            response_description=response_description,
+            responses=responses,
+            deprecated=deprecated,
+            operation_id=operation_id,
+            response_model_include=response_model_include,
+            response_model_exclude=response_model_exclude,
+            response_model_by_alias=response_model_by_alias,
+            response_model_exclude_unset=response_model_exclude_unset,
+            response_model_exclude_defaults=response_model_exclude_defaults,
+            response_model_exclude_none=response_model_exclude_none,
+            include_in_schema=include_in_schema,
+            response_class=response_class,
+            name=name,
+            callbacks=callbacks,
+            openapi_extra=openapi_extra,
+            generate_unique_id_function=generate_unique_id_function,
+        )
+
+    def websocket_route(
+        self, path: str, name: Union[str, None] = None
+    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
+        def decorator(func: DecoratedCallable) -> DecoratedCallable:
+            self.router.add_websocket_route(path, func, name=name)
+            return func
+
+        return decorator
+
+    @deprecated(
+        """
+        on_event is deprecated, use lifespan event handlers instead.
+
+        Read more about it in the
+        [FastAPI docs for Lifespan Events](https://fastapi.tiangolo.com/advanced/events/).
+        """
+    )
+    def on_event(
+        self,
+        event_type: Annotated[
+            str,
+            Doc(
+                """
+                The type of event. `startup` or `shutdown`.
+                """
+            ),
+        ],
+    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
+        """
+        Add an event handler for the application.
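+
+        ## Example
+
+        A minimal sketch of the deprecated style (prefer `lifespan`, see the
+        note below):
+
+        ```python
+        from fastapi import FastAPI
+
+        app = FastAPI()
+
+
+        @app.on_event("startup")
+        async def on_startup():
+            print("Application startup")
+        ```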
+ + `on_event` is deprecated, use `lifespan` event handlers instead. + + Read more about it in the + [FastAPI docs for Lifespan Events](https://fastapi.tiangolo.com/advanced/events/#alternative-events-deprecated). + """ + return self.router.on_event(event_type) + + def middleware( + self, + middleware_type: Annotated[ + str, + Doc( + """ + The type of middleware. Currently only supports `http`. + """ + ), + ], + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a middleware to the application. + + Read more about it in the + [FastAPI docs for Middleware](https://fastapi.tiangolo.com/tutorial/middleware/). + + ## Example + + ```python + import time + + from fastapi import FastAPI, Request + + app = FastAPI() + + + @app.middleware("http") + async def add_process_time_header(request: Request, call_next): + start_time = time.time() + response = await call_next(request) + process_time = time.time() - start_time + response.headers["X-Process-Time"] = str(process_time) + return response + ``` + """ + + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.add_middleware(BaseHTTPMiddleware, dispatch=func) + return func + + return decorator + + def exception_handler( + self, + exc_class_or_status_code: Annotated[ + Union[int, Type[Exception]], + Doc( + """ + The Exception class this would handle, or a status code. + """ + ), + ], + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add an exception handler to the app. + + Read more about it in the + [FastAPI docs for Handling Errors](https://fastapi.tiangolo.com/tutorial/handling-errors/). + + ## Example + + ```python + from fastapi import FastAPI, Request + from fastapi.responses import JSONResponse + + + class UnicornException(Exception): + def __init__(self, name: str): + self.name = name + + + app = FastAPI() + + + @app.exception_handler(UnicornException) + async def unicorn_exception_handler(request: Request, exc: UnicornException): + return JSONResponse( + status_code=418, + content={"message": f"Oops! {exc.name} did something. There goes a rainbow..."}, + ) + ``` + """ + + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.add_exception_handler(exc_class_or_status_code, func) + return func + + return decorator diff --git a/venv/Lib/site-packages/fastapi/background.py b/venv/Lib/site-packages/fastapi/background.py new file mode 100644 index 00000000..203578a4 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/background.py @@ -0,0 +1,59 @@ +from typing import Any, Callable + +from starlette.background import BackgroundTasks as StarletteBackgroundTasks +from typing_extensions import Annotated, Doc, ParamSpec + +P = ParamSpec("P") + + +class BackgroundTasks(StarletteBackgroundTasks): + """ + A collection of background tasks that will be called after a response has been + sent to the client. + + Read more about it in the + [FastAPI docs for Background Tasks](https://fastapi.tiangolo.com/tutorial/background-tasks/). 
+ + ## Example + + ```python + from fastapi import BackgroundTasks, FastAPI + + app = FastAPI() + + + def write_notification(email: str, message=""): + with open("log.txt", mode="w") as email_file: + content = f"notification for {email}: {message}" + email_file.write(content) + + + @app.post("/send-notification/{email}") + async def send_notification(email: str, background_tasks: BackgroundTasks): + background_tasks.add_task(write_notification, email, message="some notification") + return {"message": "Notification sent in the background"} + ``` + """ + + def add_task( + self, + func: Annotated[ + Callable[P, Any], + Doc( + """ + The function to call after the response is sent. + + It can be a regular `def` function or an `async def` function. + """ + ), + ], + *args: P.args, + **kwargs: P.kwargs, + ) -> None: + """ + Add a function to be called in the background after the response is sent. + + Read more about it in the + [FastAPI docs for Background Tasks](https://fastapi.tiangolo.com/tutorial/background-tasks/). + """ + return super().add_task(func, *args, **kwargs) diff --git a/venv/Lib/site-packages/fastapi/cli.py b/venv/Lib/site-packages/fastapi/cli.py new file mode 100644 index 00000000..8d3301e9 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/cli.py @@ -0,0 +1,13 @@ +try: + from fastapi_cli.cli import main as cli_main + +except ImportError: # pragma: no cover + cli_main = None # type: ignore + + +def main() -> None: + if not cli_main: # type: ignore[truthy-function] + message = 'To use the fastapi command, please install "fastapi[standard]":\n\n\tpip install "fastapi[standard]"\n' + print(message) + raise RuntimeError(message) # noqa: B904 + cli_main() diff --git a/venv/Lib/site-packages/fastapi/concurrency.py b/venv/Lib/site-packages/fastapi/concurrency.py new file mode 100644 index 00000000..3202c707 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/concurrency.py @@ -0,0 +1,39 @@ +from contextlib import asynccontextmanager as asynccontextmanager +from typing import AsyncGenerator, ContextManager, TypeVar + +import anyio.to_thread +from anyio import CapacityLimiter +from starlette.concurrency import iterate_in_threadpool as iterate_in_threadpool # noqa +from starlette.concurrency import run_in_threadpool as run_in_threadpool # noqa +from starlette.concurrency import ( # noqa + run_until_first_complete as run_until_first_complete, +) + +_T = TypeVar("_T") + + +@asynccontextmanager +async def contextmanager_in_threadpool( + cm: ContextManager[_T], +) -> AsyncGenerator[_T, None]: + # blocking __exit__ from running waiting on a free thread + # can create race conditions/deadlocks if the context manager itself + # has its own internal pool (e.g. 
a database connection pool) + # to avoid this we let __exit__ run without a capacity limit + # since we're creating a new limiter for each call, any non-zero limit + # works (1 is arbitrary) + exit_limiter = CapacityLimiter(1) + try: + yield await run_in_threadpool(cm.__enter__) + except Exception as e: + ok = bool( + await anyio.to_thread.run_sync( + cm.__exit__, type(e), e, e.__traceback__, limiter=exit_limiter + ) + ) + if not ok: + raise e + else: + await anyio.to_thread.run_sync( + cm.__exit__, None, None, None, limiter=exit_limiter + ) diff --git a/venv/Lib/site-packages/fastapi/datastructures.py b/venv/Lib/site-packages/fastapi/datastructures.py new file mode 100644 index 00000000..cf8406b0 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/datastructures.py @@ -0,0 +1,204 @@ +from typing import ( + Any, + BinaryIO, + Callable, + Dict, + Iterable, + Optional, + Type, + TypeVar, + cast, +) + +from fastapi._compat import ( + PYDANTIC_V2, + CoreSchema, + GetJsonSchemaHandler, + JsonSchemaValue, + with_info_plain_validator_function, +) +from starlette.datastructures import URL as URL # noqa: F401 +from starlette.datastructures import Address as Address # noqa: F401 +from starlette.datastructures import FormData as FormData # noqa: F401 +from starlette.datastructures import Headers as Headers # noqa: F401 +from starlette.datastructures import QueryParams as QueryParams # noqa: F401 +from starlette.datastructures import State as State # noqa: F401 +from starlette.datastructures import UploadFile as StarletteUploadFile +from typing_extensions import Annotated, Doc + + +class UploadFile(StarletteUploadFile): + """ + A file uploaded in a request. + + Define it as a *path operation function* (or dependency) parameter. + + If you are using a regular `def` function, you can use the `upload_file.file` + attribute to access the raw standard Python file (blocking, not async), useful and + needed for non-async code. + + Read more about it in the + [FastAPI docs for Request Files](https://fastapi.tiangolo.com/tutorial/request-files/). + + ## Example + + ```python + from typing import Annotated + + from fastapi import FastAPI, File, UploadFile + + app = FastAPI() + + + @app.post("/files/") + async def create_file(file: Annotated[bytes, File()]): + return {"file_size": len(file)} + + + @app.post("/uploadfile/") + async def create_upload_file(file: UploadFile): + return {"filename": file.filename} + ``` + """ + + file: Annotated[ + BinaryIO, + Doc("The standard Python file object (non-async)."), + ] + filename: Annotated[Optional[str], Doc("The original file name.")] + size: Annotated[Optional[int], Doc("The size of the file in bytes.")] + headers: Annotated[Headers, Doc("The headers of the request.")] + content_type: Annotated[ + Optional[str], Doc("The content type of the request, from the headers.") + ] + + async def write( + self, + data: Annotated[ + bytes, + Doc( + """ + The bytes to write to the file. + """ + ), + ], + ) -> None: + """ + Write some bytes to the file. + + You normally wouldn't use this from a file you read in a request. + + To be awaitable, compatible with async, this is run in threadpool. + """ + return await super().write(data) + + async def read( + self, + size: Annotated[ + int, + Doc( + """ + The number of bytes to read from the file. + """ + ), + ] = -1, + ) -> bytes: + """ + Read some bytes from the file. + + To be awaitable, compatible with async, this is run in threadpool. 
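+
+        For example, assuming `file` is an `UploadFile` parameter of an
+        `async` *path operation function*: `contents = await file.read()`.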
+ """ + return await super().read(size) + + async def seek( + self, + offset: Annotated[ + int, + Doc( + """ + The position in bytes to seek to in the file. + """ + ), + ], + ) -> None: + """ + Move to a position in the file. + + Any next read or write will be done from that position. + + To be awaitable, compatible with async, this is run in threadpool. + """ + return await super().seek(offset) + + async def close(self) -> None: + """ + Close the file. + + To be awaitable, compatible with async, this is run in threadpool. + """ + return await super().close() + + @classmethod + def __get_validators__(cls: Type["UploadFile"]) -> Iterable[Callable[..., Any]]: + yield cls.validate + + @classmethod + def validate(cls: Type["UploadFile"], v: Any) -> Any: + if not isinstance(v, StarletteUploadFile): + raise ValueError(f"Expected UploadFile, received: {type(v)}") + return v + + @classmethod + def _validate(cls, __input_value: Any, _: Any) -> "UploadFile": + if not isinstance(__input_value, StarletteUploadFile): + raise ValueError(f"Expected UploadFile, received: {type(__input_value)}") + return cast(UploadFile, __input_value) + + if not PYDANTIC_V2: + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + field_schema.update({"type": "string", "format": "binary"}) + + @classmethod + def __get_pydantic_json_schema__( + cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler + ) -> JsonSchemaValue: + return {"type": "string", "format": "binary"} + + @classmethod + def __get_pydantic_core_schema__( + cls, source: Type[Any], handler: Callable[[Any], CoreSchema] + ) -> CoreSchema: + return with_info_plain_validator_function(cls._validate) + + +class DefaultPlaceholder: + """ + You shouldn't use this class directly. + + It's used internally to recognize when a default value has been overwritten, even + if the overridden default value was truthy. + """ + + def __init__(self, value: Any): + self.value = value + + def __bool__(self) -> bool: + return bool(self.value) + + def __eq__(self, o: object) -> bool: + return isinstance(o, DefaultPlaceholder) and o.value == self.value + + +DefaultType = TypeVar("DefaultType") + + +def Default(value: DefaultType) -> DefaultType: + """ + You shouldn't use this function directly. + + It's used internally to recognize when a default value has been overwritten, even + if the overridden default value was truthy. 
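+
+    A minimal sketch of the semantics (the `resolve` helper here is
+    hypothetical, purely for illustration):
+
+    ```python
+    from fastapi.datastructures import Default, DefaultPlaceholder
+
+
+    def resolve(value):
+        # A DefaultPlaceholder means "nobody overrode this"; unwrap it.
+        if isinstance(value, DefaultPlaceholder):
+            return value.value, False  # the default was used
+        return value, True  # explicitly set, even if falsy
+
+
+    print(resolve(Default(True)))  # (True, False): a truthy default is still a default
+    print(resolve(False))          # (False, True): a falsy override still counts as set
+    ```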
+ """ + return DefaultPlaceholder(value) # type: ignore diff --git a/venv/Lib/site-packages/fastapi/dependencies/__init__.py b/venv/Lib/site-packages/fastapi/dependencies/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/fastapi/dependencies/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/fastapi/dependencies/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9e18f201 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/dependencies/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/dependencies/__pycache__/models.cpython-312.pyc b/venv/Lib/site-packages/fastapi/dependencies/__pycache__/models.cpython-312.pyc new file mode 100644 index 00000000..41ffc15f Binary files /dev/null and b/venv/Lib/site-packages/fastapi/dependencies/__pycache__/models.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/dependencies/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/fastapi/dependencies/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..8c14d03c Binary files /dev/null and b/venv/Lib/site-packages/fastapi/dependencies/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/dependencies/models.py b/venv/Lib/site-packages/fastapi/dependencies/models.py new file mode 100644 index 00000000..418c1172 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/dependencies/models.py @@ -0,0 +1,37 @@ +from dataclasses import dataclass, field +from typing import Any, Callable, List, Optional, Sequence, Tuple + +from fastapi._compat import ModelField +from fastapi.security.base import SecurityBase + + +@dataclass +class SecurityRequirement: + security_scheme: SecurityBase + scopes: Optional[Sequence[str]] = None + + +@dataclass +class Dependant: + path_params: List[ModelField] = field(default_factory=list) + query_params: List[ModelField] = field(default_factory=list) + header_params: List[ModelField] = field(default_factory=list) + cookie_params: List[ModelField] = field(default_factory=list) + body_params: List[ModelField] = field(default_factory=list) + dependencies: List["Dependant"] = field(default_factory=list) + security_requirements: List[SecurityRequirement] = field(default_factory=list) + name: Optional[str] = None + call: Optional[Callable[..., Any]] = None + request_param_name: Optional[str] = None + websocket_param_name: Optional[str] = None + http_connection_param_name: Optional[str] = None + response_param_name: Optional[str] = None + background_tasks_param_name: Optional[str] = None + security_scopes_param_name: Optional[str] = None + security_scopes: Optional[List[str]] = None + use_cache: bool = True + path: Optional[str] = None + cache_key: Tuple[Optional[Callable[..., Any]], Tuple[str, ...]] = field(init=False) + + def __post_init__(self) -> None: + self.cache_key = (self.call, tuple(sorted(set(self.security_scopes or [])))) diff --git a/venv/Lib/site-packages/fastapi/dependencies/utils.py b/venv/Lib/site-packages/fastapi/dependencies/utils.py new file mode 100644 index 00000000..84dfa4d0 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/dependencies/utils.py @@ -0,0 +1,980 @@ +import inspect +from contextlib import AsyncExitStack, contextmanager +from copy import copy, deepcopy +from dataclasses import dataclass +from typing import ( + Any, + Callable, + Coroutine, + Dict, + ForwardRef, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +import anyio +from fastapi import 
params +from fastapi._compat import ( + PYDANTIC_V2, + ErrorWrapper, + ModelField, + RequiredParam, + Undefined, + _regenerate_error_with_loc, + copy_field_info, + create_body_model, + evaluate_forwardref, + field_annotation_is_scalar, + get_annotation_from_field_info, + get_cached_model_fields, + get_missing_field_error, + is_bytes_field, + is_bytes_sequence_field, + is_scalar_field, + is_scalar_sequence_field, + is_sequence_field, + is_uploadfile_or_nonable_uploadfile_annotation, + is_uploadfile_sequence_annotation, + lenient_issubclass, + sequence_types, + serialize_sequence_value, + value_is_sequence, +) +from fastapi.background import BackgroundTasks +from fastapi.concurrency import ( + asynccontextmanager, + contextmanager_in_threadpool, +) +from fastapi.dependencies.models import Dependant, SecurityRequirement +from fastapi.logger import logger +from fastapi.security.base import SecurityBase +from fastapi.security.oauth2 import OAuth2, SecurityScopes +from fastapi.security.open_id_connect_url import OpenIdConnect +from fastapi.utils import create_model_field, get_path_param_names +from pydantic import BaseModel +from pydantic.fields import FieldInfo +from starlette.background import BackgroundTasks as StarletteBackgroundTasks +from starlette.concurrency import run_in_threadpool +from starlette.datastructures import ( + FormData, + Headers, + ImmutableMultiDict, + QueryParams, + UploadFile, +) +from starlette.requests import HTTPConnection, Request +from starlette.responses import Response +from starlette.websockets import WebSocket +from typing_extensions import Annotated, get_args, get_origin + +multipart_not_installed_error = ( + 'Form data requires "python-multipart" to be installed. \n' + 'You can install "python-multipart" with: \n\n' + "pip install python-multipart\n" +) +multipart_incorrect_install_error = ( + 'Form data requires "python-multipart" to be installed. ' + 'It seems you installed "multipart" instead. 
\n' + 'You can remove "multipart" with: \n\n' + "pip uninstall multipart\n\n" + 'And then install "python-multipart" with: \n\n' + "pip install python-multipart\n" +) + + +def ensure_multipart_is_installed() -> None: + try: + from python_multipart import __version__ + + # Import an attribute that can be mocked/deleted in testing + assert __version__ > "0.0.12" + except (ImportError, AssertionError): + try: + # __version__ is available in both multiparts, and can be mocked + from multipart import __version__ # type: ignore[no-redef,import-untyped] + + assert __version__ + try: + # parse_options_header is only available in the right multipart + from multipart.multipart import ( # type: ignore[import-untyped] + parse_options_header, + ) + + assert parse_options_header + except ImportError: + logger.error(multipart_incorrect_install_error) + raise RuntimeError(multipart_incorrect_install_error) from None + except ImportError: + logger.error(multipart_not_installed_error) + raise RuntimeError(multipart_not_installed_error) from None + + +def get_param_sub_dependant( + *, + param_name: str, + depends: params.Depends, + path: str, + security_scopes: Optional[List[str]] = None, +) -> Dependant: + assert depends.dependency + return get_sub_dependant( + depends=depends, + dependency=depends.dependency, + path=path, + name=param_name, + security_scopes=security_scopes, + ) + + +def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant: + assert callable(depends.dependency), ( + "A parameter-less dependency must have a callable dependency" + ) + return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path) + + +def get_sub_dependant( + *, + depends: params.Depends, + dependency: Callable[..., Any], + path: str, + name: Optional[str] = None, + security_scopes: Optional[List[str]] = None, +) -> Dependant: + security_requirement = None + security_scopes = security_scopes or [] + if isinstance(depends, params.Security): + dependency_scopes = depends.scopes + security_scopes.extend(dependency_scopes) + if isinstance(dependency, SecurityBase): + use_scopes: List[str] = [] + if isinstance(dependency, (OAuth2, OpenIdConnect)): + use_scopes = security_scopes + security_requirement = SecurityRequirement( + security_scheme=dependency, scopes=use_scopes + ) + sub_dependant = get_dependant( + path=path, + call=dependency, + name=name, + security_scopes=security_scopes, + use_cache=depends.use_cache, + ) + if security_requirement: + sub_dependant.security_requirements.append(security_requirement) + return sub_dependant + + +CacheKey = Tuple[Optional[Callable[..., Any]], Tuple[str, ...]] + + +def get_flat_dependant( + dependant: Dependant, + *, + skip_repeats: bool = False, + visited: Optional[List[CacheKey]] = None, +) -> Dependant: + if visited is None: + visited = [] + visited.append(dependant.cache_key) + + flat_dependant = Dependant( + path_params=dependant.path_params.copy(), + query_params=dependant.query_params.copy(), + header_params=dependant.header_params.copy(), + cookie_params=dependant.cookie_params.copy(), + body_params=dependant.body_params.copy(), + security_requirements=dependant.security_requirements.copy(), + use_cache=dependant.use_cache, + path=dependant.path, + ) + for sub_dependant in dependant.dependencies: + if skip_repeats and sub_dependant.cache_key in visited: + continue + flat_sub = get_flat_dependant( + sub_dependant, skip_repeats=skip_repeats, visited=visited + ) + flat_dependant.path_params.extend(flat_sub.path_params) + 
flat_dependant.query_params.extend(flat_sub.query_params) + flat_dependant.header_params.extend(flat_sub.header_params) + flat_dependant.cookie_params.extend(flat_sub.cookie_params) + flat_dependant.body_params.extend(flat_sub.body_params) + flat_dependant.security_requirements.extend(flat_sub.security_requirements) + return flat_dependant + + +def _get_flat_fields_from_params(fields: List[ModelField]) -> List[ModelField]: + if not fields: + return fields + first_field = fields[0] + if len(fields) == 1 and lenient_issubclass(first_field.type_, BaseModel): + fields_to_extract = get_cached_model_fields(first_field.type_) + return fields_to_extract + return fields + + +def get_flat_params(dependant: Dependant) -> List[ModelField]: + flat_dependant = get_flat_dependant(dependant, skip_repeats=True) + path_params = _get_flat_fields_from_params(flat_dependant.path_params) + query_params = _get_flat_fields_from_params(flat_dependant.query_params) + header_params = _get_flat_fields_from_params(flat_dependant.header_params) + cookie_params = _get_flat_fields_from_params(flat_dependant.cookie_params) + return path_params + query_params + header_params + cookie_params + + +def get_typed_signature(call: Callable[..., Any]) -> inspect.Signature: + signature = inspect.signature(call) + globalns = getattr(call, "__globals__", {}) + typed_params = [ + inspect.Parameter( + name=param.name, + kind=param.kind, + default=param.default, + annotation=get_typed_annotation(param.annotation, globalns), + ) + for param in signature.parameters.values() + ] + typed_signature = inspect.Signature(typed_params) + return typed_signature + + +def get_typed_annotation(annotation: Any, globalns: Dict[str, Any]) -> Any: + if isinstance(annotation, str): + annotation = ForwardRef(annotation) + annotation = evaluate_forwardref(annotation, globalns, globalns) + return annotation + + +def get_typed_return_annotation(call: Callable[..., Any]) -> Any: + signature = inspect.signature(call) + annotation = signature.return_annotation + + if annotation is inspect.Signature.empty: + return None + + globalns = getattr(call, "__globals__", {}) + return get_typed_annotation(annotation, globalns) + + +def get_dependant( + *, + path: str, + call: Callable[..., Any], + name: Optional[str] = None, + security_scopes: Optional[List[str]] = None, + use_cache: bool = True, +) -> Dependant: + path_param_names = get_path_param_names(path) + endpoint_signature = get_typed_signature(call) + signature_params = endpoint_signature.parameters + dependant = Dependant( + call=call, + name=name, + path=path, + security_scopes=security_scopes, + use_cache=use_cache, + ) + for param_name, param in signature_params.items(): + is_path_param = param_name in path_param_names + param_details = analyze_param( + param_name=param_name, + annotation=param.annotation, + value=param.default, + is_path_param=is_path_param, + ) + if param_details.depends is not None: + sub_dependant = get_param_sub_dependant( + param_name=param_name, + depends=param_details.depends, + path=path, + security_scopes=security_scopes, + ) + dependant.dependencies.append(sub_dependant) + continue + if add_non_field_param_to_dependency( + param_name=param_name, + type_annotation=param_details.type_annotation, + dependant=dependant, + ): + assert param_details.field is None, ( + f"Cannot specify multiple FastAPI annotations for {param_name!r}" + ) + continue + assert param_details.field is not None + if isinstance(param_details.field.field_info, params.Body): + 
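+            # Form and File subclass Body, so all three are collected as
+            # body params; everything else is routed to the path/query/
+            # header/cookie buckets by add_param_to_fields() below.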
dependant.body_params.append(param_details.field) + else: + add_param_to_fields(field=param_details.field, dependant=dependant) + return dependant + + +def add_non_field_param_to_dependency( + *, param_name: str, type_annotation: Any, dependant: Dependant +) -> Optional[bool]: + if lenient_issubclass(type_annotation, Request): + dependant.request_param_name = param_name + return True + elif lenient_issubclass(type_annotation, WebSocket): + dependant.websocket_param_name = param_name + return True + elif lenient_issubclass(type_annotation, HTTPConnection): + dependant.http_connection_param_name = param_name + return True + elif lenient_issubclass(type_annotation, Response): + dependant.response_param_name = param_name + return True + elif lenient_issubclass(type_annotation, StarletteBackgroundTasks): + dependant.background_tasks_param_name = param_name + return True + elif lenient_issubclass(type_annotation, SecurityScopes): + dependant.security_scopes_param_name = param_name + return True + return None + + +@dataclass +class ParamDetails: + type_annotation: Any + depends: Optional[params.Depends] + field: Optional[ModelField] + + +def analyze_param( + *, + param_name: str, + annotation: Any, + value: Any, + is_path_param: bool, +) -> ParamDetails: + field_info = None + depends = None + type_annotation: Any = Any + use_annotation: Any = Any + if annotation is not inspect.Signature.empty: + use_annotation = annotation + type_annotation = annotation + # Extract Annotated info + if get_origin(use_annotation) is Annotated: + annotated_args = get_args(annotation) + type_annotation = annotated_args[0] + fastapi_annotations = [ + arg + for arg in annotated_args[1:] + if isinstance(arg, (FieldInfo, params.Depends)) + ] + fastapi_specific_annotations = [ + arg + for arg in fastapi_annotations + if isinstance(arg, (params.Param, params.Body, params.Depends)) + ] + if fastapi_specific_annotations: + fastapi_annotation: Union[FieldInfo, params.Depends, None] = ( + fastapi_specific_annotations[-1] + ) + else: + fastapi_annotation = None + # Set default for Annotated FieldInfo + if isinstance(fastapi_annotation, FieldInfo): + # Copy `field_info` because we mutate `field_info.default` below. + field_info = copy_field_info( + field_info=fastapi_annotation, annotation=use_annotation + ) + assert ( + field_info.default is Undefined or field_info.default is RequiredParam + ), ( + f"`{field_info.__class__.__name__}` default value cannot be set in" + f" `Annotated` for {param_name!r}. Set the default value with `=` instead." 
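+                # e.g. write `q: Annotated[str, Query()] = "x"` rather
+                # than `q: Annotated[str, Query(default="x")]`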
+            )
+            if value is not inspect.Signature.empty:
+                assert not is_path_param, "Path parameters cannot have default values"
+                field_info.default = value
+            else:
+                field_info.default = RequiredParam
+        # Get Annotated Depends
+        elif isinstance(fastapi_annotation, params.Depends):
+            depends = fastapi_annotation
+    # Get Depends from default value
+    if isinstance(value, params.Depends):
+        assert depends is None, (
+            "Cannot specify `Depends` in `Annotated` and default value"
+            f" together for {param_name!r}"
+        )
+        assert field_info is None, (
+            "Cannot specify a FastAPI annotation in `Annotated` and `Depends` as a"
+            f" default value together for {param_name!r}"
+        )
+        depends = value
+    # Get FieldInfo from default value
+    elif isinstance(value, FieldInfo):
+        assert field_info is None, (
+            "Cannot specify FastAPI annotations in `Annotated` and default value"
+            f" together for {param_name!r}"
+        )
+        field_info = value
+        if PYDANTIC_V2:
+            field_info.annotation = type_annotation
+
+    # Get Depends from type annotation
+    if depends is not None and depends.dependency is None:
+        # Copy `depends` before mutating it
+        depends = copy(depends)
+        depends.dependency = type_annotation
+
+    # Handle non-param type annotations like Request
+    if lenient_issubclass(
+        type_annotation,
+        (
+            Request,
+            WebSocket,
+            HTTPConnection,
+            Response,
+            StarletteBackgroundTasks,
+            SecurityScopes,
+        ),
+    ):
+        assert depends is None, f"Cannot specify `Depends` for type {type_annotation!r}"
+        assert field_info is None, (
+            f"Cannot specify FastAPI annotation for type {type_annotation!r}"
+        )
+    # Handle default assignments: neither field_info nor depends was found in
+    # `Annotated` or in the default value
+    elif field_info is None and depends is None:
+        default_value = value if value is not inspect.Signature.empty else RequiredParam
+        if is_path_param:
+            # We might check here that `default_value is RequiredParam`, but the fact is that the same
+            # parameter might sometimes be a path parameter and sometimes not. See
+            # `tests/test_infer_param_optionality.py` for an example.
+ field_info = params.Path(annotation=use_annotation) + elif is_uploadfile_or_nonable_uploadfile_annotation( + type_annotation + ) or is_uploadfile_sequence_annotation(type_annotation): + field_info = params.File(annotation=use_annotation, default=default_value) + elif not field_annotation_is_scalar(annotation=type_annotation): + field_info = params.Body(annotation=use_annotation, default=default_value) + else: + field_info = params.Query(annotation=use_annotation, default=default_value) + + field = None + # It's a field_info, not a dependency + if field_info is not None: + # Handle field_info.in_ + if is_path_param: + assert isinstance(field_info, params.Path), ( + f"Cannot use `{field_info.__class__.__name__}` for path param" + f" {param_name!r}" + ) + elif ( + isinstance(field_info, params.Param) + and getattr(field_info, "in_", None) is None + ): + field_info.in_ = params.ParamTypes.query + use_annotation_from_field_info = get_annotation_from_field_info( + use_annotation, + field_info, + param_name, + ) + if isinstance(field_info, params.Form): + ensure_multipart_is_installed() + if not field_info.alias and getattr(field_info, "convert_underscores", None): + alias = param_name.replace("_", "-") + else: + alias = field_info.alias or param_name + field_info.alias = alias + field = create_model_field( + name=param_name, + type_=use_annotation_from_field_info, + default=field_info.default, + alias=alias, + required=field_info.default in (RequiredParam, Undefined), + field_info=field_info, + ) + if is_path_param: + assert is_scalar_field(field=field), ( + "Path params must be of one of the supported types" + ) + elif isinstance(field_info, params.Query): + assert ( + is_scalar_field(field) + or is_scalar_sequence_field(field) + or ( + lenient_issubclass(field.type_, BaseModel) + # For Pydantic v1 + and getattr(field, "shape", 1) == 1 + ) + ) + + return ParamDetails(type_annotation=type_annotation, depends=depends, field=field) + + +def add_param_to_fields(*, field: ModelField, dependant: Dependant) -> None: + field_info = field.field_info + field_info_in = getattr(field_info, "in_", None) + if field_info_in == params.ParamTypes.path: + dependant.path_params.append(field) + elif field_info_in == params.ParamTypes.query: + dependant.query_params.append(field) + elif field_info_in == params.ParamTypes.header: + dependant.header_params.append(field) + else: + assert field_info_in == params.ParamTypes.cookie, ( + f"non-body parameters must be in path, query, header or cookie: {field.name}" + ) + dependant.cookie_params.append(field) + + +def is_coroutine_callable(call: Callable[..., Any]) -> bool: + if inspect.isroutine(call): + return inspect.iscoroutinefunction(call) + if inspect.isclass(call): + return False + dunder_call = getattr(call, "__call__", None) # noqa: B004 + return inspect.iscoroutinefunction(dunder_call) + + +def is_async_gen_callable(call: Callable[..., Any]) -> bool: + if inspect.isasyncgenfunction(call): + return True + dunder_call = getattr(call, "__call__", None) # noqa: B004 + return inspect.isasyncgenfunction(dunder_call) + + +def is_gen_callable(call: Callable[..., Any]) -> bool: + if inspect.isgeneratorfunction(call): + return True + dunder_call = getattr(call, "__call__", None) # noqa: B004 + return inspect.isgeneratorfunction(dunder_call) + + +async def solve_generator( + *, call: Callable[..., Any], stack: AsyncExitStack, sub_values: Dict[str, Any] +) -> Any: + if is_gen_callable(call): + cm = contextmanager_in_threadpool(contextmanager(call)(**sub_values)) + elif 
is_async_gen_callable(call): + cm = asynccontextmanager(call)(**sub_values) + return await stack.enter_async_context(cm) + + +@dataclass +class SolvedDependency: + values: Dict[str, Any] + errors: List[Any] + background_tasks: Optional[StarletteBackgroundTasks] + response: Response + dependency_cache: Dict[Tuple[Callable[..., Any], Tuple[str]], Any] + + +async def solve_dependencies( + *, + request: Union[Request, WebSocket], + dependant: Dependant, + body: Optional[Union[Dict[str, Any], FormData]] = None, + background_tasks: Optional[StarletteBackgroundTasks] = None, + response: Optional[Response] = None, + dependency_overrides_provider: Optional[Any] = None, + dependency_cache: Optional[Dict[Tuple[Callable[..., Any], Tuple[str]], Any]] = None, + async_exit_stack: AsyncExitStack, + embed_body_fields: bool, +) -> SolvedDependency: + values: Dict[str, Any] = {} + errors: List[Any] = [] + if response is None: + response = Response() + del response.headers["content-length"] + response.status_code = None # type: ignore + dependency_cache = dependency_cache or {} + sub_dependant: Dependant + for sub_dependant in dependant.dependencies: + sub_dependant.call = cast(Callable[..., Any], sub_dependant.call) + sub_dependant.cache_key = cast( + Tuple[Callable[..., Any], Tuple[str]], sub_dependant.cache_key + ) + call = sub_dependant.call + use_sub_dependant = sub_dependant + if ( + dependency_overrides_provider + and dependency_overrides_provider.dependency_overrides + ): + original_call = sub_dependant.call + call = getattr( + dependency_overrides_provider, "dependency_overrides", {} + ).get(original_call, original_call) + use_path: str = sub_dependant.path # type: ignore + use_sub_dependant = get_dependant( + path=use_path, + call=call, + name=sub_dependant.name, + security_scopes=sub_dependant.security_scopes, + ) + + solved_result = await solve_dependencies( + request=request, + dependant=use_sub_dependant, + body=body, + background_tasks=background_tasks, + response=response, + dependency_overrides_provider=dependency_overrides_provider, + dependency_cache=dependency_cache, + async_exit_stack=async_exit_stack, + embed_body_fields=embed_body_fields, + ) + background_tasks = solved_result.background_tasks + dependency_cache.update(solved_result.dependency_cache) + if solved_result.errors: + errors.extend(solved_result.errors) + continue + if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache: + solved = dependency_cache[sub_dependant.cache_key] + elif is_gen_callable(call) or is_async_gen_callable(call): + solved = await solve_generator( + call=call, stack=async_exit_stack, sub_values=solved_result.values + ) + elif is_coroutine_callable(call): + solved = await call(**solved_result.values) + else: + solved = await run_in_threadpool(call, **solved_result.values) + if sub_dependant.name is not None: + values[sub_dependant.name] = solved + if sub_dependant.cache_key not in dependency_cache: + dependency_cache[sub_dependant.cache_key] = solved + path_values, path_errors = request_params_to_args( + dependant.path_params, request.path_params + ) + query_values, query_errors = request_params_to_args( + dependant.query_params, request.query_params + ) + header_values, header_errors = request_params_to_args( + dependant.header_params, request.headers + ) + cookie_values, cookie_errors = request_params_to_args( + dependant.cookie_params, request.cookies + ) + values.update(path_values) + values.update(query_values) + values.update(header_values) + values.update(cookie_values) + errors += 
path_errors + query_errors + header_errors + cookie_errors + if dependant.body_params: + ( + body_values, + body_errors, + ) = await request_body_to_args( # body_params checked above + body_fields=dependant.body_params, + received_body=body, + embed_body_fields=embed_body_fields, + ) + values.update(body_values) + errors.extend(body_errors) + if dependant.http_connection_param_name: + values[dependant.http_connection_param_name] = request + if dependant.request_param_name and isinstance(request, Request): + values[dependant.request_param_name] = request + elif dependant.websocket_param_name and isinstance(request, WebSocket): + values[dependant.websocket_param_name] = request + if dependant.background_tasks_param_name: + if background_tasks is None: + background_tasks = BackgroundTasks() + values[dependant.background_tasks_param_name] = background_tasks + if dependant.response_param_name: + values[dependant.response_param_name] = response + if dependant.security_scopes_param_name: + values[dependant.security_scopes_param_name] = SecurityScopes( + scopes=dependant.security_scopes + ) + return SolvedDependency( + values=values, + errors=errors, + background_tasks=background_tasks, + response=response, + dependency_cache=dependency_cache, + ) + + +def _validate_value_with_model_field( + *, field: ModelField, value: Any, values: Dict[str, Any], loc: Tuple[str, ...] +) -> Tuple[Any, List[Any]]: + if value is None: + if field.required: + return None, [get_missing_field_error(loc=loc)] + else: + return deepcopy(field.default), [] + v_, errors_ = field.validate(value, values, loc=loc) + if isinstance(errors_, ErrorWrapper): + return None, [errors_] + elif isinstance(errors_, list): + new_errors = _regenerate_error_with_loc(errors=errors_, loc_prefix=()) + return None, new_errors + else: + return v_, [] + + +def _get_multidict_value( + field: ModelField, values: Mapping[str, Any], alias: Union[str, None] = None +) -> Any: + alias = alias or field.alias + if is_sequence_field(field) and isinstance(values, (ImmutableMultiDict, Headers)): + value = values.getlist(alias) + else: + value = values.get(alias, None) + if ( + value is None + or ( + isinstance(field.field_info, params.Form) + and isinstance(value, str) # For type checks + and value == "" + ) + or (is_sequence_field(field) and len(value) == 0) + ): + if field.required: + return + else: + return deepcopy(field.default) + return value + + +def request_params_to_args( + fields: Sequence[ModelField], + received_params: Union[Mapping[str, Any], QueryParams, Headers], +) -> Tuple[Dict[str, Any], List[Any]]: + values: Dict[str, Any] = {} + errors: List[Dict[str, Any]] = [] + + if not fields: + return values, errors + + first_field = fields[0] + fields_to_extract = fields + single_not_embedded_field = False + default_convert_underscores = True + if len(fields) == 1 and lenient_issubclass(first_field.type_, BaseModel): + fields_to_extract = get_cached_model_fields(first_field.type_) + single_not_embedded_field = True + # If headers are in a Pydantic model, the way to disable convert_underscores + # would be with Header(convert_underscores=False) at the Pydantic model level + default_convert_underscores = getattr( + first_field.field_info, "convert_underscores", True + ) + + params_to_process: Dict[str, Any] = {} + + processed_keys = set() + + for field in fields_to_extract: + alias = None + if isinstance(received_params, Headers): + # Handle fields extracted from a Pydantic Model for a header, each field + # doesn't have a FieldInfo of type Header 
with the default convert_underscores=True + convert_underscores = getattr( + field.field_info, "convert_underscores", default_convert_underscores + ) + if convert_underscores: + alias = ( + field.alias + if field.alias != field.name + else field.name.replace("_", "-") + ) + value = _get_multidict_value(field, received_params, alias=alias) + if value is not None: + params_to_process[field.name] = value + processed_keys.add(alias or field.alias) + processed_keys.add(field.name) + + for key, value in received_params.items(): + if key not in processed_keys: + params_to_process[key] = value + + if single_not_embedded_field: + field_info = first_field.field_info + assert isinstance(field_info, params.Param), ( + "Params must be subclasses of Param" + ) + loc: Tuple[str, ...] = (field_info.in_.value,) + v_, errors_ = _validate_value_with_model_field( + field=first_field, value=params_to_process, values=values, loc=loc + ) + return {first_field.name: v_}, errors_ + + for field in fields: + value = _get_multidict_value(field, received_params) + field_info = field.field_info + assert isinstance(field_info, params.Param), ( + "Params must be subclasses of Param" + ) + loc = (field_info.in_.value, field.alias) + v_, errors_ = _validate_value_with_model_field( + field=field, value=value, values=values, loc=loc + ) + if errors_: + errors.extend(errors_) + else: + values[field.name] = v_ + return values, errors + + +def _should_embed_body_fields(fields: List[ModelField]) -> bool: + if not fields: + return False + # More than one dependency could have the same field, it would show up as multiple + # fields but it's the same one, so count them by name + body_param_names_set = {field.name for field in fields} + # A top level field has to be a single field, not multiple + if len(body_param_names_set) > 1: + return True + first_field = fields[0] + # If it explicitly specifies it is embedded, it has to be embedded + if getattr(first_field.field_info, "embed", None): + return True + # If it's a Form (or File) field, it has to be a BaseModel to be top level + # otherwise it has to be embedded, so that the key value pair can be extracted + if isinstance(first_field.field_info, params.Form) and not lenient_issubclass( + first_field.type_, BaseModel + ): + return True + return False + + +async def _extract_form_body( + body_fields: List[ModelField], + received_body: FormData, +) -> Dict[str, Any]: + values = {} + first_field = body_fields[0] + first_field_info = first_field.field_info + + for field in body_fields: + value = _get_multidict_value(field, received_body) + if ( + isinstance(first_field_info, params.File) + and is_bytes_field(field) + and isinstance(value, UploadFile) + ): + value = await value.read() + elif ( + is_bytes_sequence_field(field) + and isinstance(first_field_info, params.File) + and value_is_sequence(value) + ): + # For types + assert isinstance(value, sequence_types) # type: ignore[arg-type] + results: List[Union[bytes, str]] = [] + + async def process_fn( + fn: Callable[[], Coroutine[Any, Any, Any]], + ) -> None: + result = await fn() + results.append(result) # noqa: B023 + + async with anyio.create_task_group() as tg: + for sub_value in value: + tg.start_soon(process_fn, sub_value.read) + value = serialize_sequence_value(field=field, value=results) + if value is not None: + values[field.alias] = value + for key, value in received_body.items(): + if key not in values: + values[key] = value + return values + + +async def request_body_to_args( + body_fields: List[ModelField], + 
received_body: Optional[Union[Dict[str, Any], FormData]], + embed_body_fields: bool, +) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]: + values: Dict[str, Any] = {} + errors: List[Dict[str, Any]] = [] + assert body_fields, "request_body_to_args() should be called with fields" + single_not_embedded_field = len(body_fields) == 1 and not embed_body_fields + first_field = body_fields[0] + body_to_process = received_body + + fields_to_extract: List[ModelField] = body_fields + + if single_not_embedded_field and lenient_issubclass(first_field.type_, BaseModel): + fields_to_extract = get_cached_model_fields(first_field.type_) + + if isinstance(received_body, FormData): + body_to_process = await _extract_form_body(fields_to_extract, received_body) + + if single_not_embedded_field: + loc: Tuple[str, ...] = ("body",) + v_, errors_ = _validate_value_with_model_field( + field=first_field, value=body_to_process, values=values, loc=loc + ) + return {first_field.name: v_}, errors_ + for field in body_fields: + loc = ("body", field.alias) + value: Optional[Any] = None + if body_to_process is not None: + try: + value = body_to_process.get(field.alias) + # If the received body is a list, not a dict + except AttributeError: + errors.append(get_missing_field_error(loc)) + continue + v_, errors_ = _validate_value_with_model_field( + field=field, value=value, values=values, loc=loc + ) + if errors_: + errors.extend(errors_) + else: + values[field.name] = v_ + return values, errors + + +def get_body_field( + *, flat_dependant: Dependant, name: str, embed_body_fields: bool +) -> Optional[ModelField]: + """ + Get a ModelField representing the request body for a path operation, combining + all body parameters into a single field if necessary. + + Used to check if it's form data (with `isinstance(body_field, params.Form)`) + or JSON and to generate the JSON Schema for a request body. + + This is **not** used to validate/parse the request body, that's done with each + individual body parameter. 
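+
+    For instance (a sketch; the synthetic model name follows the
+    `"Body_" + name` pattern used by this function):
+
+    ```python
+    from fastapi import FastAPI
+    from pydantic import BaseModel
+
+    app = FastAPI()
+
+
+    class Item(BaseModel):
+        name: str
+
+
+    class User(BaseModel):
+        username: str
+
+
+    @app.put("/items/{item_id}")
+    def update_item(item_id: int, item: Item, user: User):
+        # Two body params get embedded under one combined
+        # "Body_update_item..." model, so clients send:
+        # {"item": {"name": "..."}, "user": {"username": "..."}}
+        return {"item_id": item_id, "item": item, "user": user}
+    ```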
+    """
+    if not flat_dependant.body_params:
+        return None
+    first_param = flat_dependant.body_params[0]
+    if not embed_body_fields:
+        return first_param
+    model_name = "Body_" + name
+    BodyModel = create_body_model(
+        fields=flat_dependant.body_params, model_name=model_name
+    )
+    required = any(True for f in flat_dependant.body_params if f.required)
+    BodyFieldInfo_kwargs: Dict[str, Any] = {
+        "annotation": BodyModel,
+        "alias": "body",
+    }
+    if not required:
+        BodyFieldInfo_kwargs["default"] = None
+    if any(isinstance(f.field_info, params.File) for f in flat_dependant.body_params):
+        BodyFieldInfo: Type[params.Body] = params.File
+    elif any(isinstance(f.field_info, params.Form) for f in flat_dependant.body_params):
+        BodyFieldInfo = params.Form
+    else:
+        BodyFieldInfo = params.Body
+
+    body_param_media_types = [
+        f.field_info.media_type
+        for f in flat_dependant.body_params
+        if isinstance(f.field_info, params.Body)
+    ]
+    if len(set(body_param_media_types)) == 1:
+        BodyFieldInfo_kwargs["media_type"] = body_param_media_types[0]
+    final_field = create_model_field(
+        name="body",
+        type_=BodyModel,
+        required=required,
+        alias="body",
+        field_info=BodyFieldInfo(**BodyFieldInfo_kwargs),
+    )
+    return final_field
diff --git a/venv/Lib/site-packages/fastapi/encoders.py b/venv/Lib/site-packages/fastapi/encoders.py
new file mode 100644
index 00000000..451ea076
--- /dev/null
+++ b/venv/Lib/site-packages/fastapi/encoders.py
@@ -0,0 +1,343 @@
+import dataclasses
+import datetime
+from collections import defaultdict, deque
+from decimal import Decimal
+from enum import Enum
+from ipaddress import (
+    IPv4Address,
+    IPv4Interface,
+    IPv4Network,
+    IPv6Address,
+    IPv6Interface,
+    IPv6Network,
+)
+from pathlib import Path, PurePath
+from re import Pattern
+from types import GeneratorType
+from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
+from uuid import UUID
+
+from fastapi.types import IncEx
+from pydantic import BaseModel
+from pydantic.color import Color
+from pydantic.networks import AnyUrl, NameEmail
+from pydantic.types import SecretBytes, SecretStr
+from typing_extensions import Annotated, Doc
+
+from ._compat import PYDANTIC_V2, UndefinedType, Url, _model_dump
+
+
+# Taken from Pydantic v1 as is
+def isoformat(o: Union[datetime.date, datetime.time]) -> str:
+    return o.isoformat()
+
+
+# Taken from Pydantic v1 as is
+# TODO: pv2 should this return strings instead?
+def decimal_encoder(dec_value: Decimal) -> Union[int, float]:
+    """
+    Encodes a Decimal as an int if there's no exponent, otherwise as a float.
+
+    This is useful when we use ConstrainedDecimal to represent Numeric(x,0)
+    where an integer (but not int typed) is used. Encoding this as a float
+    results in failed round-tripping between encode and parse.
+    Our Id type is a prime example of this.
+ + >>> decimal_encoder(Decimal("1.0")) + 1.0 + + >>> decimal_encoder(Decimal("1")) + 1 + """ + if dec_value.as_tuple().exponent >= 0: # type: ignore[operator] + return int(dec_value) + else: + return float(dec_value) + + +ENCODERS_BY_TYPE: Dict[Type[Any], Callable[[Any], Any]] = { + bytes: lambda o: o.decode(), + Color: str, + datetime.date: isoformat, + datetime.datetime: isoformat, + datetime.time: isoformat, + datetime.timedelta: lambda td: td.total_seconds(), + Decimal: decimal_encoder, + Enum: lambda o: o.value, + frozenset: list, + deque: list, + GeneratorType: list, + IPv4Address: str, + IPv4Interface: str, + IPv4Network: str, + IPv6Address: str, + IPv6Interface: str, + IPv6Network: str, + NameEmail: str, + Path: str, + Pattern: lambda o: o.pattern, + SecretBytes: str, + SecretStr: str, + set: list, + UUID: str, + Url: str, + AnyUrl: str, +} + + +def generate_encoders_by_class_tuples( + type_encoder_map: Dict[Any, Callable[[Any], Any]], +) -> Dict[Callable[[Any], Any], Tuple[Any, ...]]: + encoders_by_class_tuples: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict( + tuple + ) + for type_, encoder in type_encoder_map.items(): + encoders_by_class_tuples[encoder] += (type_,) + return encoders_by_class_tuples + + +encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE) + + +def jsonable_encoder( + obj: Annotated[ + Any, + Doc( + """ + The input object to convert to JSON. + """ + ), + ], + include: Annotated[ + Optional[IncEx], + Doc( + """ + Pydantic's `include` parameter, passed to Pydantic models to set the + fields to include. + """ + ), + ] = None, + exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Pydantic's `exclude` parameter, passed to Pydantic models to set the + fields to exclude. + """ + ), + ] = None, + by_alias: Annotated[ + bool, + Doc( + """ + Pydantic's `by_alias` parameter, passed to Pydantic models to define if + the output should use the alias names (when provided) or the Python + attribute names. In an API, if you set an alias, it's probably because you + want to use it in the result, so you probably want to leave this set to + `True`. + """ + ), + ] = True, + exclude_unset: Annotated[ + bool, + Doc( + """ + Pydantic's `exclude_unset` parameter, passed to Pydantic models to define + if it should exclude from the output the fields that were not explicitly + set (and that only had their default values). + """ + ), + ] = False, + exclude_defaults: Annotated[ + bool, + Doc( + """ + Pydantic's `exclude_defaults` parameter, passed to Pydantic models to define + if it should exclude from the output the fields that had the same default + value, even when they were explicitly set. + """ + ), + ] = False, + exclude_none: Annotated[ + bool, + Doc( + """ + Pydantic's `exclude_none` parameter, passed to Pydantic models to define + if it should exclude from the output any fields that have a `None` value. + """ + ), + ] = False, + custom_encoder: Annotated[ + Optional[Dict[Any, Callable[[Any], Any]]], + Doc( + """ + Pydantic's `custom_encoder` parameter, passed to Pydantic models to define + a custom encoder. + """ + ), + ] = None, + sqlalchemy_safe: Annotated[ + bool, + Doc( + """ + Exclude from the output any fields that start with the name `_sa`. + + This is mainly a hack for compatibility with SQLAlchemy objects, they + store internal SQLAlchemy-specific state in attributes named with `_sa`, + and those objects can't (and shouldn't be) serialized to JSON. 
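+
+            A quick sketch of the effect on a plain dict (the key name is
+            illustrative):
+
+            ```python
+            from fastapi.encoders import jsonable_encoder
+
+            jsonable_encoder({"id": 1, "_sa_instance_state": object()})
+            # -> {"id": 1} with the default sqlalchemy_safe=True
+            ```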
+ """ + ), + ] = True, +) -> Any: + """ + Convert any object to something that can be encoded in JSON. + + This is used internally by FastAPI to make sure anything you return can be + encoded as JSON before it is sent to the client. + + You can also use it yourself, for example to convert objects before saving them + in a database that supports only JSON. + + Read more about it in the + [FastAPI docs for JSON Compatible Encoder](https://fastapi.tiangolo.com/tutorial/encoder/). + """ + custom_encoder = custom_encoder or {} + if custom_encoder: + if type(obj) in custom_encoder: + return custom_encoder[type(obj)](obj) + else: + for encoder_type, encoder_instance in custom_encoder.items(): + if isinstance(obj, encoder_type): + return encoder_instance(obj) + if include is not None and not isinstance(include, (set, dict)): + include = set(include) + if exclude is not None and not isinstance(exclude, (set, dict)): + exclude = set(exclude) + if isinstance(obj, BaseModel): + # TODO: remove when deprecating Pydantic v1 + encoders: Dict[Any, Any] = {} + if not PYDANTIC_V2: + encoders = getattr(obj.__config__, "json_encoders", {}) # type: ignore[attr-defined] + if custom_encoder: + encoders.update(custom_encoder) + obj_dict = _model_dump( + obj, + mode="json", + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_none=exclude_none, + exclude_defaults=exclude_defaults, + ) + if "__root__" in obj_dict: + obj_dict = obj_dict["__root__"] + return jsonable_encoder( + obj_dict, + exclude_none=exclude_none, + exclude_defaults=exclude_defaults, + # TODO: remove when deprecating Pydantic v1 + custom_encoder=encoders, + sqlalchemy_safe=sqlalchemy_safe, + ) + if dataclasses.is_dataclass(obj): + obj_dict = dataclasses.asdict(obj) + return jsonable_encoder( + obj_dict, + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + custom_encoder=custom_encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) + if isinstance(obj, Enum): + return obj.value + if isinstance(obj, PurePath): + return str(obj) + if isinstance(obj, (str, int, float, type(None))): + return obj + if isinstance(obj, UndefinedType): + return None + if isinstance(obj, dict): + encoded_dict = {} + allowed_keys = set(obj.keys()) + if include is not None: + allowed_keys &= set(include) + if exclude is not None: + allowed_keys -= set(exclude) + for key, value in obj.items(): + if ( + ( + not sqlalchemy_safe + or (not isinstance(key, str)) + or (not key.startswith("_sa")) + ) + and (value is not None or not exclude_none) + and key in allowed_keys + ): + encoded_key = jsonable_encoder( + key, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_none=exclude_none, + custom_encoder=custom_encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) + encoded_value = jsonable_encoder( + value, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_none=exclude_none, + custom_encoder=custom_encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) + encoded_dict[encoded_key] = encoded_value + return encoded_dict + if isinstance(obj, (list, set, frozenset, GeneratorType, tuple, deque)): + encoded_list = [] + for item in obj: + encoded_list.append( + jsonable_encoder( + item, + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + custom_encoder=custom_encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) + ) + return encoded_list + + if 
type(obj) in ENCODERS_BY_TYPE: + return ENCODERS_BY_TYPE[type(obj)](obj) + for encoder, classes_tuple in encoders_by_class_tuples.items(): + if isinstance(obj, classes_tuple): + return encoder(obj) + + try: + data = dict(obj) + except Exception as e: + errors: List[Exception] = [] + errors.append(e) + try: + data = vars(obj) + except Exception as e: + errors.append(e) + raise ValueError(errors) from e + return jsonable_encoder( + data, + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + custom_encoder=custom_encoder, + sqlalchemy_safe=sqlalchemy_safe, + ) diff --git a/venv/Lib/site-packages/fastapi/exception_handlers.py b/venv/Lib/site-packages/fastapi/exception_handlers.py new file mode 100644 index 00000000..6c2ba7fe --- /dev/null +++ b/venv/Lib/site-packages/fastapi/exception_handlers.py @@ -0,0 +1,34 @@ +from fastapi.encoders import jsonable_encoder +from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError +from fastapi.utils import is_body_allowed_for_status_code +from fastapi.websockets import WebSocket +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.responses import JSONResponse, Response +from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY, WS_1008_POLICY_VIOLATION + + +async def http_exception_handler(request: Request, exc: HTTPException) -> Response: + headers = getattr(exc, "headers", None) + if not is_body_allowed_for_status_code(exc.status_code): + return Response(status_code=exc.status_code, headers=headers) + return JSONResponse( + {"detail": exc.detail}, status_code=exc.status_code, headers=headers + ) + + +async def request_validation_exception_handler( + request: Request, exc: RequestValidationError +) -> JSONResponse: + return JSONResponse( + status_code=HTTP_422_UNPROCESSABLE_ENTITY, + content={"detail": jsonable_encoder(exc.errors())}, + ) + + +async def websocket_request_validation_exception_handler( + websocket: WebSocket, exc: WebSocketRequestValidationError +) -> None: + await websocket.close( + code=WS_1008_POLICY_VIOLATION, reason=jsonable_encoder(exc.errors()) + ) diff --git a/venv/Lib/site-packages/fastapi/exceptions.py b/venv/Lib/site-packages/fastapi/exceptions.py new file mode 100644 index 00000000..44d4ada8 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/exceptions.py @@ -0,0 +1,176 @@ +from typing import Any, Dict, Optional, Sequence, Type, Union + +from pydantic import BaseModel, create_model +from starlette.exceptions import HTTPException as StarletteHTTPException +from starlette.exceptions import WebSocketException as StarletteWebSocketException +from typing_extensions import Annotated, Doc + + +class HTTPException(StarletteHTTPException): + """ + An HTTP exception you can raise in your own code to show errors to the client. + + This is for client errors, invalid authentication, invalid data, etc. Not for server + errors in your code. + + Read more about it in the + [FastAPI docs for Handling Errors](https://fastapi.tiangolo.com/tutorial/handling-errors/). 
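+
+    You can also attach custom response headers, for example a
+    `WWW-Authenticate` hint on authentication errors (a small sketch):
+
+    ```python
+    from fastapi import FastAPI, HTTPException
+
+    app = FastAPI()
+
+
+    @app.get("/private")
+    def read_private():
+        raise HTTPException(
+            status_code=401,
+            detail="Not authenticated",
+            # tells the client which auth scheme to retry with
+            headers={"WWW-Authenticate": "Bearer"},
+        )
+    ```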
+ + ## Example + + ```python + from fastapi import FastAPI, HTTPException + + app = FastAPI() + + items = {"foo": "The Foo Wrestlers"} + + + @app.get("/items/{item_id}") + async def read_item(item_id: str): + if item_id not in items: + raise HTTPException(status_code=404, detail="Item not found") + return {"item": items[item_id]} + ``` + """ + + def __init__( + self, + status_code: Annotated[ + int, + Doc( + """ + HTTP status code to send to the client. + """ + ), + ], + detail: Annotated[ + Any, + Doc( + """ + Any data to be sent to the client in the `detail` key of the JSON + response. + """ + ), + ] = None, + headers: Annotated[ + Optional[Dict[str, str]], + Doc( + """ + Any headers to send to the client in the response. + """ + ), + ] = None, + ) -> None: + super().__init__(status_code=status_code, detail=detail, headers=headers) + + +class WebSocketException(StarletteWebSocketException): + """ + A WebSocket exception you can raise in your own code to show errors to the client. + + This is for client errors, invalid authentication, invalid data, etc. Not for server + errors in your code. + + Read more about it in the + [FastAPI docs for WebSockets](https://fastapi.tiangolo.com/advanced/websockets/). + + ## Example + + ```python + from typing import Annotated + + from fastapi import ( + Cookie, + FastAPI, + WebSocket, + WebSocketException, + status, + ) + + app = FastAPI() + + @app.websocket("/items/{item_id}/ws") + async def websocket_endpoint( + *, + websocket: WebSocket, + session: Annotated[str | None, Cookie()] = None, + item_id: str, + ): + if session is None: + raise WebSocketException(code=status.WS_1008_POLICY_VIOLATION) + await websocket.accept() + while True: + data = await websocket.receive_text() + await websocket.send_text(f"Session cookie is: {session}") + await websocket.send_text(f"Message text was: {data}, for item ID: {item_id}") + ``` + """ + + def __init__( + self, + code: Annotated[ + int, + Doc( + """ + A closing code from the + [valid codes defined in the specification](https://datatracker.ietf.org/doc/html/rfc6455#section-7.4.1). + """ + ), + ], + reason: Annotated[ + Union[str, None], + Doc( + """ + The reason to close the WebSocket connection. + + It is UTF-8-encoded data. The interpretation of the reason is up to the + application, it is not specified by the WebSocket specification. + + It could contain text that could be human-readable or interpretable + by the client code, etc. + """ + ), + ] = None, + ) -> None: + super().__init__(code=code, reason=reason) + + +RequestErrorModel: Type[BaseModel] = create_model("Request") +WebSocketErrorModel: Type[BaseModel] = create_model("WebSocket") + + +class FastAPIError(RuntimeError): + """ + A generic, FastAPI-specific error. 
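+
+    FastAPI raises it for invalid framework usage detected while the app is
+    being set up. A sketch (assuming an arbitrary non-Pydantic class is used
+    as `response_model`):
+
+    ```python
+    from fastapi import FastAPI
+    from fastapi.exceptions import FastAPIError
+
+    app = FastAPI()
+
+
+    class NotAModel:  # not a valid Pydantic field type
+        pass
+
+
+    try:
+        @app.get("/things", response_model=NotAModel)
+        def read_things():
+            return NotAModel()
+    except FastAPIError as err:
+        print(f"invalid route configuration: {err}")
+    ```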
+ """ + + +class ValidationException(Exception): + def __init__(self, errors: Sequence[Any]) -> None: + self._errors = errors + + def errors(self) -> Sequence[Any]: + return self._errors + + +class RequestValidationError(ValidationException): + def __init__(self, errors: Sequence[Any], *, body: Any = None) -> None: + super().__init__(errors) + self.body = body + + +class WebSocketRequestValidationError(ValidationException): + pass + + +class ResponseValidationError(ValidationException): + def __init__(self, errors: Sequence[Any], *, body: Any = None) -> None: + super().__init__(errors) + self.body = body + + def __str__(self) -> str: + message = f"{len(self._errors)} validation errors:\n" + for err in self._errors: + message += f" {err}\n" + return message diff --git a/venv/Lib/site-packages/fastapi/logger.py b/venv/Lib/site-packages/fastapi/logger.py new file mode 100644 index 00000000..5b2c4ad5 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/logger.py @@ -0,0 +1,3 @@ +import logging + +logger = logging.getLogger("fastapi") diff --git a/venv/Lib/site-packages/fastapi/middleware/__init__.py b/venv/Lib/site-packages/fastapi/middleware/__init__.py new file mode 100644 index 00000000..620296d5 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/middleware/__init__.py @@ -0,0 +1 @@ +from starlette.middleware import Middleware as Middleware diff --git a/venv/Lib/site-packages/fastapi/middleware/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/fastapi/middleware/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..042df5c8 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/middleware/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/middleware/__pycache__/cors.cpython-312.pyc b/venv/Lib/site-packages/fastapi/middleware/__pycache__/cors.cpython-312.pyc new file mode 100644 index 00000000..7912908d Binary files /dev/null and b/venv/Lib/site-packages/fastapi/middleware/__pycache__/cors.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/middleware/__pycache__/gzip.cpython-312.pyc b/venv/Lib/site-packages/fastapi/middleware/__pycache__/gzip.cpython-312.pyc new file mode 100644 index 00000000..0e096268 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/middleware/__pycache__/gzip.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/middleware/__pycache__/httpsredirect.cpython-312.pyc b/venv/Lib/site-packages/fastapi/middleware/__pycache__/httpsredirect.cpython-312.pyc new file mode 100644 index 00000000..55fcb922 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/middleware/__pycache__/httpsredirect.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/middleware/__pycache__/trustedhost.cpython-312.pyc b/venv/Lib/site-packages/fastapi/middleware/__pycache__/trustedhost.cpython-312.pyc new file mode 100644 index 00000000..140a632d Binary files /dev/null and b/venv/Lib/site-packages/fastapi/middleware/__pycache__/trustedhost.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/middleware/__pycache__/wsgi.cpython-312.pyc b/venv/Lib/site-packages/fastapi/middleware/__pycache__/wsgi.cpython-312.pyc new file mode 100644 index 00000000..af340b0e Binary files /dev/null and b/venv/Lib/site-packages/fastapi/middleware/__pycache__/wsgi.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/middleware/cors.py b/venv/Lib/site-packages/fastapi/middleware/cors.py new file mode 100644 index 00000000..8dfaad0d --- /dev/null +++ 
b/venv/Lib/site-packages/fastapi/middleware/cors.py @@ -0,0 +1 @@ +from starlette.middleware.cors import CORSMiddleware as CORSMiddleware # noqa diff --git a/venv/Lib/site-packages/fastapi/middleware/gzip.py b/venv/Lib/site-packages/fastapi/middleware/gzip.py new file mode 100644 index 00000000..bbeb2cc7 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/middleware/gzip.py @@ -0,0 +1 @@ +from starlette.middleware.gzip import GZipMiddleware as GZipMiddleware # noqa diff --git a/venv/Lib/site-packages/fastapi/middleware/httpsredirect.py b/venv/Lib/site-packages/fastapi/middleware/httpsredirect.py new file mode 100644 index 00000000..b7a3d8e0 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/middleware/httpsredirect.py @@ -0,0 +1,3 @@ +from starlette.middleware.httpsredirect import ( # noqa + HTTPSRedirectMiddleware as HTTPSRedirectMiddleware, +) diff --git a/venv/Lib/site-packages/fastapi/middleware/trustedhost.py b/venv/Lib/site-packages/fastapi/middleware/trustedhost.py new file mode 100644 index 00000000..08d7e035 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/middleware/trustedhost.py @@ -0,0 +1,3 @@ +from starlette.middleware.trustedhost import ( # noqa + TrustedHostMiddleware as TrustedHostMiddleware, +) diff --git a/venv/Lib/site-packages/fastapi/middleware/wsgi.py b/venv/Lib/site-packages/fastapi/middleware/wsgi.py new file mode 100644 index 00000000..c4c6a797 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/middleware/wsgi.py @@ -0,0 +1 @@ +from starlette.middleware.wsgi import WSGIMiddleware as WSGIMiddleware # noqa diff --git a/venv/Lib/site-packages/fastapi/openapi/__init__.py b/venv/Lib/site-packages/fastapi/openapi/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/fastapi/openapi/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/fastapi/openapi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a809c3f3 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/openapi/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/openapi/__pycache__/constants.cpython-312.pyc b/venv/Lib/site-packages/fastapi/openapi/__pycache__/constants.cpython-312.pyc new file mode 100644 index 00000000..fd712805 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/openapi/__pycache__/constants.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/openapi/__pycache__/docs.cpython-312.pyc b/venv/Lib/site-packages/fastapi/openapi/__pycache__/docs.cpython-312.pyc new file mode 100644 index 00000000..e6d309c9 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/openapi/__pycache__/docs.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/openapi/__pycache__/models.cpython-312.pyc b/venv/Lib/site-packages/fastapi/openapi/__pycache__/models.cpython-312.pyc new file mode 100644 index 00000000..dd6cc8d5 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/openapi/__pycache__/models.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/openapi/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/fastapi/openapi/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..9557b85f Binary files /dev/null and b/venv/Lib/site-packages/fastapi/openapi/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/openapi/constants.py b/venv/Lib/site-packages/fastapi/openapi/constants.py new file mode 100644 index 00000000..d724ee3c --- /dev/null +++ 
b/venv/Lib/site-packages/fastapi/openapi/constants.py @@ -0,0 +1,3 @@ +METHODS_WITH_BODY = {"GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"} +REF_PREFIX = "#/components/schemas/" +REF_TEMPLATE = "#/components/schemas/{model}" diff --git a/venv/Lib/site-packages/fastapi/openapi/docs.py b/venv/Lib/site-packages/fastapi/openapi/docs.py new file mode 100644 index 00000000..c2ec358d --- /dev/null +++ b/venv/Lib/site-packages/fastapi/openapi/docs.py @@ -0,0 +1,344 @@ +import json +from typing import Any, Dict, Optional + +from fastapi.encoders import jsonable_encoder +from starlette.responses import HTMLResponse +from typing_extensions import Annotated, Doc + +swagger_ui_default_parameters: Annotated[ + Dict[str, Any], + Doc( + """ + Default configurations for Swagger UI. + + You can use it as a template to add any other configurations needed. + """ + ), +] = { + "dom_id": "#swagger-ui", + "layout": "BaseLayout", + "deepLinking": True, + "showExtensions": True, + "showCommonExtensions": True, +} + + +def get_swagger_ui_html( + *, + openapi_url: Annotated[ + str, + Doc( + """ + The OpenAPI URL that Swagger UI should load and use. + + This is normally done automatically by FastAPI using the default URL + `/openapi.json`. + """ + ), + ], + title: Annotated[ + str, + Doc( + """ + The HTML `` content, normally shown in the browser tab. + """ + ), + ], + swagger_js_url: Annotated[ + str, + Doc( + """ + The URL to use to load the Swagger UI JavaScript. + + It is normally set to a CDN URL. + """ + ), + ] = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui-bundle.js", + swagger_css_url: Annotated[ + str, + Doc( + """ + The URL to use to load the Swagger UI CSS. + + It is normally set to a CDN URL. + """ + ), + ] = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui.css", + swagger_favicon_url: Annotated[ + str, + Doc( + """ + The URL of the favicon to use. It is normally shown in the browser tab. + """ + ), + ] = "https://fastapi.tiangolo.com/img/favicon.png", + oauth2_redirect_url: Annotated[ + Optional[str], + Doc( + """ + The OAuth2 redirect URL, it is normally automatically handled by FastAPI. + """ + ), + ] = None, + init_oauth: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + A dictionary with Swagger UI OAuth2 initialization configurations. + """ + ), + ] = None, + swagger_ui_parameters: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Configuration parameters for Swagger UI. + + It defaults to [swagger_ui_default_parameters][fastapi.openapi.docs.swagger_ui_default_parameters]. + """ + ), + ] = None, +) -> HTMLResponse: + """ + Generate and return the HTML that loads Swagger UI for the interactive + API docs (normally served at `/docs`). + + You would only call this function yourself if you needed to override some parts, + for example the URLs to use to load Swagger UI's JavaScript and CSS. + + Read more about it in the + [FastAPI docs for Configure Swagger UI](https://fastapi.tiangolo.com/how-to/configure-swagger-ui/) + and the [FastAPI docs for Custom Docs UI Static Assets (Self-Hosting)](https://fastapi.tiangolo.com/how-to/custom-docs-ui-assets/). + """ + current_swagger_ui_parameters = swagger_ui_default_parameters.copy() + if swagger_ui_parameters: + current_swagger_ui_parameters.update(swagger_ui_parameters) + + html = f""" + <!DOCTYPE html> + <html> + <head> + <link type="text/css" rel="stylesheet" href="{swagger_css_url}"> + <link rel="shortcut icon" href="{swagger_favicon_url}"> + <title>{title} + + +
+
+ + + + + + """ + return HTMLResponse(html) + + +def get_redoc_html( + *, + openapi_url: Annotated[ + str, + Doc( + """ + The OpenAPI URL that ReDoc should load and use. + + This is normally done automatically by FastAPI using the default URL + `/openapi.json`. + """ + ), + ], + title: Annotated[ + str, + Doc( + """ + The HTML `` content, normally shown in the browser tab. + """ + ), + ], + redoc_js_url: Annotated[ + str, + Doc( + """ + The URL to use to load the ReDoc JavaScript. + + It is normally set to a CDN URL. + """ + ), + ] = "https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js", + redoc_favicon_url: Annotated[ + str, + Doc( + """ + The URL of the favicon to use. It is normally shown in the browser tab. + """ + ), + ] = "https://fastapi.tiangolo.com/img/favicon.png", + with_google_fonts: Annotated[ + bool, + Doc( + """ + Load and use Google Fonts. + """ + ), + ] = True, +) -> HTMLResponse: + """ + Generate and return the HTML response that loads ReDoc for the alternative + API docs (normally served at `/redoc`). + + You would only call this function yourself if you needed to override some parts, + for example the URLs to use to load ReDoc's JavaScript and CSS. + + Read more about it in the + [FastAPI docs for Custom Docs UI Static Assets (Self-Hosting)](https://fastapi.tiangolo.com/how-to/custom-docs-ui-assets/). + """ + html = f""" + <!DOCTYPE html> + <html> + <head> + <title>{title} + + + + """ + if with_google_fonts: + html += """ + + """ + html += f""" + + + + + + + + + + + """ + return HTMLResponse(html) + + +def get_swagger_ui_oauth2_redirect_html() -> HTMLResponse: + """ + Generate the HTML response with the OAuth2 redirection for Swagger UI. + + You normally don't need to use or change this. + """ + # copied from https://github.com/swagger-api/swagger-ui/blob/v4.14.0/dist/oauth2-redirect.html + html = """ + + + + Swagger UI: OAuth2 Redirect + + + + + + """ + return HTMLResponse(content=html) diff --git a/venv/Lib/site-packages/fastapi/openapi/models.py b/venv/Lib/site-packages/fastapi/openapi/models.py new file mode 100644 index 00000000..ed07b40f --- /dev/null +++ b/venv/Lib/site-packages/fastapi/openapi/models.py @@ -0,0 +1,445 @@ +from enum import Enum +from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union + +from fastapi._compat import ( + PYDANTIC_V2, + CoreSchema, + GetJsonSchemaHandler, + JsonSchemaValue, + _model_rebuild, + with_info_plain_validator_function, +) +from fastapi.logger import logger +from pydantic import AnyUrl, BaseModel, Field +from typing_extensions import Annotated, Literal, TypedDict +from typing_extensions import deprecated as typing_deprecated + +try: + import email_validator + + assert email_validator # make autoflake ignore the unused import + from pydantic import EmailStr +except ImportError: # pragma: no cover + + class EmailStr(str): # type: ignore + @classmethod + def __get_validators__(cls) -> Iterable[Callable[..., Any]]: + yield cls.validate + + @classmethod + def validate(cls, v: Any) -> str: + logger.warning( + "email-validator not installed, email fields will be treated as str.\n" + "To install, run: pip install email-validator" + ) + return str(v) + + @classmethod + def _validate(cls, __input_value: Any, _: Any) -> str: + logger.warning( + "email-validator not installed, email fields will be treated as str.\n" + "To install, run: pip install email-validator" + ) + return str(__input_value) + + @classmethod + def __get_pydantic_json_schema__( + cls, core_schema: CoreSchema, handler: 
GetJsonSchemaHandler + ) -> JsonSchemaValue: + return {"type": "string", "format": "email"} + + @classmethod + def __get_pydantic_core_schema__( + cls, source: Type[Any], handler: Callable[[Any], CoreSchema] + ) -> CoreSchema: + return with_info_plain_validator_function(cls._validate) + + +class BaseModelWithConfig(BaseModel): + if PYDANTIC_V2: + model_config = {"extra": "allow"} + + else: + + class Config: + extra = "allow" + + +class Contact(BaseModelWithConfig): + name: Optional[str] = None + url: Optional[AnyUrl] = None + email: Optional[EmailStr] = None + + +class License(BaseModelWithConfig): + name: str + identifier: Optional[str] = None + url: Optional[AnyUrl] = None + + +class Info(BaseModelWithConfig): + title: str + summary: Optional[str] = None + description: Optional[str] = None + termsOfService: Optional[str] = None + contact: Optional[Contact] = None + license: Optional[License] = None + version: str + + +class ServerVariable(BaseModelWithConfig): + enum: Annotated[Optional[List[str]], Field(min_length=1)] = None + default: str + description: Optional[str] = None + + +class Server(BaseModelWithConfig): + url: Union[AnyUrl, str] + description: Optional[str] = None + variables: Optional[Dict[str, ServerVariable]] = None + + +class Reference(BaseModel): + ref: str = Field(alias="$ref") + + +class Discriminator(BaseModel): + propertyName: str + mapping: Optional[Dict[str, str]] = None + + +class XML(BaseModelWithConfig): + name: Optional[str] = None + namespace: Optional[str] = None + prefix: Optional[str] = None + attribute: Optional[bool] = None + wrapped: Optional[bool] = None + + +class ExternalDocumentation(BaseModelWithConfig): + description: Optional[str] = None + url: AnyUrl + + +class Schema(BaseModelWithConfig): + # Ref: JSON Schema 2020-12: https://json-schema.org/draft/2020-12/json-schema-core.html#name-the-json-schema-core-vocabu + # Core Vocabulary + schema_: Optional[str] = Field(default=None, alias="$schema") + vocabulary: Optional[str] = Field(default=None, alias="$vocabulary") + id: Optional[str] = Field(default=None, alias="$id") + anchor: Optional[str] = Field(default=None, alias="$anchor") + dynamicAnchor: Optional[str] = Field(default=None, alias="$dynamicAnchor") + ref: Optional[str] = Field(default=None, alias="$ref") + dynamicRef: Optional[str] = Field(default=None, alias="$dynamicRef") + defs: Optional[Dict[str, "SchemaOrBool"]] = Field(default=None, alias="$defs") + comment: Optional[str] = Field(default=None, alias="$comment") + # Ref: JSON Schema 2020-12: https://json-schema.org/draft/2020-12/json-schema-core.html#name-a-vocabulary-for-applying-s + # A Vocabulary for Applying Subschemas + allOf: Optional[List["SchemaOrBool"]] = None + anyOf: Optional[List["SchemaOrBool"]] = None + oneOf: Optional[List["SchemaOrBool"]] = None + not_: Optional["SchemaOrBool"] = Field(default=None, alias="not") + if_: Optional["SchemaOrBool"] = Field(default=None, alias="if") + then: Optional["SchemaOrBool"] = None + else_: Optional["SchemaOrBool"] = Field(default=None, alias="else") + dependentSchemas: Optional[Dict[str, "SchemaOrBool"]] = None + prefixItems: Optional[List["SchemaOrBool"]] = None + # TODO: uncomment and remove below when deprecating Pydantic v1 + # It generales a list of schemas for tuples, before prefixItems was available + # items: Optional["SchemaOrBool"] = None + items: Optional[Union["SchemaOrBool", List["SchemaOrBool"]]] = None + contains: Optional["SchemaOrBool"] = None + properties: Optional[Dict[str, "SchemaOrBool"]] = None + 
patternProperties: Optional[Dict[str, "SchemaOrBool"]] = None + additionalProperties: Optional["SchemaOrBool"] = None + propertyNames: Optional["SchemaOrBool"] = None + unevaluatedItems: Optional["SchemaOrBool"] = None + unevaluatedProperties: Optional["SchemaOrBool"] = None + # Ref: JSON Schema Validation 2020-12: https://json-schema.org/draft/2020-12/json-schema-validation.html#name-a-vocabulary-for-structural + # A Vocabulary for Structural Validation + type: Optional[str] = None + enum: Optional[List[Any]] = None + const: Optional[Any] = None + multipleOf: Optional[float] = Field(default=None, gt=0) + maximum: Optional[float] = None + exclusiveMaximum: Optional[float] = None + minimum: Optional[float] = None + exclusiveMinimum: Optional[float] = None + maxLength: Optional[int] = Field(default=None, ge=0) + minLength: Optional[int] = Field(default=None, ge=0) + pattern: Optional[str] = None + maxItems: Optional[int] = Field(default=None, ge=0) + minItems: Optional[int] = Field(default=None, ge=0) + uniqueItems: Optional[bool] = None + maxContains: Optional[int] = Field(default=None, ge=0) + minContains: Optional[int] = Field(default=None, ge=0) + maxProperties: Optional[int] = Field(default=None, ge=0) + minProperties: Optional[int] = Field(default=None, ge=0) + required: Optional[List[str]] = None + dependentRequired: Optional[Dict[str, Set[str]]] = None + # Ref: JSON Schema Validation 2020-12: https://json-schema.org/draft/2020-12/json-schema-validation.html#name-vocabularies-for-semantic-c + # Vocabularies for Semantic Content With "format" + format: Optional[str] = None + # Ref: JSON Schema Validation 2020-12: https://json-schema.org/draft/2020-12/json-schema-validation.html#name-a-vocabulary-for-the-conten + # A Vocabulary for the Contents of String-Encoded Data + contentEncoding: Optional[str] = None + contentMediaType: Optional[str] = None + contentSchema: Optional["SchemaOrBool"] = None + # Ref: JSON Schema Validation 2020-12: https://json-schema.org/draft/2020-12/json-schema-validation.html#name-a-vocabulary-for-basic-meta + # A Vocabulary for Basic Meta-Data Annotations + title: Optional[str] = None + description: Optional[str] = None + default: Optional[Any] = None + deprecated: Optional[bool] = None + readOnly: Optional[bool] = None + writeOnly: Optional[bool] = None + examples: Optional[List[Any]] = None + # Ref: OpenAPI 3.1.0: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#schema-object + # Schema Object + discriminator: Optional[Discriminator] = None + xml: Optional[XML] = None + externalDocs: Optional[ExternalDocumentation] = None + example: Annotated[ + Optional[Any], + typing_deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." + ), + ] = None + + +# Ref: https://json-schema.org/draft/2020-12/json-schema-core.html#name-json-schema-documents +# A JSON Schema MUST be an object or a boolean. 
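The `Schema` model above mirrors JSON Schema 2020-12, using field aliases for names that are not valid Python identifiers (`$ref`, `not`, `if`, ...). A quick round-trip sketch, assuming Pydantic v2 is installed so `model_validate`/`model_dump` are available:

```python
from fastapi.openapi.models import Schema

raw = {"type": "array", "items": {"type": "integer"}, "maxItems": 10}
schema = Schema.model_validate(raw)

# by_alias=True restores aliased keys; exclude_none drops the many unset fields,
# so the dump should match the original dict.
assert schema.model_dump(by_alias=True, exclude_none=True) == raw
```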
+SchemaOrBool = Union[Schema, bool] + + +class Example(TypedDict, total=False): + summary: Optional[str] + description: Optional[str] + value: Optional[Any] + externalValue: Optional[AnyUrl] + + if PYDANTIC_V2: # type: ignore [misc] + __pydantic_config__ = {"extra": "allow"} + + else: + + class Config: + extra = "allow" + + +class ParameterInType(Enum): + query = "query" + header = "header" + path = "path" + cookie = "cookie" + + +class Encoding(BaseModelWithConfig): + contentType: Optional[str] = None + headers: Optional[Dict[str, Union["Header", Reference]]] = None + style: Optional[str] = None + explode: Optional[bool] = None + allowReserved: Optional[bool] = None + + +class MediaType(BaseModelWithConfig): + schema_: Optional[Union[Schema, Reference]] = Field(default=None, alias="schema") + example: Optional[Any] = None + examples: Optional[Dict[str, Union[Example, Reference]]] = None + encoding: Optional[Dict[str, Encoding]] = None + + +class ParameterBase(BaseModelWithConfig): + description: Optional[str] = None + required: Optional[bool] = None + deprecated: Optional[bool] = None + # Serialization rules for simple scenarios + style: Optional[str] = None + explode: Optional[bool] = None + allowReserved: Optional[bool] = None + schema_: Optional[Union[Schema, Reference]] = Field(default=None, alias="schema") + example: Optional[Any] = None + examples: Optional[Dict[str, Union[Example, Reference]]] = None + # Serialization rules for more complex scenarios + content: Optional[Dict[str, MediaType]] = None + + +class Parameter(ParameterBase): + name: str + in_: ParameterInType = Field(alias="in") + + +class Header(ParameterBase): + pass + + +class RequestBody(BaseModelWithConfig): + description: Optional[str] = None + content: Dict[str, MediaType] + required: Optional[bool] = None + + +class Link(BaseModelWithConfig): + operationRef: Optional[str] = None + operationId: Optional[str] = None + parameters: Optional[Dict[str, Union[Any, str]]] = None + requestBody: Optional[Union[Any, str]] = None + description: Optional[str] = None + server: Optional[Server] = None + + +class Response(BaseModelWithConfig): + description: str + headers: Optional[Dict[str, Union[Header, Reference]]] = None + content: Optional[Dict[str, MediaType]] = None + links: Optional[Dict[str, Union[Link, Reference]]] = None + + +class Operation(BaseModelWithConfig): + tags: Optional[List[str]] = None + summary: Optional[str] = None + description: Optional[str] = None + externalDocs: Optional[ExternalDocumentation] = None + operationId: Optional[str] = None + parameters: Optional[List[Union[Parameter, Reference]]] = None + requestBody: Optional[Union[RequestBody, Reference]] = None + # Using Any for Specification Extensions + responses: Optional[Dict[str, Union[Response, Any]]] = None + callbacks: Optional[Dict[str, Union[Dict[str, "PathItem"], Reference]]] = None + deprecated: Optional[bool] = None + security: Optional[List[Dict[str, List[str]]]] = None + servers: Optional[List[Server]] = None + + +class PathItem(BaseModelWithConfig): + ref: Optional[str] = Field(default=None, alias="$ref") + summary: Optional[str] = None + description: Optional[str] = None + get: Optional[Operation] = None + put: Optional[Operation] = None + post: Optional[Operation] = None + delete: Optional[Operation] = None + options: Optional[Operation] = None + head: Optional[Operation] = None + patch: Optional[Operation] = None + trace: Optional[Operation] = None + servers: Optional[List[Server]] = None + parameters: Optional[List[Union[Parameter, 
Reference]]] = None + + +class SecuritySchemeType(Enum): + apiKey = "apiKey" + http = "http" + oauth2 = "oauth2" + openIdConnect = "openIdConnect" + + +class SecurityBase(BaseModelWithConfig): + type_: SecuritySchemeType = Field(alias="type") + description: Optional[str] = None + + +class APIKeyIn(Enum): + query = "query" + header = "header" + cookie = "cookie" + + +class APIKey(SecurityBase): + type_: SecuritySchemeType = Field(default=SecuritySchemeType.apiKey, alias="type") + in_: APIKeyIn = Field(alias="in") + name: str + + +class HTTPBase(SecurityBase): + type_: SecuritySchemeType = Field(default=SecuritySchemeType.http, alias="type") + scheme: str + + +class HTTPBearer(HTTPBase): + scheme: Literal["bearer"] = "bearer" + bearerFormat: Optional[str] = None + + +class OAuthFlow(BaseModelWithConfig): + refreshUrl: Optional[str] = None + scopes: Dict[str, str] = {} + + +class OAuthFlowImplicit(OAuthFlow): + authorizationUrl: str + + +class OAuthFlowPassword(OAuthFlow): + tokenUrl: str + + +class OAuthFlowClientCredentials(OAuthFlow): + tokenUrl: str + + +class OAuthFlowAuthorizationCode(OAuthFlow): + authorizationUrl: str + tokenUrl: str + + +class OAuthFlows(BaseModelWithConfig): + implicit: Optional[OAuthFlowImplicit] = None + password: Optional[OAuthFlowPassword] = None + clientCredentials: Optional[OAuthFlowClientCredentials] = None + authorizationCode: Optional[OAuthFlowAuthorizationCode] = None + + +class OAuth2(SecurityBase): + type_: SecuritySchemeType = Field(default=SecuritySchemeType.oauth2, alias="type") + flows: OAuthFlows + + +class OpenIdConnect(SecurityBase): + type_: SecuritySchemeType = Field( + default=SecuritySchemeType.openIdConnect, alias="type" + ) + openIdConnectUrl: str + + +SecurityScheme = Union[APIKey, HTTPBase, OAuth2, OpenIdConnect, HTTPBearer] + + +class Components(BaseModelWithConfig): + schemas: Optional[Dict[str, Union[Schema, Reference]]] = None + responses: Optional[Dict[str, Union[Response, Reference]]] = None + parameters: Optional[Dict[str, Union[Parameter, Reference]]] = None + examples: Optional[Dict[str, Union[Example, Reference]]] = None + requestBodies: Optional[Dict[str, Union[RequestBody, Reference]]] = None + headers: Optional[Dict[str, Union[Header, Reference]]] = None + securitySchemes: Optional[Dict[str, Union[SecurityScheme, Reference]]] = None + links: Optional[Dict[str, Union[Link, Reference]]] = None + # Using Any for Specification Extensions + callbacks: Optional[Dict[str, Union[Dict[str, PathItem], Reference, Any]]] = None + pathItems: Optional[Dict[str, Union[PathItem, Reference]]] = None + + +class Tag(BaseModelWithConfig): + name: str + description: Optional[str] = None + externalDocs: Optional[ExternalDocumentation] = None + + +class OpenAPI(BaseModelWithConfig): + openapi: str + info: Info + jsonSchemaDialect: Optional[str] = None + servers: Optional[List[Server]] = None + # Using Any for Specification Extensions + paths: Optional[Dict[str, Union[PathItem, Any]]] = None + webhooks: Optional[Dict[str, Union[PathItem, Reference]]] = None + components: Optional[Components] = None + security: Optional[List[Dict[str, List[str]]]] = None + tags: Optional[List[Tag]] = None + externalDocs: Optional[ExternalDocumentation] = None + + +_model_rebuild(Schema) +_model_rebuild(Operation) +_model_rebuild(Encoding) diff --git a/venv/Lib/site-packages/fastapi/openapi/utils.py b/venv/Lib/site-packages/fastapi/openapi/utils.py new file mode 100644 index 00000000..808646cc --- /dev/null +++ b/venv/Lib/site-packages/fastapi/openapi/utils.py @@ -0,0 
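The security-scheme classes and the `OpenAPI` root model defined here are what `get_openapi()` (in `openapi/utils.py`, next) validates its output against before serializing. A minimal document built by hand, assuming Pydantic v2 (title and version are placeholders):

```python
from fastapi.openapi.models import Info, OpenAPI

doc = OpenAPI(
    openapi="3.1.0",
    info=Info(title="Sunbird AI Assistant", version="0.1.0"),
    paths={},
)

# exclude_none keeps the serialized document minimal, matching how get_openapi()
# finishes with jsonable_encoder(..., by_alias=True, exclude_none=True).
print(doc.model_dump(by_alias=True, exclude_none=True))
```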
+1,569 @@ +import http.client +import inspect +import warnings +from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast + +from fastapi import routing +from fastapi._compat import ( + GenerateJsonSchema, + JsonSchemaValue, + ModelField, + Undefined, + get_compat_model_name_map, + get_definitions, + get_schema_from_model_field, + lenient_issubclass, +) +from fastapi.datastructures import DefaultPlaceholder +from fastapi.dependencies.models import Dependant +from fastapi.dependencies.utils import ( + _get_flat_fields_from_params, + get_flat_dependant, + get_flat_params, +) +from fastapi.encoders import jsonable_encoder +from fastapi.openapi.constants import METHODS_WITH_BODY, REF_PREFIX, REF_TEMPLATE +from fastapi.openapi.models import OpenAPI +from fastapi.params import Body, ParamTypes +from fastapi.responses import Response +from fastapi.types import ModelNameMap +from fastapi.utils import ( + deep_dict_update, + generate_operation_id_for_path, + is_body_allowed_for_status_code, +) +from pydantic import BaseModel +from starlette.responses import JSONResponse +from starlette.routing import BaseRoute +from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY +from typing_extensions import Literal + +validation_error_definition = { + "title": "ValidationError", + "type": "object", + "properties": { + "loc": { + "title": "Location", + "type": "array", + "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}, + }, + "msg": {"title": "Message", "type": "string"}, + "type": {"title": "Error Type", "type": "string"}, + }, + "required": ["loc", "msg", "type"], +} + +validation_error_response_definition = { + "title": "HTTPValidationError", + "type": "object", + "properties": { + "detail": { + "title": "Detail", + "type": "array", + "items": {"$ref": REF_PREFIX + "ValidationError"}, + } + }, +} + +status_code_ranges: Dict[str, str] = { + "1XX": "Information", + "2XX": "Success", + "3XX": "Redirection", + "4XX": "Client Error", + "5XX": "Server Error", + "DEFAULT": "Default Response", +} + + +def get_openapi_security_definitions( + flat_dependant: Dependant, +) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]: + security_definitions = {} + operation_security = [] + for security_requirement in flat_dependant.security_requirements: + security_definition = jsonable_encoder( + security_requirement.security_scheme.model, + by_alias=True, + exclude_none=True, + ) + security_name = security_requirement.security_scheme.scheme_name + security_definitions[security_name] = security_definition + operation_security.append({security_name: security_requirement.scopes}) + return security_definitions, operation_security + + +def _get_openapi_operation_parameters( + *, + dependant: Dependant, + schema_generator: GenerateJsonSchema, + model_name_map: ModelNameMap, + field_mapping: Dict[ + Tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue + ], + separate_input_output_schemas: bool = True, +) -> List[Dict[str, Any]]: + parameters = [] + flat_dependant = get_flat_dependant(dependant, skip_repeats=True) + path_params = _get_flat_fields_from_params(flat_dependant.path_params) + query_params = _get_flat_fields_from_params(flat_dependant.query_params) + header_params = _get_flat_fields_from_params(flat_dependant.header_params) + cookie_params = _get_flat_fields_from_params(flat_dependant.cookie_params) + parameter_groups = [ + (ParamTypes.path, path_params), + (ParamTypes.query, query_params), + (ParamTypes.header, header_params), + (ParamTypes.cookie, 
cookie_params), + ] + default_convert_underscores = True + if len(flat_dependant.header_params) == 1: + first_field = flat_dependant.header_params[0] + if lenient_issubclass(first_field.type_, BaseModel): + default_convert_underscores = getattr( + first_field.field_info, "convert_underscores", True + ) + for param_type, param_group in parameter_groups: + for param in param_group: + field_info = param.field_info + # field_info = cast(Param, field_info) + if not getattr(field_info, "include_in_schema", True): + continue + param_schema = get_schema_from_model_field( + field=param, + schema_generator=schema_generator, + model_name_map=model_name_map, + field_mapping=field_mapping, + separate_input_output_schemas=separate_input_output_schemas, + ) + name = param.alias + convert_underscores = getattr( + param.field_info, + "convert_underscores", + default_convert_underscores, + ) + if ( + param_type == ParamTypes.header + and param.alias == param.name + and convert_underscores + ): + name = param.name.replace("_", "-") + + parameter = { + "name": name, + "in": param_type.value, + "required": param.required, + "schema": param_schema, + } + if field_info.description: + parameter["description"] = field_info.description + openapi_examples = getattr(field_info, "openapi_examples", None) + example = getattr(field_info, "example", None) + if openapi_examples: + parameter["examples"] = jsonable_encoder(openapi_examples) + elif example != Undefined: + parameter["example"] = jsonable_encoder(example) + if getattr(field_info, "deprecated", None): + parameter["deprecated"] = True + parameters.append(parameter) + return parameters + + +def get_openapi_operation_request_body( + *, + body_field: Optional[ModelField], + schema_generator: GenerateJsonSchema, + model_name_map: ModelNameMap, + field_mapping: Dict[ + Tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue + ], + separate_input_output_schemas: bool = True, +) -> Optional[Dict[str, Any]]: + if not body_field: + return None + assert isinstance(body_field, ModelField) + body_schema = get_schema_from_model_field( + field=body_field, + schema_generator=schema_generator, + model_name_map=model_name_map, + field_mapping=field_mapping, + separate_input_output_schemas=separate_input_output_schemas, + ) + field_info = cast(Body, body_field.field_info) + request_media_type = field_info.media_type + required = body_field.required + request_body_oai: Dict[str, Any] = {} + if required: + request_body_oai["required"] = required + request_media_content: Dict[str, Any] = {"schema": body_schema} + if field_info.openapi_examples: + request_media_content["examples"] = jsonable_encoder( + field_info.openapi_examples + ) + elif field_info.example != Undefined: + request_media_content["example"] = jsonable_encoder(field_info.example) + request_body_oai["content"] = {request_media_type: request_media_content} + return request_body_oai + + +def generate_operation_id( + *, route: routing.APIRoute, method: str +) -> str: # pragma: nocover + warnings.warn( + "fastapi.openapi.utils.generate_operation_id() was deprecated, " + "it is not used internally, and will be removed soon", + DeprecationWarning, + stacklevel=2, + ) + if route.operation_id: + return route.operation_id + path: str = route.path_format + return generate_operation_id_for_path(name=route.name, path=path, method=method) + + +def generate_operation_summary(*, route: routing.APIRoute, method: str) -> str: + if route.summary: + return route.summary + return route.name.replace("_", " ").title() + + 
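The parameter-collection logic above is also where `convert_underscores` is applied, so a Python-friendly argument name surfaces as a hyphenated header name in the generated schema. A sketch of the observable effect (route and header names are illustrative):

```python
from typing import Annotated, Optional

from fastapi import FastAPI, Header

app = FastAPI()


@app.get("/ping")
async def ping(x_request_id: Annotated[Optional[str], Header()] = None):
    # In the generated OpenAPI this parameter is listed as "x-request-id",
    # because convert_underscores defaults to True for header parameters.
    return {"x_request_id": x_request_id}
```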
+def get_openapi_operation_metadata( + *, route: routing.APIRoute, method: str, operation_ids: Set[str] +) -> Dict[str, Any]: + operation: Dict[str, Any] = {} + if route.tags: + operation["tags"] = route.tags + operation["summary"] = generate_operation_summary(route=route, method=method) + if route.description: + operation["description"] = route.description + operation_id = route.operation_id or route.unique_id + if operation_id in operation_ids: + message = ( + f"Duplicate Operation ID {operation_id} for function " + + f"{route.endpoint.__name__}" + ) + file_name = getattr(route.endpoint, "__globals__", {}).get("__file__") + if file_name: + message += f" at {file_name}" + warnings.warn(message, stacklevel=1) + operation_ids.add(operation_id) + operation["operationId"] = operation_id + if route.deprecated: + operation["deprecated"] = route.deprecated + return operation + + +def get_openapi_path( + *, + route: routing.APIRoute, + operation_ids: Set[str], + schema_generator: GenerateJsonSchema, + model_name_map: ModelNameMap, + field_mapping: Dict[ + Tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue + ], + separate_input_output_schemas: bool = True, +) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]: + path = {} + security_schemes: Dict[str, Any] = {} + definitions: Dict[str, Any] = {} + assert route.methods is not None, "Methods must be a list" + if isinstance(route.response_class, DefaultPlaceholder): + current_response_class: Type[Response] = route.response_class.value + else: + current_response_class = route.response_class + assert current_response_class, "A response class is needed to generate OpenAPI" + route_response_media_type: Optional[str] = current_response_class.media_type + if route.include_in_schema: + for method in route.methods: + operation = get_openapi_operation_metadata( + route=route, method=method, operation_ids=operation_ids + ) + parameters: List[Dict[str, Any]] = [] + flat_dependant = get_flat_dependant(route.dependant, skip_repeats=True) + security_definitions, operation_security = get_openapi_security_definitions( + flat_dependant=flat_dependant + ) + if operation_security: + operation.setdefault("security", []).extend(operation_security) + if security_definitions: + security_schemes.update(security_definitions) + operation_parameters = _get_openapi_operation_parameters( + dependant=route.dependant, + schema_generator=schema_generator, + model_name_map=model_name_map, + field_mapping=field_mapping, + separate_input_output_schemas=separate_input_output_schemas, + ) + parameters.extend(operation_parameters) + if parameters: + all_parameters = { + (param["in"], param["name"]): param for param in parameters + } + required_parameters = { + (param["in"], param["name"]): param + for param in parameters + if param.get("required") + } + # Make sure required definitions of the same parameter take precedence + # over non-required definitions + all_parameters.update(required_parameters) + operation["parameters"] = list(all_parameters.values()) + if method in METHODS_WITH_BODY: + request_body_oai = get_openapi_operation_request_body( + body_field=route.body_field, + schema_generator=schema_generator, + model_name_map=model_name_map, + field_mapping=field_mapping, + separate_input_output_schemas=separate_input_output_schemas, + ) + if request_body_oai: + operation["requestBody"] = request_body_oai + if route.callbacks: + callbacks = {} + for callback in route.callbacks: + if isinstance(callback, routing.APIRoute): + ( + cb_path, + 
cb_security_schemes, + cb_definitions, + ) = get_openapi_path( + route=callback, + operation_ids=operation_ids, + schema_generator=schema_generator, + model_name_map=model_name_map, + field_mapping=field_mapping, + separate_input_output_schemas=separate_input_output_schemas, + ) + callbacks[callback.name] = {callback.path: cb_path} + operation["callbacks"] = callbacks + if route.status_code is not None: + status_code = str(route.status_code) + else: + # It would probably make more sense for all response classes to have an + # explicit default status_code, and to extract it from them, instead of + # doing this inspection tricks, that would probably be in the future + # TODO: probably make status_code a default class attribute for all + # responses in Starlette + response_signature = inspect.signature(current_response_class.__init__) + status_code_param = response_signature.parameters.get("status_code") + if status_code_param is not None: + if isinstance(status_code_param.default, int): + status_code = str(status_code_param.default) + operation.setdefault("responses", {}).setdefault(status_code, {})[ + "description" + ] = route.response_description + if route_response_media_type and is_body_allowed_for_status_code( + route.status_code + ): + response_schema = {"type": "string"} + if lenient_issubclass(current_response_class, JSONResponse): + if route.response_field: + response_schema = get_schema_from_model_field( + field=route.response_field, + schema_generator=schema_generator, + model_name_map=model_name_map, + field_mapping=field_mapping, + separate_input_output_schemas=separate_input_output_schemas, + ) + else: + response_schema = {} + operation.setdefault("responses", {}).setdefault( + status_code, {} + ).setdefault("content", {}).setdefault(route_response_media_type, {})[ + "schema" + ] = response_schema + if route.responses: + operation_responses = operation.setdefault("responses", {}) + for ( + additional_status_code, + additional_response, + ) in route.responses.items(): + process_response = additional_response.copy() + process_response.pop("model", None) + status_code_key = str(additional_status_code).upper() + if status_code_key == "DEFAULT": + status_code_key = "default" + openapi_response = operation_responses.setdefault( + status_code_key, {} + ) + assert isinstance(process_response, dict), ( + "An additional response must be a dict" + ) + field = route.response_fields.get(additional_status_code) + additional_field_schema: Optional[Dict[str, Any]] = None + if field: + additional_field_schema = get_schema_from_model_field( + field=field, + schema_generator=schema_generator, + model_name_map=model_name_map, + field_mapping=field_mapping, + separate_input_output_schemas=separate_input_output_schemas, + ) + media_type = route_response_media_type or "application/json" + additional_schema = ( + process_response.setdefault("content", {}) + .setdefault(media_type, {}) + .setdefault("schema", {}) + ) + deep_dict_update(additional_schema, additional_field_schema) + status_text: Optional[str] = status_code_ranges.get( + str(additional_status_code).upper() + ) or http.client.responses.get(int(additional_status_code)) + description = ( + process_response.get("description") + or openapi_response.get("description") + or status_text + or "Additional Response" + ) + deep_dict_update(openapi_response, process_response) + openapi_response["description"] = description + http422 = str(HTTP_422_UNPROCESSABLE_ENTITY) + all_route_params = get_flat_params(route.dependant) + if (all_route_params or 
route.body_field) and not any( + status in operation["responses"] + for status in [http422, "4XX", "default"] + ): + operation["responses"][http422] = { + "description": "Validation Error", + "content": { + "application/json": { + "schema": {"$ref": REF_PREFIX + "HTTPValidationError"} + } + }, + } + if "ValidationError" not in definitions: + definitions.update( + { + "ValidationError": validation_error_definition, + "HTTPValidationError": validation_error_response_definition, + } + ) + if route.openapi_extra: + deep_dict_update(operation, route.openapi_extra) + path[method.lower()] = operation + return path, security_schemes, definitions + + +def get_fields_from_routes( + routes: Sequence[BaseRoute], +) -> List[ModelField]: + body_fields_from_routes: List[ModelField] = [] + responses_from_routes: List[ModelField] = [] + request_fields_from_routes: List[ModelField] = [] + callback_flat_models: List[ModelField] = [] + for route in routes: + if getattr(route, "include_in_schema", None) and isinstance( + route, routing.APIRoute + ): + if route.body_field: + assert isinstance(route.body_field, ModelField), ( + "A request body must be a Pydantic Field" + ) + body_fields_from_routes.append(route.body_field) + if route.response_field: + responses_from_routes.append(route.response_field) + if route.response_fields: + responses_from_routes.extend(route.response_fields.values()) + if route.callbacks: + callback_flat_models.extend(get_fields_from_routes(route.callbacks)) + params = get_flat_params(route.dependant) + request_fields_from_routes.extend(params) + + flat_models = callback_flat_models + list( + body_fields_from_routes + responses_from_routes + request_fields_from_routes + ) + return flat_models + + +def get_openapi( + *, + title: str, + version: str, + openapi_version: str = "3.1.0", + summary: Optional[str] = None, + description: Optional[str] = None, + routes: Sequence[BaseRoute], + webhooks: Optional[Sequence[BaseRoute]] = None, + tags: Optional[List[Dict[str, Any]]] = None, + servers: Optional[List[Dict[str, Union[str, Any]]]] = None, + terms_of_service: Optional[str] = None, + contact: Optional[Dict[str, Union[str, Any]]] = None, + license_info: Optional[Dict[str, Union[str, Any]]] = None, + separate_input_output_schemas: bool = True, +) -> Dict[str, Any]: + info: Dict[str, Any] = {"title": title, "version": version} + if summary: + info["summary"] = summary + if description: + info["description"] = description + if terms_of_service: + info["termsOfService"] = terms_of_service + if contact: + info["contact"] = contact + if license_info: + info["license"] = license_info + output: Dict[str, Any] = {"openapi": openapi_version, "info": info} + if servers: + output["servers"] = servers + components: Dict[str, Dict[str, Any]] = {} + paths: Dict[str, Dict[str, Any]] = {} + webhook_paths: Dict[str, Dict[str, Any]] = {} + operation_ids: Set[str] = set() + all_fields = get_fields_from_routes(list(routes or []) + list(webhooks or [])) + model_name_map = get_compat_model_name_map(all_fields) + schema_generator = GenerateJsonSchema(ref_template=REF_TEMPLATE) + field_mapping, definitions = get_definitions( + fields=all_fields, + schema_generator=schema_generator, + model_name_map=model_name_map, + separate_input_output_schemas=separate_input_output_schemas, + ) + for route in routes or []: + if isinstance(route, routing.APIRoute): + result = get_openapi_path( + route=route, + operation_ids=operation_ids, + schema_generator=schema_generator, + model_name_map=model_name_map, + 
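As the branch above shows, the automatic 422 "Validation Error" response is only injected when a route with parameters or a body declares none of `422`, `"4XX"`, or `"default"` itself; declaring your own suppresses it. A sketch of that override (the model and description are illustrative):

```python
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class Item(BaseModel):
    name: str


@app.post("/items/", responses={422: {"description": "Rejected by custom validation"}})
async def create_item(item: Item):
    # Because 422 is declared explicitly, get_openapi_path() keeps this
    # description instead of adding the default HTTPValidationError response.
    return item
```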
field_mapping=field_mapping, + separate_input_output_schemas=separate_input_output_schemas, + ) + if result: + path, security_schemes, path_definitions = result + if path: + paths.setdefault(route.path_format, {}).update(path) + if security_schemes: + components.setdefault("securitySchemes", {}).update( + security_schemes + ) + if path_definitions: + definitions.update(path_definitions) + for webhook in webhooks or []: + if isinstance(webhook, routing.APIRoute): + result = get_openapi_path( + route=webhook, + operation_ids=operation_ids, + schema_generator=schema_generator, + model_name_map=model_name_map, + field_mapping=field_mapping, + separate_input_output_schemas=separate_input_output_schemas, + ) + if result: + path, security_schemes, path_definitions = result + if path: + webhook_paths.setdefault(webhook.path_format, {}).update(path) + if security_schemes: + components.setdefault("securitySchemes", {}).update( + security_schemes + ) + if path_definitions: + definitions.update(path_definitions) + if definitions: + components["schemas"] = {k: definitions[k] for k in sorted(definitions)} + if components: + output["components"] = components + output["paths"] = paths + if webhook_paths: + output["webhooks"] = webhook_paths + if tags: + output["tags"] = tags + return jsonable_encoder(OpenAPI(**output), by_alias=True, exclude_none=True) # type: ignore diff --git a/venv/Lib/site-packages/fastapi/param_functions.py b/venv/Lib/site-packages/fastapi/param_functions.py new file mode 100644 index 00000000..b3621626 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/param_functions.py @@ -0,0 +1,2360 @@ +from typing import Any, Callable, Dict, List, Optional, Sequence, Union + +from fastapi import params +from fastapi._compat import Undefined +from fastapi.openapi.models import Example +from typing_extensions import Annotated, Doc, deprecated + +_Unset: Any = Undefined + + +def Path( # noqa: N802 + default: Annotated[ + Any, + Doc( + """ + Default value if the parameter field is not set. + + This doesn't affect `Path` parameters as the value is always required. + The parameter is available only for compatibility. + """ + ), + ] = ..., + *, + default_factory: Annotated[ + Union[Callable[[], Any], None], + Doc( + """ + A callable to generate the default value. + + This doesn't affect `Path` parameters as the value is always required. + The parameter is available only for compatibility. + """ + ), + ] = _Unset, + alias: Annotated[ + Optional[str], + Doc( + """ + An alternative name for the parameter field. + + This will be used to extract the data and for the generated OpenAPI. + It is particularly useful when you can't use the name you want because it + is a Python reserved keyword or similar. + """ + ), + ] = None, + alias_priority: Annotated[ + Union[int, None], + Doc( + """ + Priority of the alias. This affects whether an alias generator is used. + """ + ), + ] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Whitelist' validation step. The parameter field will be the single one + allowed by the alias or set of aliases defined. + """ + ), + ] = None, + serialization_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Blacklist' validation step. The vanilla parameter field will be the + single one of the alias' or set of aliases' fields and all the other + fields will be ignored at serialization time. 
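A common way to consume `get_openapi()` directly, completed just above, is the caching pattern from the FastAPI docs for extending the schema: generate once, stash it on the app, and replace `app.openapi`. The title and version below are placeholders:

```python
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi

app = FastAPI()


def custom_openapi():
    # Cache the generated document so the routes are only walked once.
    if app.openapi_schema:
        return app.openapi_schema
    app.openapi_schema = get_openapi(
        title="Sunbird AI Assistant",
        version="0.1.0",
        routes=app.routes,
    )
    return app.openapi_schema


app.openapi = custom_openapi  # type: ignore[method-assign]
```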
+ """ + ), + ] = None, + title: Annotated[ + Optional[str], + Doc( + """ + Human-readable title. + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Human-readable description. + """ + ), + ] = None, + gt: Annotated[ + Optional[float], + Doc( + """ + Greater than. If set, value must be greater than this. Only applicable to + numbers. + """ + ), + ] = None, + ge: Annotated[ + Optional[float], + Doc( + """ + Greater than or equal. If set, value must be greater than or equal to + this. Only applicable to numbers. + """ + ), + ] = None, + lt: Annotated[ + Optional[float], + Doc( + """ + Less than. If set, value must be less than this. Only applicable to numbers. + """ + ), + ] = None, + le: Annotated[ + Optional[float], + Doc( + """ + Less than or equal. If set, value must be less than or equal to this. + Only applicable to numbers. + """ + ), + ] = None, + min_length: Annotated[ + Optional[int], + Doc( + """ + Minimum length for strings. + """ + ), + ] = None, + max_length: Annotated[ + Optional[int], + Doc( + """ + Maximum length for strings. + """ + ), + ] = None, + pattern: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + ] = None, + regex: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Annotated[ + Union[str, None], + Doc( + """ + Parameter field name for discriminating the type in a tagged union. + """ + ), + ] = None, + strict: Annotated[ + Union[bool, None], + Doc( + """ + If `True`, strict validation is applied to the field. + """ + ), + ] = _Unset, + multiple_of: Annotated[ + Union[float, None], + Doc( + """ + Value must be a multiple of this. Only applicable to numbers. + """ + ), + ] = _Unset, + allow_inf_nan: Annotated[ + Union[bool, None], + Doc( + """ + Allow `inf`, `-inf`, `nan`. Only applicable to numbers. + """ + ), + ] = _Unset, + max_digits: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of allow digits for strings. + """ + ), + ] = _Unset, + decimal_places: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of decimal places allowed for numbers. + """ + ), + ] = _Unset, + examples: Annotated[ + Optional[List[Any]], + Doc( + """ + Example values for this field. + """ + ), + ] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." + ), + ] = _Unset, + openapi_examples: Annotated[ + Optional[Dict[str, Example]], + Doc( + """ + OpenAPI-specific examples. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Swagger UI (that provides the `/docs` interface) has better support for the + OpenAPI-specific examples than the JSON Schema `examples`, that's the main + use case for this. + + Read more about it in the + [FastAPI docs for Declare Request Example Data](https://fastapi.tiangolo.com/tutorial/schema-extra-example/#using-the-openapi_examples-parameter). + """ + ), + ] = None, + deprecated: Annotated[ + Union[deprecated, str, bool, None], + Doc( + """ + Mark this parameter field as deprecated. + + It will affect the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + To include (or not) this parameter field in the generated OpenAPI. + You probably don't need it, but it's available. 
+ + This affects the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = True, + json_schema_extra: Annotated[ + Union[Dict[str, Any], None], + Doc( + """ + Any additional JSON schema data. + """ + ), + ] = None, + **extra: Annotated[ + Any, + Doc( + """ + Include extra fields used by the JSON Schema. + """ + ), + deprecated( + """ + The `extra` kwargs is deprecated. Use `json_schema_extra` instead. + """ + ), + ], +) -> Any: + """ + Declare a path parameter for a *path operation*. + + Read more about it in the + [FastAPI docs for Path Parameters and Numeric Validations](https://fastapi.tiangolo.com/tutorial/path-params-numeric-validations/). + + ```python + from typing import Annotated + + from fastapi import FastAPI, Path + + app = FastAPI() + + + @app.get("/items/{item_id}") + async def read_items( + item_id: Annotated[int, Path(title="The ID of the item to get")], + ): + return {"item_id": item_id} + ``` + """ + return params.Path( + default=default, + default_factory=default_factory, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + example=example, + examples=examples, + openapi_examples=openapi_examples, + deprecated=deprecated, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +def Query( # noqa: N802 + default: Annotated[ + Any, + Doc( + """ + Default value if the parameter field is not set. + """ + ), + ] = Undefined, + *, + default_factory: Annotated[ + Union[Callable[[], Any], None], + Doc( + """ + A callable to generate the default value. + + This doesn't affect `Path` parameters as the value is always required. + The parameter is available only for compatibility. + """ + ), + ] = _Unset, + alias: Annotated[ + Optional[str], + Doc( + """ + An alternative name for the parameter field. + + This will be used to extract the data and for the generated OpenAPI. + It is particularly useful when you can't use the name you want because it + is a Python reserved keyword or similar. + """ + ), + ] = None, + alias_priority: Annotated[ + Union[int, None], + Doc( + """ + Priority of the alias. This affects whether an alias generator is used. + """ + ), + ] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Whitelist' validation step. The parameter field will be the single one + allowed by the alias or set of aliases defined. + """ + ), + ] = None, + serialization_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Blacklist' validation step. The vanilla parameter field will be the + single one of the alias' or set of aliases' fields and all the other + fields will be ignored at serialization time. + """ + ), + ] = None, + title: Annotated[ + Optional[str], + Doc( + """ + Human-readable title. + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Human-readable description. + """ + ), + ] = None, + gt: Annotated[ + Optional[float], + Doc( + """ + Greater than. If set, value must be greater than this. Only applicable to + numbers. 
+ """ + ), + ] = None, + ge: Annotated[ + Optional[float], + Doc( + """ + Greater than or equal. If set, value must be greater than or equal to + this. Only applicable to numbers. + """ + ), + ] = None, + lt: Annotated[ + Optional[float], + Doc( + """ + Less than. If set, value must be less than this. Only applicable to numbers. + """ + ), + ] = None, + le: Annotated[ + Optional[float], + Doc( + """ + Less than or equal. If set, value must be less than or equal to this. + Only applicable to numbers. + """ + ), + ] = None, + min_length: Annotated[ + Optional[int], + Doc( + """ + Minimum length for strings. + """ + ), + ] = None, + max_length: Annotated[ + Optional[int], + Doc( + """ + Maximum length for strings. + """ + ), + ] = None, + pattern: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + ] = None, + regex: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Annotated[ + Union[str, None], + Doc( + """ + Parameter field name for discriminating the type in a tagged union. + """ + ), + ] = None, + strict: Annotated[ + Union[bool, None], + Doc( + """ + If `True`, strict validation is applied to the field. + """ + ), + ] = _Unset, + multiple_of: Annotated[ + Union[float, None], + Doc( + """ + Value must be a multiple of this. Only applicable to numbers. + """ + ), + ] = _Unset, + allow_inf_nan: Annotated[ + Union[bool, None], + Doc( + """ + Allow `inf`, `-inf`, `nan`. Only applicable to numbers. + """ + ), + ] = _Unset, + max_digits: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of allow digits for strings. + """ + ), + ] = _Unset, + decimal_places: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of decimal places allowed for numbers. + """ + ), + ] = _Unset, + examples: Annotated[ + Optional[List[Any]], + Doc( + """ + Example values for this field. + """ + ), + ] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." + ), + ] = _Unset, + openapi_examples: Annotated[ + Optional[Dict[str, Example]], + Doc( + """ + OpenAPI-specific examples. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Swagger UI (that provides the `/docs` interface) has better support for the + OpenAPI-specific examples than the JSON Schema `examples`, that's the main + use case for this. + + Read more about it in the + [FastAPI docs for Declare Request Example Data](https://fastapi.tiangolo.com/tutorial/schema-extra-example/#using-the-openapi_examples-parameter). + """ + ), + ] = None, + deprecated: Annotated[ + Union[deprecated, str, bool, None], + Doc( + """ + Mark this parameter field as deprecated. + + It will affect the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + To include (or not) this parameter field in the generated OpenAPI. + You probably don't need it, but it's available. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = True, + json_schema_extra: Annotated[ + Union[Dict[str, Any], None], + Doc( + """ + Any additional JSON schema data. + """ + ), + ] = None, + **extra: Annotated[ + Any, + Doc( + """ + Include extra fields used by the JSON Schema. + """ + ), + deprecated( + """ + The `extra` kwargs is deprecated. 
Use `json_schema_extra` instead. + """ + ), + ], +) -> Any: + return params.Query( + default=default, + default_factory=default_factory, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + example=example, + examples=examples, + openapi_examples=openapi_examples, + deprecated=deprecated, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +def Header( # noqa: N802 + default: Annotated[ + Any, + Doc( + """ + Default value if the parameter field is not set. + """ + ), + ] = Undefined, + *, + default_factory: Annotated[ + Union[Callable[[], Any], None], + Doc( + """ + A callable to generate the default value. + + This doesn't affect `Path` parameters as the value is always required. + The parameter is available only for compatibility. + """ + ), + ] = _Unset, + alias: Annotated[ + Optional[str], + Doc( + """ + An alternative name for the parameter field. + + This will be used to extract the data and for the generated OpenAPI. + It is particularly useful when you can't use the name you want because it + is a Python reserved keyword or similar. + """ + ), + ] = None, + alias_priority: Annotated[ + Union[int, None], + Doc( + """ + Priority of the alias. This affects whether an alias generator is used. + """ + ), + ] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Whitelist' validation step. The parameter field will be the single one + allowed by the alias or set of aliases defined. + """ + ), + ] = None, + serialization_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Blacklist' validation step. The vanilla parameter field will be the + single one of the alias' or set of aliases' fields and all the other + fields will be ignored at serialization time. + """ + ), + ] = None, + convert_underscores: Annotated[ + bool, + Doc( + """ + Automatically convert underscores to hyphens in the parameter field name. + + Read more about it in the + [FastAPI docs for Header Parameters](https://fastapi.tiangolo.com/tutorial/header-params/#automatic-conversion) + """ + ), + ] = True, + title: Annotated[ + Optional[str], + Doc( + """ + Human-readable title. + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Human-readable description. + """ + ), + ] = None, + gt: Annotated[ + Optional[float], + Doc( + """ + Greater than. If set, value must be greater than this. Only applicable to + numbers. + """ + ), + ] = None, + ge: Annotated[ + Optional[float], + Doc( + """ + Greater than or equal. If set, value must be greater than or equal to + this. Only applicable to numbers. + """ + ), + ] = None, + lt: Annotated[ + Optional[float], + Doc( + """ + Less than. If set, value must be less than this. Only applicable to numbers. + """ + ), + ] = None, + le: Annotated[ + Optional[float], + Doc( + """ + Less than or equal. If set, value must be less than or equal to this. + Only applicable to numbers. 
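`Query()`, whose forwarding wrapper appears just above, is the most commonly used of these parameter factories. A usage sketch (the endpoint and constraints are illustrative, loosely echoing the assistant's course-search domain):

```python
from typing import Annotated, List, Optional

from fastapi import FastAPI, Query

app = FastAPI()


@app.get("/courses/search")
async def search_courses(
    q: Annotated[Optional[str], Query(max_length=50, description="Free-text filter")] = None,
    tags: Annotated[List[str], Query()] = [],
):
    # A List[str] query parameter accepts repeated keys: ?tags=a&tags=b
    return {"q": q, "tags": tags}
```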
+ """ + ), + ] = None, + min_length: Annotated[ + Optional[int], + Doc( + """ + Minimum length for strings. + """ + ), + ] = None, + max_length: Annotated[ + Optional[int], + Doc( + """ + Maximum length for strings. + """ + ), + ] = None, + pattern: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + ] = None, + regex: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Annotated[ + Union[str, None], + Doc( + """ + Parameter field name for discriminating the type in a tagged union. + """ + ), + ] = None, + strict: Annotated[ + Union[bool, None], + Doc( + """ + If `True`, strict validation is applied to the field. + """ + ), + ] = _Unset, + multiple_of: Annotated[ + Union[float, None], + Doc( + """ + Value must be a multiple of this. Only applicable to numbers. + """ + ), + ] = _Unset, + allow_inf_nan: Annotated[ + Union[bool, None], + Doc( + """ + Allow `inf`, `-inf`, `nan`. Only applicable to numbers. + """ + ), + ] = _Unset, + max_digits: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of allow digits for strings. + """ + ), + ] = _Unset, + decimal_places: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of decimal places allowed for numbers. + """ + ), + ] = _Unset, + examples: Annotated[ + Optional[List[Any]], + Doc( + """ + Example values for this field. + """ + ), + ] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." + ), + ] = _Unset, + openapi_examples: Annotated[ + Optional[Dict[str, Example]], + Doc( + """ + OpenAPI-specific examples. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Swagger UI (that provides the `/docs` interface) has better support for the + OpenAPI-specific examples than the JSON Schema `examples`, that's the main + use case for this. + + Read more about it in the + [FastAPI docs for Declare Request Example Data](https://fastapi.tiangolo.com/tutorial/schema-extra-example/#using-the-openapi_examples-parameter). + """ + ), + ] = None, + deprecated: Annotated[ + Union[deprecated, str, bool, None], + Doc( + """ + Mark this parameter field as deprecated. + + It will affect the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + To include (or not) this parameter field in the generated OpenAPI. + You probably don't need it, but it's available. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = True, + json_schema_extra: Annotated[ + Union[Dict[str, Any], None], + Doc( + """ + Any additional JSON schema data. + """ + ), + ] = None, + **extra: Annotated[ + Any, + Doc( + """ + Include extra fields used by the JSON Schema. + """ + ), + deprecated( + """ + The `extra` kwargs is deprecated. Use `json_schema_extra` instead. 
+ """ + ), + ], +) -> Any: + return params.Header( + default=default, + default_factory=default_factory, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + convert_underscores=convert_underscores, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + example=example, + examples=examples, + openapi_examples=openapi_examples, + deprecated=deprecated, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +def Cookie( # noqa: N802 + default: Annotated[ + Any, + Doc( + """ + Default value if the parameter field is not set. + """ + ), + ] = Undefined, + *, + default_factory: Annotated[ + Union[Callable[[], Any], None], + Doc( + """ + A callable to generate the default value. + + This doesn't affect `Path` parameters as the value is always required. + The parameter is available only for compatibility. + """ + ), + ] = _Unset, + alias: Annotated[ + Optional[str], + Doc( + """ + An alternative name for the parameter field. + + This will be used to extract the data and for the generated OpenAPI. + It is particularly useful when you can't use the name you want because it + is a Python reserved keyword or similar. + """ + ), + ] = None, + alias_priority: Annotated[ + Union[int, None], + Doc( + """ + Priority of the alias. This affects whether an alias generator is used. + """ + ), + ] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Whitelist' validation step. The parameter field will be the single one + allowed by the alias or set of aliases defined. + """ + ), + ] = None, + serialization_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Blacklist' validation step. The vanilla parameter field will be the + single one of the alias' or set of aliases' fields and all the other + fields will be ignored at serialization time. + """ + ), + ] = None, + title: Annotated[ + Optional[str], + Doc( + """ + Human-readable title. + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Human-readable description. + """ + ), + ] = None, + gt: Annotated[ + Optional[float], + Doc( + """ + Greater than. If set, value must be greater than this. Only applicable to + numbers. + """ + ), + ] = None, + ge: Annotated[ + Optional[float], + Doc( + """ + Greater than or equal. If set, value must be greater than or equal to + this. Only applicable to numbers. + """ + ), + ] = None, + lt: Annotated[ + Optional[float], + Doc( + """ + Less than. If set, value must be less than this. Only applicable to numbers. + """ + ), + ] = None, + le: Annotated[ + Optional[float], + Doc( + """ + Less than or equal. If set, value must be less than or equal to this. + Only applicable to numbers. + """ + ), + ] = None, + min_length: Annotated[ + Optional[int], + Doc( + """ + Minimum length for strings. + """ + ), + ] = None, + max_length: Annotated[ + Optional[int], + Doc( + """ + Maximum length for strings. + """ + ), + ] = None, + pattern: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. 
+ """ + ), + ] = None, + regex: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Annotated[ + Union[str, None], + Doc( + """ + Parameter field name for discriminating the type in a tagged union. + """ + ), + ] = None, + strict: Annotated[ + Union[bool, None], + Doc( + """ + If `True`, strict validation is applied to the field. + """ + ), + ] = _Unset, + multiple_of: Annotated[ + Union[float, None], + Doc( + """ + Value must be a multiple of this. Only applicable to numbers. + """ + ), + ] = _Unset, + allow_inf_nan: Annotated[ + Union[bool, None], + Doc( + """ + Allow `inf`, `-inf`, `nan`. Only applicable to numbers. + """ + ), + ] = _Unset, + max_digits: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of allow digits for strings. + """ + ), + ] = _Unset, + decimal_places: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of decimal places allowed for numbers. + """ + ), + ] = _Unset, + examples: Annotated[ + Optional[List[Any]], + Doc( + """ + Example values for this field. + """ + ), + ] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." + ), + ] = _Unset, + openapi_examples: Annotated[ + Optional[Dict[str, Example]], + Doc( + """ + OpenAPI-specific examples. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Swagger UI (that provides the `/docs` interface) has better support for the + OpenAPI-specific examples than the JSON Schema `examples`, that's the main + use case for this. + + Read more about it in the + [FastAPI docs for Declare Request Example Data](https://fastapi.tiangolo.com/tutorial/schema-extra-example/#using-the-openapi_examples-parameter). + """ + ), + ] = None, + deprecated: Annotated[ + Union[deprecated, str, bool, None], + Doc( + """ + Mark this parameter field as deprecated. + + It will affect the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + To include (or not) this parameter field in the generated OpenAPI. + You probably don't need it, but it's available. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = True, + json_schema_extra: Annotated[ + Union[Dict[str, Any], None], + Doc( + """ + Any additional JSON schema data. + """ + ), + ] = None, + **extra: Annotated[ + Any, + Doc( + """ + Include extra fields used by the JSON Schema. + """ + ), + deprecated( + """ + The `extra` kwargs is deprecated. Use `json_schema_extra` instead. 
+ """ + ), + ], +) -> Any: + return params.Cookie( + default=default, + default_factory=default_factory, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + example=example, + examples=examples, + openapi_examples=openapi_examples, + deprecated=deprecated, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +def Body( # noqa: N802 + default: Annotated[ + Any, + Doc( + """ + Default value if the parameter field is not set. + """ + ), + ] = Undefined, + *, + default_factory: Annotated[ + Union[Callable[[], Any], None], + Doc( + """ + A callable to generate the default value. + + This doesn't affect `Path` parameters as the value is always required. + The parameter is available only for compatibility. + """ + ), + ] = _Unset, + embed: Annotated[ + Union[bool, None], + Doc( + """ + When `embed` is `True`, the parameter will be expected in a JSON body as a + key instead of being the JSON body itself. + + This happens automatically when more than one `Body` parameter is declared. + + Read more about it in the + [FastAPI docs for Body - Multiple Parameters](https://fastapi.tiangolo.com/tutorial/body-multiple-params/#embed-a-single-body-parameter). + """ + ), + ] = None, + media_type: Annotated[ + str, + Doc( + """ + The media type of this parameter field. Changing it would affect the + generated OpenAPI, but currently it doesn't affect the parsing of the data. + """ + ), + ] = "application/json", + alias: Annotated[ + Optional[str], + Doc( + """ + An alternative name for the parameter field. + + This will be used to extract the data and for the generated OpenAPI. + It is particularly useful when you can't use the name you want because it + is a Python reserved keyword or similar. + """ + ), + ] = None, + alias_priority: Annotated[ + Union[int, None], + Doc( + """ + Priority of the alias. This affects whether an alias generator is used. + """ + ), + ] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Whitelist' validation step. The parameter field will be the single one + allowed by the alias or set of aliases defined. + """ + ), + ] = None, + serialization_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Blacklist' validation step. The vanilla parameter field will be the + single one of the alias' or set of aliases' fields and all the other + fields will be ignored at serialization time. + """ + ), + ] = None, + title: Annotated[ + Optional[str], + Doc( + """ + Human-readable title. + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Human-readable description. + """ + ), + ] = None, + gt: Annotated[ + Optional[float], + Doc( + """ + Greater than. If set, value must be greater than this. Only applicable to + numbers. + """ + ), + ] = None, + ge: Annotated[ + Optional[float], + Doc( + """ + Greater than or equal. If set, value must be greater than or equal to + this. Only applicable to numbers. 
+ """ + ), + ] = None, + lt: Annotated[ + Optional[float], + Doc( + """ + Less than. If set, value must be less than this. Only applicable to numbers. + """ + ), + ] = None, + le: Annotated[ + Optional[float], + Doc( + """ + Less than or equal. If set, value must be less than or equal to this. + Only applicable to numbers. + """ + ), + ] = None, + min_length: Annotated[ + Optional[int], + Doc( + """ + Minimum length for strings. + """ + ), + ] = None, + max_length: Annotated[ + Optional[int], + Doc( + """ + Maximum length for strings. + """ + ), + ] = None, + pattern: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + ] = None, + regex: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Annotated[ + Union[str, None], + Doc( + """ + Parameter field name for discriminating the type in a tagged union. + """ + ), + ] = None, + strict: Annotated[ + Union[bool, None], + Doc( + """ + If `True`, strict validation is applied to the field. + """ + ), + ] = _Unset, + multiple_of: Annotated[ + Union[float, None], + Doc( + """ + Value must be a multiple of this. Only applicable to numbers. + """ + ), + ] = _Unset, + allow_inf_nan: Annotated[ + Union[bool, None], + Doc( + """ + Allow `inf`, `-inf`, `nan`. Only applicable to numbers. + """ + ), + ] = _Unset, + max_digits: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of allow digits for strings. + """ + ), + ] = _Unset, + decimal_places: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of decimal places allowed for numbers. + """ + ), + ] = _Unset, + examples: Annotated[ + Optional[List[Any]], + Doc( + """ + Example values for this field. + """ + ), + ] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." + ), + ] = _Unset, + openapi_examples: Annotated[ + Optional[Dict[str, Example]], + Doc( + """ + OpenAPI-specific examples. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Swagger UI (that provides the `/docs` interface) has better support for the + OpenAPI-specific examples than the JSON Schema `examples`, that's the main + use case for this. + + Read more about it in the + [FastAPI docs for Declare Request Example Data](https://fastapi.tiangolo.com/tutorial/schema-extra-example/#using-the-openapi_examples-parameter). + """ + ), + ] = None, + deprecated: Annotated[ + Union[deprecated, str, bool, None], + Doc( + """ + Mark this parameter field as deprecated. + + It will affect the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + To include (or not) this parameter field in the generated OpenAPI. + You probably don't need it, but it's available. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = True, + json_schema_extra: Annotated[ + Union[Dict[str, Any], None], + Doc( + """ + Any additional JSON schema data. + """ + ), + ] = None, + **extra: Annotated[ + Any, + Doc( + """ + Include extra fields used by the JSON Schema. + """ + ), + deprecated( + """ + The `extra` kwargs is deprecated. Use `json_schema_extra` instead. 
+ """ + ), + ], +) -> Any: + return params.Body( + default=default, + default_factory=default_factory, + embed=embed, + media_type=media_type, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + example=example, + examples=examples, + openapi_examples=openapi_examples, + deprecated=deprecated, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +def Form( # noqa: N802 + default: Annotated[ + Any, + Doc( + """ + Default value if the parameter field is not set. + """ + ), + ] = Undefined, + *, + default_factory: Annotated[ + Union[Callable[[], Any], None], + Doc( + """ + A callable to generate the default value. + + This doesn't affect `Path` parameters as the value is always required. + The parameter is available only for compatibility. + """ + ), + ] = _Unset, + media_type: Annotated[ + str, + Doc( + """ + The media type of this parameter field. Changing it would affect the + generated OpenAPI, but currently it doesn't affect the parsing of the data. + """ + ), + ] = "application/x-www-form-urlencoded", + alias: Annotated[ + Optional[str], + Doc( + """ + An alternative name for the parameter field. + + This will be used to extract the data and for the generated OpenAPI. + It is particularly useful when you can't use the name you want because it + is a Python reserved keyword or similar. + """ + ), + ] = None, + alias_priority: Annotated[ + Union[int, None], + Doc( + """ + Priority of the alias. This affects whether an alias generator is used. + """ + ), + ] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Whitelist' validation step. The parameter field will be the single one + allowed by the alias or set of aliases defined. + """ + ), + ] = None, + serialization_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Blacklist' validation step. The vanilla parameter field will be the + single one of the alias' or set of aliases' fields and all the other + fields will be ignored at serialization time. + """ + ), + ] = None, + title: Annotated[ + Optional[str], + Doc( + """ + Human-readable title. + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Human-readable description. + """ + ), + ] = None, + gt: Annotated[ + Optional[float], + Doc( + """ + Greater than. If set, value must be greater than this. Only applicable to + numbers. + """ + ), + ] = None, + ge: Annotated[ + Optional[float], + Doc( + """ + Greater than or equal. If set, value must be greater than or equal to + this. Only applicable to numbers. + """ + ), + ] = None, + lt: Annotated[ + Optional[float], + Doc( + """ + Less than. If set, value must be less than this. Only applicable to numbers. + """ + ), + ] = None, + le: Annotated[ + Optional[float], + Doc( + """ + Less than or equal. If set, value must be less than or equal to this. + Only applicable to numbers. + """ + ), + ] = None, + min_length: Annotated[ + Optional[int], + Doc( + """ + Minimum length for strings. 
+ """ + ), + ] = None, + max_length: Annotated[ + Optional[int], + Doc( + """ + Maximum length for strings. + """ + ), + ] = None, + pattern: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + ] = None, + regex: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Annotated[ + Union[str, None], + Doc( + """ + Parameter field name for discriminating the type in a tagged union. + """ + ), + ] = None, + strict: Annotated[ + Union[bool, None], + Doc( + """ + If `True`, strict validation is applied to the field. + """ + ), + ] = _Unset, + multiple_of: Annotated[ + Union[float, None], + Doc( + """ + Value must be a multiple of this. Only applicable to numbers. + """ + ), + ] = _Unset, + allow_inf_nan: Annotated[ + Union[bool, None], + Doc( + """ + Allow `inf`, `-inf`, `nan`. Only applicable to numbers. + """ + ), + ] = _Unset, + max_digits: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of allow digits for strings. + """ + ), + ] = _Unset, + decimal_places: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of decimal places allowed for numbers. + """ + ), + ] = _Unset, + examples: Annotated[ + Optional[List[Any]], + Doc( + """ + Example values for this field. + """ + ), + ] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." + ), + ] = _Unset, + openapi_examples: Annotated[ + Optional[Dict[str, Example]], + Doc( + """ + OpenAPI-specific examples. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Swagger UI (that provides the `/docs` interface) has better support for the + OpenAPI-specific examples than the JSON Schema `examples`, that's the main + use case for this. + + Read more about it in the + [FastAPI docs for Declare Request Example Data](https://fastapi.tiangolo.com/tutorial/schema-extra-example/#using-the-openapi_examples-parameter). + """ + ), + ] = None, + deprecated: Annotated[ + Union[deprecated, str, bool, None], + Doc( + """ + Mark this parameter field as deprecated. + + It will affect the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + To include (or not) this parameter field in the generated OpenAPI. + You probably don't need it, but it's available. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = True, + json_schema_extra: Annotated[ + Union[Dict[str, Any], None], + Doc( + """ + Any additional JSON schema data. + """ + ), + ] = None, + **extra: Annotated[ + Any, + Doc( + """ + Include extra fields used by the JSON Schema. + """ + ), + deprecated( + """ + The `extra` kwargs is deprecated. Use `json_schema_extra` instead. 
+ """ + ), + ], +) -> Any: + return params.Form( + default=default, + default_factory=default_factory, + media_type=media_type, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + example=example, + examples=examples, + openapi_examples=openapi_examples, + deprecated=deprecated, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +def File( # noqa: N802 + default: Annotated[ + Any, + Doc( + """ + Default value if the parameter field is not set. + """ + ), + ] = Undefined, + *, + default_factory: Annotated[ + Union[Callable[[], Any], None], + Doc( + """ + A callable to generate the default value. + + This doesn't affect `Path` parameters as the value is always required. + The parameter is available only for compatibility. + """ + ), + ] = _Unset, + media_type: Annotated[ + str, + Doc( + """ + The media type of this parameter field. Changing it would affect the + generated OpenAPI, but currently it doesn't affect the parsing of the data. + """ + ), + ] = "multipart/form-data", + alias: Annotated[ + Optional[str], + Doc( + """ + An alternative name for the parameter field. + + This will be used to extract the data and for the generated OpenAPI. + It is particularly useful when you can't use the name you want because it + is a Python reserved keyword or similar. + """ + ), + ] = None, + alias_priority: Annotated[ + Union[int, None], + Doc( + """ + Priority of the alias. This affects whether an alias generator is used. + """ + ), + ] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Whitelist' validation step. The parameter field will be the single one + allowed by the alias or set of aliases defined. + """ + ), + ] = None, + serialization_alias: Annotated[ + Union[str, None], + Doc( + """ + 'Blacklist' validation step. The vanilla parameter field will be the + single one of the alias' or set of aliases' fields and all the other + fields will be ignored at serialization time. + """ + ), + ] = None, + title: Annotated[ + Optional[str], + Doc( + """ + Human-readable title. + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Human-readable description. + """ + ), + ] = None, + gt: Annotated[ + Optional[float], + Doc( + """ + Greater than. If set, value must be greater than this. Only applicable to + numbers. + """ + ), + ] = None, + ge: Annotated[ + Optional[float], + Doc( + """ + Greater than or equal. If set, value must be greater than or equal to + this. Only applicable to numbers. + """ + ), + ] = None, + lt: Annotated[ + Optional[float], + Doc( + """ + Less than. If set, value must be less than this. Only applicable to numbers. + """ + ), + ] = None, + le: Annotated[ + Optional[float], + Doc( + """ + Less than or equal. If set, value must be less than or equal to this. + Only applicable to numbers. + """ + ), + ] = None, + min_length: Annotated[ + Optional[int], + Doc( + """ + Minimum length for strings. 
+ """ + ), + ] = None, + max_length: Annotated[ + Optional[int], + Doc( + """ + Maximum length for strings. + """ + ), + ] = None, + pattern: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + ] = None, + regex: Annotated[ + Optional[str], + Doc( + """ + RegEx pattern for strings. + """ + ), + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Annotated[ + Union[str, None], + Doc( + """ + Parameter field name for discriminating the type in a tagged union. + """ + ), + ] = None, + strict: Annotated[ + Union[bool, None], + Doc( + """ + If `True`, strict validation is applied to the field. + """ + ), + ] = _Unset, + multiple_of: Annotated[ + Union[float, None], + Doc( + """ + Value must be a multiple of this. Only applicable to numbers. + """ + ), + ] = _Unset, + allow_inf_nan: Annotated[ + Union[bool, None], + Doc( + """ + Allow `inf`, `-inf`, `nan`. Only applicable to numbers. + """ + ), + ] = _Unset, + max_digits: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of allow digits for strings. + """ + ), + ] = _Unset, + decimal_places: Annotated[ + Union[int, None], + Doc( + """ + Maximum number of decimal places allowed for numbers. + """ + ), + ] = _Unset, + examples: Annotated[ + Optional[List[Any]], + Doc( + """ + Example values for this field. + """ + ), + ] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." + ), + ] = _Unset, + openapi_examples: Annotated[ + Optional[Dict[str, Example]], + Doc( + """ + OpenAPI-specific examples. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Swagger UI (that provides the `/docs` interface) has better support for the + OpenAPI-specific examples than the JSON Schema `examples`, that's the main + use case for this. + + Read more about it in the + [FastAPI docs for Declare Request Example Data](https://fastapi.tiangolo.com/tutorial/schema-extra-example/#using-the-openapi_examples-parameter). + """ + ), + ] = None, + deprecated: Annotated[ + Union[deprecated, str, bool, None], + Doc( + """ + Mark this parameter field as deprecated. + + It will affect the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + To include (or not) this parameter field in the generated OpenAPI. + You probably don't need it, but it's available. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = True, + json_schema_extra: Annotated[ + Union[Dict[str, Any], None], + Doc( + """ + Any additional JSON schema data. + """ + ), + ] = None, + **extra: Annotated[ + Any, + Doc( + """ + Include extra fields used by the JSON Schema. + """ + ), + deprecated( + """ + The `extra` kwargs is deprecated. Use `json_schema_extra` instead. 
+ """ + ), + ], +) -> Any: + return params.File( + default=default, + default_factory=default_factory, + media_type=media_type, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + example=example, + examples=examples, + openapi_examples=openapi_examples, + deprecated=deprecated, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +def Depends( # noqa: N802 + dependency: Annotated[ + Optional[Callable[..., Any]], + Doc( + """ + A "dependable" callable (like a function). + + Don't call it directly, FastAPI will call it for you, just pass the object + directly. + """ + ), + ] = None, + *, + use_cache: Annotated[ + bool, + Doc( + """ + By default, after a dependency is called the first time in a request, if + the dependency is declared again for the rest of the request (for example + if the dependency is needed by several dependencies), the value will be + re-used for the rest of the request. + + Set `use_cache` to `False` to disable this behavior and ensure the + dependency is called again (if declared more than once) in the same request. + """ + ), + ] = True, +) -> Any: + """ + Declare a FastAPI dependency. + + It takes a single "dependable" callable (like a function). + + Don't call it directly, FastAPI will call it for you. + + Read more about it in the + [FastAPI docs for Dependencies](https://fastapi.tiangolo.com/tutorial/dependencies/). + + **Example** + + ```python + from typing import Annotated + + from fastapi import Depends, FastAPI + + app = FastAPI() + + + async def common_parameters(q: str | None = None, skip: int = 0, limit: int = 100): + return {"q": q, "skip": skip, "limit": limit} + + + @app.get("/items/") + async def read_items(commons: Annotated[dict, Depends(common_parameters)]): + return commons + ``` + """ + return params.Depends(dependency=dependency, use_cache=use_cache) + + +def Security( # noqa: N802 + dependency: Annotated[ + Optional[Callable[..., Any]], + Doc( + """ + A "dependable" callable (like a function). + + Don't call it directly, FastAPI will call it for you, just pass the object + directly. + """ + ), + ] = None, + *, + scopes: Annotated[ + Optional[Sequence[str]], + Doc( + """ + OAuth2 scopes required for the *path operation* that uses this Security + dependency. + + The term "scope" comes from the OAuth2 specification, it seems to be + intentionally vague and interpretable. It normally refers to permissions, + in cases to roles. + + These scopes are integrated with OpenAPI (and the API docs at `/docs`). + So they are visible in the OpenAPI specification. + ) + """ + ), + ] = None, + use_cache: Annotated[ + bool, + Doc( + """ + By default, after a dependency is called the first time in a request, if + the dependency is declared again for the rest of the request (for example + if the dependency is needed by several dependencies), the value will be + re-used for the rest of the request. + + Set `use_cache` to `False` to disable this behavior and ensure the + dependency is called again (if declared more than once) in the same request. + """ + ), + ] = True, +) -> Any: + """ + Declare a FastAPI Security dependency. 
+ + The only difference with a regular dependency is that it can declare OAuth2 + scopes that will be integrated with OpenAPI and the automatic UI docs (by default + at `/docs`). + + It takes a single "dependable" callable (like a function). + + Don't call it directly, FastAPI will call it for you. + + Read more about it in the + [FastAPI docs for Security](https://fastapi.tiangolo.com/tutorial/security/) and + in the + [FastAPI docs for OAuth2 scopes](https://fastapi.tiangolo.com/advanced/security/oauth2-scopes/). + + **Example** + + ```python + from typing import Annotated + + from fastapi import Security, FastAPI + + from .db import User + from .security import get_current_active_user + + app = FastAPI() + + @app.get("/users/me/items/") + async def read_own_items( + current_user: Annotated[User, Security(get_current_active_user, scopes=["items"])] + ): + return [{"item_id": "Foo", "owner": current_user.username}] + ``` + """ + return params.Security(dependency=dependency, scopes=scopes, use_cache=use_cache) diff --git a/venv/Lib/site-packages/fastapi/params.py b/venv/Lib/site-packages/fastapi/params.py new file mode 100644 index 00000000..8f5601dd --- /dev/null +++ b/venv/Lib/site-packages/fastapi/params.py @@ -0,0 +1,786 @@ +import warnings +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Sequence, Union + +from fastapi.openapi.models import Example +from pydantic.fields import FieldInfo +from typing_extensions import Annotated, deprecated + +from ._compat import ( + PYDANTIC_V2, + PYDANTIC_VERSION_MINOR_TUPLE, + Undefined, +) + +_Unset: Any = Undefined + + +class ParamTypes(Enum): + query = "query" + header = "header" + path = "path" + cookie = "cookie" + + +class Param(FieldInfo): + in_: ParamTypes + + def __init__( + self, + default: Any = Undefined, + *, + default_factory: Union[Callable[[], Any], None] = _Unset, + annotation: Optional[Any] = None, + alias: Optional[str] = None, + alias_priority: Union[int, None] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Union[str, None] = None, + serialization_alias: Union[str, None] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + pattern: Optional[str] = None, + regex: Annotated[ + Optional[str], + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Union[str, None] = None, + strict: Union[bool, None] = _Unset, + multiple_of: Union[float, None] = _Unset, + allow_inf_nan: Union[bool, None] = _Unset, + max_digits: Union[int, None] = _Unset, + decimal_places: Union[int, None] = _Unset, + examples: Optional[List[Any]] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." 
+ ), + ] = _Unset, + openapi_examples: Optional[Dict[str, Example]] = None, + deprecated: Union[deprecated, str, bool, None] = None, + include_in_schema: bool = True, + json_schema_extra: Union[Dict[str, Any], None] = None, + **extra: Any, + ): + if example is not _Unset: + warnings.warn( + "`example` has been deprecated, please use `examples` instead", + category=DeprecationWarning, + stacklevel=4, + ) + self.example = example + self.include_in_schema = include_in_schema + self.openapi_examples = openapi_examples + kwargs = dict( + default=default, + default_factory=default_factory, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + discriminator=discriminator, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + **extra, + ) + if examples is not None: + kwargs["examples"] = examples + if regex is not None: + warnings.warn( + "`regex` has been deprecated, please use `pattern` instead", + category=DeprecationWarning, + stacklevel=4, + ) + current_json_schema_extra = json_schema_extra or extra + if PYDANTIC_VERSION_MINOR_TUPLE < (2, 7): + self.deprecated = deprecated + else: + kwargs["deprecated"] = deprecated + if PYDANTIC_V2: + kwargs.update( + { + "annotation": annotation, + "alias_priority": alias_priority, + "validation_alias": validation_alias, + "serialization_alias": serialization_alias, + "strict": strict, + "json_schema_extra": current_json_schema_extra, + } + ) + kwargs["pattern"] = pattern or regex + else: + kwargs["regex"] = pattern or regex + kwargs.update(**current_json_schema_extra) + use_kwargs = {k: v for k, v in kwargs.items() if v is not _Unset} + + super().__init__(**use_kwargs) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.default})" + + +class Path(Param): + in_ = ParamTypes.path + + def __init__( + self, + default: Any = ..., + *, + default_factory: Union[Callable[[], Any], None] = _Unset, + annotation: Optional[Any] = None, + alias: Optional[str] = None, + alias_priority: Union[int, None] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Union[str, None] = None, + serialization_alias: Union[str, None] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + pattern: Optional[str] = None, + regex: Annotated[ + Optional[str], + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Union[str, None] = None, + strict: Union[bool, None] = _Unset, + multiple_of: Union[float, None] = _Unset, + allow_inf_nan: Union[bool, None] = _Unset, + max_digits: Union[int, None] = _Unset, + decimal_places: Union[int, None] = _Unset, + examples: Optional[List[Any]] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." 
+ ), + ] = _Unset, + openapi_examples: Optional[Dict[str, Example]] = None, + deprecated: Union[deprecated, str, bool, None] = None, + include_in_schema: bool = True, + json_schema_extra: Union[Dict[str, Any], None] = None, + **extra: Any, + ): + assert default is ..., "Path parameters cannot have a default value" + self.in_ = self.in_ + super().__init__( + default=default, + default_factory=default_factory, + annotation=annotation, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + deprecated=deprecated, + example=example, + examples=examples, + openapi_examples=openapi_examples, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +class Query(Param): + in_ = ParamTypes.query + + def __init__( + self, + default: Any = Undefined, + *, + default_factory: Union[Callable[[], Any], None] = _Unset, + annotation: Optional[Any] = None, + alias: Optional[str] = None, + alias_priority: Union[int, None] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Union[str, None] = None, + serialization_alias: Union[str, None] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + pattern: Optional[str] = None, + regex: Annotated[ + Optional[str], + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Union[str, None] = None, + strict: Union[bool, None] = _Unset, + multiple_of: Union[float, None] = _Unset, + allow_inf_nan: Union[bool, None] = _Unset, + max_digits: Union[int, None] = _Unset, + decimal_places: Union[int, None] = _Unset, + examples: Optional[List[Any]] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." 
+ ), + ] = _Unset, + openapi_examples: Optional[Dict[str, Example]] = None, + deprecated: Union[deprecated, str, bool, None] = None, + include_in_schema: bool = True, + json_schema_extra: Union[Dict[str, Any], None] = None, + **extra: Any, + ): + super().__init__( + default=default, + default_factory=default_factory, + annotation=annotation, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + deprecated=deprecated, + example=example, + examples=examples, + openapi_examples=openapi_examples, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +class Header(Param): + in_ = ParamTypes.header + + def __init__( + self, + default: Any = Undefined, + *, + default_factory: Union[Callable[[], Any], None] = _Unset, + annotation: Optional[Any] = None, + alias: Optional[str] = None, + alias_priority: Union[int, None] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Union[str, None] = None, + serialization_alias: Union[str, None] = None, + convert_underscores: bool = True, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + pattern: Optional[str] = None, + regex: Annotated[ + Optional[str], + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Union[str, None] = None, + strict: Union[bool, None] = _Unset, + multiple_of: Union[float, None] = _Unset, + allow_inf_nan: Union[bool, None] = _Unset, + max_digits: Union[int, None] = _Unset, + decimal_places: Union[int, None] = _Unset, + examples: Optional[List[Any]] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." 
+ ), + ] = _Unset, + openapi_examples: Optional[Dict[str, Example]] = None, + deprecated: Union[deprecated, str, bool, None] = None, + include_in_schema: bool = True, + json_schema_extra: Union[Dict[str, Any], None] = None, + **extra: Any, + ): + self.convert_underscores = convert_underscores + super().__init__( + default=default, + default_factory=default_factory, + annotation=annotation, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + deprecated=deprecated, + example=example, + examples=examples, + openapi_examples=openapi_examples, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +class Cookie(Param): + in_ = ParamTypes.cookie + + def __init__( + self, + default: Any = Undefined, + *, + default_factory: Union[Callable[[], Any], None] = _Unset, + annotation: Optional[Any] = None, + alias: Optional[str] = None, + alias_priority: Union[int, None] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Union[str, None] = None, + serialization_alias: Union[str, None] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + pattern: Optional[str] = None, + regex: Annotated[ + Optional[str], + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Union[str, None] = None, + strict: Union[bool, None] = _Unset, + multiple_of: Union[float, None] = _Unset, + allow_inf_nan: Union[bool, None] = _Unset, + max_digits: Union[int, None] = _Unset, + decimal_places: Union[int, None] = _Unset, + examples: Optional[List[Any]] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." 
+ ), + ] = _Unset, + openapi_examples: Optional[Dict[str, Example]] = None, + deprecated: Union[deprecated, str, bool, None] = None, + include_in_schema: bool = True, + json_schema_extra: Union[Dict[str, Any], None] = None, + **extra: Any, + ): + super().__init__( + default=default, + default_factory=default_factory, + annotation=annotation, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + deprecated=deprecated, + example=example, + examples=examples, + openapi_examples=openapi_examples, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +class Body(FieldInfo): + def __init__( + self, + default: Any = Undefined, + *, + default_factory: Union[Callable[[], Any], None] = _Unset, + annotation: Optional[Any] = None, + embed: Union[bool, None] = None, + media_type: str = "application/json", + alias: Optional[str] = None, + alias_priority: Union[int, None] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Union[str, None] = None, + serialization_alias: Union[str, None] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + pattern: Optional[str] = None, + regex: Annotated[ + Optional[str], + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Union[str, None] = None, + strict: Union[bool, None] = _Unset, + multiple_of: Union[float, None] = _Unset, + allow_inf_nan: Union[bool, None] = _Unset, + max_digits: Union[int, None] = _Unset, + decimal_places: Union[int, None] = _Unset, + examples: Optional[List[Any]] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." 
+ ), + ] = _Unset, + openapi_examples: Optional[Dict[str, Example]] = None, + deprecated: Union[deprecated, str, bool, None] = None, + include_in_schema: bool = True, + json_schema_extra: Union[Dict[str, Any], None] = None, + **extra: Any, + ): + self.embed = embed + self.media_type = media_type + if example is not _Unset: + warnings.warn( + "`example` has been deprecated, please use `examples` instead", + category=DeprecationWarning, + stacklevel=4, + ) + self.example = example + self.include_in_schema = include_in_schema + self.openapi_examples = openapi_examples + kwargs = dict( + default=default, + default_factory=default_factory, + alias=alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + discriminator=discriminator, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + **extra, + ) + if examples is not None: + kwargs["examples"] = examples + if regex is not None: + warnings.warn( + "`regex` has been deprecated, please use `pattern` instead", + category=DeprecationWarning, + stacklevel=4, + ) + current_json_schema_extra = json_schema_extra or extra + if PYDANTIC_VERSION_MINOR_TUPLE < (2, 7): + self.deprecated = deprecated + else: + kwargs["deprecated"] = deprecated + if PYDANTIC_V2: + kwargs.update( + { + "annotation": annotation, + "alias_priority": alias_priority, + "validation_alias": validation_alias, + "serialization_alias": serialization_alias, + "strict": strict, + "json_schema_extra": current_json_schema_extra, + } + ) + kwargs["pattern"] = pattern or regex + else: + kwargs["regex"] = pattern or regex + kwargs.update(**current_json_schema_extra) + + use_kwargs = {k: v for k, v in kwargs.items() if v is not _Unset} + + super().__init__(**use_kwargs) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.default})" + + +class Form(Body): + def __init__( + self, + default: Any = Undefined, + *, + default_factory: Union[Callable[[], Any], None] = _Unset, + annotation: Optional[Any] = None, + media_type: str = "application/x-www-form-urlencoded", + alias: Optional[str] = None, + alias_priority: Union[int, None] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Union[str, None] = None, + serialization_alias: Union[str, None] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + pattern: Optional[str] = None, + regex: Annotated[ + Optional[str], + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Union[str, None] = None, + strict: Union[bool, None] = _Unset, + multiple_of: Union[float, None] = _Unset, + allow_inf_nan: Union[bool, None] = _Unset, + max_digits: Union[int, None] = _Unset, + decimal_places: Union[int, None] = _Unset, + examples: Optional[List[Any]] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." 
+ ), + ] = _Unset, + openapi_examples: Optional[Dict[str, Example]] = None, + deprecated: Union[deprecated, str, bool, None] = None, + include_in_schema: bool = True, + json_schema_extra: Union[Dict[str, Any], None] = None, + **extra: Any, + ): + super().__init__( + default=default, + default_factory=default_factory, + annotation=annotation, + media_type=media_type, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + deprecated=deprecated, + example=example, + examples=examples, + openapi_examples=openapi_examples, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +class File(Form): + def __init__( + self, + default: Any = Undefined, + *, + default_factory: Union[Callable[[], Any], None] = _Unset, + annotation: Optional[Any] = None, + media_type: str = "multipart/form-data", + alias: Optional[str] = None, + alias_priority: Union[int, None] = _Unset, + # TODO: update when deprecating Pydantic v1, import these types + # validation_alias: str | AliasPath | AliasChoices | None + validation_alias: Union[str, None] = None, + serialization_alias: Union[str, None] = None, + title: Optional[str] = None, + description: Optional[str] = None, + gt: Optional[float] = None, + ge: Optional[float] = None, + lt: Optional[float] = None, + le: Optional[float] = None, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + pattern: Optional[str] = None, + regex: Annotated[ + Optional[str], + deprecated( + "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead." + ), + ] = None, + discriminator: Union[str, None] = None, + strict: Union[bool, None] = _Unset, + multiple_of: Union[float, None] = _Unset, + allow_inf_nan: Union[bool, None] = _Unset, + max_digits: Union[int, None] = _Unset, + decimal_places: Union[int, None] = _Unset, + examples: Optional[List[Any]] = None, + example: Annotated[ + Optional[Any], + deprecated( + "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, " + "although still supported. Use examples instead." 
+ ), + ] = _Unset, + openapi_examples: Optional[Dict[str, Example]] = None, + deprecated: Union[deprecated, str, bool, None] = None, + include_in_schema: bool = True, + json_schema_extra: Union[Dict[str, Any], None] = None, + **extra: Any, + ): + super().__init__( + default=default, + default_factory=default_factory, + annotation=annotation, + media_type=media_type, + alias=alias, + alias_priority=alias_priority, + validation_alias=validation_alias, + serialization_alias=serialization_alias, + title=title, + description=description, + gt=gt, + ge=ge, + lt=lt, + le=le, + min_length=min_length, + max_length=max_length, + pattern=pattern, + regex=regex, + discriminator=discriminator, + strict=strict, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + deprecated=deprecated, + example=example, + examples=examples, + openapi_examples=openapi_examples, + include_in_schema=include_in_schema, + json_schema_extra=json_schema_extra, + **extra, + ) + + +class Depends: + def __init__( + self, dependency: Optional[Callable[..., Any]] = None, *, use_cache: bool = True + ): + self.dependency = dependency + self.use_cache = use_cache + + def __repr__(self) -> str: + attr = getattr(self.dependency, "__name__", type(self.dependency).__name__) + cache = "" if self.use_cache else ", use_cache=False" + return f"{self.__class__.__name__}({attr}{cache})" + + +class Security(Depends): + def __init__( + self, + dependency: Optional[Callable[..., Any]] = None, + *, + scopes: Optional[Sequence[str]] = None, + use_cache: bool = True, + ): + super().__init__(dependency=dependency, use_cache=use_cache) + self.scopes = scopes or [] diff --git a/venv/Lib/site-packages/fastapi/py.typed b/venv/Lib/site-packages/fastapi/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/fastapi/requests.py b/venv/Lib/site-packages/fastapi/requests.py new file mode 100644 index 00000000..d16552c0 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/requests.py @@ -0,0 +1,2 @@ +from starlette.requests import HTTPConnection as HTTPConnection # noqa: F401 +from starlette.requests import Request as Request # noqa: F401 diff --git a/venv/Lib/site-packages/fastapi/responses.py b/venv/Lib/site-packages/fastapi/responses.py new file mode 100644 index 00000000..6c8db6f3 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/responses.py @@ -0,0 +1,48 @@ +from typing import Any + +from starlette.responses import FileResponse as FileResponse # noqa +from starlette.responses import HTMLResponse as HTMLResponse # noqa +from starlette.responses import JSONResponse as JSONResponse # noqa +from starlette.responses import PlainTextResponse as PlainTextResponse # noqa +from starlette.responses import RedirectResponse as RedirectResponse # noqa +from starlette.responses import Response as Response # noqa +from starlette.responses import StreamingResponse as StreamingResponse # noqa + +try: + import ujson +except ImportError: # pragma: nocover + ujson = None # type: ignore + + +try: + import orjson +except ImportError: # pragma: nocover + orjson = None # type: ignore + + +class UJSONResponse(JSONResponse): + """ + JSON response using the high-performance ujson library to serialize data to JSON. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/). 
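+
+    **Example**
+
+    A minimal sketch, assuming the optional `ujson` package is installed:
+
+    ```python
+    from fastapi import FastAPI
+    from fastapi.responses import UJSONResponse
+
+    app = FastAPI(default_response_class=UJSONResponse)
+
+
+    @app.get("/items/")
+    async def read_items():
+        return [{"item_id": "Foo"}]
+    ```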
+ """ + + def render(self, content: Any) -> bytes: + assert ujson is not None, "ujson must be installed to use UJSONResponse" + return ujson.dumps(content, ensure_ascii=False).encode("utf-8") + + +class ORJSONResponse(JSONResponse): + """ + JSON response using the high-performance orjson library to serialize data to JSON. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/). + """ + + def render(self, content: Any) -> bytes: + assert orjson is not None, "orjson must be installed to use ORJSONResponse" + return orjson.dumps( + content, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SERIALIZE_NUMPY + ) diff --git a/venv/Lib/site-packages/fastapi/routing.py b/venv/Lib/site-packages/fastapi/routing.py new file mode 100644 index 00000000..457481e3 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/routing.py @@ -0,0 +1,4439 @@ +import asyncio +import dataclasses +import email.message +import inspect +import json +from contextlib import AsyncExitStack, asynccontextmanager +from enum import Enum, IntEnum +from typing import ( + Any, + AsyncIterator, + Callable, + Coroutine, + Dict, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + Type, + Union, +) + +from fastapi import params +from fastapi._compat import ( + ModelField, + Undefined, + _get_model_config, + _model_dump, + _normalize_errors, + lenient_issubclass, +) +from fastapi.datastructures import Default, DefaultPlaceholder +from fastapi.dependencies.models import Dependant +from fastapi.dependencies.utils import ( + _should_embed_body_fields, + get_body_field, + get_dependant, + get_flat_dependant, + get_parameterless_sub_dependant, + get_typed_return_annotation, + solve_dependencies, +) +from fastapi.encoders import jsonable_encoder +from fastapi.exceptions import ( + FastAPIError, + RequestValidationError, + ResponseValidationError, + WebSocketRequestValidationError, +) +from fastapi.types import DecoratedCallable, IncEx +from fastapi.utils import ( + create_cloned_field, + create_model_field, + generate_unique_id, + get_value_or_default, + is_body_allowed_for_status_code, +) +from pydantic import BaseModel +from starlette import routing +from starlette.concurrency import run_in_threadpool +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.responses import JSONResponse, Response +from starlette.routing import ( + BaseRoute, + Match, + compile_path, + get_name, + request_response, + websocket_session, +) +from starlette.routing import Mount as Mount # noqa +from starlette.types import AppType, ASGIApp, Lifespan, Scope +from starlette.websockets import WebSocket +from typing_extensions import Annotated, Doc, deprecated + + +def _prepare_response_content( + res: Any, + *, + exclude_unset: bool, + exclude_defaults: bool = False, + exclude_none: bool = False, +) -> Any: + if isinstance(res, BaseModel): + read_with_orm_mode = getattr(_get_model_config(res), "read_with_orm_mode", None) + if read_with_orm_mode: + # Let from_orm extract the data from this model instead of converting + # it now to a dict. + # Otherwise, there's no way to extract lazy data that requires attribute + # access instead of dict iteration, e.g. lazy relationships. 
+ return res + return _model_dump( + res, + by_alias=True, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + elif isinstance(res, list): + return [ + _prepare_response_content( + item, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + for item in res + ] + elif isinstance(res, dict): + return { + k: _prepare_response_content( + v, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + for k, v in res.items() + } + elif dataclasses.is_dataclass(res): + return dataclasses.asdict(res) + return res + + +def _merge_lifespan_context( + original_context: Lifespan[Any], nested_context: Lifespan[Any] +) -> Lifespan[Any]: + @asynccontextmanager + async def merged_lifespan( + app: AppType, + ) -> AsyncIterator[Optional[Mapping[str, Any]]]: + async with original_context(app) as maybe_original_state: + async with nested_context(app) as maybe_nested_state: + if maybe_nested_state is None and maybe_original_state is None: + yield None # old ASGI compatibility + else: + yield {**(maybe_nested_state or {}), **(maybe_original_state or {})} + + return merged_lifespan # type: ignore[return-value] + + +async def serialize_response( + *, + field: Optional[ModelField] = None, + response_content: Any, + include: Optional[IncEx] = None, + exclude: Optional[IncEx] = None, + by_alias: bool = True, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + is_coroutine: bool = True, +) -> Any: + if field: + errors = [] + if not hasattr(field, "serialize"): + # pydantic v1 + response_content = _prepare_response_content( + response_content, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + if is_coroutine: + value, errors_ = field.validate(response_content, {}, loc=("response",)) + else: + value, errors_ = await run_in_threadpool( + field.validate, response_content, {}, loc=("response",) + ) + if isinstance(errors_, list): + errors.extend(errors_) + elif errors_: + errors.append(errors_) + if errors: + raise ResponseValidationError( + errors=_normalize_errors(errors), body=response_content + ) + + if hasattr(field, "serialize"): + return field.serialize( + value, + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + + return jsonable_encoder( + value, + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + else: + return jsonable_encoder(response_content) + + +async def run_endpoint_function( + *, dependant: Dependant, values: Dict[str, Any], is_coroutine: bool +) -> Any: + # Only called by get_request_handler. Has been split into its own function to + # facilitate profiling endpoints, since inner functions are harder to profile. 
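+    # Coroutine endpoints (async def) are awaited directly on the event loop,
+    # while plain def endpoints are dispatched to a threadpool via
+    # run_in_threadpool so they do not block the loop.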
+ assert dependant.call is not None, "dependant.call must be a function" + + if is_coroutine: + return await dependant.call(**values) + else: + return await run_in_threadpool(dependant.call, **values) + + +def get_request_handler( + dependant: Dependant, + body_field: Optional[ModelField] = None, + status_code: Optional[int] = None, + response_class: Union[Type[Response], DefaultPlaceholder] = Default(JSONResponse), + response_field: Optional[ModelField] = None, + response_model_include: Optional[IncEx] = None, + response_model_exclude: Optional[IncEx] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + dependency_overrides_provider: Optional[Any] = None, + embed_body_fields: bool = False, +) -> Callable[[Request], Coroutine[Any, Any, Response]]: + assert dependant.call is not None, "dependant.call must be a function" + is_coroutine = asyncio.iscoroutinefunction(dependant.call) + is_body_form = body_field and isinstance(body_field.field_info, params.Form) + if isinstance(response_class, DefaultPlaceholder): + actual_response_class: Type[Response] = response_class.value + else: + actual_response_class = response_class + + async def app(request: Request) -> Response: + response: Union[Response, None] = None + async with AsyncExitStack() as file_stack: + try: + body: Any = None + if body_field: + if is_body_form: + body = await request.form() + file_stack.push_async_callback(body.close) + else: + body_bytes = await request.body() + if body_bytes: + json_body: Any = Undefined + content_type_value = request.headers.get("content-type") + if not content_type_value: + json_body = await request.json() + else: + message = email.message.Message() + message["content-type"] = content_type_value + if message.get_content_maintype() == "application": + subtype = message.get_content_subtype() + if subtype == "json" or subtype.endswith("+json"): + json_body = await request.json() + if json_body != Undefined: + body = json_body + else: + body = body_bytes + except json.JSONDecodeError as e: + validation_error = RequestValidationError( + [ + { + "type": "json_invalid", + "loc": ("body", e.pos), + "msg": "JSON decode error", + "input": {}, + "ctx": {"error": e.msg}, + } + ], + body=e.doc, + ) + raise validation_error from e + except HTTPException: + # If a middleware raises an HTTPException, it should be raised again + raise + except Exception as e: + http_error = HTTPException( + status_code=400, detail="There was an error parsing the body" + ) + raise http_error from e + errors: List[Any] = [] + async with AsyncExitStack() as async_exit_stack: + solved_result = await solve_dependencies( + request=request, + dependant=dependant, + body=body, + dependency_overrides_provider=dependency_overrides_provider, + async_exit_stack=async_exit_stack, + embed_body_fields=embed_body_fields, + ) + errors = solved_result.errors + if not errors: + raw_response = await run_endpoint_function( + dependant=dependant, + values=solved_result.values, + is_coroutine=is_coroutine, + ) + if isinstance(raw_response, Response): + if raw_response.background is None: + raw_response.background = solved_result.background_tasks + response = raw_response + else: + response_args: Dict[str, Any] = { + "background": solved_result.background_tasks + } + # If status_code was set, use it, otherwise use the default from the + # response class, in the case of redirect it's 307 + current_status_code = ( + 
status_code + if status_code + else solved_result.response.status_code + ) + if current_status_code is not None: + response_args["status_code"] = current_status_code + if solved_result.response.status_code: + response_args["status_code"] = ( + solved_result.response.status_code + ) + content = await serialize_response( + field=response_field, + response_content=raw_response, + include=response_model_include, + exclude=response_model_exclude, + by_alias=response_model_by_alias, + exclude_unset=response_model_exclude_unset, + exclude_defaults=response_model_exclude_defaults, + exclude_none=response_model_exclude_none, + is_coroutine=is_coroutine, + ) + response = actual_response_class(content, **response_args) + if not is_body_allowed_for_status_code(response.status_code): + response.body = b"" + response.headers.raw.extend(solved_result.response.headers.raw) + if errors: + validation_error = RequestValidationError( + _normalize_errors(errors), body=body + ) + raise validation_error + if response is None: + raise FastAPIError( + "No response object was returned. There's a high chance that the " + "application code is raising an exception and a dependency with yield " + "has a block with a bare except, or a block with except Exception, " + "and is not raising the exception again. Read more about it in the " + "docs: https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-with-yield/#dependencies-with-yield-and-except" + ) + return response + + return app + + +def get_websocket_app( + dependant: Dependant, + dependency_overrides_provider: Optional[Any] = None, + embed_body_fields: bool = False, +) -> Callable[[WebSocket], Coroutine[Any, Any, Any]]: + async def app(websocket: WebSocket) -> None: + async with AsyncExitStack() as async_exit_stack: + # TODO: remove this scope later, after a few releases + # This scope fastapi_astack is no longer used by FastAPI, kept for + # compatibility, just in case + websocket.scope["fastapi_astack"] = async_exit_stack + solved_result = await solve_dependencies( + request=websocket, + dependant=dependant, + dependency_overrides_provider=dependency_overrides_provider, + async_exit_stack=async_exit_stack, + embed_body_fields=embed_body_fields, + ) + if solved_result.errors: + raise WebSocketRequestValidationError( + _normalize_errors(solved_result.errors) + ) + assert dependant.call is not None, "dependant.call must be a function" + await dependant.call(**solved_result.values) + + return app + + +class APIWebSocketRoute(routing.WebSocketRoute): + def __init__( + self, + path: str, + endpoint: Callable[..., Any], + *, + name: Optional[str] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + dependency_overrides_provider: Optional[Any] = None, + ) -> None: + self.path = path + self.endpoint = endpoint + self.name = get_name(endpoint) if name is None else name + self.dependencies = list(dependencies or []) + self.path_regex, self.path_format, self.param_convertors = compile_path(path) + self.dependant = get_dependant(path=self.path_format, call=self.endpoint) + for depends in self.dependencies[::-1]: + self.dependant.dependencies.insert( + 0, + get_parameterless_sub_dependant(depends=depends, path=self.path_format), + ) + self._flat_dependant = get_flat_dependant(self.dependant) + self._embed_body_fields = _should_embed_body_fields( + self._flat_dependant.body_params + ) + self.app = websocket_session( + get_websocket_app( + dependant=self.dependant, + dependency_overrides_provider=dependency_overrides_provider, + 
embed_body_fields=self._embed_body_fields, + ) + ) + + def matches(self, scope: Scope) -> Tuple[Match, Scope]: + match, child_scope = super().matches(scope) + if match != Match.NONE: + child_scope["route"] = self + return match, child_scope + + +class APIRoute(routing.Route): + def __init__( + self, + path: str, + endpoint: Callable[..., Any], + *, + response_model: Any = Default(None), + status_code: Optional[int] = None, + tags: Optional[List[Union[str, Enum]]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + name: Optional[str] = None, + methods: Optional[Union[Set[str], List[str]]] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[IncEx] = None, + response_model_exclude: Optional[IncEx] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Union[Type[Response], DefaultPlaceholder] = Default( + JSONResponse + ), + dependency_overrides_provider: Optional[Any] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + generate_unique_id_function: Union[ + Callable[["APIRoute"], str], DefaultPlaceholder + ] = Default(generate_unique_id), + ) -> None: + self.path = path + self.endpoint = endpoint + if isinstance(response_model, DefaultPlaceholder): + return_annotation = get_typed_return_annotation(endpoint) + if lenient_issubclass(return_annotation, Response): + response_model = None + else: + response_model = return_annotation + self.response_model = response_model + self.summary = summary + self.response_description = response_description + self.deprecated = deprecated + self.operation_id = operation_id + self.response_model_include = response_model_include + self.response_model_exclude = response_model_exclude + self.response_model_by_alias = response_model_by_alias + self.response_model_exclude_unset = response_model_exclude_unset + self.response_model_exclude_defaults = response_model_exclude_defaults + self.response_model_exclude_none = response_model_exclude_none + self.include_in_schema = include_in_schema + self.response_class = response_class + self.dependency_overrides_provider = dependency_overrides_provider + self.callbacks = callbacks + self.openapi_extra = openapi_extra + self.generate_unique_id_function = generate_unique_id_function + self.tags = tags or [] + self.responses = responses or {} + self.name = get_name(endpoint) if name is None else name + self.path_regex, self.path_format, self.param_convertors = compile_path(path) + if methods is None: + methods = ["GET"] + self.methods: Set[str] = {method.upper() for method in methods} + if isinstance(generate_unique_id_function, DefaultPlaceholder): + current_generate_unique_id: Callable[[APIRoute], str] = ( + generate_unique_id_function.value + ) + else: + current_generate_unique_id = generate_unique_id_function + self.unique_id = self.operation_id or current_generate_unique_id(self) + # normalize enums e.g. 
http.HTTPStatus + if isinstance(status_code, IntEnum): + status_code = int(status_code) + self.status_code = status_code + if self.response_model: + assert is_body_allowed_for_status_code(status_code), ( + f"Status code {status_code} must not have a response body" + ) + response_name = "Response_" + self.unique_id + self.response_field = create_model_field( + name=response_name, + type_=self.response_model, + mode="serialization", + ) + # Create a clone of the field, so that a Pydantic submodel is not returned + # as is just because it's an instance of a subclass of a more limited class + # e.g. UserInDB (containing hashed_password) could be a subclass of User + # that doesn't have the hashed_password. But because it's a subclass, it + # would pass the validation and be returned as is. + # By being a new field, no inheritance will be passed as is. A new model + # will always be created. + # TODO: remove when deprecating Pydantic v1 + self.secure_cloned_response_field: Optional[ModelField] = ( + create_cloned_field(self.response_field) + ) + else: + self.response_field = None # type: ignore + self.secure_cloned_response_field = None + self.dependencies = list(dependencies or []) + self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "") + # if a "form feed" character (page break) is found in the description text, + # truncate description text to the content preceding the first "form feed" + self.description = self.description.split("\f")[0].strip() + response_fields = {} + for additional_status_code, response in self.responses.items(): + assert isinstance(response, dict), "An additional response must be a dict" + model = response.get("model") + if model: + assert is_body_allowed_for_status_code(additional_status_code), ( + f"Status code {additional_status_code} must not have a response body" + ) + response_name = f"Response_{additional_status_code}_{self.unique_id}" + response_field = create_model_field( + name=response_name, type_=model, mode="serialization" + ) + response_fields[additional_status_code] = response_field + if response_fields: + self.response_fields: Dict[Union[int, str], ModelField] = response_fields + else: + self.response_fields = {} + + assert callable(endpoint), "An endpoint must be a callable" + self.dependant = get_dependant(path=self.path_format, call=self.endpoint) + for depends in self.dependencies[::-1]: + self.dependant.dependencies.insert( + 0, + get_parameterless_sub_dependant(depends=depends, path=self.path_format), + ) + self._flat_dependant = get_flat_dependant(self.dependant) + self._embed_body_fields = _should_embed_body_fields( + self._flat_dependant.body_params + ) + self.body_field = get_body_field( + flat_dependant=self._flat_dependant, + name=self.unique_id, + embed_body_fields=self._embed_body_fields, + ) + self.app = request_response(self.get_route_handler()) + + def get_route_handler(self) -> Callable[[Request], Coroutine[Any, Any, Response]]: + return get_request_handler( + dependant=self.dependant, + body_field=self.body_field, + status_code=self.status_code, + response_class=self.response_class, + response_field=self.secure_cloned_response_field, + response_model_include=self.response_model_include, + response_model_exclude=self.response_model_exclude, + response_model_by_alias=self.response_model_by_alias, + response_model_exclude_unset=self.response_model_exclude_unset, + response_model_exclude_defaults=self.response_model_exclude_defaults, + response_model_exclude_none=self.response_model_exclude_none, + 
dependency_overrides_provider=self.dependency_overrides_provider, + embed_body_fields=self._embed_body_fields, + ) + + def matches(self, scope: Scope) -> Tuple[Match, Scope]: + match, child_scope = super().matches(scope) + if match != Match.NONE: + child_scope["route"] = self + return match, child_scope + + +class APIRouter(routing.Router): + """ + `APIRouter` class, used to group *path operations*, for example to structure + an app in multiple files. It would then be included in the `FastAPI` app, or + in another `APIRouter` (ultimately included in the app). + + Read more about it in the + [FastAPI docs for Bigger Applications - Multiple Files](https://fastapi.tiangolo.com/tutorial/bigger-applications/). + + ## Example + + ```python + from fastapi import APIRouter, FastAPI + + app = FastAPI() + router = APIRouter() + + + @router.get("/users/", tags=["users"]) + async def read_users(): + return [{"username": "Rick"}, {"username": "Morty"}] + + + app.include_router(router) + ``` + """ + + def __init__( + self, + *, + prefix: Annotated[str, Doc("An optional path prefix for the router.")] = "", + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to all the *path operations* in this + router. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[params.Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to all the + *path operations* in this router. + + Read more about it in the + [FastAPI docs for Bigger Applications - Multiple Files](https://fastapi.tiangolo.com/tutorial/bigger-applications/#include-an-apirouter-with-a-custom-prefix-tags-responses-and-dependencies). + """ + ), + ] = None, + default_response_class: Annotated[ + Type[Response], + Doc( + """ + The default response class to be used. + + Read more in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#default-response-class). + """ + ), + ] = Default(JSONResponse), + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses to be shown in OpenAPI. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Additional Responses in OpenAPI](https://fastapi.tiangolo.com/advanced/additional-responses/). + + And in the + [FastAPI docs for Bigger Applications](https://fastapi.tiangolo.com/tutorial/bigger-applications/#include-an-apirouter-with-a-custom-prefix-tags-responses-and-dependencies). + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + OpenAPI callbacks that should apply to all *path operations* in this + router. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + routes: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + **Note**: you probably shouldn't use this parameter, it is inherited + from Starlette and supported for compatibility. + + --- + + A list of routes to serve incoming HTTP and WebSocket requests. 
+ """ + ), + deprecated( + """ + You normally wouldn't use this parameter with FastAPI, it is inherited + from Starlette and supported for compatibility. + + In FastAPI, you normally would use the *path operation methods*, + like `router.get()`, `router.post()`, etc. + """ + ), + ] = None, + redirect_slashes: Annotated[ + bool, + Doc( + """ + Whether to detect and redirect slashes in URLs when the client doesn't + use the same format. + """ + ), + ] = True, + default: Annotated[ + Optional[ASGIApp], + Doc( + """ + Default function handler for this router. Used to handle + 404 Not Found errors. + """ + ), + ] = None, + dependency_overrides_provider: Annotated[ + Optional[Any], + Doc( + """ + Only used internally by FastAPI to handle dependency overrides. + + You shouldn't need to use it. It normally points to the `FastAPI` app + object. + """ + ), + ] = None, + route_class: Annotated[ + Type[APIRoute], + Doc( + """ + Custom route (*path operation*) class to be used by this router. + + Read more about it in the + [FastAPI docs for Custom Request and APIRoute class](https://fastapi.tiangolo.com/how-to/custom-request-and-route/#custom-apiroute-class-in-a-router). + """ + ), + ] = APIRoute, + on_startup: Annotated[ + Optional[Sequence[Callable[[], Any]]], + Doc( + """ + A list of startup event handler functions. + + You should instead use the `lifespan` handlers. + + Read more in the [FastAPI docs for `lifespan`](https://fastapi.tiangolo.com/advanced/events/). + """ + ), + ] = None, + on_shutdown: Annotated[ + Optional[Sequence[Callable[[], Any]]], + Doc( + """ + A list of shutdown event handler functions. + + You should instead use the `lifespan` handlers. + + Read more in the + [FastAPI docs for `lifespan`](https://fastapi.tiangolo.com/advanced/events/). + """ + ), + ] = None, + # the generic to Lifespan[AppType] is the type of the top level application + # which the router cannot know statically, so we use typing.Any + lifespan: Annotated[ + Optional[Lifespan[Any]], + Doc( + """ + A `Lifespan` context manager handler. This replaces `startup` and + `shutdown` functions with a single context manager. + + Read more in the + [FastAPI docs for `lifespan`](https://fastapi.tiangolo.com/advanced/events/). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark all *path operations* in this router as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + To include (or not) all the *path operations* in this router in the + generated OpenAPI. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + generate_unique_id_function: Annotated[ + Callable[[APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). 
+ """ + ), + ] = Default(generate_unique_id), + ) -> None: + super().__init__( + routes=routes, + redirect_slashes=redirect_slashes, + default=default, + on_startup=on_startup, + on_shutdown=on_shutdown, + lifespan=lifespan, + ) + if prefix: + assert prefix.startswith("/"), "A path prefix must start with '/'" + assert not prefix.endswith("/"), ( + "A path prefix must not end with '/', as the routes will start with '/'" + ) + self.prefix = prefix + self.tags: List[Union[str, Enum]] = tags or [] + self.dependencies = list(dependencies or []) + self.deprecated = deprecated + self.include_in_schema = include_in_schema + self.responses = responses or {} + self.callbacks = callbacks or [] + self.dependency_overrides_provider = dependency_overrides_provider + self.route_class = route_class + self.default_response_class = default_response_class + self.generate_unique_id_function = generate_unique_id_function + + def route( + self, + path: str, + methods: Optional[List[str]] = None, + name: Optional[str] = None, + include_in_schema: bool = True, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.add_route( + path, + func, + methods=methods, + name=name, + include_in_schema=include_in_schema, + ) + return func + + return decorator + + def add_api_route( + self, + path: str, + endpoint: Callable[..., Any], + *, + response_model: Any = Default(None), + status_code: Optional[int] = None, + tags: Optional[List[Union[str, Enum]]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + methods: Optional[Union[Set[str], List[str]]] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[IncEx] = None, + response_model_exclude: Optional[IncEx] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Union[Type[Response], DefaultPlaceholder] = Default( + JSONResponse + ), + name: Optional[str] = None, + route_class_override: Optional[Type[APIRoute]] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + generate_unique_id_function: Union[ + Callable[[APIRoute], str], DefaultPlaceholder + ] = Default(generate_unique_id), + ) -> None: + route_class = route_class_override or self.route_class + responses = responses or {} + combined_responses = {**self.responses, **responses} + current_response_class = get_value_or_default( + response_class, self.default_response_class + ) + current_tags = self.tags.copy() + if tags: + current_tags.extend(tags) + current_dependencies = self.dependencies.copy() + if dependencies: + current_dependencies.extend(dependencies) + current_callbacks = self.callbacks.copy() + if callbacks: + current_callbacks.extend(callbacks) + current_generate_unique_id = get_value_or_default( + generate_unique_id_function, self.generate_unique_id_function + ) + route = route_class( + self.prefix + path, + endpoint=endpoint, + response_model=response_model, + status_code=status_code, + tags=current_tags, + dependencies=current_dependencies, + summary=summary, + description=description, + response_description=response_description, + 
responses=combined_responses, + deprecated=deprecated or self.deprecated, + methods=methods, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema and self.include_in_schema, + response_class=current_response_class, + name=name, + dependency_overrides_provider=self.dependency_overrides_provider, + callbacks=current_callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=current_generate_unique_id, + ) + self.routes.append(route) + + def api_route( + self, + path: str, + *, + response_model: Any = Default(None), + status_code: Optional[int] = None, + tags: Optional[List[Union[str, Enum]]] = None, + dependencies: Optional[Sequence[params.Depends]] = None, + summary: Optional[str] = None, + description: Optional[str] = None, + response_description: str = "Successful Response", + responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None, + deprecated: Optional[bool] = None, + methods: Optional[List[str]] = None, + operation_id: Optional[str] = None, + response_model_include: Optional[IncEx] = None, + response_model_exclude: Optional[IncEx] = None, + response_model_by_alias: bool = True, + response_model_exclude_unset: bool = False, + response_model_exclude_defaults: bool = False, + response_model_exclude_none: bool = False, + include_in_schema: bool = True, + response_class: Type[Response] = Default(JSONResponse), + name: Optional[str] = None, + callbacks: Optional[List[BaseRoute]] = None, + openapi_extra: Optional[Dict[str, Any]] = None, + generate_unique_id_function: Callable[[APIRoute], str] = Default( + generate_unique_id + ), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.add_api_route( + path, + func, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=methods, + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + return func + + return decorator + + def add_api_websocket_route( + self, + path: str, + endpoint: Callable[..., Any], + name: Optional[str] = None, + *, + dependencies: Optional[Sequence[params.Depends]] = None, + ) -> None: + current_dependencies = self.dependencies.copy() + if dependencies: + current_dependencies.extend(dependencies) + + route = APIWebSocketRoute( + self.prefix + path, + endpoint=endpoint, + name=name, + dependencies=current_dependencies, + dependency_overrides_provider=self.dependency_overrides_provider, + ) + self.routes.append(route) + + def websocket( + self, + path: Annotated[ + str, + Doc( + """ + WebSocket 
path.
+                """
+            ),
+        ],
+        name: Annotated[
+            Optional[str],
+            Doc(
+                """
+                A name for the WebSocket. Only used internally.
+                """
+            ),
+        ] = None,
+        *,
+        dependencies: Annotated[
+            Optional[Sequence[params.Depends]],
+            Doc(
+                """
+                A list of dependencies (using `Depends()`) to be used for this
+                WebSocket.
+
+                Read more about it in the
+                [FastAPI docs for WebSockets](https://fastapi.tiangolo.com/advanced/websockets/).
+                """
+            ),
+        ] = None,
+    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
+        """
+        Decorate a WebSocket function.
+
+        Read more about it in the
+        [FastAPI docs for WebSockets](https://fastapi.tiangolo.com/advanced/websockets/).
+
+        ## Example
+
+        ```python
+        from fastapi import APIRouter, FastAPI, WebSocket
+
+        app = FastAPI()
+        router = APIRouter()
+
+        @router.websocket("/ws")
+        async def websocket_endpoint(websocket: WebSocket):
+            await websocket.accept()
+            while True:
+                data = await websocket.receive_text()
+                await websocket.send_text(f"Message text was: {data}")
+
+        app.include_router(router)
+        ```
+        """
+
+        def decorator(func: DecoratedCallable) -> DecoratedCallable:
+            self.add_api_websocket_route(
+                path, func, name=name, dependencies=dependencies
+            )
+            return func
+
+        return decorator
+
+    def websocket_route(
+        self, path: str, name: Union[str, None] = None
+    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
+        def decorator(func: DecoratedCallable) -> DecoratedCallable:
+            self.add_websocket_route(path, func, name=name)
+            return func
+
+        return decorator
+
+    def include_router(
+        self,
+        router: Annotated["APIRouter", Doc("The `APIRouter` to include.")],
+        *,
+        prefix: Annotated[str, Doc("An optional path prefix for the router.")] = "",
+        tags: Annotated[
+            Optional[List[Union[str, Enum]]],
+            Doc(
+                """
+                A list of tags to be applied to all the *path operations* in this
+                router.
+
+                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
+
+                Read more about it in the
+                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
+                """
+            ),
+        ] = None,
+        dependencies: Annotated[
+            Optional[Sequence[params.Depends]],
+            Doc(
+                """
+                A list of dependencies (using `Depends()`) to be applied to all the
+                *path operations* in this router.
+
+                Read more about it in the
+                [FastAPI docs for Bigger Applications - Multiple Files](https://fastapi.tiangolo.com/tutorial/bigger-applications/#include-an-apirouter-with-a-custom-prefix-tags-responses-and-dependencies).
+                """
+            ),
+        ] = None,
+        default_response_class: Annotated[
+            Type[Response],
+            Doc(
+                """
+                The default response class to be used.
+
+                Read more in the
+                [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#default-response-class).
+                """
+            ),
+        ] = Default(JSONResponse),
+        responses: Annotated[
+            Optional[Dict[Union[int, str], Dict[str, Any]]],
+            Doc(
+                """
+                Additional responses to be shown in OpenAPI.
+
+                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
+
+                Read more about it in the
+                [FastAPI docs for Additional Responses in OpenAPI](https://fastapi.tiangolo.com/advanced/additional-responses/).
+
+                And in the
+                [FastAPI docs for Bigger Applications](https://fastapi.tiangolo.com/tutorial/bigger-applications/#include-an-apirouter-with-a-custom-prefix-tags-responses-and-dependencies).
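+
+                For example (the status code and description here are
+                illustrative):
+
+                ```python
+                responses={404: {"description": "Not found"}}
+                ```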
+ """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + OpenAPI callbacks that should apply to all *path operations* in this + router. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark all *path operations* in this router as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include (or not) all the *path operations* in this router in the + generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = True, + generate_unique_id_function: Annotated[ + Callable[[APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> None: + """ + Include another `APIRouter` in the same current `APIRouter`. + + Read more about it in the + [FastAPI docs for Bigger Applications](https://fastapi.tiangolo.com/tutorial/bigger-applications/). + + ## Example + + ```python + from fastapi import APIRouter, FastAPI + + app = FastAPI() + internal_router = APIRouter() + users_router = APIRouter() + + @users_router.get("/users/") + def read_users(): + return [{"name": "Rick"}, {"name": "Morty"}] + + internal_router.include_router(users_router) + app.include_router(internal_router) + ``` + """ + if prefix: + assert prefix.startswith("/"), "A path prefix must start with '/'" + assert not prefix.endswith("/"), ( + "A path prefix must not end with '/', as the routes will start with '/'" + ) + else: + for r in router.routes: + path = getattr(r, "path") # noqa: B009 + name = getattr(r, "name", "unknown") + if path is not None and not path: + raise FastAPIError( + f"Prefix and path cannot be both empty (path operation: {name})" + ) + if responses is None: + responses = {} + for route in router.routes: + if isinstance(route, APIRoute): + combined_responses = {**responses, **route.responses} + use_response_class = get_value_or_default( + route.response_class, + router.default_response_class, + default_response_class, + self.default_response_class, + ) + current_tags = [] + if tags: + current_tags.extend(tags) + if route.tags: + current_tags.extend(route.tags) + current_dependencies: List[params.Depends] = [] + if dependencies: + current_dependencies.extend(dependencies) + if route.dependencies: + current_dependencies.extend(route.dependencies) + current_callbacks = [] + if callbacks: + current_callbacks.extend(callbacks) + if route.callbacks: + current_callbacks.extend(route.callbacks) + current_generate_unique_id = get_value_or_default( + route.generate_unique_id_function, + router.generate_unique_id_function, + generate_unique_id_function, + self.generate_unique_id_function, + ) + self.add_api_route( + prefix + route.path, + route.endpoint, + 
response_model=route.response_model, + status_code=route.status_code, + tags=current_tags, + dependencies=current_dependencies, + summary=route.summary, + description=route.description, + response_description=route.response_description, + responses=combined_responses, + deprecated=route.deprecated or deprecated or self.deprecated, + methods=route.methods, + operation_id=route.operation_id, + response_model_include=route.response_model_include, + response_model_exclude=route.response_model_exclude, + response_model_by_alias=route.response_model_by_alias, + response_model_exclude_unset=route.response_model_exclude_unset, + response_model_exclude_defaults=route.response_model_exclude_defaults, + response_model_exclude_none=route.response_model_exclude_none, + include_in_schema=route.include_in_schema + and self.include_in_schema + and include_in_schema, + response_class=use_response_class, + name=route.name, + route_class_override=type(route), + callbacks=current_callbacks, + openapi_extra=route.openapi_extra, + generate_unique_id_function=current_generate_unique_id, + ) + elif isinstance(route, routing.Route): + methods = list(route.methods or []) + self.add_route( + prefix + route.path, + route.endpoint, + methods=methods, + include_in_schema=route.include_in_schema, + name=route.name, + ) + elif isinstance(route, APIWebSocketRoute): + current_dependencies = [] + if dependencies: + current_dependencies.extend(dependencies) + if route.dependencies: + current_dependencies.extend(route.dependencies) + self.add_api_websocket_route( + prefix + route.path, + route.endpoint, + dependencies=current_dependencies, + name=route.name, + ) + elif isinstance(route, routing.WebSocketRoute): + self.add_websocket_route( + prefix + route.path, route.endpoint, name=route.name + ) + for handler in router.on_startup: + self.add_event_handler("startup", handler) + for handler in router.on_shutdown: + self.add_event_handler("shutdown", handler) + self.lifespan_context = _merge_lifespan_context( + self.lifespan_context, + router.lifespan_context, + ) + + def get( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). 
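+
+                A minimal sketch of the filtering behavior (the `UserOut` model
+                and its fields are illustrative):
+
+                ```python
+                from pydantic import BaseModel
+
+                class UserOut(BaseModel):
+                    username: str
+
+                @router.get("/me", response_model=UserOut)
+                def read_me():
+                    # "password" is not in UserOut, so it is filtered out
+                    return {"username": "rick", "password": "secret"}
+                ```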
+ + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[params.Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. 
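+
+                For example, `response_model_include={"name", "price"}` would
+                limit the response to only those two fields (the field names
+                here are illustrative).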
+ + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. 
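+
+                For example, `response_class=HTMLResponse` (importable from
+                `fastapi.responses`) would send the returned content as
+                `text/html` instead of JSON.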
+ + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP GET operation. + + ## Example + + ```python + from fastapi import APIRouter, FastAPI + + app = FastAPI() + router = APIRouter() + + @router.get("/items/") + def read_items(): + return [{"name": "Empanada"}, {"name": "Arepa"}] + + app.include_router(router) + ``` + """ + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["GET"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def put( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). 
+ * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[params.Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. 
visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. 
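+
+                For example, with `response_model_exclude_none=True`, a field
+                whose value is `None` is dropped from the returned JSON
+                entirely.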
+ + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP PUT operation. 
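+
+        As a sketch of the `response_model` filtering described above (the
+        `UserIn`/`UserOut` models here are illustrative, not part of FastAPI):
+
+        ```python
+        from fastapi import APIRouter
+        from pydantic import BaseModel
+
+        router = APIRouter()
+
+        class UserIn(BaseModel):
+            name: str
+            password: str
+
+        class UserOut(BaseModel):
+            name: str
+
+        @router.put("/users/{user_id}", response_model=UserOut)
+        def replace_user(user_id: str, user: UserIn):
+            # `password` is dropped from the response because `UserOut`
+            # does not declare that field.
+            return user
+        ```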
+ + ## Example + + ```python + from fastapi import APIRouter, FastAPI + from pydantic import BaseModel + + class Item(BaseModel): + name: str + description: str | None = None + + app = FastAPI() + router = APIRouter() + + @router.put("/items/{item_id}") + def replace_item(item_id: str, item: Item): + return {"message": "Item replaced", "id": item_id} + + app.include_router(router) + ``` + """ + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["PUT"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def post( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). 
+ """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[params.Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. 
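+
+                For instance (an illustrative sketch): a field declared as
+                `item_id: str = Field(alias="itemId")` appears as `"itemId"`
+                in the response while this is `True` (the default), and as
+                `"item_id"` if you set it to `False`.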
+ + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). 
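+
+                For instance (a sketch following the pattern in those docs):
+                declare a separate `callback_router = APIRouter()` with its
+                own *path operations* and pass
+                `callbacks=callback_router.routes` here.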
+ """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP POST operation. + + ## Example + + ```python + from fastapi import APIRouter, FastAPI + from pydantic import BaseModel + + class Item(BaseModel): + name: str + description: str | None = None + + app = FastAPI() + router = APIRouter() + + @router.post("/items/") + def create_item(item: Item): + return {"message": "Item created"} + + app.include_router(router) + ``` + """ + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["POST"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def delete( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. 
But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[params.Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). 
+ """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). 
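+
+                For example (illustrative), an internal endpoint can be hidden
+                from `/docs` with
+                `@router.delete("/internal/cleanup", include_in_schema=False)`.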
+ """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP DELETE operation. + + ## Example + + ```python + from fastapi import APIRouter, FastAPI + + app = FastAPI() + router = APIRouter() + + @router.delete("/items/{item_id}") + def delete_item(item_id: str): + return {"message": "Item deleted"} + + app.include_router(router) + ``` + """ + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["DELETE"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def options( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. 
So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[params.Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. 
+ + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. 
+ + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP OPTIONS operation. 
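+
+        OPTIONS is typically used to let clients discover which methods a
+        resource supports (browsers also use it for CORS preflight). A minimal
+        sketch that advertises the allowed methods explicitly (the handler
+        name and header value are illustrative; see also the example below):
+
+        ```python
+        from fastapi import APIRouter, Response
+
+        router = APIRouter()
+
+        @router.options("/items/")
+        def items_options(response: Response):
+            # Advertise the methods this resource supports.
+            response.headers["Allow"] = "GET, POST, OPTIONS"
+            return {}
+        ```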
+ + ## Example + + ```python + from fastapi import APIRouter, FastAPI + + app = FastAPI() + router = APIRouter() + + @router.options("/items/") + def get_item_options(): + return {"additions": ["Aji", "Guacamole"]} + + app.include_router(router) + ``` + """ + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["OPTIONS"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def head( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. + + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). 
+ """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[params.Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. 
+ + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). 
+                """
+            ),
+        ] = None,
+        openapi_extra: Annotated[
+            Optional[Dict[str, Any]],
+            Doc(
+                """
+                Extra metadata to be included in the OpenAPI schema for this *path
+                operation*.
+
+                Read more about it in the
+                [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema).
+                """
+            ),
+        ] = None,
+        generate_unique_id_function: Annotated[
+            Callable[[APIRoute], str],
+            Doc(
+                """
+                Customize the function used to generate unique IDs for the *path
+                operations* shown in the generated OpenAPI.
+
+                This is particularly useful when automatically generating clients or
+                SDKs for your API.
+
+                Read more about it in the
+                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
+                """
+            ),
+        ] = Default(generate_unique_id),
+    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
+        """
+        Add a *path operation* using an HTTP HEAD operation.
+
+        ## Example
+
+        ```python
+        from fastapi import APIRouter, FastAPI, Response
+
+        app = FastAPI()
+        router = APIRouter()
+
+        @router.head("/items/", status_code=204)
+        def get_items_headers(response: Response):
+            # A HEAD response carries no body; only headers are sent.
+            response.headers["X-Cat-Dog"] = "Alone in the world"
+
+        app.include_router(router)
+        ```
+        """
+        return self.api_route(
+            path=path,
+            response_model=response_model,
+            status_code=status_code,
+            tags=tags,
+            dependencies=dependencies,
+            summary=summary,
+            description=description,
+            response_description=response_description,
+            responses=responses,
+            deprecated=deprecated,
+            methods=["HEAD"],
+            operation_id=operation_id,
+            response_model_include=response_model_include,
+            response_model_exclude=response_model_exclude,
+            response_model_by_alias=response_model_by_alias,
+            response_model_exclude_unset=response_model_exclude_unset,
+            response_model_exclude_defaults=response_model_exclude_defaults,
+            response_model_exclude_none=response_model_exclude_none,
+            include_in_schema=include_in_schema,
+            response_class=response_class,
+            name=name,
+            callbacks=callbacks,
+            openapi_extra=openapi_extra,
+            generate_unique_id_function=generate_unique_id_function,
+        )
+
+    def patch(
+        self,
+        path: Annotated[
+            str,
+            Doc(
+                """
+                The URL path to be used for this *path operation*.
+
+                For example, in `http://example.com/items`, the path is `/items`.
+                """
+            ),
+        ],
+        *,
+        response_model: Annotated[
+            Any,
+            Doc(
+                """
+                The type to use for the response.
+
+                It could be any valid Pydantic *field* type. So, it doesn't have to
+                be a Pydantic model, it could be other things, like a `list`, `dict`,
+                etc.
+
+                It will be used for:
+
+                * Documentation: the generated OpenAPI (and the UI at `/docs`) will
+                show it as the response (JSON Schema).
+                * Serialization: you could return an arbitrary object and the
+                `response_model` would be used to serialize that object into the
+                corresponding JSON.
+                * Filtering: the JSON sent to the client will only contain the data
+                (fields) defined in the `response_model`. If you returned an object
+                that contains an attribute `password` but the `response_model` does
+                not include that field, the JSON sent to the client would not have
+                that `password`.
+                * Validation: whatever you return will be serialized with the
+                `response_model`, converting any data as necessary to generate the
+                corresponding JSON.
But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[params.Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). 
+ """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). 
+ """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP PATCH operation. + + ## Example + + ```python + from fastapi import APIRouter, FastAPI + from pydantic import BaseModel + + class Item(BaseModel): + name: str + description: str | None = None + + app = FastAPI() + router = APIRouter() + + @router.patch("/items/") + def update_item(item: Item): + return {"message": "Item updated in place"} + + app.include_router(router) + ``` + """ + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["PATCH"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + def trace( + self, + path: Annotated[ + str, + Doc( + """ + The URL path to be used for this *path operation*. + + For example, in `http://example.com/items`, the path is `/items`. + """ + ), + ], + *, + response_model: Annotated[ + Any, + Doc( + """ + The type to use for the response. 
+ + It could be any valid Pydantic *field* type. So, it doesn't have to + be a Pydantic model, it could be other things, like a `list`, `dict`, + etc. + + It will be used for: + + * Documentation: the generated OpenAPI (and the UI at `/docs`) will + show it as the response (JSON Schema). + * Serialization: you could return an arbitrary object and the + `response_model` would be used to serialize that object into the + corresponding JSON. + * Filtering: the JSON sent to the client will only contain the data + (fields) defined in the `response_model`. If you returned an object + that contains an attribute `password` but the `response_model` does + not include that field, the JSON sent to the client would not have + that `password`. + * Validation: whatever you return will be serialized with the + `response_model`, converting any data as necessary to generate the + corresponding JSON. But if the data in the object returned is not + valid, that would mean a violation of the contract with the client, + so it's an error from the API developer. So, FastAPI will raise an + error and return a 500 error code (Internal Server Error). + + Read more about it in the + [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/). + """ + ), + ] = Default(None), + status_code: Annotated[ + Optional[int], + Doc( + """ + The default status code to be used for the response. + + You could override the status code by returning a response directly. + + Read more about it in the + [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/). + """ + ), + ] = None, + tags: Annotated[ + Optional[List[Union[str, Enum]]], + Doc( + """ + A list of tags to be applied to the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags). + """ + ), + ] = None, + dependencies: Annotated[ + Optional[Sequence[params.Depends]], + Doc( + """ + A list of dependencies (using `Depends()`) to be applied to the + *path operation*. + + Read more about it in the + [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/). + """ + ), + ] = None, + summary: Annotated[ + Optional[str], + Doc( + """ + A summary for the *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + A description for the *path operation*. + + If not provided, it will be extracted automatically from the docstring + of the *path operation function*. + + It can contain Markdown. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/). + """ + ), + ] = None, + response_description: Annotated[ + str, + Doc( + """ + The description for the default response. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). 
+ """ + ), + ] = "Successful Response", + responses: Annotated[ + Optional[Dict[Union[int, str], Dict[str, Any]]], + Doc( + """ + Additional responses that could be returned by this *path operation*. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + deprecated: Annotated[ + Optional[bool], + Doc( + """ + Mark this *path operation* as deprecated. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + operation_id: Annotated[ + Optional[str], + Doc( + """ + Custom operation ID to be used by this *path operation*. + + By default, it is generated automatically. + + If you provide a custom operation ID, you need to make sure it is + unique for the whole API. + + You can customize the + operation ID generation with the parameter + `generate_unique_id_function` in the `FastAPI` class. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = None, + response_model_include: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to include only certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_exclude: Annotated[ + Optional[IncEx], + Doc( + """ + Configuration passed to Pydantic to exclude certain fields in the + response data. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = None, + response_model_by_alias: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response model + should be serialized by alias when an alias is used. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude). + """ + ), + ] = True, + response_model_exclude_unset: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that were not set and + have their default values. This is different from + `response_model_exclude_defaults` in that if the fields are set, + they will be included in the response, even if the value is the same + as the default. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). + """ + ), + ] = False, + response_model_exclude_defaults: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data + should have all the fields, including the ones that have the same value + as the default. This is different from `response_model_exclude_unset` + in that if the fields are set but contain the same default values, + they will be excluded from the response. + + When `True`, default values are omitted from the response. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter). 
+ """ + ), + ] = False, + response_model_exclude_none: Annotated[ + bool, + Doc( + """ + Configuration passed to Pydantic to define if the response data should + exclude fields set to `None`. + + This is much simpler (less smart) than `response_model_exclude_unset` + and `response_model_exclude_defaults`. You probably want to use one of + those two instead of this one, as those allow returning `None` values + when it makes sense. + + Read more about it in the + [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none). + """ + ), + ] = False, + include_in_schema: Annotated[ + bool, + Doc( + """ + Include this *path operation* in the generated OpenAPI schema. + + This affects the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-from-openapi). + """ + ), + ] = True, + response_class: Annotated[ + Type[Response], + Doc( + """ + Response class to be used for this *path operation*. + + This will not be used if you return a response directly. + + Read more about it in the + [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse). + """ + ), + ] = Default(JSONResponse), + name: Annotated[ + Optional[str], + Doc( + """ + Name for this *path operation*. Only used internally. + """ + ), + ] = None, + callbacks: Annotated[ + Optional[List[BaseRoute]], + Doc( + """ + List of *path operations* that will be used as OpenAPI callbacks. + + This is only for OpenAPI documentation, the callbacks won't be used + directly. + + It will be added to the generated OpenAPI (e.g. visible at `/docs`). + + Read more about it in the + [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/). + """ + ), + ] = None, + openapi_extra: Annotated[ + Optional[Dict[str, Any]], + Doc( + """ + Extra metadata to be included in the OpenAPI schema for this *path + operation*. + + Read more about it in the + [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema). + """ + ), + ] = None, + generate_unique_id_function: Annotated[ + Callable[[APIRoute], str], + Doc( + """ + Customize the function used to generate unique IDs for the *path + operations* shown in the generated OpenAPI. + + This is particularly useful when automatically generating clients or + SDKs for your API. + + Read more about it in the + [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function). + """ + ), + ] = Default(generate_unique_id), + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add a *path operation* using an HTTP TRACE operation. 
+ + ## Example + + ```python + from fastapi import APIRouter, FastAPI + from pydantic import BaseModel + + class Item(BaseModel): + name: str + description: str | None = None + + app = FastAPI() + router = APIRouter() + + @router.trace("/items/{item_id}") + def trace_item(item_id: str): + return None + + app.include_router(router) + ``` + """ + return self.api_route( + path=path, + response_model=response_model, + status_code=status_code, + tags=tags, + dependencies=dependencies, + summary=summary, + description=description, + response_description=response_description, + responses=responses, + deprecated=deprecated, + methods=["TRACE"], + operation_id=operation_id, + response_model_include=response_model_include, + response_model_exclude=response_model_exclude, + response_model_by_alias=response_model_by_alias, + response_model_exclude_unset=response_model_exclude_unset, + response_model_exclude_defaults=response_model_exclude_defaults, + response_model_exclude_none=response_model_exclude_none, + include_in_schema=include_in_schema, + response_class=response_class, + name=name, + callbacks=callbacks, + openapi_extra=openapi_extra, + generate_unique_id_function=generate_unique_id_function, + ) + + @deprecated( + """ + on_event is deprecated, use lifespan event handlers instead. + + Read more about it in the + [FastAPI docs for Lifespan Events](https://fastapi.tiangolo.com/advanced/events/). + """ + ) + def on_event( + self, + event_type: Annotated[ + str, + Doc( + """ + The type of event. `startup` or `shutdown`. + """ + ), + ], + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + """ + Add an event handler for the router. + + `on_event` is deprecated, use `lifespan` event handlers instead. + + Read more about it in the + [FastAPI docs for Lifespan Events](https://fastapi.tiangolo.com/advanced/events/#alternative-events-deprecated). 
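+
+        A sketch of the recommended `lifespan` replacement:
+
+        ```python
+        from contextlib import asynccontextmanager
+
+        from fastapi import FastAPI
+
+        @asynccontextmanager
+        async def lifespan(app: FastAPI):
+            # startup logic runs before the `yield`
+            yield
+            # shutdown logic runs after the `yield`
+
+        app = FastAPI(lifespan=lifespan)
+        ```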
+ """ + + def decorator(func: DecoratedCallable) -> DecoratedCallable: + self.add_event_handler(event_type, func) + return func + + return decorator diff --git a/venv/Lib/site-packages/fastapi/security/__init__.py b/venv/Lib/site-packages/fastapi/security/__init__.py new file mode 100644 index 00000000..3aa6bf21 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/security/__init__.py @@ -0,0 +1,15 @@ +from .api_key import APIKeyCookie as APIKeyCookie +from .api_key import APIKeyHeader as APIKeyHeader +from .api_key import APIKeyQuery as APIKeyQuery +from .http import HTTPAuthorizationCredentials as HTTPAuthorizationCredentials +from .http import HTTPBasic as HTTPBasic +from .http import HTTPBasicCredentials as HTTPBasicCredentials +from .http import HTTPBearer as HTTPBearer +from .http import HTTPDigest as HTTPDigest +from .oauth2 import OAuth2 as OAuth2 +from .oauth2 import OAuth2AuthorizationCodeBearer as OAuth2AuthorizationCodeBearer +from .oauth2 import OAuth2PasswordBearer as OAuth2PasswordBearer +from .oauth2 import OAuth2PasswordRequestForm as OAuth2PasswordRequestForm +from .oauth2 import OAuth2PasswordRequestFormStrict as OAuth2PasswordRequestFormStrict +from .oauth2 import SecurityScopes as SecurityScopes +from .open_id_connect_url import OpenIdConnect as OpenIdConnect diff --git a/venv/Lib/site-packages/fastapi/security/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/fastapi/security/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..fa8be1e3 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/security/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/security/__pycache__/api_key.cpython-312.pyc b/venv/Lib/site-packages/fastapi/security/__pycache__/api_key.cpython-312.pyc new file mode 100644 index 00000000..6368ac43 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/security/__pycache__/api_key.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/security/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/fastapi/security/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..cf17652c Binary files /dev/null and b/venv/Lib/site-packages/fastapi/security/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/security/__pycache__/http.cpython-312.pyc b/venv/Lib/site-packages/fastapi/security/__pycache__/http.cpython-312.pyc new file mode 100644 index 00000000..bed6d679 Binary files /dev/null and b/venv/Lib/site-packages/fastapi/security/__pycache__/http.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/security/__pycache__/oauth2.cpython-312.pyc b/venv/Lib/site-packages/fastapi/security/__pycache__/oauth2.cpython-312.pyc new file mode 100644 index 00000000..b825ed7c Binary files /dev/null and b/venv/Lib/site-packages/fastapi/security/__pycache__/oauth2.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/security/__pycache__/open_id_connect_url.cpython-312.pyc b/venv/Lib/site-packages/fastapi/security/__pycache__/open_id_connect_url.cpython-312.pyc new file mode 100644 index 00000000..16e376de Binary files /dev/null and b/venv/Lib/site-packages/fastapi/security/__pycache__/open_id_connect_url.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/security/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/fastapi/security/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..fb165233 Binary files /dev/null and 
b/venv/Lib/site-packages/fastapi/security/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/fastapi/security/api_key.py b/venv/Lib/site-packages/fastapi/security/api_key.py new file mode 100644 index 00000000..70c2dca8 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/security/api_key.py @@ -0,0 +1,288 @@ +from typing import Optional + +from fastapi.openapi.models import APIKey, APIKeyIn +from fastapi.security.base import SecurityBase +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.status import HTTP_403_FORBIDDEN +from typing_extensions import Annotated, Doc + + +class APIKeyBase(SecurityBase): + @staticmethod + def check_api_key(api_key: Optional[str], auto_error: bool) -> Optional[str]: + if not api_key: + if auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + return None + return api_key + + +class APIKeyQuery(APIKeyBase): + """ + API key authentication using a query parameter. + + This defines the name of the query parameter that should be provided in the request + with the API key and integrates that into the OpenAPI documentation. It extracts + the key value sent in the query parameter automatically and provides it as the + dependency result. But it doesn't define how to send that API key to the client. + + ## Usage + + Create an instance object and use that object as the dependency in `Depends()`. + + The dependency result will be a string containing the key value. + + ## Example + + ```python + from fastapi import Depends, FastAPI + from fastapi.security import APIKeyQuery + + app = FastAPI() + + query_scheme = APIKeyQuery(name="api_key") + + + @app.get("/items/") + async def read_items(api_key: str = Depends(query_scheme)): + return {"api_key": api_key} + ``` + """ + + def __init__( + self, + *, + name: Annotated[ + str, + Doc("Query parameter name."), + ], + scheme_name: Annotated[ + Optional[str], + Doc( + """ + Security scheme name. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Security scheme description. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + auto_error: Annotated[ + bool, + Doc( + """ + By default, if the query parameter is not provided, `APIKeyQuery` will + automatically cancel the request and send the client an error. + + If `auto_error` is set to `False`, when the query parameter is not + available, instead of erroring out, the dependency result will be + `None`. + + This is useful when you want to have optional authentication. + + It is also useful when you want to have authentication that can be + provided in one of multiple optional ways (for example, in a query + parameter or in an HTTP Bearer token). + """ + ), + ] = True, + ): + self.model: APIKey = APIKey( + **{"in": APIKeyIn.query}, # type: ignore[arg-type] + name=name, + description=description, + ) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__(self, request: Request) -> Optional[str]: + api_key = request.query_params.get(self.model.name) + return self.check_api_key(api_key, self.auto_error) + + +class APIKeyHeader(APIKeyBase): + """ + API key authentication using a header. + + This defines the name of the header that should be provided in the request with + the API key and integrates that into the OpenAPI documentation. 
It extracts + the key value sent in the header automatically and provides it as the dependency + result. But it doesn't define how to send that key to the client. + + ## Usage + + Create an instance object and use that object as the dependency in `Depends()`. + + The dependency result will be a string containing the key value. + + ## Example + + ```python + from fastapi import Depends, FastAPI + from fastapi.security import APIKeyHeader + + app = FastAPI() + + header_scheme = APIKeyHeader(name="x-key") + + + @app.get("/items/") + async def read_items(key: str = Depends(header_scheme)): + return {"key": key} + ``` + """ + + def __init__( + self, + *, + name: Annotated[str, Doc("Header name.")], + scheme_name: Annotated[ + Optional[str], + Doc( + """ + Security scheme name. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Security scheme description. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + auto_error: Annotated[ + bool, + Doc( + """ + By default, if the header is not provided, `APIKeyHeader` will + automatically cancel the request and send the client an error. + + If `auto_error` is set to `False`, when the header is not available, + instead of erroring out, the dependency result will be `None`. + + This is useful when you want to have optional authentication. + + It is also useful when you want to have authentication that can be + provided in one of multiple optional ways (for example, in a header or + in an HTTP Bearer token). + """ + ), + ] = True, + ): + self.model: APIKey = APIKey( + **{"in": APIKeyIn.header}, # type: ignore[arg-type] + name=name, + description=description, + ) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__(self, request: Request) -> Optional[str]: + api_key = request.headers.get(self.model.name) + return self.check_api_key(api_key, self.auto_error) + + +class APIKeyCookie(APIKeyBase): + """ + API key authentication using a cookie. + + This defines the name of the cookie that should be provided in the request with + the API key and integrates that into the OpenAPI documentation. It extracts + the key value sent in the cookie automatically and provides it as the dependency + result. But it doesn't define how to set that cookie. + + ## Usage + + Create an instance object and use that object as the dependency in `Depends()`. + + The dependency result will be a string containing the key value. + + ## Example + + ```python + from fastapi import Depends, FastAPI + from fastapi.security import APIKeyCookie + + app = FastAPI() + + cookie_scheme = APIKeyCookie(name="session") + + + @app.get("/items/") + async def read_items(session: str = Depends(cookie_scheme)): + return {"session": session} + ``` + """ + + def __init__( + self, + *, + name: Annotated[str, Doc("Cookie name.")], + scheme_name: Annotated[ + Optional[str], + Doc( + """ + Security scheme name. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Security scheme description. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + auto_error: Annotated[ + bool, + Doc( + """ + By default, if the cookie is not provided, `APIKeyCookie` will + automatically cancel the request and send the client an error. 
+ + If `auto_error` is set to `False`, when the cookie is not available, + instead of erroring out, the dependency result will be `None`. + + This is useful when you want to have optional authentication. + + It is also useful when you want to have authentication that can be + provided in one of multiple optional ways (for example, in a cookie or + in an HTTP Bearer token). + """ + ), + ] = True, + ): + self.model: APIKey = APIKey( + **{"in": APIKeyIn.cookie}, # type: ignore[arg-type] + name=name, + description=description, + ) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__(self, request: Request) -> Optional[str]: + api_key = request.cookies.get(self.model.name) + return self.check_api_key(api_key, self.auto_error) diff --git a/venv/Lib/site-packages/fastapi/security/base.py b/venv/Lib/site-packages/fastapi/security/base.py new file mode 100644 index 00000000..c43555de --- /dev/null +++ b/venv/Lib/site-packages/fastapi/security/base.py @@ -0,0 +1,6 @@ +from fastapi.openapi.models import SecurityBase as SecurityBaseModel + + +class SecurityBase: + model: SecurityBaseModel + scheme_name: str diff --git a/venv/Lib/site-packages/fastapi/security/http.py b/venv/Lib/site-packages/fastapi/security/http.py new file mode 100644 index 00000000..9ab2df3c --- /dev/null +++ b/venv/Lib/site-packages/fastapi/security/http.py @@ -0,0 +1,423 @@ +import binascii +from base64 import b64decode +from typing import Optional + +from fastapi.exceptions import HTTPException +from fastapi.openapi.models import HTTPBase as HTTPBaseModel +from fastapi.openapi.models import HTTPBearer as HTTPBearerModel +from fastapi.security.base import SecurityBase +from fastapi.security.utils import get_authorization_scheme_param +from pydantic import BaseModel +from starlette.requests import Request +from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN +from typing_extensions import Annotated, Doc + + +class HTTPBasicCredentials(BaseModel): + """ + The HTTP Basic credentials given as the result of using `HTTPBasic` in a + dependency. + + Read more about it in the + [FastAPI docs for HTTP Basic Auth](https://fastapi.tiangolo.com/advanced/security/http-basic-auth/). + """ + + username: Annotated[str, Doc("The HTTP Basic username.")] + password: Annotated[str, Doc("The HTTP Basic password.")] + + +class HTTPAuthorizationCredentials(BaseModel): + """ + The HTTP authorization credentials in the result of using `HTTPBearer` or + `HTTPDigest` in a dependency. + + The HTTP authorization header value is split by the first space. + + The first part is the `scheme`, the second part is the `credentials`. + + For example, in an HTTP Bearer token scheme, the client will send a header + like: + + ``` + Authorization: Bearer deadbeef12346 + ``` + + In this case: + + * `scheme` will have the value `"Bearer"` + * `credentials` will have the value `"deadbeef12346"` + """ + + scheme: Annotated[ + str, + Doc( + """ + The HTTP authorization scheme extracted from the header value. + """ + ), + ] + credentials: Annotated[ + str, + Doc( + """ + The HTTP authorization credentials extracted from the header value. 
+ """ + ), + ] + + +class HTTPBase(SecurityBase): + def __init__( + self, + *, + scheme: str, + scheme_name: Optional[str] = None, + description: Optional[str] = None, + auto_error: bool = True, + ): + self.model = HTTPBaseModel(scheme=scheme, description=description) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__( + self, request: Request + ) -> Optional[HTTPAuthorizationCredentials]: + authorization = request.headers.get("Authorization") + scheme, credentials = get_authorization_scheme_param(authorization) + if not (authorization and scheme and credentials): + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) + + +class HTTPBasic(HTTPBase): + """ + HTTP Basic authentication. + + ## Usage + + Create an instance object and use that object as the dependency in `Depends()`. + + The dependency result will be an `HTTPBasicCredentials` object containing the + `username` and the `password`. + + Read more about it in the + [FastAPI docs for HTTP Basic Auth](https://fastapi.tiangolo.com/advanced/security/http-basic-auth/). + + ## Example + + ```python + from typing import Annotated + + from fastapi import Depends, FastAPI + from fastapi.security import HTTPBasic, HTTPBasicCredentials + + app = FastAPI() + + security = HTTPBasic() + + + @app.get("/users/me") + def read_current_user(credentials: Annotated[HTTPBasicCredentials, Depends(security)]): + return {"username": credentials.username, "password": credentials.password} + ``` + """ + + def __init__( + self, + *, + scheme_name: Annotated[ + Optional[str], + Doc( + """ + Security scheme name. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + realm: Annotated[ + Optional[str], + Doc( + """ + HTTP Basic authentication realm. + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Security scheme description. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + auto_error: Annotated[ + bool, + Doc( + """ + By default, if the HTTP Basic authentication is not provided (a + header), `HTTPBasic` will automatically cancel the request and send the + client an error. + + If `auto_error` is set to `False`, when the HTTP Basic authentication + is not available, instead of erroring out, the dependency result will + be `None`. + + This is useful when you want to have optional authentication. + + It is also useful when you want to have authentication that can be + provided in one of multiple optional ways (for example, in HTTP Basic + authentication or in an HTTP Bearer token). 
+ """ + ), + ] = True, + ): + self.model = HTTPBaseModel(scheme="basic", description=description) + self.scheme_name = scheme_name or self.__class__.__name__ + self.realm = realm + self.auto_error = auto_error + + async def __call__( # type: ignore + self, request: Request + ) -> Optional[HTTPBasicCredentials]: + authorization = request.headers.get("Authorization") + scheme, param = get_authorization_scheme_param(authorization) + if self.realm: + unauthorized_headers = {"WWW-Authenticate": f'Basic realm="{self.realm}"'} + else: + unauthorized_headers = {"WWW-Authenticate": "Basic"} + if not authorization or scheme.lower() != "basic": + if self.auto_error: + raise HTTPException( + status_code=HTTP_401_UNAUTHORIZED, + detail="Not authenticated", + headers=unauthorized_headers, + ) + else: + return None + invalid_user_credentials_exc = HTTPException( + status_code=HTTP_401_UNAUTHORIZED, + detail="Invalid authentication credentials", + headers=unauthorized_headers, + ) + try: + data = b64decode(param).decode("ascii") + except (ValueError, UnicodeDecodeError, binascii.Error): + raise invalid_user_credentials_exc # noqa: B904 + username, separator, password = data.partition(":") + if not separator: + raise invalid_user_credentials_exc + return HTTPBasicCredentials(username=username, password=password) + + +class HTTPBearer(HTTPBase): + """ + HTTP Bearer token authentication. + + ## Usage + + Create an instance object and use that object as the dependency in `Depends()`. + + The dependency result will be an `HTTPAuthorizationCredentials` object containing + the `scheme` and the `credentials`. + + ## Example + + ```python + from typing import Annotated + + from fastapi import Depends, FastAPI + from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer + + app = FastAPI() + + security = HTTPBearer() + + + @app.get("/users/me") + def read_current_user( + credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)] + ): + return {"scheme": credentials.scheme, "credentials": credentials.credentials} + ``` + """ + + def __init__( + self, + *, + bearerFormat: Annotated[Optional[str], Doc("Bearer token format.")] = None, + scheme_name: Annotated[ + Optional[str], + Doc( + """ + Security scheme name. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Security scheme description. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + auto_error: Annotated[ + bool, + Doc( + """ + By default, if the HTTP Bearer token is not provided (in an + `Authorization` header), `HTTPBearer` will automatically cancel the + request and send the client an error. + + If `auto_error` is set to `False`, when the HTTP Bearer token + is not available, instead of erroring out, the dependency result will + be `None`. + + This is useful when you want to have optional authentication. + + It is also useful when you want to have authentication that can be + provided in one of multiple optional ways (for example, in an HTTP + Bearer token or in a cookie). 
+ """ + ), + ] = True, + ): + self.model = HTTPBearerModel(bearerFormat=bearerFormat, description=description) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__( + self, request: Request + ) -> Optional[HTTPAuthorizationCredentials]: + authorization = request.headers.get("Authorization") + scheme, credentials = get_authorization_scheme_param(authorization) + if not (authorization and scheme and credentials): + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + if scheme.lower() != "bearer": + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, + detail="Invalid authentication credentials", + ) + else: + return None + return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) + + +class HTTPDigest(HTTPBase): + """ + HTTP Digest authentication. + + ## Usage + + Create an instance object and use that object as the dependency in `Depends()`. + + The dependency result will be an `HTTPAuthorizationCredentials` object containing + the `scheme` and the `credentials`. + + ## Example + + ```python + from typing import Annotated + + from fastapi import Depends, FastAPI + from fastapi.security import HTTPAuthorizationCredentials, HTTPDigest + + app = FastAPI() + + security = HTTPDigest() + + + @app.get("/users/me") + def read_current_user( + credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)] + ): + return {"scheme": credentials.scheme, "credentials": credentials.credentials} + ``` + """ + + def __init__( + self, + *, + scheme_name: Annotated[ + Optional[str], + Doc( + """ + Security scheme name. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Security scheme description. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + auto_error: Annotated[ + bool, + Doc( + """ + By default, if the HTTP Digest is not provided, `HTTPDigest` will + automatically cancel the request and send the client an error. + + If `auto_error` is set to `False`, when the HTTP Digest is not + available, instead of erroring out, the dependency result will + be `None`. + + This is useful when you want to have optional authentication. + + It is also useful when you want to have authentication that can be + provided in one of multiple optional ways (for example, in HTTP + Digest or in a cookie). 
+ """ + ), + ] = True, + ): + self.model = HTTPBaseModel(scheme="digest", description=description) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__( + self, request: Request + ) -> Optional[HTTPAuthorizationCredentials]: + authorization = request.headers.get("Authorization") + scheme, credentials = get_authorization_scheme_param(authorization) + if not (authorization and scheme and credentials): + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + if scheme.lower() != "digest": + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, + detail="Invalid authentication credentials", + ) + else: + return None + return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) diff --git a/venv/Lib/site-packages/fastapi/security/oauth2.py b/venv/Lib/site-packages/fastapi/security/oauth2.py new file mode 100644 index 00000000..5ffad598 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/security/oauth2.py @@ -0,0 +1,638 @@ +from typing import Any, Dict, List, Optional, Union, cast + +from fastapi.exceptions import HTTPException +from fastapi.openapi.models import OAuth2 as OAuth2Model +from fastapi.openapi.models import OAuthFlows as OAuthFlowsModel +from fastapi.param_functions import Form +from fastapi.security.base import SecurityBase +from fastapi.security.utils import get_authorization_scheme_param +from starlette.requests import Request +from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN + +# TODO: import from typing when deprecating Python 3.9 +from typing_extensions import Annotated, Doc + + +class OAuth2PasswordRequestForm: + """ + This is a dependency class to collect the `username` and `password` as form data + for an OAuth2 password flow. + + The OAuth2 specification dictates that for a password flow the data should be + collected using form data (instead of JSON) and that it should have the specific + fields `username` and `password`. + + All the initialization parameters are extracted from the request. + + Read more about it in the + [FastAPI docs for Simple OAuth2 with Password and Bearer](https://fastapi.tiangolo.com/tutorial/security/simple-oauth2/). + + ## Example + + ```python + from typing import Annotated + + from fastapi import Depends, FastAPI + from fastapi.security import OAuth2PasswordRequestForm + + app = FastAPI() + + + @app.post("/login") + def login(form_data: Annotated[OAuth2PasswordRequestForm, Depends()]): + data = {} + data["scopes"] = [] + for scope in form_data.scopes: + data["scopes"].append(scope) + if form_data.client_id: + data["client_id"] = form_data.client_id + if form_data.client_secret: + data["client_secret"] = form_data.client_secret + return data + ``` + + Note that for OAuth2 the scope `items:read` is a single scope in an opaque string. + You could have custom internal logic to separate it by colon characters (`:`) or + similar, and get the two parts `items` and `read`. Many applications do that to + group and organize permissions, you could do it as well in your application, just + know that that it is application specific, it's not part of the specification. + """ + + def __init__( + self, + *, + grant_type: Annotated[ + Union[str, None], + Form(pattern="^password$"), + Doc( + """ + The OAuth2 spec says it is required and MUST be the fixed string + "password". Nevertheless, this dependency class is permissive and + allows not passing it. 
+                If you want to enforce it, use instead the
+                `OAuth2PasswordRequestFormStrict` dependency.
+                """
+            ),
+        ] = None,
+        username: Annotated[
+            str,
+            Form(),
+            Doc(
+                """
+                `username` string. The OAuth2 spec requires the exact field name
+                `username`.
+                """
+            ),
+        ],
+        password: Annotated[
+            str,
+            Form(),
+            Doc(
+                """
+                `password` string. The OAuth2 spec requires the exact field name
+                `password`.
+                """
+            ),
+        ],
+        scope: Annotated[
+            str,
+            Form(),
+            Doc(
+                """
+                A single string that actually contains several scopes separated by
+                spaces. Each scope is also a string.
+
+                For example, a single string with:
+
+                ```python
+                "items:read items:write users:read profile openid"
+                ```
+
+                would represent the scopes:
+
+                * `items:read`
+                * `items:write`
+                * `users:read`
+                * `profile`
+                * `openid`
+                """
+            ),
+        ] = "",
+        client_id: Annotated[
+            Union[str, None],
+            Form(),
+            Doc(
+                """
+                If there's a `client_id`, it can be sent as part of the form fields.
+                But the OAuth2 specification recommends sending the `client_id` and
+                `client_secret` (if any) using HTTP Basic auth.
+                """
+            ),
+        ] = None,
+        client_secret: Annotated[
+            Union[str, None],
+            Form(),
+            Doc(
+                """
+                If there's a `client_secret` (and a `client_id`), they can be sent
+                as part of the form fields. But the OAuth2 specification recommends
+                sending the `client_id` and `client_secret` (if any) using HTTP Basic
+                auth.
+                """
+            ),
+        ] = None,
+    ):
+        self.grant_type = grant_type
+        self.username = username
+        self.password = password
+        self.scopes = scope.split()
+        self.client_id = client_id
+        self.client_secret = client_secret
+
+
+class OAuth2PasswordRequestFormStrict(OAuth2PasswordRequestForm):
+    """
+    This is a dependency class to collect the `username` and `password` as form data
+    for an OAuth2 password flow.
+
+    The OAuth2 specification dictates that for a password flow the data should be
+    collected using form data (instead of JSON) and that it should have the specific
+    fields `username` and `password`.
+
+    All the initialization parameters are extracted from the request.
+
+    The only difference between `OAuth2PasswordRequestFormStrict` and
+    `OAuth2PasswordRequestForm` is that `OAuth2PasswordRequestFormStrict` requires the
+    client to send the form field `grant_type` with the value `"password"`, which
+    is required by the OAuth2 specification (seemingly for no particular reason),
+    while for `OAuth2PasswordRequestForm` `grant_type` is optional.
+
+    Read more about it in the
+    [FastAPI docs for Simple OAuth2 with Password and Bearer](https://fastapi.tiangolo.com/tutorial/security/simple-oauth2/).
+
+    ## Example
+
+    ```python
+    from typing import Annotated
+
+    from fastapi import Depends, FastAPI
+    from fastapi.security import OAuth2PasswordRequestFormStrict
+
+    app = FastAPI()
+
+
+    @app.post("/login")
+    def login(form_data: Annotated[OAuth2PasswordRequestFormStrict, Depends()]):
+        data = {}
+        data["scopes"] = []
+        for scope in form_data.scopes:
+            data["scopes"].append(scope)
+        if form_data.client_id:
+            data["client_id"] = form_data.client_id
+        if form_data.client_secret:
+            data["client_secret"] = form_data.client_secret
+        return data
+    ```
+
+    Note that for OAuth2 the scope `items:read` is a single scope in an opaque string.
+    You could have custom internal logic to separate it by colon characters (`:`) or
+    similar, and get the two parts `items` and `read`.
+    Many applications do that to group and organize permissions; you could do it
+    as well in your application, just know that it is application-specific and
+    not part of the specification.
+
+
+    grant_type: the OAuth2 spec says it is required and MUST be the fixed string
+        "password". This dependency is strict about it. If you want to be
+        permissive, use instead the OAuth2PasswordRequestForm dependency class.
+    username: username string. The OAuth2 spec requires the exact field name
+        "username".
+    password: password string. The OAuth2 spec requires the exact field name
+        "password".
+    scope: Optional string. Several scopes (each one a string) separated by spaces.
+        E.g. "items:read items:write users:read profile openid"
+    client_id: optional string. OAuth2 recommends sending the client_id and
+        client_secret (if any) using HTTP Basic auth, as: client_id:client_secret
+    client_secret: optional string. OAuth2 recommends sending the client_id and
+        client_secret (if any) using HTTP Basic auth, as: client_id:client_secret
+    """
+
+    def __init__(
+        self,
+        grant_type: Annotated[
+            str,
+            Form(pattern="^password$"),
+            Doc(
+                """
+                The OAuth2 spec says it is required and MUST be the fixed string
+                "password". This dependency is strict about it. If you want to be
+                permissive, use instead the `OAuth2PasswordRequestForm` dependency
+                class.
+                """
+            ),
+        ],
+        username: Annotated[
+            str,
+            Form(),
+            Doc(
+                """
+                `username` string. The OAuth2 spec requires the exact field name
+                `username`.
+                """
+            ),
+        ],
+        password: Annotated[
+            str,
+            Form(),
+            Doc(
+                """
+                `password` string. The OAuth2 spec requires the exact field name
+                `password`.
+                """
+            ),
+        ],
+        scope: Annotated[
+            str,
+            Form(),
+            Doc(
+                """
+                A single string that actually contains several scopes separated by
+                spaces. Each scope is also a string.
+
+                For example, a single string with:
+
+                ```python
+                "items:read items:write users:read profile openid"
+                ```
+
+                would represent the scopes:
+
+                * `items:read`
+                * `items:write`
+                * `users:read`
+                * `profile`
+                * `openid`
+                """
+            ),
+        ] = "",
+        client_id: Annotated[
+            Union[str, None],
+            Form(),
+            Doc(
+                """
+                If there's a `client_id`, it can be sent as part of the form fields.
+                But the OAuth2 specification recommends sending the `client_id` and
+                `client_secret` (if any) using HTTP Basic auth.
+                """
+            ),
+        ] = None,
+        client_secret: Annotated[
+            Union[str, None],
+            Form(),
+            Doc(
+                """
+                If there's a `client_secret` (and a `client_id`), they can be sent
+                as part of the form fields. But the OAuth2 specification recommends
+                sending the `client_id` and `client_secret` (if any) using HTTP Basic
+                auth.
+                """
+            ),
+        ] = None,
+    ):
+        super().__init__(
+            grant_type=grant_type,
+            username=username,
+            password=password,
+            scope=scope,
+            client_id=client_id,
+            client_secret=client_secret,
+        )
+
+
+class OAuth2(SecurityBase):
+    """
+    This is the base class for OAuth2 authentication; an instance of it would be
+    used as a dependency. All other OAuth2 classes inherit from it and customize it
+    for each OAuth2 flow.
+
+    You normally would not create a new class inheriting from it but use one of the
+    existing subclasses, and maybe compose them if you want to support multiple flows.
+
+    Read more about it in the
+    [FastAPI docs for Security](https://fastapi.tiangolo.com/tutorial/security/).
+    """
+
+    def __init__(
+        self,
+        *,
+        flows: Annotated[
+            Union[OAuthFlowsModel, Dict[str, Dict[str, Any]]],
+            Doc(
+                """
+                The dictionary of OAuth2 flows.
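+
+                For example, a password flow could be declared either as an
+                `OAuthFlowsModel` or as a plain dict (a sketch; the token URL is
+                illustrative):
+
+                ```python
+                oauth2_scheme = OAuth2(
+                    flows={"password": {"tokenUrl": "token", "scopes": {}}}
+                )
+                ```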
+ """ + ), + ] = OAuthFlowsModel(), + scheme_name: Annotated[ + Optional[str], + Doc( + """ + Security scheme name. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Security scheme description. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + auto_error: Annotated[ + bool, + Doc( + """ + By default, if no HTTP Authorization header is provided, required for + OAuth2 authentication, it will automatically cancel the request and + send the client an error. + + If `auto_error` is set to `False`, when the HTTP Authorization header + is not available, instead of erroring out, the dependency result will + be `None`. + + This is useful when you want to have optional authentication. + + It is also useful when you want to have authentication that can be + provided in one of multiple optional ways (for example, with OAuth2 + or in a cookie). + """ + ), + ] = True, + ): + self.model = OAuth2Model( + flows=cast(OAuthFlowsModel, flows), description=description + ) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__(self, request: Request) -> Optional[str]: + authorization = request.headers.get("Authorization") + if not authorization: + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + return authorization + + +class OAuth2PasswordBearer(OAuth2): + """ + OAuth2 flow for authentication using a bearer token obtained with a password. + An instance of it would be used as a dependency. + + Read more about it in the + [FastAPI docs for Simple OAuth2 with Password and Bearer](https://fastapi.tiangolo.com/tutorial/security/simple-oauth2/). + """ + + def __init__( + self, + tokenUrl: Annotated[ + str, + Doc( + """ + The URL to obtain the OAuth2 token. This would be the *path operation* + that has `OAuth2PasswordRequestForm` as a dependency. + """ + ), + ], + scheme_name: Annotated[ + Optional[str], + Doc( + """ + Security scheme name. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + scopes: Annotated[ + Optional[Dict[str, str]], + Doc( + """ + The OAuth2 scopes that would be required by the *path operations* that + use this dependency. + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Security scheme description. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + auto_error: Annotated[ + bool, + Doc( + """ + By default, if no HTTP Authorization header is provided, required for + OAuth2 authentication, it will automatically cancel the request and + send the client an error. + + If `auto_error` is set to `False`, when the HTTP Authorization header + is not available, instead of erroring out, the dependency result will + be `None`. + + This is useful when you want to have optional authentication. + + It is also useful when you want to have authentication that can be + provided in one of multiple optional ways (for example, with OAuth2 + or in a cookie). 
+ """ + ), + ] = True, + ): + if not scopes: + scopes = {} + flows = OAuthFlowsModel( + password=cast(Any, {"tokenUrl": tokenUrl, "scopes": scopes}) + ) + super().__init__( + flows=flows, + scheme_name=scheme_name, + description=description, + auto_error=auto_error, + ) + + async def __call__(self, request: Request) -> Optional[str]: + authorization = request.headers.get("Authorization") + scheme, param = get_authorization_scheme_param(authorization) + if not authorization or scheme.lower() != "bearer": + if self.auto_error: + raise HTTPException( + status_code=HTTP_401_UNAUTHORIZED, + detail="Not authenticated", + headers={"WWW-Authenticate": "Bearer"}, + ) + else: + return None + return param + + +class OAuth2AuthorizationCodeBearer(OAuth2): + """ + OAuth2 flow for authentication using a bearer token obtained with an OAuth2 code + flow. An instance of it would be used as a dependency. + """ + + def __init__( + self, + authorizationUrl: str, + tokenUrl: Annotated[ + str, + Doc( + """ + The URL to obtain the OAuth2 token. + """ + ), + ], + refreshUrl: Annotated[ + Optional[str], + Doc( + """ + The URL to refresh the token and obtain a new one. + """ + ), + ] = None, + scheme_name: Annotated[ + Optional[str], + Doc( + """ + Security scheme name. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + scopes: Annotated[ + Optional[Dict[str, str]], + Doc( + """ + The OAuth2 scopes that would be required by the *path operations* that + use this dependency. + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Security scheme description. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + auto_error: Annotated[ + bool, + Doc( + """ + By default, if no HTTP Authorization header is provided, required for + OAuth2 authentication, it will automatically cancel the request and + send the client an error. + + If `auto_error` is set to `False`, when the HTTP Authorization header + is not available, instead of erroring out, the dependency result will + be `None`. + + This is useful when you want to have optional authentication. + + It is also useful when you want to have authentication that can be + provided in one of multiple optional ways (for example, with OAuth2 + or in a cookie). + """ + ), + ] = True, + ): + if not scopes: + scopes = {} + flows = OAuthFlowsModel( + authorizationCode=cast( + Any, + { + "authorizationUrl": authorizationUrl, + "tokenUrl": tokenUrl, + "refreshUrl": refreshUrl, + "scopes": scopes, + }, + ) + ) + super().__init__( + flows=flows, + scheme_name=scheme_name, + description=description, + auto_error=auto_error, + ) + + async def __call__(self, request: Request) -> Optional[str]: + authorization = request.headers.get("Authorization") + scheme, param = get_authorization_scheme_param(authorization) + if not authorization or scheme.lower() != "bearer": + if self.auto_error: + raise HTTPException( + status_code=HTTP_401_UNAUTHORIZED, + detail="Not authenticated", + headers={"WWW-Authenticate": "Bearer"}, + ) + else: + return None # pragma: nocover + return param + + +class SecurityScopes: + """ + This is a special class that you can define in a parameter in a dependency to + obtain the OAuth2 scopes required by all the dependencies in the same chain. + + This way, multiple dependencies can have different scopes, even when used in the + same *path operation*. And with this, you can access all the scopes required in + all those dependencies in a single place. 
+ + Read more about it in the + [FastAPI docs for OAuth2 scopes](https://fastapi.tiangolo.com/advanced/security/oauth2-scopes/). + """ + + def __init__( + self, + scopes: Annotated[ + Optional[List[str]], + Doc( + """ + This will be filled by FastAPI. + """ + ), + ] = None, + ): + self.scopes: Annotated[ + List[str], + Doc( + """ + The list of all the scopes required by dependencies. + """ + ), + ] = scopes or [] + self.scope_str: Annotated[ + str, + Doc( + """ + All the scopes required by all the dependencies in a single string + separated by spaces, as defined in the OAuth2 specification. + """ + ), + ] = " ".join(self.scopes) diff --git a/venv/Lib/site-packages/fastapi/security/open_id_connect_url.py b/venv/Lib/site-packages/fastapi/security/open_id_connect_url.py new file mode 100644 index 00000000..c8cceb91 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/security/open_id_connect_url.py @@ -0,0 +1,84 @@ +from typing import Optional + +from fastapi.openapi.models import OpenIdConnect as OpenIdConnectModel +from fastapi.security.base import SecurityBase +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.status import HTTP_403_FORBIDDEN +from typing_extensions import Annotated, Doc + + +class OpenIdConnect(SecurityBase): + """ + OpenID Connect authentication class. An instance of it would be used as a + dependency. + """ + + def __init__( + self, + *, + openIdConnectUrl: Annotated[ + str, + Doc( + """ + The OpenID Connect URL. + """ + ), + ], + scheme_name: Annotated[ + Optional[str], + Doc( + """ + Security scheme name. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + description: Annotated[ + Optional[str], + Doc( + """ + Security scheme description. + + It will be included in the generated OpenAPI (e.g. visible at `/docs`). + """ + ), + ] = None, + auto_error: Annotated[ + bool, + Doc( + """ + By default, if no HTTP Authorization header is provided, required for + OpenID Connect authentication, it will automatically cancel the request + and send the client an error. + + If `auto_error` is set to `False`, when the HTTP Authorization header + is not available, instead of erroring out, the dependency result will + be `None`. + + This is useful when you want to have optional authentication. + + It is also useful when you want to have authentication that can be + provided in one of multiple optional ways (for example, with OpenID + Connect or in a cookie). 
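+
+                A usage sketch (the discovery URL is illustrative):
+
+                ```python
+                from typing import Annotated
+
+                from fastapi import Depends, FastAPI
+                from fastapi.security import OpenIdConnect
+
+                app = FastAPI()
+                oidc_scheme = OpenIdConnect(
+                    openIdConnectUrl="https://example.com/.well-known/openid-configuration"
+                )
+
+
+                @app.get("/users/me")
+                async def read_current_user(
+                    authorization: Annotated[str, Depends(oidc_scheme)],
+                ):
+                    return {"authorization": authorization}
+                ```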
+ """ + ), + ] = True, + ): + self.model = OpenIdConnectModel( + openIdConnectUrl=openIdConnectUrl, description=description + ) + self.scheme_name = scheme_name or self.__class__.__name__ + self.auto_error = auto_error + + async def __call__(self, request: Request) -> Optional[str]: + authorization = request.headers.get("Authorization") + if not authorization: + if self.auto_error: + raise HTTPException( + status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" + ) + else: + return None + return authorization diff --git a/venv/Lib/site-packages/fastapi/security/utils.py b/venv/Lib/site-packages/fastapi/security/utils.py new file mode 100644 index 00000000..fa7a450b --- /dev/null +++ b/venv/Lib/site-packages/fastapi/security/utils.py @@ -0,0 +1,10 @@ +from typing import Optional, Tuple + + +def get_authorization_scheme_param( + authorization_header_value: Optional[str], +) -> Tuple[str, str]: + if not authorization_header_value: + return "", "" + scheme, _, param = authorization_header_value.partition(" ") + return scheme, param diff --git a/venv/Lib/site-packages/fastapi/staticfiles.py b/venv/Lib/site-packages/fastapi/staticfiles.py new file mode 100644 index 00000000..299015d4 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/staticfiles.py @@ -0,0 +1 @@ +from starlette.staticfiles import StaticFiles as StaticFiles # noqa diff --git a/venv/Lib/site-packages/fastapi/templating.py b/venv/Lib/site-packages/fastapi/templating.py new file mode 100644 index 00000000..0cb86848 --- /dev/null +++ b/venv/Lib/site-packages/fastapi/templating.py @@ -0,0 +1 @@ +from starlette.templating import Jinja2Templates as Jinja2Templates # noqa diff --git a/venv/Lib/site-packages/fastapi/testclient.py b/venv/Lib/site-packages/fastapi/testclient.py new file mode 100644 index 00000000..4012406a --- /dev/null +++ b/venv/Lib/site-packages/fastapi/testclient.py @@ -0,0 +1 @@ +from starlette.testclient import TestClient as TestClient # noqa diff --git a/venv/Lib/site-packages/fastapi/types.py b/venv/Lib/site-packages/fastapi/types.py new file mode 100644 index 00000000..3205654c --- /dev/null +++ b/venv/Lib/site-packages/fastapi/types.py @@ -0,0 +1,10 @@ +import types +from enum import Enum +from typing import Any, Callable, Dict, Set, Type, TypeVar, Union + +from pydantic import BaseModel + +DecoratedCallable = TypeVar("DecoratedCallable", bound=Callable[..., Any]) +UnionType = getattr(types, "UnionType", Union) +ModelNameMap = Dict[Union[Type[BaseModel], Type[Enum]], str] +IncEx = Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any]] diff --git a/venv/Lib/site-packages/fastapi/utils.py b/venv/Lib/site-packages/fastapi/utils.py new file mode 100644 index 00000000..4c7350fe --- /dev/null +++ b/venv/Lib/site-packages/fastapi/utils.py @@ -0,0 +1,220 @@ +import re +import warnings +from dataclasses import is_dataclass +from typing import ( + TYPE_CHECKING, + Any, + Dict, + MutableMapping, + Optional, + Set, + Type, + Union, + cast, +) +from weakref import WeakKeyDictionary + +import fastapi +from fastapi._compat import ( + PYDANTIC_V2, + BaseConfig, + ModelField, + PydanticSchemaGenerationError, + Undefined, + UndefinedType, + Validator, + lenient_issubclass, +) +from fastapi.datastructures import DefaultPlaceholder, DefaultType +from pydantic import BaseModel, create_model +from pydantic.fields import FieldInfo +from typing_extensions import Literal + +if TYPE_CHECKING: # pragma: nocover + from .routing import APIRoute + +# Cache for `create_cloned_field` +_CLONED_TYPES_CACHE: MutableMapping[Type[BaseModel], 
Type[BaseModel]] = ( + WeakKeyDictionary() +) + + +def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool: + if status_code is None: + return True + # Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1 + if status_code in { + "default", + "1XX", + "2XX", + "3XX", + "4XX", + "5XX", + }: + return True + current_status_code = int(status_code) + return not (current_status_code < 200 or current_status_code in {204, 205, 304}) + + +def get_path_param_names(path: str) -> Set[str]: + return set(re.findall("{(.*?)}", path)) + + +def create_model_field( + name: str, + type_: Any, + class_validators: Optional[Dict[str, Validator]] = None, + default: Optional[Any] = Undefined, + required: Union[bool, UndefinedType] = Undefined, + model_config: Type[BaseConfig] = BaseConfig, + field_info: Optional[FieldInfo] = None, + alias: Optional[str] = None, + mode: Literal["validation", "serialization"] = "validation", +) -> ModelField: + class_validators = class_validators or {} + if PYDANTIC_V2: + field_info = field_info or FieldInfo( + annotation=type_, default=default, alias=alias + ) + else: + field_info = field_info or FieldInfo() + kwargs = {"name": name, "field_info": field_info} + if PYDANTIC_V2: + kwargs.update({"mode": mode}) + else: + kwargs.update( + { + "type_": type_, + "class_validators": class_validators, + "default": default, + "required": required, + "model_config": model_config, + "alias": alias, + } + ) + try: + return ModelField(**kwargs) # type: ignore[arg-type] + except (RuntimeError, PydanticSchemaGenerationError): + raise fastapi.exceptions.FastAPIError( + "Invalid args for response field! Hint: " + f"check that {type_} is a valid Pydantic field type. " + "If you are using a return type annotation that is not a valid Pydantic " + "field (e.g. Union[Response, dict, None]) you can disable generating the " + "response model from the type annotation with the path operation decorator " + "parameter response_model=None. 
Read more: " + "https://fastapi.tiangolo.com/tutorial/response-model/" + ) from None + + +def create_cloned_field( + field: ModelField, + *, + cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None, +) -> ModelField: + if PYDANTIC_V2: + return field + # cloned_types caches already cloned types to support recursive models and improve + # performance by avoiding unnecessary cloning + if cloned_types is None: + cloned_types = _CLONED_TYPES_CACHE + + original_type = field.type_ + if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"): + original_type = original_type.__pydantic_model__ + use_type = original_type + if lenient_issubclass(original_type, BaseModel): + original_type = cast(Type[BaseModel], original_type) + use_type = cloned_types.get(original_type) + if use_type is None: + use_type = create_model(original_type.__name__, __base__=original_type) + cloned_types[original_type] = use_type + for f in original_type.__fields__.values(): + use_type.__fields__[f.name] = create_cloned_field( + f, cloned_types=cloned_types + ) + new_field = create_model_field(name=field.name, type_=use_type) + new_field.has_alias = field.has_alias # type: ignore[attr-defined] + new_field.alias = field.alias # type: ignore[misc] + new_field.class_validators = field.class_validators # type: ignore[attr-defined] + new_field.default = field.default # type: ignore[misc] + new_field.required = field.required # type: ignore[misc] + new_field.model_config = field.model_config # type: ignore[attr-defined] + new_field.field_info = field.field_info + new_field.allow_none = field.allow_none # type: ignore[attr-defined] + new_field.validate_always = field.validate_always # type: ignore[attr-defined] + if field.sub_fields: # type: ignore[attr-defined] + new_field.sub_fields = [ # type: ignore[attr-defined] + create_cloned_field(sub_field, cloned_types=cloned_types) + for sub_field in field.sub_fields # type: ignore[attr-defined] + ] + if field.key_field: # type: ignore[attr-defined] + new_field.key_field = create_cloned_field( # type: ignore[attr-defined] + field.key_field, # type: ignore[attr-defined] + cloned_types=cloned_types, + ) + new_field.validators = field.validators # type: ignore[attr-defined] + new_field.pre_validators = field.pre_validators # type: ignore[attr-defined] + new_field.post_validators = field.post_validators # type: ignore[attr-defined] + new_field.parse_json = field.parse_json # type: ignore[attr-defined] + new_field.shape = field.shape # type: ignore[attr-defined] + new_field.populate_validators() # type: ignore[attr-defined] + return new_field + + +def generate_operation_id_for_path( + *, name: str, path: str, method: str +) -> str: # pragma: nocover + warnings.warn( + "fastapi.utils.generate_operation_id_for_path() was deprecated, " + "it is not used internally, and will be removed soon", + DeprecationWarning, + stacklevel=2, + ) + operation_id = f"{name}{path}" + operation_id = re.sub(r"\W", "_", operation_id) + operation_id = f"{operation_id}_{method.lower()}" + return operation_id + + +def generate_unique_id(route: "APIRoute") -> str: + operation_id = f"{route.name}{route.path_format}" + operation_id = re.sub(r"\W", "_", operation_id) + assert route.methods + operation_id = f"{operation_id}_{list(route.methods)[0].lower()}" + return operation_id + + +def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None: + for key, value in update_dict.items(): + if ( + key in main_dict + and isinstance(main_dict[key], dict) + and 
isinstance(value, dict) + ): + deep_dict_update(main_dict[key], value) + elif ( + key in main_dict + and isinstance(main_dict[key], list) + and isinstance(update_dict[key], list) + ): + main_dict[key] = main_dict[key] + update_dict[key] + else: + main_dict[key] = value + + +def get_value_or_default( + first_item: Union[DefaultPlaceholder, DefaultType], + *extra_items: Union[DefaultPlaceholder, DefaultType], +) -> Union[DefaultPlaceholder, DefaultType]: + """ + Pass items or `DefaultPlaceholder`s by descending priority. + + The first one to _not_ be a `DefaultPlaceholder` will be returned. + + Otherwise, the first item (a `DefaultPlaceholder`) will be returned. + """ + items = (first_item,) + extra_items + for item in items: + if not isinstance(item, DefaultPlaceholder): + return item + return first_item diff --git a/venv/Lib/site-packages/fastapi/websockets.py b/venv/Lib/site-packages/fastapi/websockets.py new file mode 100644 index 00000000..55a4ac4a --- /dev/null +++ b/venv/Lib/site-packages/fastapi/websockets.py @@ -0,0 +1,3 @@ +from starlette.websockets import WebSocket as WebSocket # noqa +from starlette.websockets import WebSocketDisconnect as WebSocketDisconnect # noqa +from starlette.websockets import WebSocketState as WebSocketState # noqa diff --git a/venv/Lib/site-packages/greenlet-3.2.2.dist-info/INSTALLER b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/greenlet-3.2.2.dist-info/METADATA b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/METADATA new file mode 100644 index 00000000..6190cbd5 --- /dev/null +++ b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/METADATA @@ -0,0 +1,116 @@ +Metadata-Version: 2.4 +Name: greenlet +Version: 3.2.2 +Summary: Lightweight in-process concurrent programming +Home-page: https://greenlet.readthedocs.io/ +Author: Alexey Borzenkov +Author-email: snaury@gmail.com +Maintainer: Jason Madden +Maintainer-email: jason@seecoresoftware.com +License: MIT AND Python-2.0 +Project-URL: Bug Tracker, https://github.com/python-greenlet/greenlet/issues +Project-URL: Source Code, https://github.com/python-greenlet/greenlet/ +Project-URL: Documentation, https://greenlet.readthedocs.io/ +Project-URL: Changes, https://greenlet.readthedocs.io/en/latest/changes.html +Keywords: greenlet coroutine concurrency threads cooperative +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Natural Language :: English +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: LICENSE.PSF +Provides-Extra: docs +Requires-Dist: Sphinx; extra == "docs" +Requires-Dist: furo; extra == "docs" +Provides-Extra: test +Requires-Dist: objgraph; extra == "test" +Requires-Dist: psutil; extra == "test" +Dynamic: author +Dynamic: 
author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: license-file
+Dynamic: maintainer
+Dynamic: maintainer-email
+Dynamic: platform
+Dynamic: project-url
+Dynamic: provides-extra
+Dynamic: requires-python
+Dynamic: summary
+
+.. This file is included into docs/history.rst
+
+
+Greenlets are lightweight coroutines for in-process concurrent
+programming.
+
+The "greenlet" package is a spin-off of `Stackless`_, a version of
+CPython that supports micro-threads called "tasklets". Tasklets run
+pseudo-concurrently (typically in a single or a few OS-level threads)
+and are synchronized with data exchanges on "channels".
+
+A "greenlet", on the other hand, is a still more primitive notion of
+micro-thread with no implicit scheduling; coroutines, in other words.
+This is useful when you want to control exactly when your code runs.
+You can build custom scheduled micro-threads on top of greenlet;
+however, it seems that greenlets are useful on their own as a way to
+make advanced control flow structures. For example, we can recreate
+generators; the difference with Python's own generators is that our
+generators can call nested functions and the nested functions can
+yield values too. (Additionally, you don't need a "yield" keyword. See
+the example in `test_generator.py
+<https://github.com/python-greenlet/greenlet/blob/master/src/greenlet/tests/test_generator.py>`_).
+
+Greenlets are provided as a C extension module for the regular unmodified
+interpreter.
+
+.. _`Stackless`: http://www.stackless.com
+
+
+Who is using Greenlet?
+======================
+
+There are several libraries that use Greenlet as a more flexible
+alternative to Python's built in coroutine support:
+
+ - `Concurrence`_
+ - `Eventlet`_
+ - `Gevent`_
+
+.. _Concurrence: http://opensource.hyves.org/concurrence/
+.. _Eventlet: http://eventlet.net/
+..
_Gevent: http://www.gevent.org/ + +Getting Greenlet +================ + +The easiest way to get Greenlet is to install it with pip:: + + pip install greenlet + + +Source code archives and binary distributions are available on the +python package index at https://pypi.org/project/greenlet + +The source code repository is hosted on github: +https://github.com/python-greenlet/greenlet + +Documentation is available on readthedocs.org: +https://greenlet.readthedocs.io diff --git a/venv/Lib/site-packages/greenlet-3.2.2.dist-info/RECORD b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/RECORD new file mode 100644 index 00000000..c70584da --- /dev/null +++ b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/RECORD @@ -0,0 +1,120 @@ +../../include/site/python3.12/greenlet/greenlet.h,sha256=sz5pYRSQqedgOt2AMgxLZdTjO-qcr_JMvgiEJR9IAJ8,4755 +greenlet-3.2.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +greenlet-3.2.2.dist-info/METADATA,sha256=X57v7sBEYSNABzNQXpCK_CGTvT2GasTRzF6Qjy9IUys,4193 +greenlet-3.2.2.dist-info/RECORD,, +greenlet-3.2.2.dist-info/WHEEL,sha256=VryZucl_XSB78oskhQFN0jfmderyetLlP0R6bZqf7Ys,101 +greenlet-3.2.2.dist-info/licenses/LICENSE,sha256=dpgx1uXfrywggC-sz_H6-0wgJd2PYlPfpH_K1Z1NCXk,1434 +greenlet-3.2.2.dist-info/licenses/LICENSE.PSF,sha256=5f88I8EQ5JTNfXNsEP2W1GJFe6_soxCEDbZScpjH1Gs,2424 +greenlet-3.2.2.dist-info/top_level.txt,sha256=YSnRsCRoO61JGlP57o8iKL6rdLWDWuiyKD8ekpWUsDc,9 +greenlet/CObjects.cpp,sha256=OPej1bWBgc4sRrTRQ2aFFML9pzDYKlKhlJSjsI0X_eU,3508 +greenlet/PyGreenlet.cpp,sha256=ogWsQ5VhSdItWRLLpWOgSuqYuM3QwQ4cVCxOQIgHx6E,23441 +greenlet/PyGreenlet.hpp,sha256=2ZQlOxYNoy7QwD7mppFoOXe_At56NIsJ0eNsE_hoSsw,1463 +greenlet/PyGreenletUnswitchable.cpp,sha256=PQE0fSZa_IOyUM44IESHkJoD2KtGW3dkhkmZSYY3WHs,4375 +greenlet/PyModule.cpp,sha256=J2TH06dGcNEarioS6NbWXkdME8hJY05XVbdqLrfO5w4,8587 +greenlet/TBrokenGreenlet.cpp,sha256=smN26uC7ahAbNYiS10rtWPjCeTG4jevM8siA2sjJiXg,1021 +greenlet/TExceptionState.cpp,sha256=U7Ctw9fBdNraS0d174MoQW7bN-ae209Ta0JuiKpcpVI,1359 +greenlet/TGreenlet.cpp,sha256=HGYGKpmKYqQ842tASW-QaaV8wua4a5XV_quYKPDsV_Y,25731 +greenlet/TGreenlet.hpp,sha256=7ti9va3tzIdFkQ-FEEEkG5p7vQ3PTdYlDFMHLcnIHw4,28043 +greenlet/TGreenletGlobals.cpp,sha256=YyEmDjKf1g32bsL-unIUScFLnnA1fzLWf2gOMd-D0Zw,3264 +greenlet/TMainGreenlet.cpp,sha256=fvgb8HHB-FVTPEKjR1s_ifCZSpp5D5YQByik0CnIABg,3276 +greenlet/TPythonState.cpp,sha256=vBMJT9qScTSIqhnOTVJqsGug3WbKv9dDt0cOqyhUk8w,15779 +greenlet/TStackState.cpp,sha256=V444I8Jj9DhQz-9leVW_9dtiSRjaE1NMlgDG02Xxq-Y,7381 +greenlet/TThreadState.hpp,sha256=2Jgg7DtGggMYR_x3CLAvAFf1mIdIDtQvSSItcdmX4ZQ,19131 +greenlet/TThreadStateCreator.hpp,sha256=uYTexDWooXSSgUc5uh-Mhm5BQi3-kR6CqpizvNynBFQ,2610 +greenlet/TThreadStateDestroy.cpp,sha256=36yBCAMq3beXTZd-XnFA7DwaHVSOx2vc28-nf0spysU,8169 +greenlet/TUserGreenlet.cpp,sha256=uemg0lwKXtYB0yzmvyYdIIAsKnNkifXM1OJ2OlrFP1A,23553 +greenlet/__init__.py,sha256=NNXC07srcIhPc_tI8r0YWIxSi9S3V0MNFxTFAnY4mFE,1723 +greenlet/__pycache__/__init__.cpython-312.pyc,, +greenlet/_greenlet.cp312-win_amd64.pyd,sha256=oGNOVPhLdekUjJdM8LwImrgfROqWuskLtp7vGvNLC4Y,219136 +greenlet/greenlet.cpp,sha256=WdItb1yWL9WNsTqJNf0Iw8ZwDHD49pkDP0rIRGBg2pw,10996 +greenlet/greenlet.h,sha256=sz5pYRSQqedgOt2AMgxLZdTjO-qcr_JMvgiEJR9IAJ8,4755 +greenlet/greenlet_allocator.hpp,sha256=kxyWW4Qdwlrc7ufgdb5vd6Y7jhauQ699Kod0mqiO1iM,1582 +greenlet/greenlet_compiler_compat.hpp,sha256=nRxpLN9iNbnLVyFDeVmOwyeeNm6scQrOed1l7JQYMCM,4346 +greenlet/greenlet_cpython_compat.hpp,sha256=XrsoFv8nKavrdxly5_-q9lqGeE8d3wKt1YtldsAHAT8,4068 
+greenlet/greenlet_exceptions.hpp,sha256=06Bx81DtVaJTa6RtiMcV141b-XHv4ppEgVItkblcLWY,4503 +greenlet/greenlet_internal.hpp,sha256=Ajc-_09W4xWzm9XfyXHAeQAFUgKGKsnJwYsTCoNy3ns,2709 +greenlet/greenlet_refs.hpp,sha256=OnbA91yZf3QHH6-eJccvoNDAaN-pQBMMrclFU1Ot3J4,34436 +greenlet/greenlet_slp_switch.hpp,sha256=kM1QHA2iV-gH4cFyN6lfIagHQxvJZjWOVJdIxRE3TlQ,3198 +greenlet/greenlet_thread_support.hpp,sha256=XUJ6ljWjf9OYyuOILiz8e_yHvT3fbaUiHdhiPNQUV4s,867 +greenlet/platform/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +greenlet/platform/__pycache__/__init__.cpython-312.pyc,, +greenlet/platform/setup_switch_x64_masm.cmd,sha256=ZpClUJeU0ujEPSTWNSepP0W2f9XiYQKA8QKSoVou8EU,143 +greenlet/platform/switch_aarch64_gcc.h,sha256=GKC0yWNXnbK2X--X6aguRCMj2Tg7hDU1Zkl3RljDvC8,4307 +greenlet/platform/switch_alpha_unix.h,sha256=Z-SvF8JQV3oxWT8JRbL9RFu4gRFxPdJ7cviM8YayMmw,671 +greenlet/platform/switch_amd64_unix.h,sha256=EcSFCBlodEBhqhKjcJqY_5Dn_jn7pKpkJlOvp7gFXLI,2748 +greenlet/platform/switch_arm32_gcc.h,sha256=Z3KkHszdgq6uU4YN3BxvKMG2AdDnovwCCNrqGWZ1Lyo,2479 +greenlet/platform/switch_arm32_ios.h,sha256=mm5_R9aXB92hyxzFRwB71M60H6AlvHjrpTrc72Pz3l8,1892 +greenlet/platform/switch_arm64_masm.asm,sha256=4kpTtfy7rfcr8j1CpJLAK21EtZpGDAJXWRU68HEy5A8,1245 +greenlet/platform/switch_arm64_masm.obj,sha256=DmLnIB_icoEHAz1naue_pJPTZgR9ElM7-Nmztr-o9_U,746 +greenlet/platform/switch_arm64_msvc.h,sha256=RqK5MHLmXI3Q-FQ7tm32KWnbDNZKnkJdq8CR89cz640,398 +greenlet/platform/switch_csky_gcc.h,sha256=kDikyiPpewP71KoBZQO_MukDTXTXBiC7x-hF0_2DL0w,1331 +greenlet/platform/switch_loongarch64_linux.h,sha256=7M-Dhc4Q8tRbJCJhalDLwU6S9Mx8MjmN1RbTDgIvQTM,779 +greenlet/platform/switch_m68k_gcc.h,sha256=VSa6NpZhvyyvF-Q58CTIWSpEDo4FKygOyTz00whctlw,928 +greenlet/platform/switch_mips_unix.h,sha256=E0tYsqc5anDY1BhenU1l8DW-nVHC_BElzLgJw3TGtPk,1426 +greenlet/platform/switch_ppc64_aix.h,sha256=_BL0iyRr3ZA5iPlr3uk9SJ5sNRWGYLrXcZ5z-CE9anE,3860 +greenlet/platform/switch_ppc64_linux.h,sha256=0rriT5XyxPb0GqsSSn_bP9iQsnjsPbBmu0yqo5goSyQ,3815 +greenlet/platform/switch_ppc_aix.h,sha256=pHA4slEjUFP3J3SYm1TAlNPhgb2G_PAtax5cO8BEe1A,2941 +greenlet/platform/switch_ppc_linux.h,sha256=YwrlKUzxlXuiKMQqr6MFAV1bPzWnmvk6X1AqJZEpOWU,2759 +greenlet/platform/switch_ppc_macosx.h,sha256=Z6KN_ud0n6nC3ltJrNz2qtvER6vnRAVRNH9mdIDpMxY,2624 +greenlet/platform/switch_ppc_unix.h,sha256=-ZG7MSSPEA5N4qO9PQChtyEJ-Fm6qInhyZm_ZBHTtMg,2652 +greenlet/platform/switch_riscv_unix.h,sha256=606V6ACDf79Fz_WGItnkgbjIJ0pGg_sHmPyDxQYKK58,949 +greenlet/platform/switch_s390_unix.h,sha256=RRlGu957ybmq95qNNY4Qw1mcaoT3eBnW5KbVwu48KX8,2763 +greenlet/platform/switch_sh_gcc.h,sha256=mcRJBTu-2UBf4kZtX601qofwuDuy-Y-hnxJtrcaB7do,901 +greenlet/platform/switch_sparc_sun_gcc.h,sha256=xZish9GsMHBienUbUMsX1-ZZ-as7hs36sVhYIE3ew8Y,2797 +greenlet/platform/switch_x32_unix.h,sha256=nM98PKtzTWc1lcM7TRMUZJzskVdR1C69U1UqZRWX0GE,1509 +greenlet/platform/switch_x64_masm.asm,sha256=nu6n2sWyXuXfpPx40d9YmLfHXUc1sHgeTvX1kUzuvEM,1841 +greenlet/platform/switch_x64_masm.obj,sha256=GNtTNxYdo7idFUYsQv-mrXWgyT5EJ93-9q90lN6svtQ,1078 +greenlet/platform/switch_x64_msvc.h,sha256=LIeasyKo_vHzspdMzMHbosRhrBfKI4BkQOh4qcTHyJw,1805 +greenlet/platform/switch_x86_msvc.h,sha256=TtGOwinbFfnn6clxMNkCz8i6OmgB6kVRrShoF5iT9to,12838 +greenlet/platform/switch_x86_unix.h,sha256=VplW9H0FF0cZHw1DhJdIUs5q6YLS4cwb2nYwjF83R1s,3059 +greenlet/slp_platformselect.h,sha256=J01Fd1y2sFLxkBfsixdpexCVWaAdeprDTEly-ujDQAk,3841 +greenlet/tests/__init__.py,sha256=sqxm7-dZuGBwmNI0n6xrcQJGoHHjoXUGyUTnvHidcYM,9361 
+greenlet/tests/__pycache__/__init__.cpython-312.pyc,, +greenlet/tests/__pycache__/fail_clearing_run_switches.cpython-312.pyc,, +greenlet/tests/__pycache__/fail_cpp_exception.cpython-312.pyc,, +greenlet/tests/__pycache__/fail_initialstub_already_started.cpython-312.pyc,, +greenlet/tests/__pycache__/fail_slp_switch.cpython-312.pyc,, +greenlet/tests/__pycache__/fail_switch_three_greenlets.cpython-312.pyc,, +greenlet/tests/__pycache__/fail_switch_three_greenlets2.cpython-312.pyc,, +greenlet/tests/__pycache__/fail_switch_two_greenlets.cpython-312.pyc,, +greenlet/tests/__pycache__/leakcheck.cpython-312.pyc,, +greenlet/tests/__pycache__/test_contextvars.cpython-312.pyc,, +greenlet/tests/__pycache__/test_cpp.cpython-312.pyc,, +greenlet/tests/__pycache__/test_extension_interface.cpython-312.pyc,, +greenlet/tests/__pycache__/test_gc.cpython-312.pyc,, +greenlet/tests/__pycache__/test_generator.cpython-312.pyc,, +greenlet/tests/__pycache__/test_generator_nested.cpython-312.pyc,, +greenlet/tests/__pycache__/test_greenlet.cpython-312.pyc,, +greenlet/tests/__pycache__/test_greenlet_trash.cpython-312.pyc,, +greenlet/tests/__pycache__/test_leaks.cpython-312.pyc,, +greenlet/tests/__pycache__/test_stack_saved.cpython-312.pyc,, +greenlet/tests/__pycache__/test_throw.cpython-312.pyc,, +greenlet/tests/__pycache__/test_tracing.cpython-312.pyc,, +greenlet/tests/__pycache__/test_version.cpython-312.pyc,, +greenlet/tests/__pycache__/test_weakref.cpython-312.pyc,, +greenlet/tests/_test_extension.c,sha256=vkeGA-6oeJcGILsD7oIrT1qZop2GaTOHXiNT7mcSl-0,5773 +greenlet/tests/_test_extension.cp312-win_amd64.pyd,sha256=X5_Cafkxp9i468CWv8cTgCCjgvQk7xrKjvw2c4aFvnQ,14336 +greenlet/tests/_test_extension_cpp.cp312-win_amd64.pyd,sha256=I6iwo9QMN8hhD3SYzIiqn3FJ9Hi2gO-9h__jx76YhZY,15872 +greenlet/tests/_test_extension_cpp.cpp,sha256=e0kVnaB8CCaEhE9yHtNyfqTjevsPDKKx-zgxk7PPK48,6565 +greenlet/tests/fail_clearing_run_switches.py,sha256=o433oA_nUCtOPaMEGc8VEhZIKa71imVHXFw7TsXaP8M,1263 +greenlet/tests/fail_cpp_exception.py,sha256=o_ZbipWikok8Bjc-vjiQvcb5FHh2nVW-McGKMLcMzh0,985 +greenlet/tests/fail_initialstub_already_started.py,sha256=txENn5IyzGx2p-XR1XB7qXmC8JX_4mKDEA8kYBXUQKc,1961 +greenlet/tests/fail_slp_switch.py,sha256=rJBZcZfTWR3e2ERQtPAud6YKShiDsP84PmwOJbp4ey0,524 +greenlet/tests/fail_switch_three_greenlets.py,sha256=zSitV7rkNnaoHYVzAGGLnxz-yPtohXJJzaE8ehFDQ0M,956 +greenlet/tests/fail_switch_three_greenlets2.py,sha256=FPJensn2EJxoropl03JSTVP3kgP33k04h6aDWWozrOk,1285 +greenlet/tests/fail_switch_two_greenlets.py,sha256=1CaI8s3504VbbF1vj1uBYuy-zxBHVzHPIAd1LIc8ONg,817 +greenlet/tests/leakcheck.py,sha256=inbfM7_oVzd8jIKGxCgo4JqpFZaDAnWPkSULJ8vIE1s,11964 +greenlet/tests/test_contextvars.py,sha256=xutO-qZgKTwKsA9lAqTjIcTBEiQV4RpNKM-vO2_YCVU,10541 +greenlet/tests/test_cpp.py,sha256=hpxhFAdKJTpAVZP8CBGs1ZcrKdscI9BaDZk4btkI5d4,2736 +greenlet/tests/test_extension_interface.py,sha256=eJ3cwLacdK2WbsrC-4DgeyHdwLRcG4zx7rrkRtqSzC4,3829 +greenlet/tests/test_gc.py,sha256=PCOaRpIyjNnNlDogGL3FZU_lrdXuM-pv1rxeE5TP5mc,2923 +greenlet/tests/test_generator.py,sha256=tONXiTf98VGm347o1b-810daPiwdla5cbpFg6QI1R1g,1240 +greenlet/tests/test_generator_nested.py,sha256=7v4HOYrf1XZP39dk5IUMubdZ8yc3ynwZcqj9GUJyMSA,3718 +greenlet/tests/test_greenlet.py,sha256=rYWDvMx7ZpMlQju9KRxsBR61ela7HSJCg98JtR7RPOQ,46251 +greenlet/tests/test_greenlet_trash.py,sha256=n2dBlQfOoEO1ODatFi8QdhboH3fB86YtqzcYMYOXxbw,7947 +greenlet/tests/test_leaks.py,sha256=Qeso_qH9MCWJOkk2I3VcTh7UhaNvWxrzAmNBta-fUyY,17714 
+greenlet/tests/test_stack_saved.py,sha256=eyzqNY2VCGuGlxhT_In6TvZ6Okb0AXFZVyBEnK1jDwA,446 +greenlet/tests/test_throw.py,sha256=u2TQ_WvvCd6N6JdXWIxVEcXkKu5fepDlz9dktYdmtng,3712 +greenlet/tests/test_tracing.py,sha256=VlwzMU0C1noospZhuUMyB7MHw200emIvGCN_6G2p2ZU,8250 +greenlet/tests/test_version.py,sha256=O9DpAITsOFgiRcjd4odQ7ejmwx_N9Q1zQENVcbtFHIc,1339 +greenlet/tests/test_weakref.py,sha256=F8M23btEF87bIbpptLNBORosbQqNZGiYeKMqYjWrsak,883 diff --git a/venv/Lib/site-packages/greenlet-3.2.2.dist-info/WHEEL b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/WHEEL new file mode 100644 index 00000000..154917b0 --- /dev/null +++ b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.3.1) +Root-Is-Purelib: false +Tag: cp312-cp312-win_amd64 + diff --git a/venv/Lib/site-packages/greenlet-3.2.2.dist-info/licenses/LICENSE b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/licenses/LICENSE new file mode 100644 index 00000000..b73a4a10 --- /dev/null +++ b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/licenses/LICENSE @@ -0,0 +1,30 @@ +The following files are derived from Stackless Python and are subject to the +same license as Stackless Python: + + src/greenlet/slp_platformselect.h + files in src/greenlet/platform/ directory + +See LICENSE.PSF and http://www.stackless.com/ for details. + +Unless otherwise noted, the files in greenlet have been released under the +following MIT license: + +Copyright (c) Armin Rigo, Christian Tismer and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/venv/Lib/site-packages/greenlet-3.2.2.dist-info/licenses/LICENSE.PSF b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/licenses/LICENSE.PSF new file mode 100644 index 00000000..d3b509a2 --- /dev/null +++ b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/licenses/LICENSE.PSF @@ -0,0 +1,47 @@ +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011 Python Software Foundation; All Rights Reserved" are retained in Python +alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. 
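The greenlet description above (in `greenlet-3.2.2.dist-info/METADATA`) claims that greenlets can recreate generators in which *nested* functions also yield values. Below is a minimal sketch of that pattern, assuming only the public greenlet API (`greenlet`, `switch`, `dead`, `getcurrent`); `GreenletGenerator` and `producer` are illustrative names, not part of the vendored package:

```python
# Sketch: a generator built on greenlet, where a nested helper can
# hand values back to the consumer -- something a plain `yield` cannot do.
from greenlet import greenlet, getcurrent


class GreenletGenerator:
    def __init__(self, producer):
        # The producer receives an emit() callback instead of using `yield`.
        self.child = greenlet(lambda: producer(self.emit))
        self.consumer = None

    def emit(self, value):
        # Switch back to whichever greenlet is iterating, from any call depth.
        self.consumer.switch(value)

    def __iter__(self):
        return self

    def __next__(self):
        self.consumer = getcurrent()
        value = self.child.switch()   # resume the producer
        if self.child.dead:           # producer returned: iteration is over
            raise StopIteration
        return value


def producer(emit):
    def nested(k):
        emit(k * k)  # "yielding" from inside a nested function
    for i in range(4):
        nested(i)


print(list(GreenletGenerator(producer)))  # -> [0, 1, 4, 9]
```

When the producer function returns, its greenlet dies and control falls back to the parent (the greenlet that created it), which is how `__next__` detects exhaustion through `child.dead`.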
diff --git a/venv/Lib/site-packages/greenlet-3.2.2.dist-info/top_level.txt b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/top_level.txt new file mode 100644 index 00000000..46725be4 --- /dev/null +++ b/venv/Lib/site-packages/greenlet-3.2.2.dist-info/top_level.txt @@ -0,0 +1 @@ +greenlet diff --git a/venv/Lib/site-packages/greenlet/CObjects.cpp b/venv/Lib/site-packages/greenlet/CObjects.cpp new file mode 100644 index 00000000..c135995b --- /dev/null +++ b/venv/Lib/site-packages/greenlet/CObjects.cpp @@ -0,0 +1,157 @@ +#ifndef COBJECTS_CPP +#define COBJECTS_CPP +/***************************************************************************** + * C interface + * + * These are exported using the CObject API + */ +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wunused-function" +#endif + +#include "greenlet_exceptions.hpp" + +#include "greenlet_internal.hpp" +#include "greenlet_refs.hpp" + + +#include "TThreadStateDestroy.cpp" + +#include "PyGreenlet.hpp" + +using greenlet::PyErrOccurred; +using greenlet::Require; + + + +extern "C" { +static PyGreenlet* +PyGreenlet_GetCurrent(void) +{ + return GET_THREAD_STATE().state().get_current().relinquish_ownership(); +} + +static int +PyGreenlet_SetParent(PyGreenlet* g, PyGreenlet* nparent) +{ + return green_setparent((PyGreenlet*)g, (PyObject*)nparent, NULL); +} + +static PyGreenlet* +PyGreenlet_New(PyObject* run, PyGreenlet* parent) +{ + using greenlet::refs::NewDictReference; + // In the past, we didn't use green_new and green_init, but that + // was a maintenance issue because we duplicated code. This way is + // much safer, but slightly slower. If that's a problem, we could + // refactor green_init to separate argument parsing from initialization. + OwnedGreenlet g = OwnedGreenlet::consuming(green_new(&PyGreenlet_Type, nullptr, nullptr)); + if (!g) { + return NULL; + } + + try { + NewDictReference kwargs; + if (run) { + kwargs.SetItem(mod_globs->str_run, run); + } + if (parent) { + kwargs.SetItem("parent", (PyObject*)parent); + } + + Require(green_init(g.borrow(), mod_globs->empty_tuple, kwargs.borrow())); + } + catch (const PyErrOccurred&) { + return nullptr; + } + + return g.relinquish_ownership(); +} + +static PyObject* +PyGreenlet_Switch(PyGreenlet* self, PyObject* args, PyObject* kwargs) +{ + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return NULL; + } + + if (args == NULL) { + args = mod_globs->empty_tuple; + } + + if (kwargs == NULL || !PyDict_Check(kwargs)) { + kwargs = NULL; + } + + return green_switch(self, args, kwargs); +} + +static PyObject* +PyGreenlet_Throw(PyGreenlet* self, PyObject* typ, PyObject* val, PyObject* tb) +{ + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return nullptr; + } + try { + PyErrPieces err_pieces(typ, val, tb); + return internal_green_throw(self, err_pieces).relinquish_ownership(); + } + catch (const PyErrOccurred&) { + return nullptr; + } +} + + + +static int +Extern_PyGreenlet_MAIN(PyGreenlet* self) +{ + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return -1; + } + return self->pimpl->main(); +} + +static int +Extern_PyGreenlet_ACTIVE(PyGreenlet* self) +{ + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return -1; + } + return self->pimpl->active(); +} + +static int +Extern_PyGreenlet_STARTED(PyGreenlet* self) +{ + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return -1; + } + return self->pimpl->started(); +} + +static PyGreenlet* +Extern_PyGreenlet_GET_PARENT(PyGreenlet* self) +{ + if (!PyGreenlet_Check(self)) { + 
PyErr_BadArgument();
+ return NULL;
+ }
+ // This can return NULL even if there is no exception
+ return self->pimpl->parent().acquire();
+}
+} // extern C.
+
+/** End C API ****************************************************************/
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
+
+
+#endif
diff --git a/venv/Lib/site-packages/greenlet/PyGreenlet.cpp b/venv/Lib/site-packages/greenlet/PyGreenlet.cpp
new file mode 100644
index 00000000..29c0bba0
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/PyGreenlet.cpp
@@ -0,0 +1,738 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+#ifndef PYGREENLET_CPP
+#define PYGREENLET_CPP
+/*****************
+The Python slot functions for TGreenlet.
+ */
+
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include "structmember.h" // PyMemberDef
+
+#include "greenlet_internal.hpp"
+#include "TThreadStateDestroy.cpp"
+#include "TGreenlet.hpp"
+// #include "TUserGreenlet.cpp"
+// #include "TMainGreenlet.cpp"
+// #include "TBrokenGreenlet.cpp"
+
+
+#include "greenlet_refs.hpp"
+#include "greenlet_slp_switch.hpp"
+
+#include "greenlet_thread_support.hpp"
+#include "TGreenlet.hpp"
+
+#include "TGreenletGlobals.cpp"
+#include "TThreadStateDestroy.cpp"
+#include "PyGreenlet.hpp"
+// #include "TGreenlet.cpp"
+
+// #include "TExceptionState.cpp"
+// #include "TPythonState.cpp"
+// #include "TStackState.cpp"
+
+using greenlet::LockGuard;
+using greenlet::LockInitError;
+using greenlet::PyErrOccurred;
+using greenlet::Require;
+
+using greenlet::g_handle_exit;
+using greenlet::single_result;
+
+using greenlet::Greenlet;
+using greenlet::UserGreenlet;
+using greenlet::MainGreenlet;
+using greenlet::BrokenGreenlet;
+using greenlet::ThreadState;
+using greenlet::PythonState;
+
+
+
+static PyGreenlet*
+green_new(PyTypeObject* type, PyObject* UNUSED(args), PyObject* UNUSED(kwds))
+{
+ PyGreenlet* o =
+ (PyGreenlet*)PyBaseObject_Type.tp_new(type, mod_globs->empty_tuple, mod_globs->empty_dict);
+ if (o) {
+ new UserGreenlet(o, GET_THREAD_STATE().state().borrow_current());
+ assert(Py_REFCNT(o) == 1);
+ }
+ return o;
+}
+
+
+// green_init is used in the tp_init slot. So it's important that
+// it can be called directly from CPython. Thus, we don't use
+// BorrowedGreenlet and BorrowedObject --- although in theory
+// these should be binary layout compatible, that may not be
+// guaranteed to be the case (32-bit linux ppc possibly).
+static int
+green_init(PyGreenlet* self, PyObject* args, PyObject* kwargs)
+{
+ PyArgParseParam run;
+ PyArgParseParam nparent;
+ static const char* kwlist[] = {
+ "run",
+ "parent",
+ NULL
+ };
+
+ // recall: The O specifier does NOT increase the reference count.
+ if (!PyArg_ParseTupleAndKeywords(
+ args, kwargs, "|OO:green", (char**)kwlist, &run, &nparent)) {
+ return -1;
+ }
+
+ if (run) {
+ if (green_setrun(self, run, NULL)) {
+ return -1;
+ }
+ }
+ if (nparent && !nparent.is_None()) {
+ return green_setparent(self, nparent, NULL);
+ }
+ return 0;
+}
+
+
+
+static int
+green_traverse(PyGreenlet* self, visitproc visit, void* arg)
+{
+ // We must only visit referenced objects, i.e. only objects
+ // Py_INCREF'ed by this greenlet (directly or indirectly):
+ //
+ // - stack_prev is not visited: holds previous stack pointer, but it's not
+ // referenced
+ // - frames are not visited as we don't strongly reference them;
+ // alive greenlets are not garbage collected
+ // anyway.
This can be a problem, however, if this greenlet is
+ // never allowed to finish, and is referenced from the frame: we
+ // have an uncollectible cycle in that case. Note that the
+ // frame object itself is also frequently not even tracked by the GC
+ // starting with Python 3.7 (frames are allocated by the
+ // interpreter untracked, and only become tracked when their
+ // evaluation is finished if they have a refcount > 1). All of
+ // this is to say that we should probably strongly reference
+ // the frame object. Doing so, while always allowing GC on a
+ // greenlet, solves several leaks for us.
+
+ Py_VISIT(self->dict);
+ if (!self->pimpl) {
+ // Hmm. I have seen this at interpreter shutdown time,
+ // I think. That's very odd because this doesn't go away until
+ // we're ``green_dealloc()``, at which point we shouldn't be
+ // traversed anymore.
+ return 0;
+ }
+
+ return self->pimpl->tp_traverse(visit, arg);
+}
+
+static int
+green_is_gc(PyObject* _self)
+{
+ BorrowedGreenlet self(_self);
+ int result = 0;
+ /* Main greenlet can be garbage collected since it can only
+ become unreachable if the underlying thread exited.
+ Active greenlets --- including those that are suspended ---
+ cannot be garbage collected, however.
+ */
+ if (self->main() || !self->active()) {
+ result = 1;
+ }
+ // The main greenlet pointer will eventually go away after the thread dies.
+ if (self->was_running_in_dead_thread()) {
+ // Our thread is dead! We can never run again. Might as well
+ // GC us. Note that if a tuple containing only us and other
+ // immutable objects had been scanned before this, when we
+ // would have returned 0, the tuple will take itself out of GC
+ // tracking and never be investigated again. So that could
+ // result in both us and the tuple leaking due to an
+ // unreachable/uncollectible reference. The same goes for
+ // dictionaries.
+ //
+ // It's not a great idea to be changing our GC state on the
+ // fly.
+ result = 1;
+ }
+ return result;
+}
+
+
+static int
+green_clear(PyGreenlet* self)
+{
+ /* Greenlet is only cleared if it is about to be collected.
+ Since active greenlets are not garbage collectable, we can
+ be sure that, even if they are deallocated during clear,
+ nothing they reference is in unreachable or finalizers,
+ so even if it switches we are relatively safe. */
+ // XXX: Are we responsible for clearing weakrefs here?
+ Py_CLEAR(self->dict);
+ return self->pimpl->tp_clear();
+}
+
+/**
+ * Returns 0 on failure (the object was resurrected) or 1 on success.
+ **/
+static int
+_green_dealloc_kill_started_non_main_greenlet(BorrowedGreenlet self)
+{
+ /* Hacks hacks hacks copied from instance_dealloc() */
+ /* Temporarily resurrect the greenlet. */
+ assert(self.REFCNT() == 0);
+ Py_SET_REFCNT(self.borrow(), 1);
+ /* Save the current exception, if any. */
+ PyErrPieces saved_err;
+ try {
+ // BY THE TIME WE GET HERE, the state may actually be going
+ // away
+ // if we're shutting down the interpreter and freeing thread
+ // entries,
+ // this could result in freeing greenlets that were leaked. So
+ // we can't try to read the state.
+ self->deallocing_greenlet_in_thread(
+ self->thread_state()
+ ? static_cast<ThreadState*>(GET_THREAD_STATE())
+ : nullptr);
+ }
+ catch (const PyErrOccurred&) {
+ PyErr_WriteUnraisable(self.borrow_o());
+ /* XXX what else should we do?
*/ + } + /* Check for no resurrection must be done while we keep + * our internal reference, otherwise PyFile_WriteObject + * causes recursion if using Py_INCREF/Py_DECREF + */ + if (self.REFCNT() == 1 && self->active()) { + /* Not resurrected, but still not dead! + XXX what else should we do? we complain. */ + PyObject* f = PySys_GetObject("stderr"); + Py_INCREF(self.borrow_o()); /* leak! */ + if (f != NULL) { + PyFile_WriteString("GreenletExit did not kill ", f); + PyFile_WriteObject(self.borrow_o(), f, 0); + PyFile_WriteString("\n", f); + } + } + /* Restore the saved exception. */ + saved_err.PyErrRestore(); + /* Undo the temporary resurrection; can't use DECREF here, + * it would cause a recursive call. + */ + assert(self.REFCNT() > 0); + + Py_ssize_t refcnt = self.REFCNT() - 1; + Py_SET_REFCNT(self.borrow_o(), refcnt); + if (refcnt != 0) { + /* Resurrected! */ + _Py_NewReference(self.borrow_o()); + Py_SET_REFCNT(self.borrow_o(), refcnt); + /* Better to use tp_finalizer slot (PEP 442) + * and call ``PyObject_CallFinalizerFromDealloc``, + * but that's only supported in Python 3.4+; see + * Modules/_io/iobase.c for an example. + * + * The following approach is copied from iobase.c in CPython 2.7. + * (along with much of this function in general). Here's their + * comment: + * + * When called from a heap type's dealloc, the type will be + * decref'ed on return (see e.g. subtype_dealloc in typeobject.c). */ + if (PyType_HasFeature(self.TYPE(), Py_TPFLAGS_HEAPTYPE)) { + Py_INCREF(self.TYPE()); + } + + PyObject_GC_Track((PyObject*)self); + + _Py_DEC_REFTOTAL; +#ifdef COUNT_ALLOCS + --Py_TYPE(self)->tp_frees; + --Py_TYPE(self)->tp_allocs; +#endif /* COUNT_ALLOCS */ + return 0; + } + return 1; +} + + +static void +green_dealloc(PyGreenlet* self) +{ + PyObject_GC_UnTrack(self); + BorrowedGreenlet me(self); + if (me->active() + && me->started() + && !me->main()) { + if (!_green_dealloc_kill_started_non_main_greenlet(me)) { + return; + } + } + + if (self->weakreflist != NULL) { + PyObject_ClearWeakRefs((PyObject*)self); + } + Py_CLEAR(self->dict); + + if (self->pimpl) { + // In case deleting this, which frees some memory, + // somehow winds up calling back into us. That's usually a + //bug in our code. + Greenlet* p = self->pimpl; + self->pimpl = nullptr; + delete p; + } + // and finally we're done. self is now invalid. 
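+ // For the greenlet types defined here, tp_free is PyObject_GC_Del
+ // (see the type definitions below), so this releases the GC-tracked allocation.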
+ Py_TYPE(self)->tp_free((PyObject*)self); +} + + + +static OwnedObject +internal_green_throw(BorrowedGreenlet self, PyErrPieces& err_pieces) +{ + PyObject* result = nullptr; + err_pieces.PyErrRestore(); + assert(PyErr_Occurred()); + if (self->started() && !self->active()) { + /* dead greenlet: turn GreenletExit into a regular return */ + result = g_handle_exit(OwnedObject()).relinquish_ownership(); + } + self->args() <<= result; + + return single_result(self->g_switch()); +} + + + +PyDoc_STRVAR( + green_switch_doc, + "switch(*args, **kwargs)\n" + "\n" + "Switch execution to this greenlet.\n" + "\n" + "If this greenlet has never been run, then this greenlet\n" + "will be switched to using the body of ``self.run(*args, **kwargs)``.\n" + "\n" + "If the greenlet is active (has been run, but was switch()'ed\n" + "out before leaving its run function), then this greenlet will\n" + "be resumed and the return value to its switch call will be\n" + "None if no arguments are given, the given argument if one\n" + "argument is given, or the args tuple and keyword args dict if\n" + "multiple arguments are given.\n" + "\n" + "If the greenlet is dead, or is the current greenlet then this\n" + "function will simply return the arguments using the same rules as\n" + "above.\n"); + +static PyObject* +green_switch(PyGreenlet* self, PyObject* args, PyObject* kwargs) +{ + using greenlet::SwitchingArgs; + SwitchingArgs switch_args(OwnedObject::owning(args), OwnedObject::owning(kwargs)); + self->pimpl->may_switch_away(); + self->pimpl->args() <<= switch_args; + + // If we're switching out of a greenlet, and that switch is the + // last thing the greenlet does, the greenlet ought to be able to + // go ahead and die at that point. Currently, someone else must + // manually switch back to the greenlet so that we "fall off the + // end" and can perform cleanup. You'd think we'd be able to + // figure out that this is happening using the frame's ``f_lasti`` + // member, which is supposed to be an index into + // ``frame->f_code->co_code``, the bytecode string. However, in + // recent interpreters, ``f_lasti`` tends not to be updated thanks + // to things like the PREDICT() macros in ceval.c. So it doesn't + // really work to do that in many cases. For example, the Python + // code: + // def run(): + // greenlet.getcurrent().parent.switch() + // produces bytecode of len 16, with the actual call to switch() + // being at index 10 (in Python 3.10). However, the reported + // ``f_lasti`` we actually see is...5! (Which happens to be the + // second byte of the CALL_METHOD op for ``getcurrent()``). + + try { + //OwnedObject result = single_result(self->pimpl->g_switch()); + OwnedObject result(single_result(self->pimpl->g_switch())); +#ifndef NDEBUG + // Note that the current greenlet isn't necessarily self. If self + // finished, we went to one of its parents. + assert(!self->pimpl->args()); + + const BorrowedGreenlet& current = GET_THREAD_STATE().state().borrow_current(); + // It's possible it's never been switched to. + assert(!current->args()); +#endif + PyObject* p = result.relinquish_ownership(); + + if (!p && !PyErr_Occurred()) { + // This shouldn't be happening anymore, so the asserts + // are there for debug builds. Non-debug builds + // crash "gracefully" in this case, although there is an + // argument to be made for killing the process in all + // cases --- for this to be the case, our switches + // probably nested in an incorrect way, so the state is + // suspicious. 
Nothing should be corrupt though, just + // confused at the Python level. Letting this propagate is + // probably good enough. + assert(p || PyErr_Occurred()); + throw PyErrOccurred( + mod_globs->PyExc_GreenletError, + "Greenlet.switch() returned NULL without an exception set." + ); + } + return p; + } + catch(const PyErrOccurred&) { + return nullptr; + } +} + +PyDoc_STRVAR( + green_throw_doc, + "Switches execution to this greenlet, but immediately raises the\n" + "given exception in this greenlet. If no argument is provided, the " + "exception\n" + "defaults to `greenlet.GreenletExit`. The normal exception\n" + "propagation rules apply, as described for `switch`. Note that calling " + "this\n" + "method is almost equivalent to the following::\n" + "\n" + " def raiser():\n" + " raise typ, val, tb\n" + " g_raiser = greenlet(raiser, parent=g)\n" + " g_raiser.switch()\n" + "\n" + "except that this trick does not work for the\n" + "`greenlet.GreenletExit` exception, which would not propagate\n" + "from ``g_raiser`` to ``g``.\n"); + +static PyObject* +green_throw(PyGreenlet* self, PyObject* args) +{ + PyArgParseParam typ(mod_globs->PyExc_GreenletExit); + PyArgParseParam val; + PyArgParseParam tb; + + if (!PyArg_ParseTuple(args, "|OOO:throw", &typ, &val, &tb)) { + return nullptr; + } + + assert(typ.borrow() || val.borrow()); + + self->pimpl->may_switch_away(); + try { + // Both normalizing the error and the actual throw_greenlet + // could throw PyErrOccurred. + PyErrPieces err_pieces(typ.borrow(), val.borrow(), tb.borrow()); + + return internal_green_throw(self, err_pieces).relinquish_ownership(); + } + catch (const PyErrOccurred&) { + return nullptr; + } +} + +static int +green_bool(PyGreenlet* self) +{ + return self->pimpl->active(); +} + +/** + * CAUTION: Allocates memory, may run GC and arbitrary Python code. + */ +static PyObject* +green_getdict(PyGreenlet* self, void* UNUSED(context)) +{ + if (self->dict == NULL) { + self->dict = PyDict_New(); + if (self->dict == NULL) { + return NULL; + } + } + Py_INCREF(self->dict); + return self->dict; +} + +static int +green_setdict(PyGreenlet* self, PyObject* val, void* UNUSED(context)) +{ + PyObject* tmp; + + if (val == NULL) { + PyErr_SetString(PyExc_TypeError, "__dict__ may not be deleted"); + return -1; + } + if (!PyDict_Check(val)) { + PyErr_SetString(PyExc_TypeError, "__dict__ must be a dictionary"); + return -1; + } + tmp = self->dict; + Py_INCREF(val); + self->dict = val; + Py_XDECREF(tmp); + return 0; +} + +static bool +_green_not_dead(BorrowedGreenlet self) +{ + // XXX: Where else should we do this? + // Probably on entry to most Python-facing functions? 
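+ // A greenlet stranded by an exited thread can never be resumed, so its
+ // state is freed here and it is reported as dead even if it never finished.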
+ if (self->was_running_in_dead_thread()) {
+ self->deactivate_and_free();
+ return false;
+ }
+ return self->active() || !self->started();
+}
+
+
+static PyObject*
+green_getdead(PyGreenlet* self, void* UNUSED(context))
+{
+ if (_green_not_dead(self)) {
+ Py_RETURN_FALSE;
+ }
+ else {
+ Py_RETURN_TRUE;
+ }
+}
+
+static PyObject*
+green_get_stack_saved(PyGreenlet* self, void* UNUSED(context))
+{
+ return PyLong_FromSsize_t(self->pimpl->stack_saved());
+}
+
+
+static PyObject*
+green_getrun(PyGreenlet* self, void* UNUSED(context))
+{
+ try {
+ OwnedObject result(BorrowedGreenlet(self)->run());
+ return result.relinquish_ownership();
+ }
+ catch(const PyErrOccurred&) {
+ return nullptr;
+ }
+}
+
+
+static int
+green_setrun(PyGreenlet* self, PyObject* nrun, void* UNUSED(context))
+{
+ try {
+ BorrowedGreenlet(self)->run(nrun);
+ return 0;
+ }
+ catch(const PyErrOccurred&) {
+ return -1;
+ }
+}
+
+static PyObject*
+green_getparent(PyGreenlet* self, void* UNUSED(context))
+{
+ return BorrowedGreenlet(self)->parent().acquire_or_None();
+}
+
+
+static int
+green_setparent(PyGreenlet* self, PyObject* nparent, void* UNUSED(context))
+{
+ try {
+ BorrowedGreenlet(self)->parent(nparent);
+ }
+ catch(const PyErrOccurred&) {
+ return -1;
+ }
+ return 0;
+}
+
+
+static PyObject*
+green_getcontext(const PyGreenlet* self, void* UNUSED(context))
+{
+ const Greenlet *const g = self->pimpl;
+ try {
+ OwnedObject result(g->context());
+ return result.relinquish_ownership();
+ }
+ catch(const PyErrOccurred&) {
+ return nullptr;
+ }
+}
+
+static int
+green_setcontext(PyGreenlet* self, PyObject* nctx, void* UNUSED(context))
+{
+ try {
+ BorrowedGreenlet(self)->context(nctx);
+ return 0;
+ }
+ catch(const PyErrOccurred&) {
+ return -1;
+ }
+}
+
+
+static PyObject*
+green_getframe(PyGreenlet* self, void* UNUSED(context))
+{
+ const PythonState::OwnedFrame& top_frame = BorrowedGreenlet(self)->top_frame();
+ return top_frame.acquire_or_None();
+}
+
+
+static PyObject*
+green_getstate(PyGreenlet* self)
+{
+ PyErr_Format(PyExc_TypeError,
+ "cannot serialize '%s' object",
+ Py_TYPE(self)->tp_name);
+ return nullptr;
+}
+
+static PyObject*
+green_repr(PyGreenlet* _self)
+{
+ BorrowedGreenlet self(_self);
+ /*
+ Return a string like
+ <greenlet.greenlet object at 0x... (otid=0x...) current active started>
+
+ The handling of greenlets across threads is not super good.
+ We mostly use the internal definitions of these terms, but they
+ generally should make sense to users as well.
+ */
+ PyObject* result;
+ int never_started = !self->started() && !self->active();
+
+ const char* const tp_name = Py_TYPE(self)->tp_name;
+
+ if (_green_not_dead(self)) {
+ /* XXX: The otid= is almost useless because you can't correlate it to
+ any thread identifier exposed to Python. We could use
+ PyThreadState_GET()->thread_id, but we'd need to save that in the
+ greenlet, or save the whole PyThreadState object itself.
+
+ As it stands, it's only useful for identifying greenlets from the same thread.
+ */
+ const char* state_in_thread;
+ if (self->was_running_in_dead_thread()) {
+ // The thread it was running in is dead!
+ // This can happen, especially at interpreter shut down.
+ // It complicates debugging output because it may be
+ // impossible to access the current thread state at that
+ // time. Thus, don't access the current thread state.
+ state_in_thread = " (thread exited)";
+ }
+ else {
+ state_in_thread = GET_THREAD_STATE().state().is_current(self)
+ ? " current"
+ : (self->started() ?
" suspended" : ""); + } + result = PyUnicode_FromFormat( + "<%s object at %p (otid=%p)%s%s%s%s>", + tp_name, + self.borrow_o(), + self->thread_state(), + state_in_thread, + self->active() ? " active" : "", + never_started ? " pending" : " started", + self->main() ? " main" : "" + ); + } + else { + result = PyUnicode_FromFormat( + "<%s object at %p (otid=%p) %sdead>", + tp_name, + self.borrow_o(), + self->thread_state(), + self->was_running_in_dead_thread() + ? "(thread exited) " + : "" + ); + } + + return result; +} + + +static PyMethodDef green_methods[] = { + { + .ml_name="switch", + .ml_meth=reinterpret_cast(green_switch), + .ml_flags=METH_VARARGS | METH_KEYWORDS, + .ml_doc=green_switch_doc + }, + {.ml_name="throw", .ml_meth=(PyCFunction)green_throw, .ml_flags=METH_VARARGS, .ml_doc=green_throw_doc}, + {.ml_name="__getstate__", .ml_meth=(PyCFunction)green_getstate, .ml_flags=METH_NOARGS, .ml_doc=NULL}, + {.ml_name=NULL, .ml_meth=NULL} /* sentinel */ +}; + +static PyGetSetDef green_getsets[] = { + /* name, getter, setter, doc, context pointer */ + {.name="__dict__", .get=(getter)green_getdict, .set=(setter)green_setdict}, + {.name="run", .get=(getter)green_getrun, .set=(setter)green_setrun}, + {.name="parent", .get=(getter)green_getparent, .set=(setter)green_setparent}, + {.name="gr_frame", .get=(getter)green_getframe }, + { + .name="gr_context", + .get=(getter)green_getcontext, + .set=(setter)green_setcontext + }, + {.name="dead", .get=(getter)green_getdead}, + {.name="_stack_saved", .get=(getter)green_get_stack_saved}, + {.name=NULL} +}; + +static PyMemberDef green_members[] = { + {.name=NULL} +}; + +static PyNumberMethods green_as_number = { + .nb_bool=(inquiry)green_bool, +}; + + +PyTypeObject PyGreenlet_Type = { + .ob_base=PyVarObject_HEAD_INIT(NULL, 0) + .tp_name="greenlet.greenlet", /* tp_name */ + .tp_basicsize=sizeof(PyGreenlet), /* tp_basicsize */ + /* methods */ + .tp_dealloc=(destructor)green_dealloc, /* tp_dealloc */ + .tp_repr=(reprfunc)green_repr, /* tp_repr */ + .tp_as_number=&green_as_number, /* tp_as _number*/ + .tp_flags=G_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ + .tp_doc="greenlet(run=None, parent=None) -> greenlet\n\n" + "Creates a new greenlet object (without running it).\n\n" + " - *run* -- The callable to invoke.\n" + " - *parent* -- The parent greenlet. 
The default is the current " + "greenlet.", /* tp_doc */ + .tp_traverse=(traverseproc)green_traverse, /* tp_traverse */ + .tp_clear=(inquiry)green_clear, /* tp_clear */ + .tp_weaklistoffset=offsetof(PyGreenlet, weakreflist), /* tp_weaklistoffset */ + + .tp_methods=green_methods, /* tp_methods */ + .tp_members=green_members, /* tp_members */ + .tp_getset=green_getsets, /* tp_getset */ + .tp_dictoffset=offsetof(PyGreenlet, dict), /* tp_dictoffset */ + .tp_init=(initproc)green_init, /* tp_init */ + .tp_alloc=PyType_GenericAlloc, /* tp_alloc */ + .tp_new=(newfunc)green_new, /* tp_new */ + .tp_free=PyObject_GC_Del, /* tp_free */ + .tp_is_gc=(inquiry)green_is_gc, /* tp_is_gc */ +}; + +#endif + +// Local Variables: +// flycheck-clang-include-path: ("/opt/local/Library/Frameworks/Python.framework/Versions/3.8/include/python3.8") +// End: diff --git a/venv/Lib/site-packages/greenlet/PyGreenlet.hpp b/venv/Lib/site-packages/greenlet/PyGreenlet.hpp new file mode 100644 index 00000000..df6cd805 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/PyGreenlet.hpp @@ -0,0 +1,35 @@ +#ifndef PYGREENLET_HPP +#define PYGREENLET_HPP + + +#include "greenlet.h" +#include "greenlet_compiler_compat.hpp" +#include "greenlet_refs.hpp" + + +using greenlet::refs::OwnedGreenlet; +using greenlet::refs::BorrowedGreenlet; +using greenlet::refs::BorrowedObject;; +using greenlet::refs::OwnedObject; +using greenlet::refs::PyErrPieces; + + +// XXX: These doesn't really belong here, it's not a Python slot. +static OwnedObject internal_green_throw(BorrowedGreenlet self, PyErrPieces& err_pieces); + +static PyGreenlet* green_new(PyTypeObject* type, PyObject* UNUSED(args), PyObject* UNUSED(kwds)); +static int green_clear(PyGreenlet* self); +static int green_init(PyGreenlet* self, PyObject* args, PyObject* kwargs); +static int green_setparent(PyGreenlet* self, PyObject* nparent, void* UNUSED(context)); +static int green_setrun(PyGreenlet* self, PyObject* nrun, void* UNUSED(context)); +static int green_traverse(PyGreenlet* self, visitproc visit, void* arg); +static void green_dealloc(PyGreenlet* self); +static PyObject* green_getparent(PyGreenlet* self, void* UNUSED(context)); + +static int green_is_gc(PyObject* self); +static PyObject* green_getdead(PyGreenlet* self, void* UNUSED(context)); +static PyObject* green_getrun(PyGreenlet* self, void* UNUSED(context)); +static int green_setcontext(PyGreenlet* self, PyObject* nctx, void* UNUSED(context)); +static PyObject* green_getframe(PyGreenlet* self, void* UNUSED(context)); +static PyObject* green_repr(PyGreenlet* self); +#endif diff --git a/venv/Lib/site-packages/greenlet/PyGreenletUnswitchable.cpp b/venv/Lib/site-packages/greenlet/PyGreenletUnswitchable.cpp new file mode 100644 index 00000000..1b768ee3 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/PyGreenletUnswitchable.cpp @@ -0,0 +1,147 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +/** + Implementation of the Python slots for PyGreenletUnswitchable_Type +*/ +#ifndef PY_GREENLET_UNSWITCHABLE_CPP +#define PY_GREENLET_UNSWITCHABLE_CPP + + + +#define PY_SSIZE_T_CLEAN +#include +#include "structmember.h" // PyMemberDef + +#include "greenlet_internal.hpp" +// Code after this point can assume access to things declared in stdint.h, +// including the fixed-width types. This goes for the platform-specific switch functions +// as well. 
+#include "greenlet_refs.hpp" +#include "greenlet_slp_switch.hpp" + +#include "greenlet_thread_support.hpp" +#include "TGreenlet.hpp" + +#include "TGreenlet.cpp" +#include "TGreenletGlobals.cpp" +#include "TThreadStateDestroy.cpp" + + +using greenlet::LockGuard; +using greenlet::LockInitError; +using greenlet::PyErrOccurred; +using greenlet::Require; + +using greenlet::g_handle_exit; +using greenlet::single_result; + +using greenlet::Greenlet; +using greenlet::UserGreenlet; +using greenlet::MainGreenlet; +using greenlet::BrokenGreenlet; +using greenlet::ThreadState; +using greenlet::PythonState; + + +#include "PyGreenlet.hpp" + +static PyGreenlet* +green_unswitchable_new(PyTypeObject* type, PyObject* UNUSED(args), PyObject* UNUSED(kwds)) +{ + PyGreenlet* o = + (PyGreenlet*)PyBaseObject_Type.tp_new(type, mod_globs->empty_tuple, mod_globs->empty_dict); + if (o) { + new BrokenGreenlet(o, GET_THREAD_STATE().state().borrow_current()); + assert(Py_REFCNT(o) == 1); + } + return o; +} + +static PyObject* +green_unswitchable_getforce(PyGreenlet* self, void* UNUSED(context)) +{ + BrokenGreenlet* broken = dynamic_cast(self->pimpl); + return PyBool_FromLong(broken->_force_switch_error); +} + +static int +green_unswitchable_setforce(PyGreenlet* self, PyObject* nforce, void* UNUSED(context)) +{ + if (!nforce) { + PyErr_SetString( + PyExc_AttributeError, + "Cannot delete force_switch_error" + ); + return -1; + } + BrokenGreenlet* broken = dynamic_cast(self->pimpl); + int is_true = PyObject_IsTrue(nforce); + if (is_true == -1) { + return -1; + } + broken->_force_switch_error = is_true; + return 0; +} + +static PyObject* +green_unswitchable_getforceslp(PyGreenlet* self, void* UNUSED(context)) +{ + BrokenGreenlet* broken = dynamic_cast(self->pimpl); + return PyBool_FromLong(broken->_force_slp_switch_error); +} + +static int +green_unswitchable_setforceslp(PyGreenlet* self, PyObject* nforce, void* UNUSED(context)) +{ + if (!nforce) { + PyErr_SetString( + PyExc_AttributeError, + "Cannot delete force_slp_switch_error" + ); + return -1; + } + BrokenGreenlet* broken = dynamic_cast(self->pimpl); + int is_true = PyObject_IsTrue(nforce); + if (is_true == -1) { + return -1; + } + broken->_force_slp_switch_error = is_true; + return 0; +} + +static PyGetSetDef green_unswitchable_getsets[] = { + /* name, getter, setter, doc, closure (context pointer) */ + { + .name="force_switch_error", + .get=(getter)green_unswitchable_getforce, + .set=(setter)green_unswitchable_setforce, + .doc=NULL + }, + { + .name="force_slp_switch_error", + .get=(getter)green_unswitchable_getforceslp, + .set=(setter)green_unswitchable_setforceslp, + .doc=nullptr + }, + {.name=nullptr} +}; + +PyTypeObject PyGreenletUnswitchable_Type = { + .ob_base=PyVarObject_HEAD_INIT(NULL, 0) + .tp_name="greenlet._greenlet.UnswitchableGreenlet", + .tp_dealloc= (destructor)green_dealloc, /* tp_dealloc */ + .tp_flags=G_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ + .tp_doc="Undocumented internal class", /* tp_doc */ + .tp_traverse=(traverseproc)green_traverse, /* tp_traverse */ + .tp_clear=(inquiry)green_clear, /* tp_clear */ + + .tp_getset=green_unswitchable_getsets, /* tp_getset */ + .tp_base=&PyGreenlet_Type, /* tp_base */ + .tp_init=(initproc)green_init, /* tp_init */ + .tp_alloc=PyType_GenericAlloc, /* tp_alloc */ + .tp_new=(newfunc)green_unswitchable_new, /* tp_new */ + .tp_free=PyObject_GC_Del, /* tp_free */ + .tp_is_gc=(inquiry)green_is_gc, /* tp_is_gc */ +}; + + +#endif diff --git a/venv/Lib/site-packages/greenlet/PyModule.cpp 
b/venv/Lib/site-packages/greenlet/PyModule.cpp new file mode 100644 index 00000000..6adcb5c3 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/PyModule.cpp @@ -0,0 +1,292 @@ +#ifndef PY_MODULE_CPP +#define PY_MODULE_CPP + +#include "greenlet_internal.hpp" + + +#include "TGreenletGlobals.cpp" +#include "TMainGreenlet.cpp" +#include "TThreadStateDestroy.cpp" + +using greenlet::LockGuard; +using greenlet::ThreadState; + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wunused-function" +# pragma clang diagnostic ignored "-Wunused-variable" +#endif + +PyDoc_STRVAR(mod_getcurrent_doc, + "getcurrent() -> greenlet\n" + "\n" + "Returns the current greenlet (i.e. the one which called this " + "function).\n"); + +static PyObject* +mod_getcurrent(PyObject* UNUSED(module)) +{ + return GET_THREAD_STATE().state().get_current().relinquish_ownership_o(); +} + +PyDoc_STRVAR(mod_settrace_doc, + "settrace(callback) -> object\n" + "\n" + "Sets a new tracing function and returns the previous one.\n"); +static PyObject* +mod_settrace(PyObject* UNUSED(module), PyObject* args) +{ + PyArgParseParam tracefunc; + if (!PyArg_ParseTuple(args, "O", &tracefunc)) { + return NULL; + } + ThreadState& state = GET_THREAD_STATE(); + OwnedObject previous = state.get_tracefunc(); + if (!previous) { + previous = Py_None; + } + + state.set_tracefunc(tracefunc); + + return previous.relinquish_ownership(); +} + +PyDoc_STRVAR(mod_gettrace_doc, + "gettrace() -> object\n" + "\n" + "Returns the currently set tracing function, or None.\n"); + +static PyObject* +mod_gettrace(PyObject* UNUSED(module)) +{ + OwnedObject tracefunc = GET_THREAD_STATE().state().get_tracefunc(); + if (!tracefunc) { + tracefunc = Py_None; + } + return tracefunc.relinquish_ownership(); +} + + + +PyDoc_STRVAR(mod_set_thread_local_doc, + "set_thread_local(key, value) -> None\n" + "\n" + "Set a value in the current thread-local dictionary. Debugging only.\n"); + +static PyObject* +mod_set_thread_local(PyObject* UNUSED(module), PyObject* args) +{ + PyArgParseParam key; + PyArgParseParam value; + PyObject* result = NULL; + + if (PyArg_UnpackTuple(args, "set_thread_local", 2, 2, &key, &value)) { + if(PyDict_SetItem( + PyThreadState_GetDict(), // borrow + key, + value) == 0 ) { + // success + Py_INCREF(Py_None); + result = Py_None; + } + } + return result; +} + +PyDoc_STRVAR(mod_get_pending_cleanup_count_doc, + "get_pending_cleanup_count() -> Integer\n" + "\n" + "Get the number of greenlet cleanup operations pending. Testing only.\n"); + + +static PyObject* +mod_get_pending_cleanup_count(PyObject* UNUSED(module)) +{ + LockGuard cleanup_lock(*mod_globs->thread_states_to_destroy_lock); + return PyLong_FromSize_t(mod_globs->thread_states_to_destroy.size()); +} + +PyDoc_STRVAR(mod_get_total_main_greenlets_doc, + "get_total_main_greenlets() -> Integer\n" + "\n" + "Quickly return the number of main greenlets that exist. Testing only.\n"); + +static PyObject* +mod_get_total_main_greenlets(PyObject* UNUSED(module)) +{ + return PyLong_FromSize_t(G_TOTAL_MAIN_GREENLETS); +} + + + +PyDoc_STRVAR(mod_get_clocks_used_doing_optional_cleanup_doc, + "get_clocks_used_doing_optional_cleanup() -> Integer\n" + "\n" + "Get the number of clock ticks the program has used doing optional " + "greenlet cleanup.\n" + "Beginning in greenlet 2.0, greenlet tries to find and dispose of greenlets\n" + "that leaked after a thread exited. 
This requires invoking Python's garbage collector,\n"
+             "which may have a performance cost proportional to the number of live objects.\n"
+             "This function returns the amount of processor time\n"
+             "greenlet has used to do this. In programs that run with very large amounts of live\n"
+             "objects, this metric can be used to decide whether the cost of doing this cleanup\n"
+             "is worth the memory leak being corrected. If not, you can disable the cleanup\n"
+             "using ``enable_optional_cleanup(False)``.\n"
+             "The units are arbitrary and can only be compared to themselves (similarly to ``time.clock()``);\n"
+             "for example, to see how it scales with your heap. You can attempt to convert them into seconds\n"
+             "by dividing by the value of CLOCKS_PER_SEC.\n"
+             "If cleanup has been disabled, returns None.\n"
+             "\n"
+             "This is an implementation specific, provisional API. It may be changed or removed\n"
+             "in the future.\n"
+             ".. versionadded:: 2.0"
+    );
+static PyObject*
+mod_get_clocks_used_doing_optional_cleanup(PyObject* UNUSED(module))
+{
+    std::clock_t& clocks = ThreadState::clocks_used_doing_gc();
+
+    if (clocks == std::clock_t(-1)) {
+        Py_RETURN_NONE;
+    }
+    // This might not actually work on some implementations; clock_t
+    // is an opaque type.
+    return PyLong_FromSsize_t(clocks);
+}
+
+PyDoc_STRVAR(mod_enable_optional_cleanup_doc,
+             "enable_optional_cleanup(bool) -> None\n"
+             "\n"
+             "Enable or disable optional cleanup operations.\n"
+             "See ``get_clocks_used_doing_optional_cleanup()`` for details.\n"
+    );
+static PyObject*
+mod_enable_optional_cleanup(PyObject* UNUSED(module), PyObject* flag)
+{
+    int is_true = PyObject_IsTrue(flag);
+    if (is_true == -1) {
+        return nullptr;
+    }
+
+    std::clock_t& clocks = ThreadState::clocks_used_doing_gc();
+    if (is_true) {
+        // If we already have a value, we don't want to lose it.
+        if (clocks == std::clock_t(-1)) {
+            clocks = 0;
+        }
+    }
+    else {
+        clocks = std::clock_t(-1);
+    }
+    Py_RETURN_NONE;
+}
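Taken together, the two functions above form a crude cost meter for the optional leaked-greenlet cleanup. A minimal sketch of using them, assuming they are reachable on the internal `greenlet._greenlet` module named in the module definition below:

```python
from greenlet import _greenlet

_greenlet.enable_optional_cleanup(True)
before = _greenlet.get_clocks_used_doing_optional_cleanup()
# ... run threads that exit while their greenlets are still alive ...
after = _greenlet.get_clocks_used_doing_optional_cleanup()
print("cleanup cost in arbitrary clock units:", after - before)

_greenlet.enable_optional_cleanup(False)
print(_greenlet.get_clocks_used_doing_optional_cleanup())  # None once disabled
```

+#if !GREENLET_PY313
+PyDoc_STRVAR(mod_get_tstate_trash_delete_nesting_doc,
+             "get_tstate_trash_delete_nesting() -> Integer\n"
+             "\n"
+             "Return the 'trash can' nesting level. 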
Testing only.\n"); +static PyObject* +mod_get_tstate_trash_delete_nesting(PyObject* UNUSED(module)) +{ + PyThreadState* tstate = PyThreadState_GET(); + +#if GREENLET_PY312 + return PyLong_FromLong(tstate->trash.delete_nesting); +#else + return PyLong_FromLong(tstate->trash_delete_nesting); +#endif +} +#endif + + + + +static PyMethodDef GreenMethods[] = { + { + .ml_name="getcurrent", + .ml_meth=(PyCFunction)mod_getcurrent, + .ml_flags=METH_NOARGS, + .ml_doc=mod_getcurrent_doc + }, + { + .ml_name="settrace", + .ml_meth=(PyCFunction)mod_settrace, + .ml_flags=METH_VARARGS, + .ml_doc=mod_settrace_doc + }, + { + .ml_name="gettrace", + .ml_meth=(PyCFunction)mod_gettrace, + .ml_flags=METH_NOARGS, + .ml_doc=mod_gettrace_doc + }, + { + .ml_name="set_thread_local", + .ml_meth=(PyCFunction)mod_set_thread_local, + .ml_flags=METH_VARARGS, + .ml_doc=mod_set_thread_local_doc + }, + { + .ml_name="get_pending_cleanup_count", + .ml_meth=(PyCFunction)mod_get_pending_cleanup_count, + .ml_flags=METH_NOARGS, + .ml_doc=mod_get_pending_cleanup_count_doc + }, + { + .ml_name="get_total_main_greenlets", + .ml_meth=(PyCFunction)mod_get_total_main_greenlets, + .ml_flags=METH_NOARGS, + .ml_doc=mod_get_total_main_greenlets_doc + }, + { + .ml_name="get_clocks_used_doing_optional_cleanup", + .ml_meth=(PyCFunction)mod_get_clocks_used_doing_optional_cleanup, + .ml_flags=METH_NOARGS, + .ml_doc=mod_get_clocks_used_doing_optional_cleanup_doc + }, + { + .ml_name="enable_optional_cleanup", + .ml_meth=(PyCFunction)mod_enable_optional_cleanup, + .ml_flags=METH_O, + .ml_doc=mod_enable_optional_cleanup_doc + }, +#if !GREENLET_PY313 + { + .ml_name="get_tstate_trash_delete_nesting", + .ml_meth=(PyCFunction)mod_get_tstate_trash_delete_nesting, + .ml_flags=METH_NOARGS, + .ml_doc=mod_get_tstate_trash_delete_nesting_doc + }, +#endif + {.ml_name=NULL, .ml_meth=NULL} /* Sentinel */ +}; + +static const char* const copy_on_greentype[] = { + "getcurrent", + "error", + "GreenletExit", + "settrace", + "gettrace", + NULL +}; + +static struct PyModuleDef greenlet_module_def = { + .m_base=PyModuleDef_HEAD_INIT, + .m_name="greenlet._greenlet", + .m_doc=NULL, + .m_size=-1, + .m_methods=GreenMethods, +}; + + +#endif + +#ifdef __clang__ +# pragma clang diagnostic pop +#elif defined(__GNUC__) +# pragma GCC diagnostic pop +#endif diff --git a/venv/Lib/site-packages/greenlet/TBrokenGreenlet.cpp b/venv/Lib/site-packages/greenlet/TBrokenGreenlet.cpp new file mode 100644 index 00000000..7e9ab5be --- /dev/null +++ b/venv/Lib/site-packages/greenlet/TBrokenGreenlet.cpp @@ -0,0 +1,45 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +/** + * Implementation of greenlet::UserGreenlet. 
+ *
+ * Format with:
+ *  clang-format -i --style=file src/greenlet/greenlet.c
+ *
+ *
+ * Fix missing braces with:
+ *  clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
+*/
+
+#include "TGreenlet.hpp"
+
+namespace greenlet {
+
+void* BrokenGreenlet::operator new(size_t UNUSED(count))
+{
+    return allocator.allocate(1);
+}
+
+
+void BrokenGreenlet::operator delete(void* ptr)
+{
+    return allocator.deallocate(static_cast<BrokenGreenlet*>(ptr),
+                                1);
+}
+
+greenlet::PythonAllocator<greenlet::BrokenGreenlet> greenlet::BrokenGreenlet::allocator;
+
+bool
+BrokenGreenlet::force_slp_switch_error() const noexcept
+{
+    return this->_force_slp_switch_error;
+}
+
+UserGreenlet::switchstack_result_t BrokenGreenlet::g_switchstack(void)
+{
+    if (this->_force_switch_error) {
+        return switchstack_result_t(-1);
+    }
+    return UserGreenlet::g_switchstack();
+}
+
+}; //namespace greenlet
diff --git a/venv/Lib/site-packages/greenlet/TExceptionState.cpp b/venv/Lib/site-packages/greenlet/TExceptionState.cpp
new file mode 100644
index 00000000..08a94ae8
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/TExceptionState.cpp
@@ -0,0 +1,62 @@
+#ifndef GREENLET_EXCEPTION_STATE_CPP
+#define GREENLET_EXCEPTION_STATE_CPP
+
+#include <Python.h>
+#include "TGreenlet.hpp"
+
+namespace greenlet {
+
+
+ExceptionState::ExceptionState()
+{
+    this->clear();
+}
+
+void ExceptionState::operator<<(const PyThreadState *const tstate) noexcept
+{
+    this->exc_info = tstate->exc_info;
+    this->exc_state = tstate->exc_state;
+}
+
+void ExceptionState::operator>>(PyThreadState *const tstate) noexcept
+{
+    tstate->exc_state = this->exc_state;
+    tstate->exc_info =
+        this->exc_info ? this->exc_info : &tstate->exc_state;
+    this->clear();
+}
+
+void ExceptionState::clear() noexcept
+{
+    this->exc_info = nullptr;
+    this->exc_state.exc_value = nullptr;
+#if !GREENLET_PY311
+    this->exc_state.exc_type = nullptr;
+    this->exc_state.exc_traceback = nullptr;
+#endif
+    this->exc_state.previous_item = nullptr;
+}
+
+int ExceptionState::tp_traverse(visitproc visit, void* arg) noexcept
+{
+    Py_VISIT(this->exc_state.exc_value);
+#if !GREENLET_PY311
+    Py_VISIT(this->exc_state.exc_type);
+    Py_VISIT(this->exc_state.exc_traceback);
+#endif
+    return 0;
+}
+
+void ExceptionState::tp_clear() noexcept
+{
+    Py_CLEAR(this->exc_state.exc_value);
+#if !GREENLET_PY311
+    Py_CLEAR(this->exc_state.exc_type);
+    Py_CLEAR(this->exc_state.exc_traceback);
+#endif
+}
+
+
+}; // namespace greenlet
+
+#endif // GREENLET_EXCEPTION_STATE_CPP
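The save/restore operators above are what give each greenlet its own `sys.exc_info()`: an exception being handled in one greenlet is invisible in another. A small illustration of that Python-visible behavior:

```python
import sys
import greenlet

def child():
    # The parent switched in from inside an except: block, but this
    # greenlet carries its own, clean exception state.
    print(sys.exc_info())              # (None, None, None)
    greenlet.getcurrent().parent.switch()

g = greenlet.greenlet(child)
try:
    raise ValueError("parent error")
except ValueError:
    g.switch()
    print(sys.exc_info()[0].__name__)  # ValueError, restored on switch back
```

diff --git a/venv/Lib/site-packages/greenlet/TGreenlet.cpp b/venv/Lib/site-packages/greenlet/TGreenlet.cpp
new file mode 100644
index 00000000..4698a178
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/TGreenlet.cpp
@@ -0,0 +1,718 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+/**
+ * Implementation of greenlet::Greenlet.
+ *
+ * Format with:
+ *  clang-format -i --style=file src/greenlet/greenlet.c
+ *
+ *
+ * Fix missing braces with:
+ *  clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
+*/
+#ifndef TGREENLET_CPP
+#define TGREENLET_CPP
+#include "greenlet_internal.hpp"
+#include "TGreenlet.hpp"
+
+
+#include "TGreenletGlobals.cpp"
+#include "TThreadStateDestroy.cpp"
+
+namespace greenlet {
+
+Greenlet::Greenlet(PyGreenlet* p)
+    : Greenlet(p, StackState())
+{
+}
+
+Greenlet::Greenlet(PyGreenlet* p, const StackState& initial_stack)
+    : _self(p), stack_state(initial_stack)
+{
+    assert(p->pimpl == nullptr);
+    p->pimpl = this;
+}
+
+Greenlet::~Greenlet()
+{
+    // XXX: Can't do this. 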
tp_clear is a virtual function, and by the + // time we're here, we've sliced off our child classes. + //this->tp_clear(); + this->_self->pimpl = nullptr; +} + +bool +Greenlet::force_slp_switch_error() const noexcept +{ + return false; +} + +void +Greenlet::release_args() +{ + this->switch_args.CLEAR(); +} + +/** + * CAUTION: This will allocate memory and may trigger garbage + * collection and arbitrary Python code. + */ +OwnedObject +Greenlet::throw_GreenletExit_during_dealloc(const ThreadState& UNUSED(current_thread_state)) +{ + // If we're killed because we lost all references in the + // middle of a switch, that's ok. Don't reset the args/kwargs, + // we still want to pass them to the parent. + PyErr_SetString(mod_globs->PyExc_GreenletExit, + "Killing the greenlet because all references have vanished."); + // To get here it had to have run before + return this->g_switch(); +} + +inline void +Greenlet::slp_restore_state() noexcept +{ +#ifdef SLP_BEFORE_RESTORE_STATE + SLP_BEFORE_RESTORE_STATE(); +#endif + this->stack_state.copy_heap_to_stack( + this->thread_state()->borrow_current()->stack_state); +} + + +inline int +Greenlet::slp_save_state(char *const stackref) noexcept +{ + // XXX: This used to happen in the middle, before saving, but + // after finding the next owner. Does that matter? This is + // only defined for Sparc/GCC where it flushes register + // windows to the stack (I think) +#ifdef SLP_BEFORE_SAVE_STATE + SLP_BEFORE_SAVE_STATE(); +#endif + return this->stack_state.copy_stack_to_heap(stackref, + this->thread_state()->borrow_current()->stack_state); +} + +/** + * CAUTION: This will allocate memory and may trigger garbage + * collection and arbitrary Python code. + */ +OwnedObject +Greenlet::on_switchstack_or_initialstub_failure( + Greenlet* target, + const Greenlet::switchstack_result_t& err, + const bool target_was_me, + const bool was_initial_stub) +{ + // If we get here, either g_initialstub() + // failed, or g_switchstack() failed. Either one of those + // cases SHOULD leave us in the original greenlet with a valid stack. + if (!PyErr_Occurred()) { + PyErr_SetString( + PyExc_SystemError, + was_initial_stub + ? "Failed to switch stacks into a greenlet for the first time." + : "Failed to switch stacks into a running greenlet."); + } + this->release_args(); + + if (target && !target_was_me) { + target->murder_in_place(); + } + + assert(!err.the_new_current_greenlet); + assert(!err.origin_greenlet); + return OwnedObject(); + +} + +OwnedGreenlet +Greenlet::g_switchstack_success() noexcept +{ + PyThreadState* tstate = PyThreadState_GET(); + // restore the saved state + this->python_state >> tstate; + this->exception_state >> tstate; + + // The thread state hasn't been changed yet. + ThreadState* thread_state = this->thread_state(); + OwnedGreenlet result(thread_state->get_current()); + thread_state->set_current(this->self()); + //assert(thread_state->borrow_current().borrow() == this->_self); + return result; +} + +Greenlet::switchstack_result_t +Greenlet::g_switchstack(void) +{ + // if any of these assertions fail, it's likely because we + // switched away and tried to switch back to us. Early stages of + // switching are not reentrant because we re-use ``this->args()``. 
+    // Switching away would happen if we trigger a garbage collection
+    // (by just using some Python APIs that happen to allocate Python
+    // objects) and some garbage had weakref callbacks or __del__ that
+    // switches (people don't write code like that by hand, but with
+    // gevent it's possible without realizing it)
+    assert(this->args() || PyErr_Occurred());
+    { /* save state */
+        if (this->thread_state()->is_current(this->self())) {
+            // Hmm, nothing to do.
+            // TODO: Does this bypass trace events that are
+            // important?
+            return switchstack_result_t(0,
+                                        this, this->thread_state()->borrow_current());
+        }
+        BorrowedGreenlet current = this->thread_state()->borrow_current();
+        PyThreadState* tstate = PyThreadState_GET();
+
+        current->python_state << tstate;
+        current->exception_state << tstate;
+        this->python_state.will_switch_from(tstate);
+        switching_thread_state = this;
+        current->expose_frames();
+    }
+    assert(this->args() || PyErr_Occurred());
+    // If this is the first switch into a greenlet, this will
+    // return twice, once with 1 in the new greenlet, once with 0
+    // in the origin.
+    int err;
+    if (this->force_slp_switch_error()) {
+        err = -1;
+    }
+    else {
+        err = slp_switch();
+    }
+
+    if (err < 0) { /* error */
+        // Tested by
+        // test_greenlet.TestBrokenGreenlets.test_failed_to_slp_switch_into_running
+        //
+        // It's not clear if it's worth trying to clean up and
+        // continue here. Failing to switch stacks is a big deal which
+        // may not be recoverable (who knows what state the stack is in).
+        // Also, we've stolen references in preparation for calling
+        // ``g_switchstack_success()`` and we don't have a clean
+        // mechanism for backing that all out.
+        Py_FatalError("greenlet: Failed low-level slp_switch(). The stack is probably corrupt.");
+    }
+
+    // No stack-based variables are valid anymore.
+
+    // But the global is volatile so we can reload it without the
+    // compiler caching it from earlier.
+    Greenlet* greenlet_that_switched_in = switching_thread_state; // aka this
+    switching_thread_state = nullptr;
+    // except that no stack variables are valid, we would:
+    // assert(this == greenlet_that_switched_in);
+
+    // switchstack success is where we restore the exception state,
+    // etc. It returns the origin greenlet because it's convenient.
+
+    OwnedGreenlet origin = greenlet_that_switched_in->g_switchstack_success();
+    assert(greenlet_that_switched_in->args() || PyErr_Occurred());
+    return switchstack_result_t(err, greenlet_that_switched_in, origin);
+}
+
+
+inline void
+Greenlet::check_switch_allowed() const
+{
+    // TODO: Make this take a parameter of the current greenlet,
+    // or current main greenlet, to make the check for
+    // cross-thread switching cheaper. Surely somewhere up the
+    // call stack we've already accessed the thread local variable.
+
+    // We expect to always have a main greenlet now; accessing the thread state
+    // created it. However, if we get here and cleanup has already
+    // begun because we're a greenlet that was running in a
+    // (now dead) thread, these invariants will not hold true. In
+    // fact, accessing `this->thread_state` may not even be possible.
+
+    // If the thread this greenlet was running in is dead,
+    // we'll still have a reference to a main greenlet, but the
+    // thread state pointer we have is bogus.
+    // TODO: Give the objects an API to determine if they belong
+    // to a dead thread.
+
+    const BorrowedMainGreenlet main_greenlet = this->find_main_greenlet_in_lineage();
+
+    if (!main_greenlet) {
+        throw PyErrOccurred(mod_globs->PyExc_GreenletError,
+                            "cannot switch to a garbage collected greenlet");
+    }
+
+    if (!main_greenlet->thread_state()) {
+        throw PyErrOccurred(mod_globs->PyExc_GreenletError,
+                            "cannot switch to a different thread (which happens to have exited)");
+    }
+
+    // The main greenlet we found was from the .parent lineage.
+    // That may or may not have any relationship to the main
+    // greenlet of the running thread. We can't actually access
+    // our this->thread_state members to try to check that,
+    // because it could be in the process of getting destroyed,
+    // but setting the main_greenlet->thread_state member to NULL
+    // may not be visible yet. So we need to check against the
+    // current thread state (once the cheaper checks are out of
+    // the way)
+    const BorrowedMainGreenlet current_main_greenlet = GET_THREAD_STATE().state().borrow_main_greenlet();
+    if (
+        // lineage main greenlet is not this thread's greenlet
+        current_main_greenlet != main_greenlet
+        || (
+            // attached to some thread
+            this->main_greenlet()
+            // XXX: Same condition as above. Was this supposed to be
+            // this->main_greenlet()?
+            && current_main_greenlet != main_greenlet)
+        // switching into a known dead thread (XXX: which, if we get here,
+        // is bad, because we just accessed the thread state, which is
+        // gone!)
+        || (!current_main_greenlet->thread_state())) {
+        // CAUTION: This may trigger memory allocations, gc, and
+        // arbitrary Python code.
+        throw PyErrOccurred(
+            mod_globs->PyExc_GreenletError,
+            "Cannot switch to a different thread\n\tCurrent: %R\n\tExpected: %R",
+            current_main_greenlet, main_greenlet);
+    }
+}
+
+const OwnedObject
+Greenlet::context() const
+{
+    using greenlet::PythonStateContext;
+    OwnedObject result;
+
+    if (this->is_currently_running_in_some_thread()) {
+        /* Currently running greenlet: context is stored in the thread state,
+           not the greenlet object. */
+        if (GET_THREAD_STATE().state().is_current(this->self())) {
+            result = PythonStateContext::context(PyThreadState_GET());
+        }
+        else {
+            throw ValueError(
+                "cannot get context of a "
+                "greenlet that is running in a different thread");
+        }
+    }
+    else {
+        /* Greenlet is not running: just return context. */
+        result = this->python_state.context();
+    }
+    if (!result) {
+        result = OwnedObject::None();
+    }
+    return result;
+}
+
+
+void
+Greenlet::context(BorrowedObject given)
+{
+    using greenlet::PythonStateContext;
+    if (!given) {
+        throw AttributeError("can't delete context attribute");
+    }
+    if (given.is_None()) {
+        /* "Empty context" is stored as NULL, not None. */
+        given = nullptr;
+    }
+
+    //checks type, incrs refcnt
+    greenlet::refs::OwnedContext context(given);
+    PyThreadState* tstate = PyThreadState_GET();
+
+    if (this->is_currently_running_in_some_thread()) {
+        if (!GET_THREAD_STATE().state().is_current(this->self())) {
+            throw ValueError("cannot set context of a greenlet"
+                             " that is running in a different thread");
+        }
+
+        /* Currently running greenlet: context is stored in the thread state,
+           not the greenlet object. */
+        OwnedObject octx = OwnedObject::consuming(PythonStateContext::context(tstate));
+        PythonStateContext::context(tstate, context.relinquish_ownership());
+    }
+    else {
+        /* Greenlet is not running: just set context. Note that the
+           greenlet may be dead.*/
+        this->python_state.context() = context;
+    }
+}
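The pair of `context()` accessors above back the `gr_context` attribute registered in `green_getsets` earlier. A brief sketch of the Python-level behavior (contextvars integration, available on Python 3.7+):

```python
import contextvars
import greenlet

var = contextvars.ContextVar("who", default="main")

def child():
    print(var.get())         # "child": we run in the context assigned below

ctx = contextvars.copy_context()
ctx.run(var.set, "child")

g = greenlet.greenlet(child)
g.gr_context = ctx           # allowed because g is not running yet
g.switch()
print(var.get())             # back in the main greenlet's context: "main"
```

+/**
+ * CAUTION: May invoke arbitrary Python code.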
+ *
+ * Figure out what the result of ``greenlet.switch(arg, kwargs)``
+ * should be and transfers ownership of it to the left-hand-side.
+ *
+ * If switch() was just passed an arg tuple, then we'll just return that.
+ * If only keyword arguments were passed, then we'll pass the keyword
+ * argument dict. Otherwise, we'll create a tuple of (args, kwargs) and
+ * return both.
+ *
+ * CAUTION: This may allocate a new tuple object, which may
+ * cause the Python garbage collector to run, which in turn may
+ * run arbitrary Python code that switches.
+ */
+OwnedObject& operator<<=(OwnedObject& lhs, greenlet::SwitchingArgs& rhs) noexcept
+{
+    // Because this may invoke arbitrary Python code, which could
+    // result in switching back to us, we need to get the
+    // arguments locally on the stack.
+    assert(rhs);
+    OwnedObject args = rhs.args();
+    OwnedObject kwargs = rhs.kwargs();
+    rhs.CLEAR();
+    // We shouldn't be called twice for the same switch.
+    assert(args || kwargs);
+    assert(!rhs);
+
+    if (!kwargs) {
+        lhs = args;
+    }
+    else if (!PyDict_Size(kwargs.borrow())) {
+        lhs = args;
+    }
+    else if (!PySequence_Length(args.borrow())) {
+        lhs = kwargs;
+    }
+    else {
+        // PyTuple_Pack allocates memory, may GC, may run arbitrary
+        // Python code.
+        lhs = OwnedObject::consuming(PyTuple_Pack(2, args.borrow(), kwargs.borrow()));
+    }
+    return lhs;
+}
+
+static OwnedObject
+g_handle_exit(const OwnedObject& greenlet_result)
+{
+    if (!greenlet_result && mod_globs->PyExc_GreenletExit.PyExceptionMatches()) {
+        /* catch and ignore GreenletExit */
+        PyErrFetchParam val;
+        PyErr_Fetch(PyErrFetchParam(), val, PyErrFetchParam());
+        if (!val) {
+            return OwnedObject::None();
+        }
+        return OwnedObject(val);
+    }
+
+    if (greenlet_result) {
+        // package the result into a 1-tuple
+        // PyTuple_Pack increments the reference of its arguments,
+        // so we always need to decref the greenlet result;
+        // the owner will do that.
+        return OwnedObject::consuming(PyTuple_Pack(1, greenlet_result.borrow()));
+    }
+
+    return OwnedObject();
+}
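The `operator<<=` above is the C++ side of greenlet's documented switch-value packing: a lone positional argument arrives bare (after `single_result`, declared later in this diff, unpacks the 1-tuple), several positionals arrive as a tuple, keyword-only as a dict, and a mix as an `(args, kwargs)` pair. A quick Python illustration:

```python
import greenlet

def child():
    main = greenlet.getcurrent().parent
    print(main.switch("ready"))   # resumed with: 'one'
    print(main.switch())          # resumed with: ('a', 'b')
    print(main.switch())          # resumed with: {'k': 1}
    print(main.switch())          # resumed with: (('a',), {'k': 1})

g = greenlet.greenlet(child)
print(g.switch())                 # 'ready'
g.switch("one")
g.switch("a", "b")
g.switch(k=1)
g.switch("a", k=1)
```

+/**
+ * May run arbitrary Python code.
+ */
+OwnedObject
+Greenlet::g_switch_finish(const switchstack_result_t& err)
+{
+    assert(err.the_new_current_greenlet == this);
+
+    ThreadState& state = *this->thread_state();
+    // Because calling the trace function could do arbitrary things,
+    // including switching away from this greenlet and then maybe
+    // switching back, we need to capture the arguments now so that
+    // they don't change.
+    OwnedObject result;
+    if (this->args()) {
+        result <<= this->args();
+    }
+    else {
+        assert(PyErr_Occurred());
+    }
+    assert(!this->args());
+    try {
+        // Our only caller handles the bad error case
+        assert(err.status >= 0);
+        assert(state.borrow_current() == this->self());
+        if (OwnedObject tracefunc = state.get_tracefunc()) {
+            assert(result || PyErr_Occurred());
+            g_calltrace(tracefunc,
+                        result ? mod_globs->event_switch : mod_globs->event_throw,
+                        err.origin_greenlet,
+                        this->self());
+        }
+        // The above could have invoked arbitrary Python code, but
+        // it couldn't switch back to this object and *also*
+        // throw an exception, so the args won't have changed.
+
+        if (PyErr_Occurred()) {
+            // We get here if we fell off the end of the run() function
+            // raising an exception. The switch itself was
+            // successful, but the function raised.
+            // valgrind reports that memory allocated here can still
+            // be reached after a test run.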
+ throw PyErrOccurred::from_current(); + } + return result; + } + catch (const PyErrOccurred&) { + /* Turn switch errors into switch throws */ + /* Turn trace errors into switch throws */ + this->release_args(); + throw; + } +} + +void +Greenlet::g_calltrace(const OwnedObject& tracefunc, + const greenlet::refs::ImmortalEventName& event, + const BorrowedGreenlet& origin, + const BorrowedGreenlet& target) +{ + PyErrPieces saved_exc; + try { + TracingGuard tracing_guard; + // TODO: We have saved the active exception (if any) that's + // about to be raised. In the 'throw' case, we could provide + // the exception to the tracefunction, which seems very helpful. + tracing_guard.CallTraceFunction(tracefunc, event, origin, target); + } + catch (const PyErrOccurred&) { + // In case of exceptions trace function is removed, + // and any existing exception is replaced with the tracing + // exception. + GET_THREAD_STATE().state().set_tracefunc(Py_None); + throw; + } + + saved_exc.PyErrRestore(); + assert( + (event == mod_globs->event_throw && PyErr_Occurred()) + || (event == mod_globs->event_switch && !PyErr_Occurred()) + ); +} + +void +Greenlet::murder_in_place() +{ + if (this->active()) { + assert(!this->is_currently_running_in_some_thread()); + this->deactivate_and_free(); + } +} + +inline void +Greenlet::deactivate_and_free() +{ + if (!this->active()) { + return; + } + // Throw away any saved stack. + this->stack_state = StackState(); + assert(!this->stack_state.active()); + // Throw away any Python references. + // We're holding a borrowed reference to the last + // frame we executed. Since we borrowed it, the + // normal traversal, clear, and dealloc functions + // ignore it, meaning it leaks. (The thread state + // object can't find it to clear it when that's + // deallocated either, because by definition if we + // got an object on this list, it wasn't + // running and the thread state doesn't have + // this frame.) + // So here, we *do* clear it. + this->python_state.tp_clear(true); +} + +bool +Greenlet::belongs_to_thread(const ThreadState* thread_state) const +{ + if (!this->thread_state() // not running anywhere, or thread + // exited + || !thread_state) { // same, or there is no thread state. + return false; + } + return true; +} + + +void +Greenlet::deallocing_greenlet_in_thread(const ThreadState* current_thread_state) +{ + /* Cannot raise an exception to kill the greenlet if + it is not running in the same thread! */ + if (this->belongs_to_thread(current_thread_state)) { + assert(current_thread_state); + // To get here it had to have run before + /* Send the greenlet a GreenletExit exception. */ + + // We don't care about the return value, only whether an + // exception happened. + this->throw_GreenletExit_during_dealloc(*current_thread_state); + return; + } + + // Not the same thread! Temporarily save the greenlet + // into its thread's deleteme list, *if* it exists. + // If that thread has already exited, and processed its pending + // cleanup, we'll never be able to clean everything up: we won't + // be able to raise an exception. + // That's mostly OK! Since we can't add it to a list, our refcount + // won't increase, and we'll go ahead with the DECREFs later. + ThreadState *const thread_state = this->thread_state(); + if (thread_state) { + thread_state->delete_when_thread_running(this->self()); + } + else { + // The thread is dead, we can't raise an exception. + // We need to make it look non-active, though, so that dealloc + // finishes killing it. 
+ this->deactivate_and_free(); + } + return; +} + + +int +Greenlet::tp_traverse(visitproc visit, void* arg) +{ + + int result; + if ((result = this->exception_state.tp_traverse(visit, arg)) != 0) { + return result; + } + //XXX: This is ugly. But so is handling everything having to do + //with the top frame. + bool visit_top_frame = this->was_running_in_dead_thread(); + // When true, the thread is dead. Our implicit weak reference to the + // frame is now all that's left; we consider ourselves to + // strongly own it now. + if ((result = this->python_state.tp_traverse(visit, arg, visit_top_frame)) != 0) { + return result; + } + return 0; +} + +int +Greenlet::tp_clear() +{ + bool own_top_frame = this->was_running_in_dead_thread(); + this->exception_state.tp_clear(); + this->python_state.tp_clear(own_top_frame); + return 0; +} + +bool Greenlet::is_currently_running_in_some_thread() const +{ + return this->stack_state.active() && !this->python_state.top_frame(); +} + +#if GREENLET_PY312 +void GREENLET_NOINLINE(Greenlet::expose_frames)() +{ + if (!this->python_state.top_frame()) { + return; + } + + _PyInterpreterFrame* last_complete_iframe = nullptr; + _PyInterpreterFrame* iframe = this->python_state.top_frame()->f_frame; + while (iframe) { + // We must make a copy before looking at the iframe contents, + // since iframe might point to a portion of the greenlet's C stack + // that was spilled when switching greenlets. + _PyInterpreterFrame iframe_copy; + this->stack_state.copy_from_stack(&iframe_copy, iframe, sizeof(*iframe)); + if (!_PyFrame_IsIncomplete(&iframe_copy)) { + // If the iframe were OWNED_BY_CSTACK then it would always be + // incomplete. Since it's not incomplete, it's not on the C stack + // and we can access it through the original `iframe` pointer + // directly. This is important since GetFrameObject might + // lazily _create_ the frame object and we don't want the + // interpreter to lose track of it. + assert(iframe_copy.owner != FRAME_OWNED_BY_CSTACK); + + // We really want to just write: + // PyFrameObject* frame = _PyFrame_GetFrameObject(iframe); + // but _PyFrame_GetFrameObject calls _PyFrame_MakeAndSetFrameObject + // which is not a visible symbol in libpython. The easiest + // way to get a public function to call it is using + // PyFrame_GetBack, which is defined as follows: + // assert(frame != NULL); + // assert(!_PyFrame_IsIncomplete(frame->f_frame)); + // PyFrameObject *back = frame->f_back; + // if (back == NULL) { + // _PyInterpreterFrame *prev = frame->f_frame->previous; + // prev = _PyFrame_GetFirstComplete(prev); + // if (prev) { + // back = _PyFrame_GetFrameObject(prev); + // } + // } + // return (PyFrameObject*)Py_XNewRef(back); + if (!iframe->frame_obj) { + PyFrameObject dummy_frame; + _PyInterpreterFrame dummy_iframe; + dummy_frame.f_back = nullptr; + dummy_frame.f_frame = &dummy_iframe; + // force the iframe to be considered complete without + // needing to check its code object: + dummy_iframe.owner = FRAME_OWNED_BY_GENERATOR; + dummy_iframe.previous = iframe; + assert(!_PyFrame_IsIncomplete(&dummy_iframe)); + // Drop the returned reference immediately; the iframe + // continues to hold a strong reference + Py_XDECREF(PyFrame_GetBack(&dummy_frame)); + assert(iframe->frame_obj); + } + + // This is a complete frame, so make the last one of those we saw + // point at it, bypassing any incomplete frames (which may have + // been on the C stack) in between the two. 
We're overwriting
+            // last_complete_iframe->previous and need that to be reversible,
+            // so we store the original previous ptr in the frame object
+            // (which we must have created on a previous iteration through
+            // this loop). The frame object has a bunch of storage that is
+            // only used when its iframe is OWNED_BY_FRAME_OBJECT, which only
+            // occurs when the frame object outlives the frame's execution,
+            // which can't have happened yet because the frame is currently
+            // executing as far as the interpreter is concerned. So, we can
+            // reuse it for our own purposes.
+            assert(iframe->owner == FRAME_OWNED_BY_THREAD
+                   || iframe->owner == FRAME_OWNED_BY_GENERATOR);
+            if (last_complete_iframe) {
+                assert(last_complete_iframe->frame_obj);
+                memcpy(&last_complete_iframe->frame_obj->_f_frame_data[0],
+                       &last_complete_iframe->previous, sizeof(void *));
+                last_complete_iframe->previous = iframe;
+            }
+            last_complete_iframe = iframe;
+        }
+        // Frames that are OWNED_BY_FRAME_OBJECT are linked via the
+        // frame's f_back while all others are linked via the iframe's
+        // previous ptr. Since all the frames we traverse are running
+        // as far as the interpreter is concerned, we don't have to
+        // worry about the OWNED_BY_FRAME_OBJECT case.
+        iframe = iframe_copy.previous;
+    }
+
+    // Give the outermost complete iframe a null previous pointer to
+    // account for any potential incomplete/C-stack iframes between it
+    // and the actual top-of-stack
+    if (last_complete_iframe) {
+        assert(last_complete_iframe->frame_obj);
+        memcpy(&last_complete_iframe->frame_obj->_f_frame_data[0],
+               &last_complete_iframe->previous, sizeof(void *));
+        last_complete_iframe->previous = nullptr;
+    }
+}
+#else
+void Greenlet::expose_frames()
+{
+
+}
+#endif
+
+}; // namespace greenlet
+#endif
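`expose_frames()` above is what keeps `gr_frame` introspection working on CPython 3.12+, where part of the interpreter frame chain lives on the switched-out C stack. From Python the effect is simply that a suspended greenlet's top frame is inspectable:

```python
import greenlet

def child():
    greenlet.getcurrent().parent.switch()   # suspend here

g = greenlet.greenlet(child)
g.switch()                    # child is now suspended
frame = g.gr_frame            # top frame of the suspended greenlet
print(frame.f_code.co_name)   # 'child'
```

diff --git a/venv/Lib/site-packages/greenlet/TGreenlet.hpp b/venv/Lib/site-packages/greenlet/TGreenlet.hpp
new file mode 100644
index 00000000..f69b881d
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/TGreenlet.hpp
@@ -0,0 +1,820 @@
+#ifndef GREENLET_GREENLET_HPP
+#define GREENLET_GREENLET_HPP
+/*
+ * Declarations of the core data structures.
+*/
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+#include "greenlet_compiler_compat.hpp"
+#include "greenlet_refs.hpp"
+#include "greenlet_cpython_compat.hpp"
+#include "greenlet_allocator.hpp"
+
+using greenlet::refs::OwnedObject;
+using greenlet::refs::OwnedGreenlet;
+using greenlet::refs::OwnedMainGreenlet;
+using greenlet::refs::BorrowedGreenlet;
+
+#if PY_VERSION_HEX < 0x30B00A6
+# define _PyCFrame CFrame
+# define _PyInterpreterFrame _interpreter_frame
+#endif
+
+#if GREENLET_PY312
+# define Py_BUILD_CORE
+# include "internal/pycore_frame.h"
+#endif
+
+#if GREENLET_PY314
+# include "internal/pycore_interpframe_structs.h"
+# include "internal/pycore_interpframe.h"
+#endif
+
+// XXX: TODO: Work to remove all virtual functions
+// for speed of calling and size of objects (no vtable).
+// One pattern is the Curiously Recurring Template
+namespace greenlet
+{
+    class ExceptionState
+    {
+    private:
+        G_NO_COPIES_OF_CLS(ExceptionState);
+
+        // Even though these are borrowed objects, we actually own
+        // them, when they're not null.
+        // XXX: Express that in the API.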
+    private:
+        _PyErr_StackItem* exc_info;
+        _PyErr_StackItem exc_state;
+    public:
+        ExceptionState();
+        void operator<<(const PyThreadState *const tstate) noexcept;
+        void operator>>(PyThreadState* tstate) noexcept;
+        void clear() noexcept;
+
+        int tp_traverse(visitproc visit, void* arg) noexcept;
+        void tp_clear() noexcept;
+    };
+
+    template<typename T>
+    void operator<<(const PyThreadState *const tstate, T& exc);
+
+    class PythonStateContext
+    {
+    protected:
+        greenlet::refs::OwnedContext _context;
+    public:
+        inline const greenlet::refs::OwnedContext& context() const
+        {
+            return this->_context;
+        }
+        inline greenlet::refs::OwnedContext& context()
+        {
+            return this->_context;
+        }
+
+        inline void tp_clear()
+        {
+            this->_context.CLEAR();
+        }
+
+        template<typename T>
+        inline static PyObject* context(T* tstate)
+        {
+            return tstate->context;
+        }
+
+        template<typename T>
+        inline static void context(T* tstate, PyObject* new_context)
+        {
+            tstate->context = new_context;
+            tstate->context_ver++;
+        }
+    };
+
+    class SwitchingArgs;
+    class PythonState : public PythonStateContext
+    {
+    public:
+        typedef greenlet::refs::OwnedReference<PyFrameObject> OwnedFrame;
+    private:
+        G_NO_COPIES_OF_CLS(PythonState);
+        // We own this if we're suspended (although currently we don't
+        // tp_traverse into it; that's a TODO). If we're running, it's
+        // empty. If we get deallocated and *still* have a frame, it
+        // won't be reachable from the place that normally decref's
+        // it, so we need to do it (hence owning it).
+        OwnedFrame _top_frame;
+#if GREENLET_USE_CFRAME
+        _PyCFrame* cframe;
+        int use_tracing;
+#endif
+#if GREENLET_PY314
+        int py_recursion_depth;
+#elif GREENLET_PY312
+        int py_recursion_depth;
+        int c_recursion_depth;
+#else
+        int recursion_depth;
+#endif
+#if GREENLET_PY313
+        PyObject *delete_later;
+#else
+        int trash_delete_nesting;
+#endif
+#if GREENLET_PY311
+        _PyInterpreterFrame* current_frame;
+        _PyStackChunk* datastack_chunk;
+        PyObject** datastack_top;
+        PyObject** datastack_limit;
+#endif
+        // The PyInterpreterFrame list on 3.12+ contains some entries that are
+        // on the C stack, which can't be directly accessed while a greenlet is
+        // suspended. In order to keep greenlet gr_frame introspection working,
+        // we adjust stack switching to rewrite the interpreter frame list
+        // to skip these C-stack frames; we call this "exposing" the greenlet's
+        // frames because it makes them valid to work with in Python. Then when
+        // the greenlet is resumed we need to remember to reverse the operation
+        // we did. The C-stack frames are "entry frames" which are a low-level
+        // interpreter detail; they're not needed for introspection, but do
+        // need to be present for the eval loop to work.
+        void unexpose_frames();
+
+    public:
+
+        PythonState();
+        // You can use this for testing whether we have a frame
+        // or not. It returns const so they can't modify it.
+ const OwnedFrame& top_frame() const noexcept; + + inline void operator<<(const PyThreadState *const tstate) noexcept; + inline void operator>>(PyThreadState* tstate) noexcept; + void clear() noexcept; + + int tp_traverse(visitproc visit, void* arg, bool visit_top_frame) noexcept; + void tp_clear(bool own_top_frame) noexcept; + void set_initial_state(const PyThreadState* const tstate) noexcept; +#if GREENLET_USE_CFRAME + void set_new_cframe(_PyCFrame& frame) noexcept; +#endif + + void may_switch_away() noexcept; + inline void will_switch_from(PyThreadState *const origin_tstate) noexcept; + void did_finish(PyThreadState* tstate) noexcept; + }; + + class StackState + { + // By having only plain C (POD) members, no virtual functions + // or bases, we get a trivial assignment operator generated + // for us. However, that's not safe since we do manage memory. + // So we declare an assignment operator that only works if we + // don't have any memory allocated. (We don't use + // std::shared_ptr for reference counting just to keep this + // object small) + private: + char* _stack_start; + char* stack_stop; + char* stack_copy; + intptr_t _stack_saved; + StackState* stack_prev; + inline int copy_stack_to_heap_up_to(const char* const stop) noexcept; + inline void free_stack_copy() noexcept; + + public: + /** + * Creates a started, but inactive, state, using *current* + * as the previous. + */ + StackState(void* mark, StackState& current); + /** + * Creates an inactive, unstarted, state. + */ + StackState(); + ~StackState(); + StackState(const StackState& other); + StackState& operator=(const StackState& other); + inline void copy_heap_to_stack(const StackState& current) noexcept; + inline int copy_stack_to_heap(char* const stackref, const StackState& current) noexcept; + inline bool started() const noexcept; + inline bool main() const noexcept; + inline bool active() const noexcept; + inline void set_active() noexcept; + inline void set_inactive() noexcept; + inline intptr_t stack_saved() const noexcept; + inline char* stack_start() const noexcept; + static inline StackState make_main() noexcept; +#ifdef GREENLET_USE_STDIO + friend std::ostream& operator<<(std::ostream& os, const StackState& s); +#endif + + // Fill in [dest, dest + n) with the values that would be at + // [src, src + n) while this greenlet is running. This is like memcpy + // except that if the greenlet is suspended it accounts for the portion + // of the greenlet's stack that was spilled to the heap. `src` may + // be on this greenlet's stack, or on the heap, but not on a different + // greenlet's stack. + void copy_from_stack(void* dest, const void* src, size_t n) const; + }; +#ifdef GREENLET_USE_STDIO + std::ostream& operator<<(std::ostream& os, const StackState& s); +#endif + + class SwitchingArgs + { + private: + G_NO_ASSIGNMENT_OF_CLS(SwitchingArgs); + // If args and kwargs are both false (NULL), this is a *throw*, not a + // switch. PyErr_... must have been called already. + OwnedObject _args; + OwnedObject _kwargs; + public: + + SwitchingArgs() + {} + + SwitchingArgs(const OwnedObject& args, const OwnedObject& kwargs) + : _args(args), + _kwargs(kwargs) + {} + + SwitchingArgs(const SwitchingArgs& other) + : _args(other._args), + _kwargs(other._kwargs) + {} + + const OwnedObject& args() + { + return this->_args; + } + + const OwnedObject& kwargs() + { + return this->_kwargs; + } + + /** + * Moves ownership from the argument to this object. 
+ */ + SwitchingArgs& operator<<=(SwitchingArgs& other) + { + if (this != &other) { + this->_args = other._args; + this->_kwargs = other._kwargs; + other.CLEAR(); + } + return *this; + } + + /** + * Acquires ownership of the argument (consumes the reference). + */ + SwitchingArgs& operator<<=(PyObject* args) + { + this->_args = OwnedObject::consuming(args); + this->_kwargs.CLEAR(); + return *this; + } + + /** + * Acquires ownership of the argument. + * + * Sets the args to be the given value; clears the kwargs. + */ + SwitchingArgs& operator<<=(OwnedObject& args) + { + assert(&args != &this->_args); + this->_args = args; + this->_kwargs.CLEAR(); + args.CLEAR(); + + return *this; + } + + explicit operator bool() const noexcept + { + return this->_args || this->_kwargs; + } + + inline void CLEAR() + { + this->_args.CLEAR(); + this->_kwargs.CLEAR(); + } + + const std::string as_str() const noexcept + { + return PyUnicode_AsUTF8( + OwnedObject::consuming( + PyUnicode_FromFormat( + "SwitchingArgs(args=%R, kwargs=%R)", + this->_args.borrow(), + this->_kwargs.borrow() + ) + ).borrow() + ); + } + }; + + class ThreadState; + + class UserGreenlet; + class MainGreenlet; + + class Greenlet + { + private: + G_NO_COPIES_OF_CLS(Greenlet); + PyGreenlet* const _self; + private: + // XXX: Work to remove these. + friend class ThreadState; + friend class UserGreenlet; + friend class MainGreenlet; + protected: + ExceptionState exception_state; + SwitchingArgs switch_args; + StackState stack_state; + PythonState python_state; + Greenlet(PyGreenlet* p, const StackState& initial_state); + public: + // This constructor takes ownership of the PyGreenlet, by + // setting ``p->pimpl = this;``. + Greenlet(PyGreenlet* p); + virtual ~Greenlet(); + + const OwnedObject context() const; + + // You MUST call this _very_ early in the switching process to + // prepare anything that may need prepared. This might perform + // garbage collections or otherwise run arbitrary Python code. + // + // One specific use of it is for Python 3.11+, preventing + // running arbitrary code at unsafe times. See + // PythonState::may_switch_away(). + inline void may_switch_away() + { + this->python_state.may_switch_away(); + } + + inline void context(refs::BorrowedObject new_context); + + inline SwitchingArgs& args() + { + return this->switch_args; + } + + virtual const refs::BorrowedMainGreenlet main_greenlet() const = 0; + + inline intptr_t stack_saved() const noexcept + { + return this->stack_state.stack_saved(); + } + + // This is used by the macro SLP_SAVE_STATE to compute the + // difference in stack sizes. It might be nice to handle the + // computation ourself, but the type of the result + // varies by platform, so doing it in the macro is the + // simplest way. + inline const char* stack_start() const noexcept + { + return this->stack_state.stack_start(); + } + + virtual OwnedObject throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state); + virtual OwnedObject g_switch() = 0; + /** + * Force the greenlet to appear dead. Used when it's not + * possible to throw an exception into a greenlet anymore. + * + * This losses access to the thread state and the main greenlet. + */ + virtual void murder_in_place(); + + /** + * Called when somebody notices we were running in a dead + * thread to allow cleaning up resources (because we can't + * raise GreenletExit into it anymore). + * This is very similar to ``murder_in_place()``, except that + * it DOES NOT lose the main greenlet or thread state. 
+ */ + inline void deactivate_and_free(); + + + // Called when some thread wants to deallocate a greenlet + // object. + // The thread may or may not be the same thread the greenlet + // was running in. + // The thread state will be null if the thread the greenlet + // was running in was known to have exited. + void deallocing_greenlet_in_thread(const ThreadState* current_state); + + // Must be called on 3.12+ before exposing a suspended greenlet's + // frames to user code. This rewrites the linked list of interpreter + // frames to skip the ones that are being stored on the C stack (which + // can't be safely accessed while the greenlet is suspended because + // that stack space might be hosting a different greenlet), and + // sets PythonState::frames_were_exposed so we remember to restore + // the original list before resuming the greenlet. The C-stack frames + // are a low-level interpreter implementation detail; while they're + // important to the bytecode eval loop, they're superfluous for + // introspection purposes. + void expose_frames(); + + + // TODO: Figure out how to make these non-public. + inline void slp_restore_state() noexcept; + inline int slp_save_state(char *const stackref) noexcept; + + inline bool is_currently_running_in_some_thread() const; + virtual bool belongs_to_thread(const ThreadState* state) const; + + inline bool started() const + { + return this->stack_state.started(); + } + inline bool active() const + { + return this->stack_state.active(); + } + inline bool main() const + { + return this->stack_state.main(); + } + virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const = 0; + + virtual const OwnedGreenlet parent() const = 0; + virtual void parent(const refs::BorrowedObject new_parent) = 0; + + inline const PythonState::OwnedFrame& top_frame() + { + return this->python_state.top_frame(); + } + + virtual const OwnedObject& run() const = 0; + virtual void run(const refs::BorrowedObject nrun) = 0; + + + virtual int tp_traverse(visitproc visit, void* arg); + virtual int tp_clear(); + + + // Return the thread state that the greenlet is running in, or + // null if the greenlet is not running or the thread is known + // to have exited. + virtual ThreadState* thread_state() const noexcept = 0; + + // Return true if the greenlet is known to have been running + // (active) in a thread that has now exited. + virtual bool was_running_in_dead_thread() const noexcept = 0; + + // Return a borrowed greenlet that is the Python object + // this object represents. + inline BorrowedGreenlet self() const noexcept + { + return BorrowedGreenlet(this->_self); + } + + // For testing. If this returns true, we should pretend that + // slp_switch() failed. + virtual bool force_slp_switch_error() const noexcept; + + protected: + inline void release_args(); + + // The functions that must not be inlined are declared virtual. + // We also mark them as protected, not private, so that the + // compiler is forced to call them through a function pointer. + // (A sufficiently smart compiler could directly call a private + // virtual function since it can never be overridden in a + // subclass). + + // Also TODO: Switch away from integer error codes and to enums, + // or throw exceptions when possible. 
struct switchstack_result_t
+        {
+            int status;
+            Greenlet* the_new_current_greenlet;
+            OwnedGreenlet origin_greenlet;
+
+            switchstack_result_t()
+                : status(0),
+                  the_new_current_greenlet(nullptr)
+            {}
+
+            switchstack_result_t(int err)
+                : status(err),
+                  the_new_current_greenlet(nullptr)
+            {}
+
+            switchstack_result_t(int err, Greenlet* state, OwnedGreenlet& origin)
+                : status(err),
+                  the_new_current_greenlet(state),
+                  origin_greenlet(origin)
+            {
+            }
+
+            switchstack_result_t(int err, Greenlet* state, const BorrowedGreenlet& origin)
+                : status(err),
+                  the_new_current_greenlet(state),
+                  origin_greenlet(origin)
+            {
+            }
+
+            switchstack_result_t(const switchstack_result_t& other)
+                : status(other.status),
+                  the_new_current_greenlet(other.the_new_current_greenlet),
+                  origin_greenlet(other.origin_greenlet)
+            {}
+
+            switchstack_result_t& operator=(const switchstack_result_t& other)
+            {
+                this->status = other.status;
+                this->the_new_current_greenlet = other.the_new_current_greenlet;
+                this->origin_greenlet = other.origin_greenlet;
+                return *this;
+            }
+        };
+
+        OwnedObject on_switchstack_or_initialstub_failure(
+            Greenlet* target,
+            const switchstack_result_t& err,
+            const bool target_was_me=false,
+            const bool was_initial_stub=false);
+
+        // Returns the previous greenlet we just switched away from.
+        virtual OwnedGreenlet g_switchstack_success() noexcept;
+
+
+        // Check the preconditions for switching to this greenlet; if they
+        // aren't met, throws PyErrOccurred. Most callers will want to
+        // catch this and clear the arguments.
+        inline void check_switch_allowed() const;
+        class GreenletStartedWhileInPython : public std::runtime_error
+        {
+        public:
+            GreenletStartedWhileInPython() : std::runtime_error("")
+            {}
+        };
+
+    protected:
+
+
+        /**
+           Perform a stack switch into this greenlet.
+
+           This temporarily sets the global variable
+           ``switching_thread_state`` to this greenlet; as soon as the
+           call to ``slp_switch`` completes, this is reset to NULL.
+           Consequently, this depends on the GIL.
+
+           TODO: Adopt the stackman model and pass ``slp_switch`` a
+           callback function and context pointer; this eliminates the
+           need for global variables altogether.
+
+           Because the stack switch happens in this function, this
+           function can't use its own stack (local) variables, set
+           before the switch, and then accessed after the switch.
+
+           Further, you can't even access ``g_thread_state_global``
+           before and after the switch from the global variable.
+           Because it is thread local some compilers cache it in a
+           register/on the stack, notably new versions of MSVC; this
+           breaks with strange crashes sometime later, because writing
+           to anything in ``g_thread_state_global`` after the switch
+           is actually writing to random memory. For this reason, we
+           call a non-inlined function to finish the operation. (XXX:
+           The ``/GT`` MSVC compiler argument probably fixes that.)
+
+           It is very important that stack switch is 'atomic', i.e. no
+           calls into other Python code allowed (except very few that
+           are safe), because global variables are very fragile. (This
+           should no longer be the case with thread-local variables.)
+
+        */
+        // Made virtual to facilitate subclassing UserGreenlet for testing.
+        virtual switchstack_result_t g_switchstack(void);
+
+class TracingGuard
+{
+private:
+    PyThreadState* tstate;
+public:
+    TracingGuard()
+        : tstate(PyThreadState_GET())
+    {
+        PyThreadState_EnterTracing(this->tstate);
+    }
+
+    ~TracingGuard()
+    {
+        PyThreadState_LeaveTracing(this->tstate);
+        this->tstate = nullptr;
+    }
+
+    inline void CallTraceFunction(const OwnedObject& tracefunc,
+                                  const greenlet::refs::ImmortalEventName& event,
+                                  const BorrowedGreenlet& origin,
+                                  const BorrowedGreenlet& target)
+    {
+        // TODO: This calls tracefunc(event, (origin, target)). Add a shortcut
+        // function for that that's specialized to avoid the Py_BuildValue
+        // string parsing, or start with just using "ON" format with PyTuple_Pack(2,
+        // origin, target). That seems like what the N format is meant
+        // for.
+        // XXX: Why does event not automatically cast back to a PyObject?
+        // It tries to call the "deleted constructor ImmortalEventName
+        // const" instead.
+        assert(tracefunc);
+        assert(event);
+        assert(origin);
+        assert(target);
+        greenlet::refs::NewReference retval(
+            PyObject_CallFunction(
+                tracefunc.borrow(),
+                "O(OO)",
+                event.borrow(),
+                origin.borrow(),
+                target.borrow()
+            ));
+        if (!retval) {
+            throw PyErrOccurred::from_current();
+        }
+    }
+};
+
+        static void
+        g_calltrace(const OwnedObject& tracefunc,
+                    const greenlet::refs::ImmortalEventName& event,
+                    const greenlet::refs::BorrowedGreenlet& origin,
+                    const BorrowedGreenlet& target);
+    private:
+        OwnedObject g_switch_finish(const switchstack_result_t& err);
+
+    };
+
+    class UserGreenlet : public Greenlet
+    {
+    private:
+        static greenlet::PythonAllocator<UserGreenlet> allocator;
+        OwnedMainGreenlet _main_greenlet;
+        OwnedObject _run_callable;
+        OwnedGreenlet _parent;
+    public:
+        static void* operator new(size_t UNUSED(count));
+        static void operator delete(void* ptr);
+
+        UserGreenlet(PyGreenlet* p, BorrowedGreenlet the_parent);
+        virtual ~UserGreenlet();
+
+        virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const;
+        virtual bool was_running_in_dead_thread() const noexcept;
+        virtual ThreadState* thread_state() const noexcept;
+        virtual OwnedObject g_switch();
+        virtual const OwnedObject& run() const
+        {
+            if (this->started() || !this->_run_callable) {
+                throw AttributeError("run");
+            }
+            return this->_run_callable;
+        }
+        virtual void run(const refs::BorrowedObject nrun);
+
+        virtual const OwnedGreenlet parent() const;
+        virtual void parent(const refs::BorrowedObject new_parent);
+
+        virtual const refs::BorrowedMainGreenlet main_greenlet() const;
+
+        virtual void murder_in_place();
+        virtual bool belongs_to_thread(const ThreadState* state) const;
+        virtual int tp_traverse(visitproc visit, void* arg);
+        virtual int tp_clear();
+        class ParentIsCurrentGuard
+        {
+        private:
+            OwnedGreenlet oldparent;
+            UserGreenlet* greenlet;
+            G_NO_COPIES_OF_CLS(ParentIsCurrentGuard);
+        public:
+            ParentIsCurrentGuard(UserGreenlet* p, const ThreadState& thread_state);
+            ~ParentIsCurrentGuard();
+        };
+        virtual OwnedObject throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state);
+    protected:
+        virtual switchstack_result_t g_initialstub(void* mark);
+    private:
+        // This function isn't meant to return.
+        // This accepts raw pointers and the ownership of them at the
+        // same time. The caller should use ``inner_bootstrap(origin.relinquish_ownership())``.
+ void inner_bootstrap(PyGreenlet* origin_greenlet, PyObject* run); + }; + + class BrokenGreenlet : public UserGreenlet + { + private: + static greenlet::PythonAllocator allocator; + public: + bool _force_switch_error = false; + bool _force_slp_switch_error = false; + + static void* operator new(size_t UNUSED(count)); + static void operator delete(void* ptr); + BrokenGreenlet(PyGreenlet* p, BorrowedGreenlet the_parent) + : UserGreenlet(p, the_parent) + {} + virtual ~BrokenGreenlet() + {} + + virtual switchstack_result_t g_switchstack(void); + virtual bool force_slp_switch_error() const noexcept; + + }; + + class MainGreenlet : public Greenlet + { + private: + static greenlet::PythonAllocator allocator; + refs::BorrowedMainGreenlet _self; + ThreadState* _thread_state; + G_NO_COPIES_OF_CLS(MainGreenlet); + public: + static void* operator new(size_t UNUSED(count)); + static void operator delete(void* ptr); + + MainGreenlet(refs::BorrowedMainGreenlet::PyType*, ThreadState*); + virtual ~MainGreenlet(); + + + virtual const OwnedObject& run() const; + virtual void run(const refs::BorrowedObject nrun); + + virtual const OwnedGreenlet parent() const; + virtual void parent(const refs::BorrowedObject new_parent); + + virtual const refs::BorrowedMainGreenlet main_greenlet() const; + + virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const; + virtual bool was_running_in_dead_thread() const noexcept; + virtual ThreadState* thread_state() const noexcept; + void thread_state(ThreadState*) noexcept; + virtual OwnedObject g_switch(); + virtual int tp_traverse(visitproc visit, void* arg); + }; + + // Instantiate one on the stack to save the GC state, + // and then disable GC. When it goes out of scope, GC will be + // restored to its original state. Sadly, these APIs are only + // available on 3.10+; luckily, we only need them on 3.11+. +#if GREENLET_PY310 + class GCDisabledGuard + { + private: + int was_enabled = 0; + public: + GCDisabledGuard() + : was_enabled(PyGC_IsEnabled()) + { + PyGC_Disable(); + } + + ~GCDisabledGuard() + { + if (this->was_enabled) { + PyGC_Enable(); + } + } + }; +#endif + + OwnedObject& operator<<=(OwnedObject& lhs, greenlet::SwitchingArgs& rhs) noexcept; + + //TODO: Greenlet::g_switch() should call this automatically on its + //return value. As it is, the module code is calling it. + static inline OwnedObject + single_result(const OwnedObject& results) + { + if (results + && PyTuple_Check(results.borrow()) + && PyTuple_GET_SIZE(results.borrow()) == 1) { + PyObject* result = PyTuple_GET_ITEM(results.borrow(), 0); + assert(result); + return OwnedObject::owning(result); + } + return results; + } + + + static OwnedObject + g_handle_exit(const OwnedObject& greenlet_result); + + + template + void operator<<(const PyThreadState *const lhs, T& rhs) + { + rhs.operator<<(lhs); + } + +} // namespace greenlet ; + +#endif diff --git a/venv/Lib/site-packages/greenlet/TGreenletGlobals.cpp b/venv/Lib/site-packages/greenlet/TGreenletGlobals.cpp new file mode 100644 index 00000000..0087d2ff --- /dev/null +++ b/venv/Lib/site-packages/greenlet/TGreenletGlobals.cpp @@ -0,0 +1,94 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +/** + * Implementation of GreenletGlobals. 
+ *
+ * Format with:
+ *  clang-format -i --style=file src/greenlet/greenlet.c
+ *
+ *
+ * Fix missing braces with:
+ * clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
+*/
+#ifndef T_GREENLET_GLOBALS
+#define T_GREENLET_GLOBALS
+
+#include "greenlet_refs.hpp"
+#include "greenlet_exceptions.hpp"
+#include "greenlet_thread_support.hpp"
+#include "greenlet_internal.hpp"
+
+namespace greenlet {
+
+// This encapsulates what were previously module global "constants"
+// established at init time.
+// This is a step towards Python3 style module state that allows
+// reloading.
+//
+// In an earlier iteration of this code, we used placement new to be
+// able to allocate this object statically still, so that references
+// to its members don't incur an extra pointer indirection.
+// But under some scenarios, that could result in crashes at
+// shutdown because apparently the destructor was getting run twice?
+class GreenletGlobals
+{
+
+public:
+    const greenlet::refs::ImmortalEventName event_switch;
+    const greenlet::refs::ImmortalEventName event_throw;
+    const greenlet::refs::ImmortalException PyExc_GreenletError;
+    const greenlet::refs::ImmortalException PyExc_GreenletExit;
+    const greenlet::refs::ImmortalObject empty_tuple;
+    const greenlet::refs::ImmortalObject empty_dict;
+    const greenlet::refs::ImmortalString str_run;
+    Mutex* const thread_states_to_destroy_lock;
+    greenlet::cleanup_queue_t thread_states_to_destroy;
+
+    GreenletGlobals() :
+        event_switch("switch"),
+        event_throw("throw"),
+        PyExc_GreenletError("greenlet.error"),
+        PyExc_GreenletExit("greenlet.GreenletExit", PyExc_BaseException),
+        empty_tuple(Require(PyTuple_New(0))),
+        empty_dict(Require(PyDict_New())),
+        str_run("run"),
+        thread_states_to_destroy_lock(new Mutex())
+    {}
+
+    ~GreenletGlobals()
+    {
+        // This object is (currently) effectively immortal, and not
+        // just because of those placement new tricks; if we try to
+        // deallocate the static object we allocated, and overwrote,
+        // we would be doing so at C++ teardown time, which is after
+        // the final Python GIL is released, and we can't use the API
+        // then.
+        // (The members will still be destructed, but they also don't
+        // do any deallocation.)
+    }
+
+    void queue_to_destroy(ThreadState* ts) const
+    {
+        // we're currently accessed through a static const object,
+        // implicitly marking our members as const, so code can't just
+        // call push_back (or pop_back) without casting away the
+        // const.
+        //
+        // Do that for callers.
+        greenlet::cleanup_queue_t& q = const_cast<greenlet::cleanup_queue_t&>(this->thread_states_to_destroy);
+        q.push_back(ts);
+    }
+
+    ThreadState* take_next_to_destroy() const
+    {
+        greenlet::cleanup_queue_t& q = const_cast<greenlet::cleanup_queue_t&>(this->thread_states_to_destroy);
+        ThreadState* result = q.back();
+        q.pop_back();
+        return result;
+    }
+};
+
+}; // namespace greenlet
+
+static const greenlet::GreenletGlobals* mod_globs;
+
+#endif // T_GREENLET_GLOBALS
diff --git a/venv/Lib/site-packages/greenlet/TMainGreenlet.cpp b/venv/Lib/site-packages/greenlet/TMainGreenlet.cpp
new file mode 100644
index 00000000..a2a9cfe4
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/TMainGreenlet.cpp
@@ -0,0 +1,153 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+/**
+ * Implementation of greenlet::MainGreenlet.
+ * + * Format with: + * clang-format -i --style=file src/greenlet/greenlet.c + * + * + * Fix missing braces with: + * clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements" +*/ +#ifndef T_MAIN_GREENLET_CPP +#define T_MAIN_GREENLET_CPP + +#include "TGreenlet.hpp" + + + +// Protected by the GIL. Incremented when we create a main greenlet, +// in a new thread, decremented when it is destroyed. +static Py_ssize_t G_TOTAL_MAIN_GREENLETS; + +namespace greenlet { +greenlet::PythonAllocator MainGreenlet::allocator; + +void* MainGreenlet::operator new(size_t UNUSED(count)) +{ + return allocator.allocate(1); +} + + +void MainGreenlet::operator delete(void* ptr) +{ + return allocator.deallocate(static_cast(ptr), + 1); +} + + +MainGreenlet::MainGreenlet(PyGreenlet* p, ThreadState* state) + : Greenlet(p, StackState::make_main()), + _self(p), + _thread_state(state) +{ + G_TOTAL_MAIN_GREENLETS++; +} + +MainGreenlet::~MainGreenlet() +{ + G_TOTAL_MAIN_GREENLETS--; + this->tp_clear(); +} + +ThreadState* +MainGreenlet::thread_state() const noexcept +{ + return this->_thread_state; +} + +void +MainGreenlet::thread_state(ThreadState* t) noexcept +{ + assert(!t); + this->_thread_state = t; +} + + +const BorrowedMainGreenlet +MainGreenlet::main_greenlet() const +{ + return this->_self; +} + +BorrowedMainGreenlet +MainGreenlet::find_main_greenlet_in_lineage() const +{ + return BorrowedMainGreenlet(this->_self); +} + +bool +MainGreenlet::was_running_in_dead_thread() const noexcept +{ + return !this->_thread_state; +} + +OwnedObject +MainGreenlet::g_switch() +{ + try { + this->check_switch_allowed(); + } + catch (const PyErrOccurred&) { + this->release_args(); + throw; + } + + switchstack_result_t err = this->g_switchstack(); + if (err.status < 0) { + // XXX: This code path is untested, but it is shared + // with the UserGreenlet path that is tested. + return this->on_switchstack_or_initialstub_failure( + this, + err, + true, // target was me + false // was initial stub + ); + } + + return err.the_new_current_greenlet->g_switch_finish(err); +} + +int +MainGreenlet::tp_traverse(visitproc visit, void* arg) +{ + if (this->_thread_state) { + // we've already traversed main, (self), don't do it again. 
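+        // Passing ``false`` for ``traverse_main`` keeps the thread
+        // state from visiting this main greenlet a second time.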
+        int result = this->_thread_state->tp_traverse(visit, arg, false);
+        if (result) {
+            return result;
+        }
+    }
+    return Greenlet::tp_traverse(visit, arg);
+}
+
+const OwnedObject&
+MainGreenlet::run() const
+{
+    throw AttributeError("Main greenlets do not have a run attribute.");
+}
+
+void
+MainGreenlet::run(const BorrowedObject UNUSED(nrun))
+{
+    throw AttributeError("Main greenlets do not have a run attribute.");
+}
+
+void
+MainGreenlet::parent(const BorrowedObject raw_new_parent)
+{
+    if (!raw_new_parent) {
+        throw AttributeError("can't delete attribute");
+    }
+    throw AttributeError("cannot set the parent of a main greenlet");
+}
+
+const OwnedGreenlet
+MainGreenlet::parent() const
+{
+    return OwnedGreenlet(); // null becomes None
+}
+
+}; // namespace greenlet
+
+#endif
diff --git a/venv/Lib/site-packages/greenlet/TPythonState.cpp b/venv/Lib/site-packages/greenlet/TPythonState.cpp
new file mode 100644
index 00000000..a7f743cf
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/TPythonState.cpp
@@ -0,0 +1,402 @@
+#ifndef GREENLET_PYTHON_STATE_CPP
+#define GREENLET_PYTHON_STATE_CPP
+
+#include <Python.h>
+#include "TGreenlet.hpp"
+
+namespace greenlet {
+
+PythonState::PythonState()
+    : _top_frame()
+#if GREENLET_USE_CFRAME
+    ,cframe(nullptr)
+    ,use_tracing(0)
+#endif
+#if GREENLET_PY314
+    ,py_recursion_depth(0)
+#elif GREENLET_PY312
+    ,py_recursion_depth(0)
+    ,c_recursion_depth(0)
+#else
+    ,recursion_depth(0)
+#endif
+#if GREENLET_PY313
+    ,delete_later(nullptr)
+#else
+    ,trash_delete_nesting(0)
+#endif
+#if GREENLET_PY311
+    ,current_frame(nullptr)
+    ,datastack_chunk(nullptr)
+    ,datastack_top(nullptr)
+    ,datastack_limit(nullptr)
+#endif
+{
+#if GREENLET_USE_CFRAME
+    /*
+      The PyThreadState->cframe pointer usually points to memory on
+      the stack, allocated in a call into PyEval_EvalFrameDefault.
+
+      Initially, before any evaluation begins, it points to the
+      initial PyThreadState object's ``root_cframe`` object, which is
+      statically allocated for the lifetime of the thread.
+
+      A greenlet can last for longer than a call to
+      PyEval_EvalFrameDefault, so we can't set its ``cframe`` pointer
+      to be the current ``PyThreadState->cframe``; nor could we use
+      one from the greenlet parent for the same reason. Yet a further
+      no: we can't allocate one scoped to the greenlet and then
+      destroy it when the greenlet is deallocated, because inside the
+      interpreter the _PyCFrame objects form a linked list, and that too
+      can result in accessing memory beyond its dynamic lifetime (if
+      the greenlet doesn't actually finish before it dies, its entry
+      could still be in the list).
+
+      Using the ``root_cframe`` is problematic, though, because its
+      members are never modified by the interpreter and are set to 0,
+      meaning that its ``use_tracing`` flag is never updated. We don't
+      want to modify that value in the ``root_cframe`` ourselves: it
+      *shouldn't* matter much because we should probably never get
+      back to the point where that's the only cframe on the stack;
+      even if it did matter, the major consequence of an incorrect
+      value for ``use_tracing`` is that if it's true the interpreter
+      does some extra work --- however, it's just good code hygiene.
+
+      Our solution: before a greenlet runs, after its initial
+      creation, it uses the ``root_cframe`` just to have something to
+      put there.
However, once the greenlet is actually switched to + for the first time, ``g_initialstub`` (which doesn't actually + "return" while the greenlet is running) stores a new _PyCFrame on + its local stack, and copies the appropriate values from the + currently running _PyCFrame; this is then made the _PyCFrame for the + newly-minted greenlet. ``g_initialstub`` then proceeds to call + ``glet.run()``, which results in ``PyEval_...`` adding the + _PyCFrame to the list. Switches continue as normal. Finally, when + the greenlet finishes, the call to ``glet.run()`` returns and + the _PyCFrame is taken out of the linked list and the stack value + is now unused and free to expire. + + XXX: I think we can do better. If we're deallocing in the same + thread, can't we traverse the list and unlink our frame? + Can we just keep a reference to the thread state in case we + dealloc in another thread? (Is that even possible if we're still + running and haven't returned from g_initialstub?) + */ + this->cframe = &PyThreadState_GET()->root_cframe; +#endif +} + + +inline void PythonState::may_switch_away() noexcept +{ +#if GREENLET_PY311 + // PyThreadState_GetFrame is probably going to have to allocate a + // new frame object. That may trigger garbage collection. Because + // we call this during the early phases of a switch (it doesn't + // matter to which greenlet, as this has a global effect), if a GC + // triggers a switch away, two things can happen, both bad: + // - We might not get switched back to, halting forward progress. + // this is pathological, but possible. + // - We might get switched back to with a different set of + // arguments or a throw instead of a switch. That would corrupt + // our state (specifically, PyErr_Occurred() and this->args() + // would no longer agree). + // + // Thus, when we call this API, we need to have GC disabled. + // This method serves as a bottleneck we call when maybe beginning + // a switch. In this way, it is always safe -- no risk of GC -- to + // use ``_GetFrame()`` whenever we need to, just as it was in + // <=3.10 (because subsequent calls will be cached and not + // allocate memory). + + GCDisabledGuard no_gc; + Py_XDECREF(PyThreadState_GetFrame(PyThreadState_GET())); +#endif +} + +void PythonState::operator<<(const PyThreadState *const tstate) noexcept +{ + this->_context.steal(tstate->context); +#if GREENLET_USE_CFRAME + /* + IMPORTANT: ``cframe`` is a pointer into the STACK. Thus, because + the call to ``slp_switch()`` changes the contents of the stack, + you cannot read from ``ts_current->cframe`` after that call and + necessarily get the same values you get from reading it here. + Anything you need to restore from now to then must be saved in a + global/threadlocal variable (because we can't use stack + variables here either). For things that need to persist across + the switch, use `will_switch_from`. 
+ */ + this->cframe = tstate->cframe; + #if !GREENLET_PY312 + this->use_tracing = tstate->cframe->use_tracing; + #endif +#endif // GREENLET_USE_CFRAME +#if GREENLET_PY311 + #if GREENLET_PY314 + this->py_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining; + #elif GREENLET_PY312 + this->py_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining; + this->c_recursion_depth = Py_C_RECURSION_LIMIT - tstate->c_recursion_remaining; + #else // not 312 + this->recursion_depth = tstate->recursion_limit - tstate->recursion_remaining; + #endif // GREENLET_PY312 + #if GREENLET_PY313 + this->current_frame = tstate->current_frame; + #elif GREENLET_USE_CFRAME + this->current_frame = tstate->cframe->current_frame; + #endif + this->datastack_chunk = tstate->datastack_chunk; + this->datastack_top = tstate->datastack_top; + this->datastack_limit = tstate->datastack_limit; + + PyFrameObject *frame = PyThreadState_GetFrame((PyThreadState *)tstate); + Py_XDECREF(frame); // PyThreadState_GetFrame gives us a new + // reference. + this->_top_frame.steal(frame); + #if GREENLET_PY313 + this->delete_later = Py_XNewRef(tstate->delete_later); + #elif GREENLET_PY312 + this->trash_delete_nesting = tstate->trash.delete_nesting; + #else // not 312 + this->trash_delete_nesting = tstate->trash_delete_nesting; + #endif // GREENLET_PY312 +#else // Not 311 + this->recursion_depth = tstate->recursion_depth; + this->_top_frame.steal(tstate->frame); + this->trash_delete_nesting = tstate->trash_delete_nesting; +#endif // GREENLET_PY311 +} + +#if GREENLET_PY312 +void GREENLET_NOINLINE(PythonState::unexpose_frames)() +{ + if (!this->top_frame()) { + return; + } + + // See GreenletState::expose_frames() and the comment on frames_were_exposed + // for more information about this logic. + _PyInterpreterFrame *iframe = this->_top_frame->f_frame; + while (iframe != nullptr) { + _PyInterpreterFrame *prev_exposed = iframe->previous; + assert(iframe->frame_obj); + memcpy(&iframe->previous, &iframe->frame_obj->_f_frame_data[0], + sizeof(void *)); + iframe = prev_exposed; + } +} +#else +void PythonState::unexpose_frames() +{} +#endif + +void PythonState::operator>>(PyThreadState *const tstate) noexcept +{ + tstate->context = this->_context.relinquish_ownership(); + /* Incrementing this value invalidates the contextvars cache, + which would otherwise remain valid across switches */ + tstate->context_ver++; +#if GREENLET_USE_CFRAME + tstate->cframe = this->cframe; + /* + If we were tracing, we need to keep tracing. + There should never be the possibility of hitting the + root_cframe here. See note above about why we can't + just copy this from ``origin->cframe->use_tracing``. 
+ */ + #if !GREENLET_PY312 + tstate->cframe->use_tracing = this->use_tracing; + #endif +#endif // GREENLET_USE_CFRAME +#if GREENLET_PY311 + #if GREENLET_PY314 + tstate->py_recursion_remaining = tstate->py_recursion_limit - this->py_recursion_depth; + this->unexpose_frames(); + #elif GREENLET_PY312 + tstate->py_recursion_remaining = tstate->py_recursion_limit - this->py_recursion_depth; + tstate->c_recursion_remaining = Py_C_RECURSION_LIMIT - this->c_recursion_depth; + this->unexpose_frames(); + #else // \/ 3.11 + tstate->recursion_remaining = tstate->recursion_limit - this->recursion_depth; + #endif // GREENLET_PY312 + #if GREENLET_PY313 + tstate->current_frame = this->current_frame; + #elif GREENLET_USE_CFRAME + tstate->cframe->current_frame = this->current_frame; + #endif + tstate->datastack_chunk = this->datastack_chunk; + tstate->datastack_top = this->datastack_top; + tstate->datastack_limit = this->datastack_limit; + this->_top_frame.relinquish_ownership(); + #if GREENLET_PY313 + Py_XDECREF(tstate->delete_later); + tstate->delete_later = this->delete_later; + Py_CLEAR(this->delete_later); + #elif GREENLET_PY312 + tstate->trash.delete_nesting = this->trash_delete_nesting; + #else // not 3.12 + tstate->trash_delete_nesting = this->trash_delete_nesting; + #endif // GREENLET_PY312 +#else // not 3.11 + tstate->frame = this->_top_frame.relinquish_ownership(); + tstate->recursion_depth = this->recursion_depth; + tstate->trash_delete_nesting = this->trash_delete_nesting; +#endif // GREENLET_PY311 +} + +inline void PythonState::will_switch_from(PyThreadState *const origin_tstate) noexcept +{ +#if GREENLET_USE_CFRAME && !GREENLET_PY312 + // The weird thing is, we don't actually save this for an + // effect on the current greenlet, it's saved for an + // effect on the target greenlet. That is, we want + // continuity of this setting across the greenlet switch. + this->use_tracing = origin_tstate->cframe->use_tracing; +#endif +} + +void PythonState::set_initial_state(const PyThreadState* const tstate) noexcept +{ + this->_top_frame = nullptr; +#if GREENLET_PY314 + this->py_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining; +#elif GREENLET_PY312 + this->py_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining; + // XXX: TODO: Comment from a reviewer: + // Should this be ``Py_C_RECURSION_LIMIT - tstate->c_recursion_remaining``? + // But to me it looks more like that might not be the right + // initialization either? + this->c_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining; +#elif GREENLET_PY311 + this->recursion_depth = tstate->recursion_limit - tstate->recursion_remaining; +#else + this->recursion_depth = tstate->recursion_depth; +#endif +} +// TODO: Better state management about when we own the top frame. +int PythonState::tp_traverse(visitproc visit, void* arg, bool own_top_frame) noexcept +{ + Py_VISIT(this->_context.borrow()); + if (own_top_frame) { + Py_VISIT(this->_top_frame.borrow()); + } + return 0; +} + +void PythonState::tp_clear(bool own_top_frame) noexcept +{ + PythonStateContext::tp_clear(); + // If we get here owning a frame, + // we got dealloc'd without being finished. We may or may not be + // in the same thread. + if (own_top_frame) { + this->_top_frame.CLEAR(); + } +} + +#if GREENLET_USE_CFRAME +void PythonState::set_new_cframe(_PyCFrame& frame) noexcept +{ + frame = *PyThreadState_GET()->cframe; + /* Make the target greenlet refer to the stack value. 
*/
+    this->cframe = &frame;
+    /*
+      And restore the link to the previous frame so this one gets
+      unlinked appropriately.
+    */
+    this->cframe->previous = &PyThreadState_GET()->root_cframe;
+}
+#endif
+
+const PythonState::OwnedFrame& PythonState::top_frame() const noexcept
+{
+    return this->_top_frame;
+}
+
+void PythonState::did_finish(PyThreadState* tstate) noexcept
+{
+#if GREENLET_PY311
+    // See https://github.com/gevent/gevent/issues/1924 and
+    // https://github.com/python-greenlet/greenlet/issues/328. In
+    // short, Python 3.11 allocates memory for frames as a sort of
+    // linked list that's kept as part of PyThreadState in the
+    // ``datastack_chunk`` member and friends. These are saved and
+    // restored as part of switching greenlets.
+    //
+    // When we initially switch to a greenlet, we set those to NULL.
+    // That causes the frame management code to treat this like a
+    // brand new thread and start a fresh list of chunks, beginning
+    // with a new "root" chunk. As we make calls in this greenlet,
+    // those chunks get added, and as calls return, they get popped.
+    // But the frame code (pystate.c) is careful to make sure that the
+    // root chunk never gets popped.
+    //
+    // Thus, when a greenlet exits for the last time, there will be at
+    // least a single root chunk that we must be responsible for
+    // deallocating.
+    //
+    // The complex part is that these chunks are allocated and freed
+    // using ``_PyObject_VirtualAlloc``/``Free``. Those aren't public
+    // functions, and they aren't exported for linking. It so happens
+    // that we know they are just thin wrappers around the Arena
+    // allocator, so we can use that directly to deallocate in a
+    // compatible way.
+    //
+    // CAUTION: Check this implementation detail on every major version.
+    //
+    // It might be nice to be able to do this in our destructor, but
+    // can we be sure that no one else is using that memory? Plus, as
+    // described below, our pointers may not even be valid anymore. As
+    // a special case, there is one time that we know we can do this,
+    // and that's from the destructor of the associated UserGreenlet
+    // (NOT main greenlet).
+    PyObjectArenaAllocator alloc;
+    _PyStackChunk* chunk = nullptr;
+    if (tstate) {
+        // We really did finish, we can never be switched to again.
+        chunk = tstate->datastack_chunk;
+        // Unfortunately, we can't do much sanity checking. Our
+        // this->datastack_chunk pointer is out of date (evaluation may
+        // have popped down through it already) so we can't verify that
+        // we deallocate it. I don't think we can even check datastack_top
+        // for the same reason.
+
+        PyObject_GetArenaAllocator(&alloc);
+        tstate->datastack_chunk = nullptr;
+        tstate->datastack_limit = nullptr;
+        tstate->datastack_top = nullptr;
+
+    }
+    else if (this->datastack_chunk) {
+        // The UserGreenlet (NOT the main greenlet!) is being deallocated. If we're
+        // still holding a stack chunk, it's garbage because we know
+        // we can never switch back to let CPython clean it up.
+        // Because we haven't run since the last time we were switched
+        // away from, we know our chain is valid and can be dealloced.
+        chunk = this->datastack_chunk;
+        PyObject_GetArenaAllocator(&alloc);
+    }
+
+    if (alloc.free && chunk) {
+        // In case the arena mechanism has been torn down already.
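+        // Walk the chain, handing each chunk back to the arena
+        // allocator; the ``previous`` link is saved before each free.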
+        while (chunk) {
+            _PyStackChunk *prev = chunk->previous;
+            chunk->previous = nullptr;
+            alloc.free(alloc.ctx, chunk, chunk->size);
+            chunk = prev;
+        }
+    }
+
+    this->datastack_chunk = nullptr;
+    this->datastack_limit = nullptr;
+    this->datastack_top = nullptr;
+#endif
+}
+
+
+}; // namespace greenlet
+
+#endif // GREENLET_PYTHON_STATE_CPP
diff --git a/venv/Lib/site-packages/greenlet/TStackState.cpp b/venv/Lib/site-packages/greenlet/TStackState.cpp
new file mode 100644
index 00000000..9743ab51
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/TStackState.cpp
@@ -0,0 +1,265 @@
+#ifndef GREENLET_STACK_STATE_CPP
+#define GREENLET_STACK_STATE_CPP
+
+#include "TGreenlet.hpp"
+
+namespace greenlet {
+
+#ifdef GREENLET_USE_STDIO
+#include <iostream>
+using std::cerr;
+using std::endl;
+
+std::ostream& operator<<(std::ostream& os, const StackState& s)
+{
+    os << "StackState(stack_start=" << (void*)s._stack_start
+       << ", stack_stop=" << (void*)s.stack_stop
+       << ", stack_copy=" << (void*)s.stack_copy
+       << ", stack_saved=" << s._stack_saved
+       << ", stack_prev=" << s.stack_prev
+       << ", addr=" << &s
+       << ")";
+    return os;
+}
+#endif
+
+StackState::StackState(void* mark, StackState& current)
+    : _stack_start(nullptr),
+      stack_stop((char*)mark),
+      stack_copy(nullptr),
+      _stack_saved(0),
+      /* Skip a dying greenlet */
+      stack_prev(current._stack_start
+                 ? &current
+                 : current.stack_prev)
+{
+}
+
+StackState::StackState()
+    : _stack_start(nullptr),
+      stack_stop(nullptr),
+      stack_copy(nullptr),
+      _stack_saved(0),
+      stack_prev(nullptr)
+{
+}
+
+StackState::StackState(const StackState& other)
+// can't use a delegating constructor because of
+// MSVC for Python 2.7
+    : _stack_start(nullptr),
+      stack_stop(nullptr),
+      stack_copy(nullptr),
+      _stack_saved(0),
+      stack_prev(nullptr)
+{
+    this->operator=(other);
+}
+
+StackState& StackState::operator=(const StackState& other)
+{
+    if (&other == this) {
+        return *this;
+    }
+    if (other._stack_saved) {
+        throw std::runtime_error("Refusing to steal memory.");
+    }
+
+    // If we have memory allocated, dispose of it
+    this->free_stack_copy();
+
+    this->_stack_start = other._stack_start;
+    this->stack_stop = other.stack_stop;
+    this->stack_copy = other.stack_copy;
+    this->_stack_saved = other._stack_saved;
+    this->stack_prev = other.stack_prev;
+    return *this;
+}
+
+inline void StackState::free_stack_copy() noexcept
+{
+    PyMem_Free(this->stack_copy);
+    this->stack_copy = nullptr;
+    this->_stack_saved = 0;
+}
+
+inline void StackState::copy_heap_to_stack(const StackState& current) noexcept
+{
+
+    /* Restore the heap copy back into the C stack */
+    if (this->_stack_saved != 0) {
+        memcpy(this->_stack_start, this->stack_copy, this->_stack_saved);
+        this->free_stack_copy();
+    }
+    StackState* owner = const_cast<StackState*>(&current);
+    if (!owner->_stack_start) {
+        owner = owner->stack_prev; /* greenlet is dying, skip it */
+    }
+    while (owner && owner->stack_stop <= this->stack_stop) {
+        // cerr << "\tOwner: " << owner << endl;
+        owner = owner->stack_prev; /* find greenlet with more stack */
+    }
+    this->stack_prev = owner;
+    // cerr << "\tFinished with: " << *this << endl;
+}
+
+inline int StackState::copy_stack_to_heap_up_to(const char* const stop) noexcept
+{
+    /* Save more of g's stack into the heap -- at least up to 'stop'
+       g->stack_stop |________|
+                     |        |
+                     |    __ stop       . . . . .
+                     |        |    ==>  .       .
+                     |________|          _______
+                     |        |         |       |
+                     |        |         |       |
+      g->stack_start |        |         |_______| g->stack_copy
+     */
+    intptr_t sz1 = this->_stack_saved;
+    intptr_t sz2 = stop - this->_stack_start;
+    assert(this->_stack_start);
+    if (sz2 > sz1) {
+        char* c = (char*)PyMem_Realloc(this->stack_copy, sz2);
+        if (!c) {
+            PyErr_NoMemory();
+            return -1;
+        }
+        memcpy(c + sz1, this->_stack_start + sz1, sz2 - sz1);
+        this->stack_copy = c;
+        this->_stack_saved = sz2;
+    }
+    return 0;
+}
+
+inline int StackState::copy_stack_to_heap(char* const stackref,
+                                          const StackState& current) noexcept
+{
+    /* must free all the C stack up to target_stop */
+    const char* const target_stop = this->stack_stop;
+
+    StackState* owner = const_cast<StackState*>(&current);
+    assert(owner->_stack_saved == 0); // everything is present on the stack
+    if (!owner->_stack_start) {
+        owner = owner->stack_prev; /* not saved if dying */
+    }
+    else {
+        owner->_stack_start = stackref;
+    }
+
+    while (owner->stack_stop < target_stop) {
+        /* ts_current is entirely within the area to free */
+        if (owner->copy_stack_to_heap_up_to(owner->stack_stop)) {
+            return -1; /* XXX */
+        }
+        owner = owner->stack_prev;
+    }
+    if (owner != this) {
+        if (owner->copy_stack_to_heap_up_to(target_stop)) {
+            return -1; /* XXX */
+        }
+    }
+    return 0;
+}
+
+inline bool StackState::started() const noexcept
+{
+    return this->stack_stop != nullptr;
+}
+
+inline bool StackState::main() const noexcept
+{
+    return this->stack_stop == (char*)-1;
+}
+
+inline bool StackState::active() const noexcept
+{
+    return this->_stack_start != nullptr;
+}
+
+inline void StackState::set_active() noexcept
+{
+    assert(this->_stack_start == nullptr);
+    this->_stack_start = (char*)1;
+}
+
+inline void StackState::set_inactive() noexcept
+{
+    this->_stack_start = nullptr;
+    // XXX: What if we still have memory out there?
+    // That case is actually triggered by
+    // test_issue251_issue252_explicit_reference_not_collectable (greenlet.tests.test_leaks.TestLeaks)
+    // and
+    // test_issue251_issue252_need_to_collect_in_background
+    // (greenlet.tests.test_leaks.TestLeaks)
+    //
+    // Those objects never get deallocated, so the destructor never
+    // runs.
+    // It *seems* safe to clean up the memory here?
+    if (this->_stack_saved) {
+        this->free_stack_copy();
+    }
+}
+
+inline intptr_t StackState::stack_saved() const noexcept
+{
+    return this->_stack_saved;
+}
+
+inline char* StackState::stack_start() const noexcept
+{
+    return this->_stack_start;
+}
+
+
+inline StackState StackState::make_main() noexcept
+{
+    StackState s;
+    s._stack_start = (char*)1;
+    s.stack_stop = (char*)-1;
+    return s;
+}
+
+StackState::~StackState()
+{
+    if (this->_stack_saved != 0) {
+        this->free_stack_copy();
+    }
+}
+
+void StackState::copy_from_stack(void* vdest, const void* vsrc, size_t n) const
+{
+    char* dest = static_cast<char*>(vdest);
+    const char* src = static_cast<const char*>(vsrc);
+    if (src + n <= this->_stack_start
+        || src >= this->_stack_start + this->_stack_saved
+        || this->_stack_saved == 0) {
+        // Nothing we're copying was spilled from the stack
+        memcpy(dest, src, n);
+        return;
+    }
+
+    if (src < this->_stack_start) {
+        // Copy the part before the saved stack.
+        // We know src + n > _stack_start due to the test above.
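+        // That prefix still lives in real memory below the saved
+        // region, so it can be copied straight from ``src``.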
+        const size_t nbefore = this->_stack_start - src;
+        memcpy(dest, src, nbefore);
+        dest += nbefore;
+        src += nbefore;
+        n -= nbefore;
+    }
+    // We know src >= _stack_start after the before-copy, and
+    // src < _stack_start + _stack_saved due to the first if condition
+    size_t nspilled = std::min(n, this->_stack_start + this->_stack_saved - src);
+    memcpy(dest, this->stack_copy + (src - this->_stack_start), nspilled);
+    dest += nspilled;
+    src += nspilled;
+    n -= nspilled;
+    if (n > 0) {
+        // Copy the part after the saved stack
+        memcpy(dest, src, n);
+    }
+}
+
+}; // namespace greenlet
+
+#endif // GREENLET_STACK_STATE_CPP
diff --git a/venv/Lib/site-packages/greenlet/TThreadState.hpp b/venv/Lib/site-packages/greenlet/TThreadState.hpp
new file mode 100644
index 00000000..e4e6f6cb
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/TThreadState.hpp
@@ -0,0 +1,497 @@
+#ifndef GREENLET_THREAD_STATE_HPP
+#define GREENLET_THREAD_STATE_HPP
+
+#include <ctime>
+#include <stdexcept>
+
+#include "greenlet_internal.hpp"
+#include "greenlet_refs.hpp"
+#include "greenlet_thread_support.hpp"
+
+using greenlet::refs::BorrowedObject;
+using greenlet::refs::BorrowedGreenlet;
+using greenlet::refs::BorrowedMainGreenlet;
+using greenlet::refs::OwnedMainGreenlet;
+using greenlet::refs::OwnedObject;
+using greenlet::refs::OwnedGreenlet;
+using greenlet::refs::OwnedList;
+using greenlet::refs::PyErrFetchParam;
+using greenlet::refs::PyArgParseParam;
+using greenlet::refs::ImmortalString;
+using greenlet::refs::CreatedModule;
+using greenlet::refs::PyErrPieces;
+using greenlet::refs::NewReference;
+
+namespace greenlet {
+/**
+ * Thread-local state of greenlets.
+ *
+ * Each native thread will get exactly one of these objects,
+ * automatically accessed through the best available thread-local
+ * mechanism the compiler supports (``thread_local`` for C++11
+ * compilers or ``__thread``/``declspec(thread)`` for older GCC/clang
+ * or MSVC, respectively.)
+ *
+ * Previously, we kept thread-local state mostly in a bunch of
+ * ``static volatile`` variables in the main greenlet file. This had
+ * the problem of requiring extra checks, loops, and great care
+ * accessing these variables if we potentially invoked any Python code
+ * that could release the GIL, because the state could change out from
+ * under us. Making the variables thread-local solves this problem.
+ *
+ * When we detected that a greenlet API accessing the current greenlet
+ * was invoked from a different thread than the greenlet belonged to,
+ * we stored a reference to the greenlet in the Python thread
+ * dictionary for the thread the greenlet belonged to. This could lead
+ * to memory leaks if the thread then exited (because of a reference
+ * cycle, as greenlets referred to the thread dictionary, and deleting
+ * non-current greenlets leaked their frame plus perhaps arguments on
+ * the C stack). If a thread exited while still having running
+ * greenlet objects (perhaps that had just switched back to the main
+ * greenlet), and did not invoke one of the greenlet APIs *in that
+ * thread, immediately before it exited, without some other thread
+ * then being invoked*, such a leak was guaranteed.
+ *
+ * This can be partly solved by using compiler thread-local variables
+ * instead of the Python thread dictionary, thus avoiding a cycle.
+ *
+ * To fully solve this problem, we need a reliable way to know that a
+ * thread is done and we should clean up the main greenlet.
+ * On POSIX, we can use the destructor function of ``pthread_key_create``,
+ * but there's nothing similar on Windows; a C++11 thread local object
+ * reliably invokes its destructor when the thread it belongs to exits
+ * (non-C++11 compilers offer ``__thread`` or ``declspec(thread)`` to
+ * create thread-local variables, but they can't hold C++ objects that
+ * invoke destructors; the C++11 version is the most portable solution
+ * I found). When the thread exits, we can drop references and
+ * otherwise manipulate greenlets and frames that we know can no
+ * longer be switched to. For compilers that don't support C++11
+ * thread locals, we have a solution that uses the Python thread
+ * dictionary, though it may not collect everything as promptly as
+ * other compilers do, if some other library is using the thread
+ * dictionary and has a cycle or extra reference.
+ *
+ * There are two small wrinkles. The first is that when the thread
+ * exits, it is too late to actually invoke Python APIs: the Python
+ * thread state is gone, and the GIL is released. To solve *this*
+ * problem, our destructor uses ``Py_AddPendingCall`` to transfer the
+ * destruction work to the main thread. (This is not an issue for the
+ * dictionary solution.)
+ *
+ * The second is that once the thread exits, the thread local object
+ * is invalid and we can't even access a pointer to it, so we can't
+ * pass it to ``Py_AddPendingCall``. This is handled by actually using
+ * a second object that's thread local (ThreadStateCreator) and having
+ * it dynamically allocate this object so it can live until the
+ * pending call runs.
+ */
+
+
+
+class ThreadState {
+private:
+    // As of commit 08ad1dd7012b101db953f492e0021fb08634afad
+    // this class needed 56 bytes in a Py_DEBUG build
+    // on 64-bit macOS 11.
+    // Adding the vector takes us up to 80 bytes.
+
+    /* Strong reference to the main greenlet */
+    OwnedMainGreenlet main_greenlet;
+
+    /* Strong reference to the current greenlet. */
+    OwnedGreenlet current_greenlet;
+
+    /* Strong reference to the trace function, if any. */
+    OwnedObject tracefunc;
+
+    typedef std::vector<PyGreenlet*, PythonAllocator<PyGreenlet*> > deleteme_t;
+    /* A vector of raw PyGreenlet pointers representing things that need
+       deleted when this thread is running. The vector owns the
+       references, but you need to manually INCREF/DECREF as you use
+       them. We don't use a vector of ``OwnedGreenlet`` because we make
+       a copy of this vector, and that would become O(n) as all the
+       refcounts are incremented in the copy.
+    */
+    deleteme_t deleteme;
+
+#ifdef GREENLET_NEEDS_EXCEPTION_STATE_SAVED
+    void* exception_state;
+#endif
+
+    static std::clock_t _clocks_used_doing_gc;
+    static ImmortalString get_referrers_name;
+    static PythonAllocator<ThreadState> allocator;
+
+    G_NO_COPIES_OF_CLS(ThreadState);
+
+
+    // Allocates a main greenlet for the thread state. If this fails,
+    // exits the process. Called only during constructing a ThreadState.
+    MainGreenlet* alloc_main()
+    {
+        PyGreenlet* gmain;
+
+        /* create the main greenlet for this thread */
+        gmain = reinterpret_cast<PyGreenlet*>(PyType_GenericAlloc(&PyGreenlet_Type, 0));
+        if (gmain == NULL) {
+            throw PyFatalError("alloc_main failed to alloc"); //exits the process
+        }
+
+        MainGreenlet* const main = new MainGreenlet(gmain, this);
+
+        assert(Py_REFCNT(gmain) == 1);
+        assert(gmain->pimpl == main);
+        return main;
+    }
+
+
+public:
+    static void* operator new(size_t UNUSED(count))
+    {
+        return ThreadState::allocator.allocate(1);
+    }
+
+    static void operator delete(void* ptr)
+    {
+        return ThreadState::allocator.deallocate(static_cast<ThreadState*>(ptr),
+                                                 1);
+    }
+
+    static void init()
+    {
+        ThreadState::get_referrers_name = "get_referrers";
+        ThreadState::_clocks_used_doing_gc = 0;
+    }
+
+    ThreadState()
+    {
+
+#ifdef GREENLET_NEEDS_EXCEPTION_STATE_SAVED
+        this->exception_state = slp_get_exception_state();
+#endif
+
+        // XXX: Potentially dangerous, exposing a not fully
+        // constructed object.
+        MainGreenlet* const main = this->alloc_main();
+        this->main_greenlet = OwnedMainGreenlet::consuming(
+            main->self()
+        );
+        assert(this->main_greenlet);
+        this->current_greenlet = main->self();
+        // The main greenlet starts with one ref: the returned one. We
+        // then copied it to the current greenlet.
+        assert(this->main_greenlet.REFCNT() == 2);
+    }
+
+    inline void restore_exception_state()
+    {
+#ifdef GREENLET_NEEDS_EXCEPTION_STATE_SAVED
+        // It's probably important this be inlined and only call C
+        // functions to avoid adding an SEH frame.
+        slp_set_exception_state(this->exception_state);
+#endif
+    }
+
+    inline bool has_main_greenlet() const noexcept
+    {
+        return bool(this->main_greenlet);
+    }
+
+    // Called from the ThreadStateCreator when we're in non-standard
+    // threading mode. In that case, there is an object in the Python
+    // thread state dictionary that points to us. The main greenlet
+    // also traverses into us, in which case it's crucial not to
+    // traverse back into the main greenlet.
+    int tp_traverse(visitproc visit, void* arg, bool traverse_main=true)
+    {
+        if (traverse_main) {
+            Py_VISIT(main_greenlet.borrow_o());
+        }
+        if (traverse_main || current_greenlet != main_greenlet) {
+            Py_VISIT(current_greenlet.borrow_o());
+        }
+        Py_VISIT(tracefunc.borrow());
+        return 0;
+    }
+
+    inline BorrowedMainGreenlet borrow_main_greenlet() const noexcept
+    {
+        assert(this->main_greenlet);
+        assert(this->main_greenlet.REFCNT() >= 2);
+        return this->main_greenlet;
+    };
+
+    inline OwnedMainGreenlet get_main_greenlet() const noexcept
+    {
+        return this->main_greenlet;
+    }
+
+    /**
+     * In addition to returning a new reference to the current
+     * greenlet, this performs any maintenance needed.
+     */
+    inline OwnedGreenlet get_current()
+    {
+        /* green_dealloc() cannot delete greenlets from other threads, so
+           it stores them in the thread dict; delete them now. */
+        this->clear_deleteme_list();
+        //assert(this->current_greenlet->main_greenlet == this->main_greenlet);
+        //assert(this->main_greenlet->main_greenlet == this->main_greenlet);
+        return this->current_greenlet;
+    }
+
+    /**
+     * As for the non-const get_current().
+     */
+    inline BorrowedGreenlet borrow_current()
+    {
+        this->clear_deleteme_list();
+        return this->current_greenlet;
+    }
+
+    /**
+     * Does no maintenance.
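+     * Unlike the accessors above, it does not clear the ``deleteme``
+     * list, so it cannot run the arbitrary cleanup code that deleting
+     * stale greenlets may trigger.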
+     */
+    inline OwnedGreenlet get_current() const
+    {
+        return this->current_greenlet;
+    }
+
+    template <typename T>
+    inline bool is_current(const refs::PyObjectPointer<T>& obj) const
+    {
+        return this->current_greenlet.borrow_o() == obj.borrow_o();
+    }
+
+    inline void set_current(const OwnedGreenlet& target)
+    {
+        this->current_greenlet = target;
+    }
+
+private:
+    /**
+     * Deref and remove the greenlets from the deleteme list. Must be
+     * holding the GIL.
+     *
+     * If *murder* is true, then we must be called from a different
+     * thread than the one that these greenlets were running in.
+     * In that case, if the greenlet was actually running, we destroy
+     * the frame reference and otherwise make it appear dead before
+     * proceeding; otherwise, we would try (and fail) to raise an
+     * exception in it and wind up right back in this list.
+     */
+    inline void clear_deleteme_list(const bool murder=false)
+    {
+        if (!this->deleteme.empty()) {
+            // It's possible we could add items to this list while
+            // running Python code if there's a thread switch, so we
+            // need to defensively copy it before that can happen.
+            deleteme_t copy = this->deleteme;
+            this->deleteme.clear(); // in case things come back on the list
+            for(deleteme_t::iterator it = copy.begin(), end = copy.end();
+                it != end;
+                ++it ) {
+                PyGreenlet* to_del = *it;
+                if (murder) {
+                    // Force each greenlet to appear dead; we can't raise an
+                    // exception into it anymore anyway.
+                    to_del->pimpl->murder_in_place();
+                }
+
+                // The only reference to these greenlets should be in
+                // this list, decreffing them should let them be
+                // deleted again, triggering calls to green_dealloc()
+                // in the correct thread (if we're not murdering).
+                // This may run arbitrary Python code and switch
+                // threads or greenlets!
+                Py_DECREF(to_del);
+                if (PyErr_Occurred()) {
+                    PyErr_WriteUnraisable(nullptr);
+                    PyErr_Clear();
+                }
+            }
+        }
+    }
+
+public:
+
+    /**
+     * Returns a new reference, or a false object.
+     */
+    inline OwnedObject get_tracefunc() const
+    {
+        return tracefunc;
+    };
+
+
+    inline void set_tracefunc(BorrowedObject tracefunc)
+    {
+        assert(tracefunc);
+        if (tracefunc == BorrowedObject(Py_None)) {
+            this->tracefunc.CLEAR();
+        }
+        else {
+            this->tracefunc = tracefunc;
+        }
+    }
+
+    /**
+     * Given a reference to a greenlet that some other thread
+     * attempted to delete (has a refcount of 0) store it for later
+     * deletion when the thread this state belongs to is current.
+     */
+    inline void delete_when_thread_running(PyGreenlet* to_del)
+    {
+        Py_INCREF(to_del);
+        this->deleteme.push_back(to_del);
+    }
+
+    /**
+     * Set to std::clock_t(-1) to disable.
+     */
+    inline static std::clock_t& clocks_used_doing_gc()
+    {
+        return ThreadState::_clocks_used_doing_gc;
+    }
+
+    ~ThreadState()
+    {
+        if (!PyInterpreterState_Head()) {
+            // We shouldn't get here (our callers protect us)
+            // but if we do, all we can do is bail early.
+            return;
+        }
+
+        // We should not have an "origin" greenlet; that only exists
+        // for the temporary time during a switch, which should not
+        // be in progress as the thread dies.
+        //assert(!this->switching_state.origin);
+
+        this->tracefunc.CLEAR();
+
+        // Forcibly GC as much as we can.
+        this->clear_deleteme_list(true);
+
+        // The pending call did this.
+        assert(this->main_greenlet->thread_state() == nullptr);
+
+        // If the main greenlet is the current greenlet,
+        // then we "fell off the end" and the thread died.
+        // It's possible that there is some other greenlet that
+        // switched to us, leaving a reference to the main greenlet
+        // on the stack, somewhere uncollectible. Try to detect that.
+        if (this->current_greenlet == this->main_greenlet && this->current_greenlet) {
+            assert(this->current_greenlet->is_currently_running_in_some_thread());
+            // Drop one reference we hold.
+            this->current_greenlet.CLEAR();
+            assert(!this->current_greenlet);
+            // Only our reference to the main greenlet should be left,
+            // but hold onto the pointer in case we need to do extra cleanup.
+            PyGreenlet* old_main_greenlet = this->main_greenlet.borrow();
+            Py_ssize_t cnt = this->main_greenlet.REFCNT();
+            this->main_greenlet.CLEAR();
+            if (ThreadState::_clocks_used_doing_gc != std::clock_t(-1)
+                && cnt == 2 && Py_REFCNT(old_main_greenlet) == 1) {
+                // Highly likely that the reference is somewhere on
+                // the stack, not reachable by GC. Verify.
+                // XXX: This is O(n) in the total number of objects.
+                // TODO: Add a way to disable this at runtime, and
+                // another way to report on it.
+                std::clock_t begin = std::clock();
+                NewReference gc(PyImport_ImportModule("gc"));
+                if (gc) {
+                    OwnedObject get_referrers = gc.PyRequireAttr(ThreadState::get_referrers_name);
+                    OwnedList refs(get_referrers.PyCall(old_main_greenlet));
+                    if (refs && refs.empty()) {
+                        assert(refs.REFCNT() == 1);
+                        // We found nothing! So we left a dangling
+                        // reference: Probably the last thing some
+                        // other greenlet did was call
+                        // 'getcurrent().parent.switch()' to switch
+                        // back to us. Clean it up. This will be the
+                        // case on CPython 3.7 and newer, as they use
+                        // an internal calling convention that avoids
+                        // creating method objects and storing them on
+                        // the stack.
+                        Py_DECREF(old_main_greenlet);
+                    }
+                    else if (refs
+                             && refs.size() == 1
+                             && PyCFunction_Check(refs.at(0))
+                             && Py_REFCNT(refs.at(0)) == 2) {
+                        assert(refs.REFCNT() == 1);
+                        // Ok, we found a C method that refers to the
+                        // main greenlet, and it's only referenced
+                        // twice, once in the list we just created,
+                        // once from...somewhere else. If we can't
+                        // find where else, then this is a leak.
+                        // This happens in older versions of CPython
+                        // that create a bound method object somewhere
+                        // on the stack that we'll never get back to.
+                        if (PyCFunction_GetFunction(refs.at(0).borrow()) == (PyCFunction)green_switch) {
+                            BorrowedObject function_w = refs.at(0);
+                            refs.clear(); // destroy the reference
+                                          // from the list.
+                            // back to one reference. Can *it* be
+                            // found?
+                            assert(function_w.REFCNT() == 1);
+                            refs = get_referrers.PyCall(function_w);
+                            if (refs && refs.empty()) {
+                                // Nope, it can't be found so it won't
+                                // ever be GC'd. Drop it.
+                                Py_CLEAR(function_w);
+                            }
+                        }
+                    }
+                    std::clock_t end = std::clock();
+                    ThreadState::_clocks_used_doing_gc += (end - begin);
+                }
+            }
+        }
+
+        // We need to make sure this greenlet appears to be dead,
+        // because otherwise deallocing it would fail to raise an
+        // exception in it (the thread is dead) and put it back in our
+        // deleteme list.
+        if (this->current_greenlet) {
+            this->current_greenlet->murder_in_place();
+            this->current_greenlet.CLEAR();
+        }
+
+        if (this->main_greenlet) {
+            // Couldn't have been the main greenlet that was running
+            // when the thread exited (because we already cleared this
+            // pointer if it was). This shouldn't be possible?
+
+            // If the main greenlet was current when the thread died (it
+            // should be, right?)
+            // then we cleared its self pointer above when we cleared
+            // the current greenlet's main greenlet pointer.
+            // assert(this->main_greenlet->main_greenlet == this->main_greenlet
+            //        || !this->main_greenlet->main_greenlet);
+            // // self reference, probably gone
+            // this->main_greenlet->main_greenlet.CLEAR();
+
+            // This will actually go away when the ivar is destructed.
+            this->main_greenlet.CLEAR();
+        }
+
+        if (PyErr_Occurred()) {
+            PyErr_WriteUnraisable(NULL);
+            PyErr_Clear();
+        }
+
+    }
+
+};
+
+ImmortalString ThreadState::get_referrers_name(nullptr);
+PythonAllocator<ThreadState> ThreadState::allocator;
+std::clock_t ThreadState::_clocks_used_doing_gc(0);
+
+
+
+
+
+}; // namespace greenlet
+
+#endif
diff --git a/venv/Lib/site-packages/greenlet/TThreadStateCreator.hpp b/venv/Lib/site-packages/greenlet/TThreadStateCreator.hpp
new file mode 100644
index 00000000..2ec7ab55
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/TThreadStateCreator.hpp
@@ -0,0 +1,102 @@
+#ifndef GREENLET_THREAD_STATE_CREATOR_HPP
+#define GREENLET_THREAD_STATE_CREATOR_HPP
+
+#include <ctime>
+#include <stdexcept>
+
+#include "greenlet_internal.hpp"
+#include "greenlet_refs.hpp"
+#include "greenlet_thread_support.hpp"
+
+#include "TThreadState.hpp"
+
+namespace greenlet {
+
+
+typedef void (*ThreadStateDestructor)(ThreadState* const);
+
+template <ThreadStateDestructor Destructor>
+class ThreadStateCreator
+{
+private:
+    // Initialized to 1, and, if still 1, created on access.
+    // Set to 0 on destruction.
+    ThreadState* _state;
+    G_NO_COPIES_OF_CLS(ThreadStateCreator);
+
+    inline bool has_initialized_state() const noexcept
+    {
+        return this->_state != (ThreadState*)1;
+    }
+
+    inline bool has_state() const noexcept
+    {
+        return this->has_initialized_state() && this->_state != nullptr;
+    }
+
+public:
+
+    // Only one of these, auto created per thread.
+    // Constructing the state constructs the MainGreenlet.
+    ThreadStateCreator() :
+        _state((ThreadState*)1)
+    {
+    }
+
+    ~ThreadStateCreator()
+    {
+        if (this->has_state()) {
+            Destructor(this->_state);
+        }
+
+        this->_state = nullptr;
+    }
+
+    inline ThreadState& state()
+    {
+        // The main greenlet will own this pointer when it is created,
+        // which will be right after this. The plan is to give every
+        // greenlet a pointer to the main greenlet for the thread it
+        // runs in; if we are doing something cross-thread, we need to
+        // access the pointer from the main greenlet. Deleting the
+        // thread, and hence the thread-local storage, will delete the
+        // state pointer in the main greenlet.
+        if (!this->has_initialized_state()) {
+            // XXX: Assuming allocation never fails
+            this->_state = new ThreadState;
+            // For non-standard threading, we need to store an object
+            // in the Python thread state dictionary so that it can be
+            // DECREF'd when the thread ends (ideally; the dict could
+            // last longer) and clean this object up.
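+            // (Standard builds instead rely on ~ThreadStateCreator
+            // running when the C++11 thread local is destroyed.)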
+ } + if (!this->_state) { + throw std::runtime_error("Accessing state after destruction."); + } + return *this->_state; + } + + operator ThreadState&() + { + return this->state(); + } + + operator ThreadState*() + { + return &this->state(); + } + + inline int tp_traverse(visitproc visit, void* arg) + { + if (this->has_state()) { + return this->_state->tp_traverse(visit, arg); + } + return 0; + } + +}; + + + +}; // namespace greenlet + +#endif diff --git a/venv/Lib/site-packages/greenlet/TThreadStateDestroy.cpp b/venv/Lib/site-packages/greenlet/TThreadStateDestroy.cpp new file mode 100644 index 00000000..449b7887 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/TThreadStateDestroy.cpp @@ -0,0 +1,217 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +/** + * Implementation of the ThreadState destructors. + * + * Format with: + * clang-format -i --style=file src/greenlet/greenlet.c + * + * + * Fix missing braces with: + * clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements" +*/ +#ifndef T_THREADSTATE_DESTROY +#define T_THREADSTATE_DESTROY + +#include "TGreenlet.hpp" + +#include "greenlet_thread_support.hpp" +#include "greenlet_compiler_compat.hpp" +#include "TGreenletGlobals.cpp" +#include "TThreadState.hpp" +#include "TThreadStateCreator.hpp" + +namespace greenlet { + +extern "C" { + +struct ThreadState_DestroyNoGIL +{ + /** + This function uses the same lock that the PendingCallback does + */ + static void + MarkGreenletDeadAndQueueCleanup(ThreadState* const state) + { +#if GREENLET_BROKEN_THREAD_LOCAL_CLEANUP_JUST_LEAK + return; +#endif + // We are *NOT* holding the GIL. Our thread is in the middle + // of its death throes and the Python thread state is already + // gone so we can't use most Python APIs. One that is safe is + // ``Py_AddPendingCall``, unless the interpreter itself has + // been torn down. There is a limited number of calls that can + // be queued: 32 (NPENDINGCALLS) in CPython 3.10, so we + // coalesce these calls using our own queue. + + if (!MarkGreenletDeadIfNeeded(state)) { + // No state, or no greenlet + return; + } + + // XXX: Because we don't have the GIL, this is a race condition. + if (!PyInterpreterState_Head()) { + // We have to leak the thread state, if the + // interpreter has shut down when we're getting + // deallocated, we can't run the cleanup code that + // deleting it would imply. + return; + } + + AddToCleanupQueue(state); + + } + +private: + + // If the state has an allocated main greenlet: + // - mark the greenlet as dead by disassociating it from the state; + // - return 1 + // Otherwise, return 0. + static bool + MarkGreenletDeadIfNeeded(ThreadState* const state) + { + if (state && state->has_main_greenlet()) { + // mark the thread as dead ASAP. + // this is racy! If we try to throw or switch to a + // greenlet from this thread from some other thread before + // we clear the state pointer, it won't realize the state + // is dead which can crash the process. 
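+            // Detach the main greenlet from this state first; from here
+            // on, cross-thread callers see ``thread_state() == nullptr``
+            // and treat the thread as dead.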
+ PyGreenlet* p(state->borrow_main_greenlet().borrow()); + assert(p->pimpl->thread_state() == state || p->pimpl->thread_state() == nullptr); + dynamic_cast(p->pimpl)->thread_state(nullptr); + return true; + } + return false; + } + + static void + AddToCleanupQueue(ThreadState* const state) + { + assert(state && state->has_main_greenlet()); + + // NOTE: Because we're not holding the GIL here, some other + // Python thread could run and call ``os.fork()``, which would + // be bad if that happened while we are holding the cleanup + // lock (it wouldn't function in the child process). + // Make a best effort to try to keep the duration we hold the + // lock short. + // TODO: On platforms that support it, use ``pthread_atfork`` to + // drop this lock. + LockGuard cleanup_lock(*mod_globs->thread_states_to_destroy_lock); + + mod_globs->queue_to_destroy(state); + if (mod_globs->thread_states_to_destroy.size() == 1) { + // We added the first item to the queue. We need to schedule + // the cleanup. + + // A size greater than 1 means that we have already added the pending call, + // and in fact, it may be executing now. + // If it is executing, our lock makes sure that it will see the item we just added + // to the queue on its next iteration (after we release the lock) + // + // A size of 1 means there is no pending call, OR the pending call is + // currently executing, has dropped the lock, and is deleting the last item + // from the queue; its next iteration will go ahead and delete the item we just added. + // And the pending call we schedule here will have no work to do. + int result = AddPendingCall( + PendingCallback_DestroyQueueWithGIL, + nullptr); + if (result < 0) { + // Hmm, what can we do here? + fprintf(stderr, + "greenlet: WARNING: failed in call to Py_AddPendingCall; " + "expect a memory leak.\n"); + } + } + } + + static int + PendingCallback_DestroyQueueWithGIL(void* UNUSED(arg)) + { + // We're holding the GIL here, so no Python code should be able to + // run to call ``os.fork()``. + while (1) { + ThreadState* to_destroy; + { + LockGuard cleanup_lock(*mod_globs->thread_states_to_destroy_lock); + if (mod_globs->thread_states_to_destroy.empty()) { + break; + } + to_destroy = mod_globs->take_next_to_destroy(); + } + assert(to_destroy); + assert(to_destroy->has_main_greenlet()); + // Drop the lock while we do the actual deletion. + // This allows other calls to MarkGreenletDeadAndQueueCleanup + // to enter and add to our queue. + DestroyOneWithGIL(to_destroy); + } + return 0; + } + + static void + DestroyOneWithGIL(const ThreadState* const state) + { + // Holding the GIL. + // Passed a non-shared pointer to the actual thread state. + // state -> main greenlet + assert(state->has_main_greenlet()); + PyGreenlet* main(state->borrow_main_greenlet()); + // When we need to do cross-thread operations, we check this. + // A NULL value means the thread died some time ago. + // We do this here, rather than in a Python dealloc function + // for the greenlet, in case there's still a reference out + // there. + dynamic_cast(main->pimpl)->thread_state(nullptr); + + delete state; // Deleting this runs the destructor, DECREFs the main greenlet. + } + + + static int AddPendingCall(int (*func)(void*), void* arg) + { + // If the interpreter is in the middle of finalizing, we can't add a + // pending call. Trying to do so will end up in a SIGSEGV, as + // Py_AddPendingCall will not be able to get the interpreter and will + // try to dereference a NULL pointer. 
+        // segfault if we happen to get context switched, and maybe we should
+        // just always implement our own AddPendingCall, but I'd like to see if
+        // this works first
+#if GREENLET_PY313
+        if (Py_IsFinalizing()) {
+#else
+        if (_Py_IsFinalizing()) {
+#endif
+#ifdef GREENLET_DEBUG
+            // No need to log in the general case. Yes, we'll leak,
+            // but we're shutting down so it should be ok.
+            fprintf(stderr,
+                    "greenlet: WARNING: Interpreter is finalizing. Ignoring "
+                    "call to Py_AddPendingCall; \n");
+#endif
+            return 0;
+        }
+        return Py_AddPendingCall(func, arg);
+    }
+
+
+
+
+
+};
+};
+
+}; // namespace greenlet
+
+// The intent when GET_THREAD_STATE() is needed multiple times in a
+// function is to take a reference to its return value in a local
+// variable, to avoid the thread-local indirection. On some platforms
+// (macOS), accessing a thread-local involves a function call (plus an
+// initial function call in each function that uses a thread local);
+// in contrast, static volatile variables are at some pre-computed
+// offset.
+typedef greenlet::ThreadStateCreator<greenlet::ThreadState_DestroyNoGIL::MarkGreenletDeadAndQueueCleanup> ThreadStateCreator;
+static thread_local ThreadStateCreator g_thread_state_global;
+#define GET_THREAD_STATE() g_thread_state_global
+
+#endif //T_THREADSTATE_DESTROY
diff --git a/venv/Lib/site-packages/greenlet/TUserGreenlet.cpp b/venv/Lib/site-packages/greenlet/TUserGreenlet.cpp
new file mode 100644
index 00000000..73a81330
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/TUserGreenlet.cpp
@@ -0,0 +1,662 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+/**
+ * Implementation of greenlet::UserGreenlet.
+ *
+ * Format with:
+ *  clang-format -i --style=file src/greenlet/greenlet.c
+ *
+ *
+ * Fix missing braces with:
+ * clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
+*/
+#ifndef T_USER_GREENLET_CPP
+#define T_USER_GREENLET_CPP
+
+#include "greenlet_internal.hpp"
+#include "TGreenlet.hpp"
+
+#include "TThreadStateDestroy.cpp"
+
+
+namespace greenlet {
+using greenlet::refs::BorrowedMainGreenlet;
+greenlet::PythonAllocator<UserGreenlet> UserGreenlet::allocator;
+
+void* UserGreenlet::operator new(size_t UNUSED(count))
+{
+    return allocator.allocate(1);
+}
+
+
+void UserGreenlet::operator delete(void* ptr)
+{
+    return allocator.deallocate(static_cast<UserGreenlet*>(ptr),
+                                1);
+}
+
+
+UserGreenlet::UserGreenlet(PyGreenlet* p, BorrowedGreenlet the_parent)
+    : Greenlet(p), _parent(the_parent)
+{
+}
+
+UserGreenlet::~UserGreenlet()
+{
+    // Python 3.11: If we don't clear out the raw frame datastack
+    // when deleting an unfinished greenlet,
+    // TestLeaks.test_untracked_memory_doesnt_increase_unfinished_thread_dealloc_in_main fails.
+    this->python_state.did_finish(nullptr);
+    this->tp_clear();
+}
+
+
+const BorrowedMainGreenlet
+UserGreenlet::main_greenlet() const
+{
+    return this->_main_greenlet;
+}
+
+
+BorrowedMainGreenlet
+UserGreenlet::find_main_greenlet_in_lineage() const
+{
+    if (this->started()) {
+        assert(this->_main_greenlet);
+        return BorrowedMainGreenlet(this->_main_greenlet);
+    }
+
+    if (!this->_parent) {
+        /* garbage collected greenlet in chain */
+        // XXX: WHAT?
+        return BorrowedMainGreenlet(nullptr);
+    }
+
+    return this->_parent->find_main_greenlet_in_lineage();
+}
+
+
+/**
+ * CAUTION: This will allocate memory and may trigger garbage
+ * collection and arbitrary Python code.
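+ * (Illustrative addition, not upstream text: "arbitrary Python code"
+ * here includes ``__del__`` methods run by a garbage-collection pass,
+ * which any allocation can trigger, and those finalizers can
+ * themselves switch greenlets.)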
+ */ +OwnedObject +UserGreenlet::throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state) +{ + /* The dying greenlet cannot be a parent of ts_current + because the 'parent' field chain would hold a + reference */ + UserGreenlet::ParentIsCurrentGuard with_current_parent(this, current_thread_state); + + // We don't care about the return value, only whether an + // exception happened. Whether or not an exception happens, + // we need to restore the parent in case the greenlet gets + // resurrected. + return Greenlet::throw_GreenletExit_during_dealloc(current_thread_state); +} + +ThreadState* +UserGreenlet::thread_state() const noexcept +{ + // TODO: maybe make this throw, if the thread state isn't there? + // if (!this->main_greenlet) { + // throw std::runtime_error("No thread state"); // TODO: Better exception + // } + if (!this->_main_greenlet) { + return nullptr; + } + return this->_main_greenlet->thread_state(); +} + + +bool +UserGreenlet::was_running_in_dead_thread() const noexcept +{ + return this->_main_greenlet && !this->thread_state(); +} + +OwnedObject +UserGreenlet::g_switch() +{ + assert(this->args() || PyErr_Occurred()); + + try { + this->check_switch_allowed(); + } + catch (const PyErrOccurred&) { + this->release_args(); + throw; + } + + // Switching greenlets used to attempt to clean out ones that need + // deleted *if* we detected a thread switch. Should it still do + // that? + // An issue is that if we delete a greenlet from another thread, + // it gets queued to this thread, and ``kill_greenlet()`` switches + // back into the greenlet + + /* find the real target by ignoring dead greenlets, + and if necessary starting a greenlet. */ + switchstack_result_t err; + Greenlet* target = this; + // TODO: probably cleaner to handle the case where we do + // switch to ourself separately from the other cases. + // This can probably even further be simplified if we keep + // track of the switching_state we're going for and just call + // into g_switch() if it's not ourself. The main problem with that + // is that we would be using more stack space. + bool target_was_me = true; + bool was_initial_stub = false; + while (target) { + if (target->active()) { + if (!target_was_me) { + target->args() <<= this->args(); + assert(!this->args()); + } + err = target->g_switchstack(); + break; + } + if (!target->started()) { + // We never encounter a main greenlet that's not started. + assert(!target->main()); + UserGreenlet* real_target = static_cast(target); + assert(real_target); + void* dummymarker; + was_initial_stub = true; + if (!target_was_me) { + target->args() <<= this->args(); + assert(!this->args()); + } + try { + // This can only throw back to us while we're + // still in this greenlet. Once the new greenlet + // is bootstrapped, it has its own exception state. + err = real_target->g_initialstub(&dummymarker); + } + catch (const PyErrOccurred&) { + this->release_args(); + throw; + } + catch (const GreenletStartedWhileInPython&) { + // The greenlet was started sometime before this + // greenlet actually switched to it, i.e., + // "concurrent" calls to switch() or throw(). + // We need to retry the switch. + // Note that the current greenlet has been reset + // to this one (or we wouldn't be running!) + continue; + } + break; + } + + target = target->parent(); + target_was_me = false; + } + // The ``this`` pointer and all other stack or register based + // variables are invalid now, at least where things succeed + // above. + // But this one, probably not so much? 
It's not clear if it's + // safe to throw an exception at this point. + + if (err.status < 0) { + // If we get here, either g_initialstub() + // failed, or g_switchstack() failed. Either one of those + // cases SHOULD leave us in the original greenlet with a valid + // stack. + return this->on_switchstack_or_initialstub_failure(target, err, target_was_me, was_initial_stub); + } + + // err.the_new_current_greenlet would be the same as ``target``, + // if target wasn't probably corrupt. + return err.the_new_current_greenlet->g_switch_finish(err); +} + + + +Greenlet::switchstack_result_t +UserGreenlet::g_initialstub(void* mark) +{ + OwnedObject run; + + // We need to grab a reference to the current switch arguments + // in case we're entered concurrently during the call to + // GetAttr() and have to try again. + // We'll restore them when we return in that case. + // Scope them tightly to avoid ref leaks. + { + SwitchingArgs args(this->args()); + + /* save exception in case getattr clears it */ + PyErrPieces saved; + + /* + self.run is the object to call in the new greenlet. + This could run arbitrary python code and switch greenlets! + */ + run = this->self().PyRequireAttr(mod_globs->str_run); + /* restore saved exception */ + saved.PyErrRestore(); + + + /* recheck that it's safe to switch in case greenlet reparented anywhere above */ + this->check_switch_allowed(); + + /* by the time we got here another start could happen elsewhere, + * that means it should now be a regular switch. + * This can happen if the Python code is a subclass that implements + * __getattribute__ or __getattr__, or makes ``run`` a descriptor; + * all of those can run arbitrary code that switches back into + * this greenlet. + */ + if (this->stack_state.started()) { + // the successful switch cleared these out, we need to + // restore our version. They will be copied on up to the + // next target. + assert(!this->args()); + this->args() <<= args; + throw GreenletStartedWhileInPython(); + } + } + + // Sweet, if we got here, we have the go-ahead and will switch + // greenlets. + // Nothing we do from here on out should allow for a thread or + // greenlet switch: No arbitrary calls to Python, including + // decref'ing + +#if GREENLET_USE_CFRAME + /* OK, we need it, we're about to switch greenlets, save the state. */ + /* + See green_new(). This is a stack-allocated variable used + while *self* is in PyObject_Call(). + We want to defer copying the state info until we're sure + we need it and are in a stable place to do so. + */ + _PyCFrame trace_info; + + this->python_state.set_new_cframe(trace_info); +#endif + /* start the greenlet */ + ThreadState& thread_state = GET_THREAD_STATE().state(); + this->stack_state = StackState(mark, + thread_state.borrow_current()->stack_state); + this->python_state.set_initial_state(PyThreadState_GET()); + this->exception_state.clear(); + this->_main_greenlet = thread_state.get_main_greenlet(); + + /* perform the initial switch */ + switchstack_result_t err = this->g_switchstack(); + /* returns twice! + The 1st time with ``err == 1``: we are in the new greenlet. + This one owns a greenlet that used to be current. + The 2nd time with ``err <= 0``: back in the caller's + greenlet; this happens if the child finishes or switches + explicitly to us. Either way, the ``err`` variable is + created twice at the same memory location, but possibly + having different ``origin`` values. Note that it's not + constructed for the second time until the switch actually happens. 
+ */ + if (err.status == 1) { + // In the new greenlet. + + // This never returns! Calling inner_bootstrap steals + // the contents of our run object within this stack frame, so + // it is not valid to do anything with it. + try { + this->inner_bootstrap(err.origin_greenlet.relinquish_ownership(), + run.relinquish_ownership()); + } + // Getting a C++ exception here isn't good. It's probably a + // bug in the underlying greenlet, meaning it's probably a + // C++ extension. We're going to abort anyway, but try to + // display some nice information *if* possible. Some obscure + // platforms don't properly support this (old 32-bit Arm, see see + // https://github.com/python-greenlet/greenlet/issues/385); that's not + // great, but should usually be OK because, as mentioned above, we're + // terminating anyway. + // + // The catching is tested by + // ``test_cpp.CPPTests.test_unhandled_exception_in_greenlet_aborts``. + // + // PyErrOccurred can theoretically be thrown by + // inner_bootstrap() -> g_switch_finish(), but that should + // never make it back to here. It is a std::exception and + // would be caught if it is. + catch (const std::exception& e) { + std::string base = "greenlet: Unhandled C++ exception: "; + base += e.what(); + Py_FatalError(base.c_str()); + } + catch (...) { + // Some compilers/runtimes use exceptions internally. + // It appears that GCC on Linux with libstdc++ throws an + // exception internally at process shutdown time to unwind + // stacks and clean up resources. Depending on exactly + // where we are when the process exits, that could result + // in an unknown exception getting here. If we + // Py_FatalError() or abort() here, we interfere with + // orderly process shutdown. Throwing the exception on up + // is the right thing to do. + // + // gevent's ``examples/dns_mass_resolve.py`` demonstrates this. +#ifndef NDEBUG + fprintf(stderr, + "greenlet: inner_bootstrap threw unknown exception; " + "is the process terminating?\n"); +#endif + throw; + } + Py_FatalError("greenlet: inner_bootstrap returned with no exception.\n"); + } + + + // In contrast, notice that we're keeping the origin greenlet + // around as an owned reference; we need it to call the trace + // function for the switch back into the parent. It was only + // captured at the time the switch actually happened, though, + // so we haven't been keeping an extra reference around this + // whole time. + + /* back in the parent */ + if (err.status < 0) { + /* start failed badly, restore greenlet state */ + this->stack_state = StackState(); + this->_main_greenlet.CLEAR(); + // CAUTION: This may run arbitrary Python code. + run.CLEAR(); // inner_bootstrap didn't run, we own the reference. + } + + // In the success case, the spawned code (inner_bootstrap) will + // take care of decrefing this, so we relinquish ownership so as + // to not double-decref. + + run.relinquish_ownership(); + + return err; +} + + +void +UserGreenlet::inner_bootstrap(PyGreenlet* origin_greenlet, PyObject* run) +{ + // The arguments here would be another great place for move. + // As it is, we take them as a reference so that when we clear + // them we clear what's on the stack above us. Do that NOW, and + // without using a C++ RAII object, + // so there's no way that exiting the parent frame can clear it, + // or we clear it unexpectedly. This arises in the context of the + // interpreter shutting down. 
See https://github.com/python-greenlet/greenlet/issues/325 + //PyObject* run = _run.relinquish_ownership(); + + /* in the new greenlet */ + assert(this->thread_state()->borrow_current() == BorrowedGreenlet(this->_self)); + // C++ exceptions cannot propagate to the parent greenlet from + // here. (TODO: Do we need a catch(...) clause, perhaps on the + // function itself? ALl we could do is terminate the program.) + // NOTE: On 32-bit Windows, the call chain is extremely + // important here in ways that are subtle, having to do with + // the depth of the SEH list. The call to restore it MUST NOT + // add a new SEH handler to the list, or we'll restore it to + // the wrong thing. + this->thread_state()->restore_exception_state(); + /* stack variables from above are no good and also will not unwind! */ + // EXCEPT: That can't be true, we access run, among others, here. + + this->stack_state.set_active(); /* running */ + + // We're about to possibly run Python code again, which + // could switch back/away to/from us, so we need to grab the + // arguments locally. + SwitchingArgs args; + args <<= this->args(); + assert(!this->args()); + + // XXX: We could clear this much earlier, right? + // Or would that introduce the possibility of running Python + // code when we don't want to? + // CAUTION: This may run arbitrary Python code. + this->_run_callable.CLEAR(); + + + // The first switch we need to manually call the trace + // function here instead of in g_switch_finish, because we + // never return there. + if (OwnedObject tracefunc = this->thread_state()->get_tracefunc()) { + OwnedGreenlet trace_origin; + trace_origin = origin_greenlet; + try { + g_calltrace(tracefunc, + args ? mod_globs->event_switch : mod_globs->event_throw, + trace_origin, + this->_self); + } + catch (const PyErrOccurred&) { + /* Turn trace errors into switch throws */ + args.CLEAR(); + } + } + + // We no longer need the origin, it was only here for + // tracing. + // We may never actually exit this stack frame so we need + // to explicitly clear it. + // This could run Python code and switch. + Py_CLEAR(origin_greenlet); + + OwnedObject result; + if (!args) { + /* pending exception */ + result = NULL; + } + else { + /* call g.run(*args, **kwargs) */ + // This could result in further switches + try { + //result = run.PyCall(args.args(), args.kwargs()); + // CAUTION: Just invoking this, before the function even + // runs, may cause memory allocations, which may trigger + // GC, which may run arbitrary Python code. + result = OwnedObject::consuming(PyObject_Call(run, args.args().borrow(), args.kwargs().borrow())); + } + catch (...) { + // Unhandled C++ exception! + + // If we declare ourselves as noexcept, if we don't catch + // this here, most platforms will just abort() the + // process. But on 64-bit Windows with older versions of + // the C runtime, this can actually corrupt memory and + // just return. We see this when compiling with the + // Windows 7.0 SDK targeting Windows Server 2008, but not + // when using the Appveyor Visual Studio 2019 image. So + // this currently only affects Python 2.7 on Windows 64. + // That is, the tests pass and the runtime aborts + // everywhere else. + // + // However, if we catch it and try to continue with a + // Python error, then all Windows 64 bit platforms corrupt + // memory. So all we can do is manually abort, hopefully + // with a good error message. 
(Note that the above was + // tested WITHOUT the `/EHr` switch being used at compile + // time, so MSVC may have "optimized" out important + // checking. Using that switch, we may be in a better + // place in terms of memory corruption.) But sometimes it + // can't be caught here at all, which is confusing but not + // terribly surprising; so again, the G_NOEXCEPT_WIN32 + // plus "/EHr". + // + // Hopefully the basic C stdlib is still functional enough + // for us to at least print an error. + // + // It gets more complicated than that, though, on some + // platforms, specifically at least Linux/gcc/libstdc++. They use + // an exception to unwind the stack when a background + // thread exits. (See comments about noexcept.) So this + // may not actually represent anything untoward. On those + // platforms we allow throws of this to propagate, or + // attempt to anyway. +# if defined(WIN32) || defined(_WIN32) + Py_FatalError( + "greenlet: Unhandled C++ exception from a greenlet run function. " + "Because memory is likely corrupted, terminating process."); + std::abort(); +#else + throw; +#endif + } + } + // These lines may run arbitrary code + args.CLEAR(); + Py_CLEAR(run); + + if (!result + && mod_globs->PyExc_GreenletExit.PyExceptionMatches() + && (this->args())) { + // This can happen, for example, if our only reference + // goes away after we switch back to the parent. + // See test_dealloc_switch_args_not_lost + PyErrPieces clear_error; + result <<= this->args(); + result = single_result(result); + } + this->release_args(); + this->python_state.did_finish(PyThreadState_GET()); + + result = g_handle_exit(result); + assert(this->thread_state()->borrow_current() == this->_self); + + /* jump back to parent */ + this->stack_state.set_inactive(); /* dead */ + + + // TODO: Can we decref some things here? Release our main greenlet + // and maybe parent? + for (Greenlet* parent = this->_parent; + parent; + parent = parent->parent()) { + // We need to somewhere consume a reference to + // the result; in most cases we'll never have control + // back in this stack frame again. Calling + // green_switch actually adds another reference! + // This would probably be clearer with a specific API + // to hand results to the parent. + parent->args() <<= result; + assert(!result); + // The parent greenlet now owns the result; in the + // typical case we'll never get back here to assign to + // result and thus release the reference. + try { + result = parent->g_switch(); + } + catch (const PyErrOccurred&) { + // Ignore, keep passing the error on up. + } + + /* Return here means switch to parent failed, + * in which case we throw *current* exception + * to the next parent in chain. + */ + assert(!result); + } + /* We ran out of parents, cannot continue */ + PyErr_WriteUnraisable(this->self().borrow_o()); + Py_FatalError("greenlet: ran out of parent greenlets while propagating exception; " + "cannot continue"); + std::abort(); +} + +void +UserGreenlet::run(const BorrowedObject nrun) +{ + if (this->started()) { + throw AttributeError( + "run cannot be set " + "after the start of the greenlet"); + } + this->_run_callable = nrun; +} + +const OwnedGreenlet +UserGreenlet::parent() const +{ + return this->_parent; +} + +void +UserGreenlet::parent(const BorrowedObject raw_new_parent) +{ + if (!raw_new_parent) { + throw AttributeError("can't delete attribute"); + } + + BorrowedMainGreenlet main_greenlet_of_new_parent; + BorrowedGreenlet new_parent(raw_new_parent.borrow()); // could + // throw + // TypeError! 
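+    // (Illustrative addition, not upstream text: the walk below does two
+    // jobs at once -- it rejects a cyclic parent chain, and it records
+    // the main greenlet found at the end of the chain, which identifies
+    // the thread the proposed parent belongs to.)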
+ for (BorrowedGreenlet p = new_parent; p; p = p->parent()) { + if (p == this->self()) { + throw ValueError("cyclic parent chain"); + } + main_greenlet_of_new_parent = p->main_greenlet(); + } + + if (!main_greenlet_of_new_parent) { + throw ValueError("parent must not be garbage collected"); + } + + if (this->started() + && this->_main_greenlet != main_greenlet_of_new_parent) { + throw ValueError("parent cannot be on a different thread"); + } + + this->_parent = new_parent; +} + +void +UserGreenlet::murder_in_place() +{ + this->_main_greenlet.CLEAR(); + Greenlet::murder_in_place(); +} + +bool +UserGreenlet::belongs_to_thread(const ThreadState* thread_state) const +{ + return Greenlet::belongs_to_thread(thread_state) && this->_main_greenlet == thread_state->borrow_main_greenlet(); +} + + +int +UserGreenlet::tp_traverse(visitproc visit, void* arg) +{ + Py_VISIT(this->_parent.borrow_o()); + Py_VISIT(this->_main_greenlet.borrow_o()); + Py_VISIT(this->_run_callable.borrow_o()); + + return Greenlet::tp_traverse(visit, arg); +} + +int +UserGreenlet::tp_clear() +{ + Greenlet::tp_clear(); + this->_parent.CLEAR(); + this->_main_greenlet.CLEAR(); + this->_run_callable.CLEAR(); + return 0; +} + +UserGreenlet::ParentIsCurrentGuard::ParentIsCurrentGuard(UserGreenlet* p, + const ThreadState& thread_state) + : oldparent(p->_parent), + greenlet(p) +{ + p->_parent = thread_state.get_current(); +} + +UserGreenlet::ParentIsCurrentGuard::~ParentIsCurrentGuard() +{ + this->greenlet->_parent = oldparent; + oldparent.CLEAR(); +} + +}; //namespace greenlet +#endif diff --git a/venv/Lib/site-packages/greenlet/__init__.py b/venv/Lib/site-packages/greenlet/__init__.py new file mode 100644 index 00000000..9483b7c3 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/__init__.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" +The root of the greenlet package. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +__all__ = [ + '__version__', + '_C_API', + + 'GreenletExit', + 'error', + + 'getcurrent', + 'greenlet', + + 'gettrace', + 'settrace', +] + +# pylint:disable=no-name-in-module + +### +# Metadata +### +__version__ = '3.2.2' +from ._greenlet import _C_API # pylint:disable=no-name-in-module + +### +# Exceptions +### +from ._greenlet import GreenletExit +from ._greenlet import error + +### +# greenlets +### +from ._greenlet import getcurrent +from ._greenlet import greenlet + +### +# tracing +### +try: + from ._greenlet import gettrace + from ._greenlet import settrace +except ImportError: + # Tracing wasn't supported. + # XXX: The option to disable it was removed in 1.0, + # so this branch should be dead code. + pass + +### +# Constants +# These constants aren't documented and aren't recommended. +# In 1.0, USE_GC and USE_TRACING are always true, and USE_CONTEXT_VARS +# is the same as ``sys.version_info[:2] >= 3.7`` +### +from ._greenlet import GREENLET_USE_CONTEXT_VARS # pylint:disable=unused-import +from ._greenlet import GREENLET_USE_GC # pylint:disable=unused-import +from ._greenlet import GREENLET_USE_TRACING # pylint:disable=unused-import + +# Controlling the use of the gc module. Provisional API for this greenlet +# implementation in 2.0. +from ._greenlet import CLOCKS_PER_SEC # pylint:disable=unused-import +from ._greenlet import enable_optional_cleanup # pylint:disable=unused-import +from ._greenlet import get_clocks_used_doing_optional_cleanup # pylint:disable=unused-import + +# Other APIS in the _greenlet module are for test support. 
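The `_C_API` import in `greenlet/__init__.py` above re-exports the capsule that third-party C extensions consume through the `PyGreenlet_Import()` macro declared in `greenlet.h` later in this diff. Below is a minimal sketch of such a consumer; the helper name `run_once_in_greenlet` and the minimal error handling are illustrative, and it assumes the greenlet package directory is on the include path:

```cpp
// Hypothetical consumer of greenlet's exported C API. Only the
// PyGreenlet_* names come from greenlet.h; everything else is
// illustrative.
#include <Python.h>
#include "greenlet.h"  // shipped inside the greenlet package

// Run `callable` inside a fresh greenlet parented to the current one,
// returning whatever the first switch yields (a new reference), or
// NULL with a Python exception set.
static PyObject*
run_once_in_greenlet(PyObject* callable)
{
    PyGreenlet_Import();   // resolves the "greenlet._C_API" capsule
    if (PyErr_Occurred()) {
        return NULL;       // greenlet is missing or incompatible
    }
    PyGreenlet* g = PyGreenlet_New(callable, NULL);  // NULL: parent = current
    if (!g) {
        return NULL;
    }
    PyObject* result = PyGreenlet_Switch(g, NULL, NULL);
    Py_DECREF(g);
    return result;
}
```

If greenlet cannot be imported, `PyGreenlet_Import()` leaves `_PyGreenlet_API` NULL with the ImportError still set, which the sketch surfaces to its caller.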
diff --git a/venv/Lib/site-packages/greenlet/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/greenlet/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..36c1ee50 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/_greenlet.cp312-win_amd64.pyd b/venv/Lib/site-packages/greenlet/_greenlet.cp312-win_amd64.pyd new file mode 100644 index 00000000..f3025469 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/_greenlet.cp312-win_amd64.pyd differ diff --git a/venv/Lib/site-packages/greenlet/greenlet.cpp b/venv/Lib/site-packages/greenlet/greenlet.cpp new file mode 100644 index 00000000..e8d92a00 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/greenlet.cpp @@ -0,0 +1,320 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +/* Format with: + * clang-format -i --style=file src/greenlet/greenlet.c + * + * + * Fix missing braces with: + * clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements" +*/ +#include +#include +#include +#include + + +#define PY_SSIZE_T_CLEAN +#include +#include "structmember.h" // PyMemberDef + +#include "greenlet_internal.hpp" +// Code after this point can assume access to things declared in stdint.h, +// including the fixed-width types. This goes for the platform-specific switch functions +// as well. +#include "greenlet_refs.hpp" +#include "greenlet_slp_switch.hpp" + +#include "greenlet_thread_support.hpp" +#include "TGreenlet.hpp" + +#include "TGreenletGlobals.cpp" + +#include "TGreenlet.cpp" +#include "TMainGreenlet.cpp" +#include "TUserGreenlet.cpp" +#include "TBrokenGreenlet.cpp" +#include "TExceptionState.cpp" +#include "TPythonState.cpp" +#include "TStackState.cpp" + +#include "TThreadState.hpp" +#include "TThreadStateCreator.hpp" +#include "TThreadStateDestroy.cpp" + +#include "PyGreenlet.cpp" +#include "PyGreenletUnswitchable.cpp" +#include "CObjects.cpp" + +using greenlet::LockGuard; +using greenlet::LockInitError; +using greenlet::PyErrOccurred; +using greenlet::Require; + +using greenlet::g_handle_exit; +using greenlet::single_result; + +using greenlet::Greenlet; +using greenlet::UserGreenlet; +using greenlet::MainGreenlet; +using greenlet::BrokenGreenlet; +using greenlet::ThreadState; +using greenlet::PythonState; + + + +// ******* Implementation of things from included files +template +greenlet::refs::_BorrowedGreenlet& greenlet::refs::_BorrowedGreenlet::operator=(const greenlet::refs::BorrowedObject& other) +{ + this->_set_raw_pointer(static_cast(other)); + return *this; +} + +template +inline greenlet::refs::_BorrowedGreenlet::operator Greenlet*() const noexcept +{ + if (!this->p) { + return nullptr; + } + return reinterpret_cast(this->p)->pimpl; +} + +template +greenlet::refs::_BorrowedGreenlet::_BorrowedGreenlet(const BorrowedObject& p) + : BorrowedReference(nullptr) +{ + + this->_set_raw_pointer(p.borrow()); +} + +template +inline greenlet::refs::_OwnedGreenlet::operator Greenlet*() const noexcept +{ + if (!this->p) { + return nullptr; + } + return reinterpret_cast(this->p)->pimpl; +} + + + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wmissing-field-initializers" +# pragma clang diagnostic ignored "-Wwritable-strings" +#elif defined(__GNUC__) +# pragma GCC diagnostic push +// warning: ISO C++ forbids converting a string constant to ‘char*’ +// (The python APIs aren't const correct and accept writable char*) +# pragma GCC diagnostic 
ignored "-Wwrite-strings" +#endif + + +/*********************************************************** + +A PyGreenlet is a range of C stack addresses that must be +saved and restored in such a way that the full range of the +stack contains valid data when we switch to it. + +Stack layout for a greenlet: + + | ^^^ | + | older data | + | | + stack_stop . |_______________| + . | | + . | greenlet data | + . | in stack | + . * |_______________| . . _____________ stack_copy + stack_saved + . | | | | + . | data | |greenlet data| + . | unrelated | | saved | + . | to | | in heap | + stack_start . | this | . . |_____________| stack_copy + | greenlet | + | | + | newer data | + | vvv | + + +Note that a greenlet's stack data is typically partly at its correct +place in the stack, and partly saved away in the heap, but always in +the above configuration: two blocks, the more recent one in the heap +and the older one still in the stack (either block may be empty). + +Greenlets are chained: each points to the previous greenlet, which is +the one that owns the data currently in the C stack above my +stack_stop. The currently running greenlet is the first element of +this chain. The main (initial) greenlet is the last one. Greenlets +whose stack is entirely in the heap can be skipped from the chain. + +The chain is not related to execution order, but only to the order +in which bits of C stack happen to belong to greenlets at a particular +point in time. + +The main greenlet doesn't have a stack_stop: it is responsible for the +complete rest of the C stack, and we don't know where it begins. We +use (char*) -1, the largest possible address. + +States: + stack_stop == NULL && stack_start == NULL: did not start yet + stack_stop != NULL && stack_start == NULL: already finished + stack_stop != NULL && stack_start != NULL: active + +The running greenlet's stack_start is undefined but not NULL. + + ***********************************************************/ + + + + +/***********************************************************/ + +/* Some functions must not be inlined: + * slp_restore_state, when inlined into slp_switch might cause + it to restore stack over its own local variables + * slp_save_state, when inlined would add its own local + variables to the saved stack, wasting space + * slp_switch, cannot be inlined for obvious reasons + * g_initialstub, when inlined would receive a pointer into its + own stack frame, leading to incomplete stack save/restore + +g_initialstub is a member function and declared virtual so that the +compiler always calls it through a vtable. + +slp_save_state and slp_restore_state are also member functions. They +are called from trampoline functions that themselves are declared as +not eligible for inlining. 
+*/ + +extern "C" { +static int GREENLET_NOINLINE(slp_save_state_trampoline)(char* stackref) +{ + return switching_thread_state->slp_save_state(stackref); +} +static void GREENLET_NOINLINE(slp_restore_state_trampoline)() +{ + switching_thread_state->slp_restore_state(); +} +} + + +/***********************************************************/ + + +#include "PyModule.cpp" + + + +static PyObject* +greenlet_internal_mod_init() noexcept +{ + static void* _PyGreenlet_API[PyGreenlet_API_pointers]; + + try { + CreatedModule m(greenlet_module_def); + + Require(PyType_Ready(&PyGreenlet_Type)); + Require(PyType_Ready(&PyGreenletUnswitchable_Type)); + + mod_globs = new greenlet::GreenletGlobals; + ThreadState::init(); + + m.PyAddObject("greenlet", PyGreenlet_Type); + m.PyAddObject("UnswitchableGreenlet", PyGreenletUnswitchable_Type); + m.PyAddObject("error", mod_globs->PyExc_GreenletError); + m.PyAddObject("GreenletExit", mod_globs->PyExc_GreenletExit); + + m.PyAddObject("GREENLET_USE_GC", 1); + m.PyAddObject("GREENLET_USE_TRACING", 1); + m.PyAddObject("GREENLET_USE_CONTEXT_VARS", 1L); + m.PyAddObject("GREENLET_USE_STANDARD_THREADING", 1L); + + OwnedObject clocks_per_sec = OwnedObject::consuming(PyLong_FromSsize_t(CLOCKS_PER_SEC)); + m.PyAddObject("CLOCKS_PER_SEC", clocks_per_sec); + + /* also publish module-level data as attributes of the greentype. */ + // XXX: This is weird, and enables a strange pattern of + // confusing the class greenlet with the module greenlet; with + // the exception of (possibly) ``getcurrent()``, this + // shouldn't be encouraged so don't add new items here. + for (const char* const* p = copy_on_greentype; *p; p++) { + OwnedObject o = m.PyRequireAttr(*p); + PyDict_SetItemString(PyGreenlet_Type.tp_dict, *p, o.borrow()); + } + + /* + * Expose C API + */ + + /* types */ + _PyGreenlet_API[PyGreenlet_Type_NUM] = (void*)&PyGreenlet_Type; + + /* exceptions */ + _PyGreenlet_API[PyExc_GreenletError_NUM] = (void*)mod_globs->PyExc_GreenletError; + _PyGreenlet_API[PyExc_GreenletExit_NUM] = (void*)mod_globs->PyExc_GreenletExit; + + /* methods */ + _PyGreenlet_API[PyGreenlet_New_NUM] = (void*)PyGreenlet_New; + _PyGreenlet_API[PyGreenlet_GetCurrent_NUM] = (void*)PyGreenlet_GetCurrent; + _PyGreenlet_API[PyGreenlet_Throw_NUM] = (void*)PyGreenlet_Throw; + _PyGreenlet_API[PyGreenlet_Switch_NUM] = (void*)PyGreenlet_Switch; + _PyGreenlet_API[PyGreenlet_SetParent_NUM] = (void*)PyGreenlet_SetParent; + + /* Previously macros, but now need to be functions externally. */ + _PyGreenlet_API[PyGreenlet_MAIN_NUM] = (void*)Extern_PyGreenlet_MAIN; + _PyGreenlet_API[PyGreenlet_STARTED_NUM] = (void*)Extern_PyGreenlet_STARTED; + _PyGreenlet_API[PyGreenlet_ACTIVE_NUM] = (void*)Extern_PyGreenlet_ACTIVE; + _PyGreenlet_API[PyGreenlet_GET_PARENT_NUM] = (void*)Extern_PyGreenlet_GET_PARENT; + + /* XXX: Note that our module name is ``greenlet._greenlet``, but for + backwards compatibility with existing C code, we need the _C_API to + be directly in greenlet. 
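+       (Illustrative addition, not upstream text: PyCapsule_Import
+       resolves a dotted name by importing the leading module and then
+       walking attributes, so the capsule must be reachable as the
+       ``_C_API`` attribute of ``greenlet`` itself, which the pure
+       Python ``greenlet/__init__.py`` re-exports.)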
+ */ + const NewReference c_api_object(Require( + PyCapsule_New( + (void*)_PyGreenlet_API, + "greenlet._C_API", + NULL))); + m.PyAddObject("_C_API", c_api_object); + assert(c_api_object.REFCNT() == 2); + + // cerr << "Sizes:" + // << "\n\tGreenlet : " << sizeof(Greenlet) + // << "\n\tUserGreenlet : " << sizeof(UserGreenlet) + // << "\n\tMainGreenlet : " << sizeof(MainGreenlet) + // << "\n\tExceptionState : " << sizeof(greenlet::ExceptionState) + // << "\n\tPythonState : " << sizeof(greenlet::PythonState) + // << "\n\tStackState : " << sizeof(greenlet::StackState) + // << "\n\tSwitchingArgs : " << sizeof(greenlet::SwitchingArgs) + // << "\n\tOwnedObject : " << sizeof(greenlet::refs::OwnedObject) + // << "\n\tBorrowedObject : " << sizeof(greenlet::refs::BorrowedObject) + // << "\n\tPyGreenlet : " << sizeof(PyGreenlet) + // << endl; + + return m.borrow(); // But really it's the main reference. + } + catch (const LockInitError& e) { + PyErr_SetString(PyExc_MemoryError, e.what()); + return NULL; + } + catch (const PyErrOccurred&) { + return NULL; + } + +} + +extern "C" { + +PyMODINIT_FUNC +PyInit__greenlet(void) +{ + return greenlet_internal_mod_init(); +} + +}; // extern C + +#ifdef __clang__ +# pragma clang diagnostic pop +#elif defined(__GNUC__) +# pragma GCC diagnostic pop +#endif diff --git a/venv/Lib/site-packages/greenlet/greenlet.h b/venv/Lib/site-packages/greenlet/greenlet.h new file mode 100644 index 00000000..d02a16e4 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/greenlet.h @@ -0,0 +1,164 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ + +/* Greenlet object interface */ + +#ifndef Py_GREENLETOBJECT_H +#define Py_GREENLETOBJECT_H + + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is deprecated and undocumented. It does not change. 
*/ +#define GREENLET_VERSION "1.0.0" + +#ifndef GREENLET_MODULE +#define implementation_ptr_t void* +#endif + +typedef struct _greenlet { + PyObject_HEAD + PyObject* weakreflist; + PyObject* dict; + implementation_ptr_t pimpl; +} PyGreenlet; + +#define PyGreenlet_Check(op) (op && PyObject_TypeCheck(op, &PyGreenlet_Type)) + + +/* C API functions */ + +/* Total number of symbols that are exported */ +#define PyGreenlet_API_pointers 12 + +#define PyGreenlet_Type_NUM 0 +#define PyExc_GreenletError_NUM 1 +#define PyExc_GreenletExit_NUM 2 + +#define PyGreenlet_New_NUM 3 +#define PyGreenlet_GetCurrent_NUM 4 +#define PyGreenlet_Throw_NUM 5 +#define PyGreenlet_Switch_NUM 6 +#define PyGreenlet_SetParent_NUM 7 + +#define PyGreenlet_MAIN_NUM 8 +#define PyGreenlet_STARTED_NUM 9 +#define PyGreenlet_ACTIVE_NUM 10 +#define PyGreenlet_GET_PARENT_NUM 11 + +#ifndef GREENLET_MODULE +/* This section is used by modules that uses the greenlet C API */ +static void** _PyGreenlet_API = NULL; + +# define PyGreenlet_Type \ + (*(PyTypeObject*)_PyGreenlet_API[PyGreenlet_Type_NUM]) + +# define PyExc_GreenletError \ + ((PyObject*)_PyGreenlet_API[PyExc_GreenletError_NUM]) + +# define PyExc_GreenletExit \ + ((PyObject*)_PyGreenlet_API[PyExc_GreenletExit_NUM]) + +/* + * PyGreenlet_New(PyObject *args) + * + * greenlet.greenlet(run, parent=None) + */ +# define PyGreenlet_New \ + (*(PyGreenlet * (*)(PyObject * run, PyGreenlet * parent)) \ + _PyGreenlet_API[PyGreenlet_New_NUM]) + +/* + * PyGreenlet_GetCurrent(void) + * + * greenlet.getcurrent() + */ +# define PyGreenlet_GetCurrent \ + (*(PyGreenlet * (*)(void)) _PyGreenlet_API[PyGreenlet_GetCurrent_NUM]) + +/* + * PyGreenlet_Throw( + * PyGreenlet *greenlet, + * PyObject *typ, + * PyObject *val, + * PyObject *tb) + * + * g.throw(...) + */ +# define PyGreenlet_Throw \ + (*(PyObject * (*)(PyGreenlet * self, \ + PyObject * typ, \ + PyObject * val, \ + PyObject * tb)) \ + _PyGreenlet_API[PyGreenlet_Throw_NUM]) + +/* + * PyGreenlet_Switch(PyGreenlet *greenlet, PyObject *args) + * + * g.switch(*args, **kwargs) + */ +# define PyGreenlet_Switch \ + (*(PyObject * \ + (*)(PyGreenlet * greenlet, PyObject * args, PyObject * kwargs)) \ + _PyGreenlet_API[PyGreenlet_Switch_NUM]) + +/* + * PyGreenlet_SetParent(PyObject *greenlet, PyObject *new_parent) + * + * g.parent = new_parent + */ +# define PyGreenlet_SetParent \ + (*(int (*)(PyGreenlet * greenlet, PyGreenlet * nparent)) \ + _PyGreenlet_API[PyGreenlet_SetParent_NUM]) + +/* + * PyGreenlet_GetParent(PyObject* greenlet) + * + * return greenlet.parent; + * + * This could return NULL even if there is no exception active. + * If it does not return NULL, you are responsible for decrementing the + * reference count. + */ +# define PyGreenlet_GetParent \ + (*(PyGreenlet* (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_GET_PARENT_NUM]) + +/* + * deprecated, undocumented alias. + */ +# define PyGreenlet_GET_PARENT PyGreenlet_GetParent + +# define PyGreenlet_MAIN \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_MAIN_NUM]) + +# define PyGreenlet_STARTED \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_STARTED_NUM]) + +# define PyGreenlet_ACTIVE \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_ACTIVE_NUM]) + + + + +/* Macro that imports greenlet and initializes C API */ +/* NOTE: This has actually moved to ``greenlet._greenlet._C_API``, but we + keep the older definition to be sure older code that might have a copy of + the header still works. 
*/ +# define PyGreenlet_Import() \ + { \ + _PyGreenlet_API = (void**)PyCapsule_Import("greenlet._C_API", 0); \ + } + +#endif /* GREENLET_MODULE */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_GREENLETOBJECT_H */ diff --git a/venv/Lib/site-packages/greenlet/greenlet_allocator.hpp b/venv/Lib/site-packages/greenlet/greenlet_allocator.hpp new file mode 100644 index 00000000..b452f544 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/greenlet_allocator.hpp @@ -0,0 +1,63 @@ +#ifndef GREENLET_ALLOCATOR_HPP +#define GREENLET_ALLOCATOR_HPP + +#define PY_SSIZE_T_CLEAN +#include +#include +#include "greenlet_compiler_compat.hpp" + + +namespace greenlet +{ + // This allocator is stateless; all instances are identical. + // It can *ONLY* be used when we're sure we're holding the GIL + // (Python's allocators require the GIL). + template + struct PythonAllocator : public std::allocator { + + PythonAllocator(const PythonAllocator& UNUSED(other)) + : std::allocator() + { + } + + PythonAllocator(const std::allocator other) + : std::allocator(other) + {} + + template + PythonAllocator(const std::allocator& other) + : std::allocator(other) + { + } + + PythonAllocator() : std::allocator() {} + + T* allocate(size_t number_objects, const void* UNUSED(hint)=0) + { + void* p; + if (number_objects == 1) + p = PyObject_Malloc(sizeof(T)); + else + p = PyMem_Malloc(sizeof(T) * number_objects); + return static_cast(p); + } + + void deallocate(T* t, size_t n) + { + void* p = t; + if (n == 1) { + PyObject_Free(p); + } + else + PyMem_Free(p); + } + // This member is deprecated in C++17 and removed in C++20 + template< class U > + struct rebind { + typedef PythonAllocator other; + }; + + }; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/greenlet_compiler_compat.hpp b/venv/Lib/site-packages/greenlet/greenlet_compiler_compat.hpp new file mode 100644 index 00000000..af24bd83 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/greenlet_compiler_compat.hpp @@ -0,0 +1,98 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +#ifndef GREENLET_COMPILER_COMPAT_HPP +#define GREENLET_COMPILER_COMPAT_HPP + +/** + * Definitions to aid with compatibility with different compilers. + * + * .. caution:: Use extreme care with noexcept. + * Some compilers and runtimes, specifically gcc/libgcc/libstdc++ on + * Linux, implement stack unwinding by throwing an uncatchable + * exception, one that specifically does not appear to be an active + * exception to the rest of the runtime. If this happens while we're in a noexcept function, + * we have violated our dynamic exception contract, and so the runtime + * will call std::terminate(), which kills the process with the + * unhelpful message "terminate called without an active exception". + * + * This has happened in this scenario: A background thread is running + * a greenlet that has made a native call and released the GIL. + * Meanwhile, the main thread finishes and starts shutting down the + * interpreter. When the background thread is scheduled again and + * attempts to obtain the GIL, it notices that the interpreter is + * exiting and calls ``pthread_exit()``. This in turn starts to unwind + * the stack by throwing that exception. But we had the ``PyCall`` + * functions annotated as noexcept, so the runtime terminated us. + * + * #2 0x00007fab26fec2b7 in std::terminate() () from /lib/x86_64-linux-gnu/libstdc++.so.6 + * #3 0x00007fab26febb3c in __gxx_personality_v0 () from /lib/x86_64-linux-gnu/libstdc++.so.6 + * #4 0x00007fab26f34de6 in ?? 
() from /lib/x86_64-linux-gnu/libgcc_s.so.1 + * #6 0x00007fab276a34c6 in __GI___pthread_unwind at ./nptl/unwind.c:130 + * #7 0x00007fab2769bd3a in __do_cancel () at ../sysdeps/nptl/pthreadP.h:280 + * #8 __GI___pthread_exit (value=value@entry=0x0) at ./nptl/pthread_exit.c:36 + * #9 0x000000000052e567 in PyThread_exit_thread () at ../Python/thread_pthread.h:370 + * #10 0x00000000004d60b5 in take_gil at ../Python/ceval_gil.h:224 + * #11 0x00000000004d65f9 in PyEval_RestoreThread at ../Python/ceval.c:467 + * #12 0x000000000060cce3 in setipaddr at ../Modules/socketmodule.c:1203 + * #13 0x00000000006101cd in socket_gethostbyname + */ + +#include + +# define G_NO_COPIES_OF_CLS(Cls) private: \ + Cls(const Cls& other) = delete; \ + Cls& operator=(const Cls& other) = delete + +# define G_NO_ASSIGNMENT_OF_CLS(Cls) private: \ + Cls& operator=(const Cls& other) = delete + +# define G_NO_COPY_CONSTRUCTOR_OF_CLS(Cls) private: \ + Cls(const Cls& other) = delete; + + +// CAUTION: MSVC is stupidly picky: +// +// "The compiler ignores, without warning, any __declspec keywords +// placed after * or & and in front of the variable identifier in a +// declaration." +// (https://docs.microsoft.com/en-us/cpp/cpp/declspec?view=msvc-160) +// +// So pointer return types must be handled differently (because of the +// trailing *), or you get inscrutable compiler warnings like "error +// C2059: syntax error: ''" +// +// In C++ 11, there is a standard syntax for attributes, and +// GCC defines an attribute to use with this: [[gnu:noinline]]. +// In the future, this is expected to become standard. + +#if defined(__GNUC__) || defined(__clang__) +/* We used to check for GCC 4+ or 3.4+, but those compilers are + laughably out of date. Just assume they support it. */ +# define GREENLET_NOINLINE(name) __attribute__((noinline)) name +# define GREENLET_NOINLINE_P(rtype, name) rtype __attribute__((noinline)) name +# define UNUSED(x) UNUSED_ ## x __attribute__((__unused__)) +#elif defined(_MSC_VER) +/* We used to check for && (_MSC_VER >= 1300) but that's also out of date. */ +# define GREENLET_NOINLINE(name) __declspec(noinline) name +# define GREENLET_NOINLINE_P(rtype, name) __declspec(noinline) rtype name +# define UNUSED(x) UNUSED_ ## x +#endif + +#if defined(_MSC_VER) +# define G_NOEXCEPT_WIN32 noexcept +#else +# define G_NOEXCEPT_WIN32 +#endif + +#if defined(__GNUC__) && defined(__POWERPC__) && defined(__APPLE__) +// 32-bit PPC/MacOSX. Only known to be tested on unreleased versions +// of macOS 10.6 using a macports build gcc 14. It appears that +// running C++ destructors of thread-local variables is broken. + +// See https://github.com/python-greenlet/greenlet/pull/419 +# define GREENLET_BROKEN_THREAD_LOCAL_CLEANUP_JUST_LEAK 1 +#else +# define GREENLET_BROKEN_THREAD_LOCAL_CLEANUP_JUST_LEAK 0 +#endif + + +#endif diff --git a/venv/Lib/site-packages/greenlet/greenlet_cpython_compat.hpp b/venv/Lib/site-packages/greenlet/greenlet_cpython_compat.hpp new file mode 100644 index 00000000..979d6f94 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/greenlet_cpython_compat.hpp @@ -0,0 +1,148 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +#ifndef GREENLET_CPYTHON_COMPAT_H +#define GREENLET_CPYTHON_COMPAT_H + +/** + * Helpers for compatibility with multiple versions of CPython. + */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" + + +#if PY_VERSION_HEX >= 0x30A00B1 +# define GREENLET_PY310 1 +#else +# define GREENLET_PY310 0 +#endif + +/* +Python 3.10 beta 1 changed tstate->use_tracing to a nested cframe member. 
+See https://github.com/python/cpython/pull/25276 +We have to save and restore this as well. + +Python 3.13 removed PyThreadState.cframe (GH-108035). +*/ +#if GREENLET_PY310 && PY_VERSION_HEX < 0x30D0000 +# define GREENLET_USE_CFRAME 1 +#else +# define GREENLET_USE_CFRAME 0 +#endif + + +#if PY_VERSION_HEX >= 0x30B00A4 +/* +Greenlet won't compile on anything older than Python 3.11 alpha 4 (see +https://bugs.python.org/issue46090). Summary of breaking internal changes: +- Python 3.11 alpha 1 changed how frame objects are represented internally. + - https://github.com/python/cpython/pull/30122 +- Python 3.11 alpha 3 changed how recursion limits are stored. + - https://github.com/python/cpython/pull/29524 +- Python 3.11 alpha 4 changed how exception state is stored. It also includes a + change to help greenlet save and restore the interpreter frame "data stack". + - https://github.com/python/cpython/pull/30122 + - https://github.com/python/cpython/pull/30234 +*/ +# define GREENLET_PY311 1 +#else +# define GREENLET_PY311 0 +#endif + + +#if PY_VERSION_HEX >= 0x30C0000 +# define GREENLET_PY312 1 +#else +# define GREENLET_PY312 0 +#endif + +#if PY_VERSION_HEX >= 0x30D0000 +# define GREENLET_PY313 1 +#else +# define GREENLET_PY313 0 +#endif + +#if PY_VERSION_HEX >= 0x30E0000 +# define GREENLET_PY314 1 +#else +# define GREENLET_PY314 0 +#endif + +#ifndef Py_SET_REFCNT +/* Py_REFCNT and Py_SIZE macros are converted to functions +https://bugs.python.org/issue39573 */ +# define Py_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) +#endif + +#ifndef _Py_DEC_REFTOTAL +/* _Py_DEC_REFTOTAL macro has been removed from Python 3.9 by: + https://github.com/python/cpython/commit/49932fec62c616ec88da52642339d83ae719e924 + + The symbol we use to replace it was removed by at least 3.12. +*/ +# ifdef Py_REF_DEBUG +# if GREENLET_PY312 +# define _Py_DEC_REFTOTAL +# else +# define _Py_DEC_REFTOTAL _Py_RefTotal-- +# endif +# else +# define _Py_DEC_REFTOTAL +# endif +#endif +// Define these flags like Cython does if we're on an old version. 
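+// (Illustrative addition, not upstream text: on interpreters where these
+// tp_flags no longer exist, defining them to 0 makes them no-ops in the
+// G_TPFLAGS_DEFAULT bitmask below, so a single definition compiles
+// everywhere.)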
+#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif + +#ifndef Py_TPFLAGS_HAVE_VERSION_TAG + #define Py_TPFLAGS_HAVE_VERSION_TAG 0 +#endif + +#define G_TPFLAGS_DEFAULT Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_VERSION_TAG | Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_HAVE_NEWBUFFER | Py_TPFLAGS_HAVE_GC + + +#if PY_VERSION_HEX < 0x03090000 +// The official version only became available in 3.9 +# define PyObject_GC_IsTracked(o) _PyObject_GC_IS_TRACKED(o) +#endif + + +// bpo-43760 added PyThreadState_EnterTracing() to Python 3.11.0a2 +#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) +static inline void PyThreadState_EnterTracing(PyThreadState *tstate) +{ + tstate->tracing++; +#if PY_VERSION_HEX >= 0x030A00A1 + tstate->cframe->use_tracing = 0; +#else + tstate->use_tracing = 0; +#endif +} +#endif + +// bpo-43760 added PyThreadState_LeaveTracing() to Python 3.11.0a2 +#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) +static inline void PyThreadState_LeaveTracing(PyThreadState *tstate) +{ + tstate->tracing--; + int use_tracing = (tstate->c_tracefunc != NULL + || tstate->c_profilefunc != NULL); +#if PY_VERSION_HEX >= 0x030A00A1 + tstate->cframe->use_tracing = use_tracing; +#else + tstate->use_tracing = use_tracing; +#endif +} +#endif + +#if !defined(Py_C_RECURSION_LIMIT) && defined(C_RECURSION_LIMIT) +# define Py_C_RECURSION_LIMIT C_RECURSION_LIMIT +#endif + +#endif /* GREENLET_CPYTHON_COMPAT_H */ diff --git a/venv/Lib/site-packages/greenlet/greenlet_exceptions.hpp b/venv/Lib/site-packages/greenlet/greenlet_exceptions.hpp new file mode 100644 index 00000000..617f07c2 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/greenlet_exceptions.hpp @@ -0,0 +1,171 @@ +#ifndef GREENLET_EXCEPTIONS_HPP +#define GREENLET_EXCEPTIONS_HPP + +#define PY_SSIZE_T_CLEAN +#include +#include +#include + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wunused-function" +#endif + +namespace greenlet { + + class PyErrOccurred : public std::runtime_error + { + public: + + // CAUTION: In debug builds, may run arbitrary Python code. + static const PyErrOccurred + from_current() + { + assert(PyErr_Occurred()); +#ifndef NDEBUG + // This is not exception safe, and + // not necessarily safe in general (what if it switches?) + // But we only do this in debug mode, where we are in + // tight control of what exceptions are getting raised and + // can prevent those issues. + + // You can't call PyObject_Str with a pending exception. + PyObject* typ; + PyObject* val; + PyObject* tb; + + PyErr_Fetch(&typ, &val, &tb); + PyObject* typs = PyObject_Str(typ); + PyObject* vals = PyObject_Str(val ? 
val : typ); + const char* typ_msg = PyUnicode_AsUTF8(typs); + const char* val_msg = PyUnicode_AsUTF8(vals); + PyErr_Restore(typ, val, tb); + + std::string msg(typ_msg); + msg += ": "; + msg += val_msg; + PyErrOccurred ex(msg); + Py_XDECREF(typs); + Py_XDECREF(vals); + + return ex; +#else + return PyErrOccurred(); +#endif + } + + PyErrOccurred() : std::runtime_error("") + { + assert(PyErr_Occurred()); + } + + PyErrOccurred(const std::string& msg) : std::runtime_error(msg) + { + assert(PyErr_Occurred()); + } + + PyErrOccurred(PyObject* exc_kind, const char* const msg) + : std::runtime_error(msg) + { + PyErr_SetString(exc_kind, msg); + } + + PyErrOccurred(PyObject* exc_kind, const std::string msg) + : std::runtime_error(msg) + { + // This copies the c_str, so we don't have any lifetime + // issues to worry about. + PyErr_SetString(exc_kind, msg.c_str()); + } + + PyErrOccurred(PyObject* exc_kind, + const std::string msg, //This is the format + //string; that's not + //usually safe! + + PyObject* borrowed_obj_one, PyObject* borrowed_obj_two) + : std::runtime_error(msg) + { + + //This is designed specifically for the + //``check_switch_allowed`` function. + + // PyObject_Str and PyObject_Repr are safe to call with + // NULL pointers; they return the string "" in that + // case. + // This function always returns null. + PyErr_Format(exc_kind, + msg.c_str(), + borrowed_obj_one, borrowed_obj_two); + } + }; + + class TypeError : public PyErrOccurred + { + public: + TypeError(const char* const what) + : PyErrOccurred(PyExc_TypeError, what) + { + } + TypeError(const std::string what) + : PyErrOccurred(PyExc_TypeError, what) + { + } + }; + + class ValueError : public PyErrOccurred + { + public: + ValueError(const char* const what) + : PyErrOccurred(PyExc_ValueError, what) + { + } + }; + + class AttributeError : public PyErrOccurred + { + public: + AttributeError(const char* const what) + : PyErrOccurred(PyExc_AttributeError, what) + { + } + }; + + /** + * Calls `Py_FatalError` when constructed, so you can't actually + * throw this. It just makes static analysis easier. + */ + class PyFatalError : public std::runtime_error + { + public: + PyFatalError(const char* const msg) + : std::runtime_error(msg) + { + Py_FatalError(msg); + } + }; + + static inline PyObject* + Require(PyObject* p, const std::string& msg="") + { + if (!p) { + throw PyErrOccurred(msg); + } + return p; + }; + + static inline void + Require(const int retval) + { + if (retval < 0) { + throw PyErrOccurred(); + } + }; + + +}; +#ifdef __clang__ +# pragma clang diagnostic pop +#endif + +#endif diff --git a/venv/Lib/site-packages/greenlet/greenlet_internal.hpp b/venv/Lib/site-packages/greenlet/greenlet_internal.hpp new file mode 100644 index 00000000..f2b15d5f --- /dev/null +++ b/venv/Lib/site-packages/greenlet/greenlet_internal.hpp @@ -0,0 +1,107 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +#ifndef GREENLET_INTERNAL_H +#define GREENLET_INTERNAL_H +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wunused-function" +#endif + +/** + * Implementation helpers. + * + * C++ templates and inline functions should go here. 
+ */
+#define PY_SSIZE_T_CLEAN
+#include "greenlet_compiler_compat.hpp"
+#include "greenlet_cpython_compat.hpp"
+#include "greenlet_exceptions.hpp"
+#include "TGreenlet.hpp"
+#include "greenlet_allocator.hpp"
+
+#include <vector>
+#include <string>
+
+#define GREENLET_MODULE
+struct _greenlet;
+typedef struct _greenlet PyGreenlet;
+namespace greenlet {
+
+    class ThreadState;
+    // We can't use the PythonAllocator for this, because we push to it
+    // from the thread state destructor, which doesn't have the GIL,
+    // and Python's allocators can only be called with the GIL.
+    typedef std::vector<PyGreenlet*> cleanup_queue_t;
+
+};
+
+
+#define implementation_ptr_t greenlet::Greenlet*
+
+
+#include "greenlet.h"
+
+void
+greenlet::refs::MainGreenletExactChecker(void *p)
+{
+    if (!p) {
+        return;
+    }
+    // We control the class of the main greenlet exactly.
+    if (Py_TYPE(p) != &PyGreenlet_Type) {
+        std::string err("MainGreenlet: Expected exactly a greenlet, not a ");
+        err += Py_TYPE(p)->tp_name;
+        throw greenlet::TypeError(err);
+    }
+
+    // Greenlets from dead threads no longer respond to main() with a
+    // true value; so in that case we need to perform an additional
+    // check.
+    Greenlet* g = static_cast<PyGreenlet*>(p)->pimpl;
+    if (g->main()) {
+        return;
+    }
+    if (!dynamic_cast<MainGreenlet*>(g)) {
+        std::string err("MainGreenlet: Expected exactly a main greenlet, not a ");
+        err += Py_TYPE(p)->tp_name;
+        throw greenlet::TypeError(err);
+    }
+}
+
+
+
+template <typename T, greenlet::refs::TypeChecker TC>
+inline greenlet::Greenlet* greenlet::refs::_OwnedGreenlet<T, TC>::operator->() const noexcept
+{
+    return reinterpret_cast<PyGreenlet*>(this->p)->pimpl;
+}
+
+template <typename T, greenlet::refs::TypeChecker TC>
+inline greenlet::Greenlet* greenlet::refs::_BorrowedGreenlet<T, TC>::operator->() const noexcept
+{
+    return reinterpret_cast<PyGreenlet*>(this->p)->pimpl;
+}
+
+#include <memory>
+#include <stdexcept>
+
+
+extern PyTypeObject PyGreenlet_Type;
+
+
+
+/**
+ * Forward declarations needed in multiple files.
+ */
+static PyObject* green_switch(PyGreenlet* self, PyObject* args, PyObject* kwargs);
+
+
+#ifdef __clang__
+#    pragma clang diagnostic pop
+#endif
+
+
+#endif
+
+// Local Variables:
+// flycheck-clang-include-path: ("../../include" "/opt/local/Library/Frameworks/Python.framework/Versions/3.10/include/python3.10")
+// End:
diff --git a/venv/Lib/site-packages/greenlet/greenlet_refs.hpp b/venv/Lib/site-packages/greenlet/greenlet_refs.hpp
new file mode 100644
index 00000000..b7e5e3f2
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/greenlet_refs.hpp
@@ -0,0 +1,1118 @@
+#ifndef GREENLET_REFS_HPP
+#define GREENLET_REFS_HPP
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+#include <string>
+
+//#include "greenlet_internal.hpp"
+#include "greenlet_compiler_compat.hpp"
+#include "greenlet_cpython_compat.hpp"
+#include "greenlet_exceptions.hpp"
+
+struct _greenlet;
+struct _PyMainGreenlet;
+
+typedef struct _greenlet PyGreenlet;
+extern PyTypeObject PyGreenlet_Type;
+
+
+#ifdef GREENLET_USE_STDIO
+#include <iostream>
+using std::cerr;
+using std::endl;
+#endif
+
+namespace greenlet
+{
+    class Greenlet;
+
+    namespace refs
+    {
+        // Type checkers throw a TypeError if the argument is not
+        // null, and isn't of the required Python type.
+        // (We can't use most of the defined type checkers
+        // like PyList_Check, etc, directly, because they are
+        // implemented as macros.)
+        typedef void (*TypeChecker)(void*);
+
+        void
+        NoOpChecker(void*)
+        {
+            return;
+        }
+
+        void
+        GreenletChecker(void *p)
+        {
+            if (!p) {
+                return;
+            }
+
+            PyTypeObject* typ = Py_TYPE(p);
+            // fast, common path. (PyObject_TypeCheck is a macro or
(PyObject_TypeCheck is a macro or + // static inline function, and it also does a + // direct comparison of the type pointers, but its fast + // path only handles one type) + if (typ == &PyGreenlet_Type) { + return; + } + + if (!PyObject_TypeCheck(p, &PyGreenlet_Type)) { + std::string err("GreenletChecker: Expected any type of greenlet, not "); + err += Py_TYPE(p)->tp_name; + throw TypeError(err); + } + } + + void + MainGreenletExactChecker(void *p); + + template + class PyObjectPointer; + + template + class OwnedReference; + + + template + class BorrowedReference; + + typedef BorrowedReference BorrowedObject; + typedef OwnedReference OwnedObject; + + class ImmortalObject; + class ImmortalString; + + template + class _OwnedGreenlet; + + typedef _OwnedGreenlet OwnedGreenlet; + typedef _OwnedGreenlet OwnedMainGreenlet; + + template + class _BorrowedGreenlet; + + typedef _BorrowedGreenlet BorrowedGreenlet; + + void + ContextExactChecker(void *p) + { + if (!p) { + return; + } + if (!PyContext_CheckExact(p)) { + throw TypeError( + "greenlet context must be a contextvars.Context or None" + ); + } + } + + typedef OwnedReference OwnedContext; + } +} + +namespace greenlet { + + + namespace refs { + // A set of classes to make reference counting rules in python + // code explicit. + // + // Rules of use: + // (1) Functions returning a new reference that the caller of the + // function is expected to dispose of should return a + // ``OwnedObject`` object. This object automatically releases its + // reference when it goes out of scope. It works like a ``std::shared_ptr`` + // and can be copied or used as a function parameter (but don't do + // that). Note that constructing a ``OwnedObject`` from a + // PyObject* steals the reference. + // (2) Parameters to functions should be either a + // ``OwnedObject&``, or, more generally, a ``PyObjectPointer&``. + // If the function needs to create its own new reference, it can + // do so by copying to a local ``OwnedObject``. + // (3) Functions returning an existing pointer that is NOT + // incref'd, and which the caller MUST NOT decref, + // should return a ``BorrowedObject``. + + // XXX: The following two paragraphs do not hold for all platforms. + // Notably, 32-bit PPC Linux passes structs by reference, not by + // value, so this actually doesn't work. (Although that's the only + // platform that doesn't work on.) DO NOT ATTEMPT IT. The + // unfortunate consequence of that is that the slots which we + // *know* are already type safe will wind up calling the type + // checker function (when we had the slots accepting + // BorrowedGreenlet, this was bypassed), so this slows us down. + // TODO: Optimize this again. + + // For a class with a single pointer member, whose constructor + // does nothing but copy a pointer parameter into the member, and + // which can then be converted back to the pointer type, compilers + // generate code that's the same as just passing the pointer. + // That is, func(BorrowedObject x) called like ``PyObject* p = + // ...; f(p)`` has 0 overhead. Similarly, they "unpack" to the + // pointer type with 0 overhead. + // + // If there are no virtual functions, no complex inheritance (maybe?) and + // no destructor, these can be directly used as parameters in + // Python callbacks like tp_init: the layout is the same as a + // single pointer. Only subclasses with trivial constructors that + // do nothing but set the single pointer member are safe to use + // that way. 
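+        // A minimal sketch of rules (1)-(3) in action (illustrative
+        // only; ``consume_attr`` and ``"some_attr"`` are hypothetical
+        // names, not part of greenlet):
+        //
+        //   static OwnedObject
+        //   consume_attr(const PyObjectPointer<>& obj)  // rule (2)
+        //   {
+        //       // PyObject_GetAttrString returns a new reference;
+        //       // OwnedObject::consuming (rule 1) takes it over and
+        //       // releases it when the result goes out of scope.
+        //       return OwnedObject::consuming(
+        //           PyObject_GetAttrString(obj.borrow_o(), "some_attr"));
+        //   }
+        //
+        // A function handing back a pointer the caller must not decref
+        // would instead declare its return type ``BorrowedObject`` (rule 3).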
+ + + // This is the base class for things that can be done with a + // PyObject pointer. It assumes nothing about memory management. + // NOTE: Nothing is virtual, so subclasses shouldn't add new + // storage fields or try to override these methods. + template + class PyObjectPointer + { + public: + typedef T PyType; + protected: + T* p; + public: + PyObjectPointer(T* it=nullptr) : p(it) + { + TC(p); + } + + // We don't allow automatic casting to PyObject* at this + // level, because then we could be passed to Py_DECREF/INCREF, + // but we want nothing to do with memory management. If you + // know better, then you can use the get() method, like on a + // std::shared_ptr. Except we name it borrow() to clarify that + // if this is a reference-tracked object, the pointer you get + // back will go away when the object does. + // TODO: This should probably not exist here, but be moved + // down to relevant sub-types. + + T* borrow() const noexcept + { + return this->p; + } + + PyObject* borrow_o() const noexcept + { + return reinterpret_cast(this->p); + } + + T* operator->() const noexcept + { + return this->p; + } + + bool is_None() const noexcept + { + return this->p == Py_None; + } + + PyObject* acquire_or_None() const noexcept + { + PyObject* result = this->p ? reinterpret_cast(this->p) : Py_None; + Py_INCREF(result); + return result; + } + + explicit operator bool() const noexcept + { + return this->p != nullptr; + } + + bool operator!() const noexcept + { + return this->p == nullptr; + } + + Py_ssize_t REFCNT() const noexcept + { + return p ? Py_REFCNT(p) : -42; + } + + PyTypeObject* TYPE() const noexcept + { + return p ? Py_TYPE(p) : nullptr; + } + + inline OwnedObject PyStr() const noexcept; + inline const std::string as_str() const noexcept; + inline OwnedObject PyGetAttr(const ImmortalObject& name) const noexcept; + inline OwnedObject PyRequireAttr(const char* const name) const; + inline OwnedObject PyRequireAttr(const ImmortalString& name) const; + inline OwnedObject PyCall(const BorrowedObject& arg) const; + inline OwnedObject PyCall(PyGreenlet* arg) const ; + inline OwnedObject PyCall(PyObject* arg) const ; + // PyObject_Call(this, args, kwargs); + inline OwnedObject PyCall(const BorrowedObject args, + const BorrowedObject kwargs) const; + inline OwnedObject PyCall(const OwnedObject& args, + const OwnedObject& kwargs) const; + + protected: + void _set_raw_pointer(void* t) + { + TC(t); + p = reinterpret_cast(t); + } + void* _get_raw_pointer() const + { + return p; + } + }; + +#ifdef GREENLET_USE_STDIO + template + std::ostream& operator<<(std::ostream& os, const PyObjectPointer& s) + { + const std::type_info& t = typeid(s); + os << t.name() + << "(addr=" << s.borrow() + << ", refcnt=" << s.REFCNT() + << ", value=" << s.as_str() + << ")"; + + return os; + } +#endif + + template + inline bool operator==(const PyObjectPointer& lhs, const PyObject* const rhs) noexcept + { + return static_cast(lhs.borrow_o()) == static_cast(rhs); + } + + template + inline bool operator==(const PyObjectPointer& lhs, const PyObjectPointer& rhs) noexcept + { + return lhs.borrow_o() == rhs.borrow_o(); + } + + template + inline bool operator!=(const PyObjectPointer& lhs, + const PyObjectPointer& rhs) noexcept + { + return lhs.borrow_o() != rhs.borrow_o(); + } + + template + class OwnedReference : public PyObjectPointer + { + private: + friend class OwnedList; + + protected: + explicit OwnedReference(T* it) : PyObjectPointer(it) + { + } + + public: + + // Constructors + + static OwnedReference 
consuming(PyObject* p) + { + return OwnedReference(reinterpret_cast(p)); + } + + static OwnedReference owning(T* p) + { + OwnedReference result(p); + Py_XINCREF(result.p); + return result; + } + + OwnedReference() : PyObjectPointer(nullptr) + {} + + explicit OwnedReference(const PyObjectPointer<>& other) + : PyObjectPointer(nullptr) + { + T* op = other.borrow(); + TC(op); + this->p = other.borrow(); + Py_XINCREF(this->p); + } + + // It would be good to make use of the C++11 distinction + // between move and copy operations, e.g., constructing from a + // pointer should be a move operation. + // In the common case of ``OwnedObject x = Py_SomeFunction()``, + // the call to the copy constructor will be elided completely. + OwnedReference(const OwnedReference& other) + : PyObjectPointer(other.p) + { + Py_XINCREF(this->p); + } + + static OwnedReference None() + { + Py_INCREF(Py_None); + return OwnedReference(Py_None); + } + + // We can assign from exactly our type without any extra checking + OwnedReference& operator=(const OwnedReference& other) + { + Py_XINCREF(other.p); + const T* tmp = this->p; + this->p = other.p; + Py_XDECREF(tmp); + return *this; + } + + OwnedReference& operator=(const BorrowedReference other) + { + return this->operator=(other.borrow()); + } + + OwnedReference& operator=(T* const other) + { + TC(other); + Py_XINCREF(other); + T* tmp = this->p; + this->p = other; + Py_XDECREF(tmp); + return *this; + } + + // We can assign from an arbitrary reference type + // if it passes our check. + template + OwnedReference& operator=(const OwnedReference& other) + { + X* op = other.borrow(); + TC(op); + return this->operator=(reinterpret_cast(op)); + } + + inline void steal(T* other) + { + assert(this->p == nullptr); + TC(other); + this->p = other; + } + + T* relinquish_ownership() + { + T* result = this->p; + this->p = nullptr; + return result; + } + + T* acquire() const + { + // Return a new reference. + // TODO: This may go away when we have reference objects + // throughout the code. + Py_XINCREF(this->p); + return this->p; + } + + // Nothing else declares a destructor, we're the leaf, so we + // should be able to get away without virtual. + ~OwnedReference() + { + Py_CLEAR(this->p); + } + + void CLEAR() + { + Py_CLEAR(this->p); + assert(this->p == nullptr); + } + }; + + static inline + void operator<<=(PyObject*& target, OwnedObject& o) + { + target = o.relinquish_ownership(); + } + + + class NewReference : public OwnedObject + { + private: + G_NO_COPIES_OF_CLS(NewReference); + public: + // Consumes the reference. Only use this + // for API return values. 
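+            // For example (hypothetical, not taken from this file):
+            //   NewReference three(PyLong_FromLong(3));
+            // PyLong_FromLong returns a new reference, and it is
+            // released when ``three`` goes out of scope.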
+ NewReference(PyObject* it) : OwnedObject(it) + { + } + }; + + class NewDictReference : public NewReference + { + private: + G_NO_COPIES_OF_CLS(NewDictReference); + public: + NewDictReference() : NewReference(PyDict_New()) + { + if (!this->p) { + throw PyErrOccurred(); + } + } + + void SetItem(const char* const key, PyObject* value) + { + Require(PyDict_SetItemString(this->p, key, value)); + } + + void SetItem(const PyObjectPointer<>& key, PyObject* value) + { + Require(PyDict_SetItem(this->p, key.borrow_o(), value)); + } + }; + + template + class _OwnedGreenlet: public OwnedReference + { + private: + protected: + _OwnedGreenlet(T* it) : OwnedReference(it) + {} + + public: + _OwnedGreenlet() : OwnedReference() + {} + + _OwnedGreenlet(const _OwnedGreenlet& other) : OwnedReference(other) + { + } + _OwnedGreenlet(OwnedMainGreenlet& other) : + OwnedReference(reinterpret_cast(other.acquire())) + { + } + _OwnedGreenlet(const BorrowedGreenlet& other); + // Steals a reference. + static _OwnedGreenlet consuming(PyGreenlet* it) + { + return _OwnedGreenlet(reinterpret_cast(it)); + } + + inline _OwnedGreenlet& operator=(const OwnedGreenlet& other) + { + return this->operator=(other.borrow()); + } + + inline _OwnedGreenlet& operator=(const BorrowedGreenlet& other); + + _OwnedGreenlet& operator=(const OwnedMainGreenlet& other) + { + PyGreenlet* owned = other.acquire(); + Py_XDECREF(this->p); + this->p = reinterpret_cast(owned); + return *this; + } + + _OwnedGreenlet& operator=(T* const other) + { + OwnedReference::operator=(other); + return *this; + } + + T* relinquish_ownership() + { + T* result = this->p; + this->p = nullptr; + return result; + } + + PyObject* relinquish_ownership_o() + { + return reinterpret_cast(relinquish_ownership()); + } + + inline Greenlet* operator->() const noexcept; + inline operator Greenlet*() const noexcept; + }; + + template + class BorrowedReference : public PyObjectPointer + { + public: + // Allow implicit creation from PyObject* pointers as we + // transition to using these classes. Also allow automatic + // conversion to PyObject* for passing to C API calls and even + // for Py_INCREF/DECREF, because we ourselves do no memory management. 
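+            // Illustratively (``some_pyobject_ptr`` is hypothetical):
+            //   BorrowedObject obj = some_pyobject_ptr;  // implicit in
+            //   PyObject_Print(obj, stdout, 0);          // implicit out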
+ BorrowedReference(T* it) : PyObjectPointer(it) + {} + + BorrowedReference(const PyObjectPointer& ref) : PyObjectPointer(ref.borrow()) + {} + + BorrowedReference() : PyObjectPointer(nullptr) + {} + + operator T*() const + { + return this->p; + } + }; + + typedef BorrowedReference BorrowedObject; + //typedef BorrowedReference BorrowedGreenlet; + + template + class _BorrowedGreenlet : public BorrowedReference + { + public: + _BorrowedGreenlet() : + BorrowedReference(nullptr) + {} + + _BorrowedGreenlet(T* it) : + BorrowedReference(it) + {} + + _BorrowedGreenlet(const BorrowedObject& it); + + _BorrowedGreenlet(const OwnedGreenlet& it) : + BorrowedReference(it.borrow()) + {} + + _BorrowedGreenlet& operator=(const BorrowedObject& other); + + // We get one of these for PyGreenlet, but one for PyObject + // is handy as well + operator PyObject*() const + { + return reinterpret_cast(this->p); + } + Greenlet* operator->() const noexcept; + operator Greenlet*() const noexcept; + }; + + typedef _BorrowedGreenlet BorrowedGreenlet; + + template + _OwnedGreenlet::_OwnedGreenlet(const BorrowedGreenlet& other) + : OwnedReference(reinterpret_cast(other.borrow())) + { + Py_XINCREF(this->p); + } + + + class BorrowedMainGreenlet + : public _BorrowedGreenlet + { + public: + BorrowedMainGreenlet(const OwnedMainGreenlet& it) : + _BorrowedGreenlet(it.borrow()) + {} + BorrowedMainGreenlet(PyGreenlet* it=nullptr) + : _BorrowedGreenlet(it) + {} + }; + + template + _OwnedGreenlet& _OwnedGreenlet::operator=(const BorrowedGreenlet& other) + { + return this->operator=(other.borrow()); + } + + + class ImmortalObject : public PyObjectPointer<> + { + private: + G_NO_ASSIGNMENT_OF_CLS(ImmortalObject); + public: + explicit ImmortalObject(PyObject* it) : PyObjectPointer<>(it) + { + } + + ImmortalObject(const ImmortalObject& other) + : PyObjectPointer<>(other.p) + { + + } + + /** + * Become the new owner of the object. Does not change the + * reference count. + */ + ImmortalObject& operator=(PyObject* it) + { + assert(this->p == nullptr); + this->p = it; + return *this; + } + + static ImmortalObject consuming(PyObject* it) + { + return ImmortalObject(it); + } + + inline operator PyObject*() const + { + return this->p; + } + }; + + class ImmortalString : public ImmortalObject + { + private: + G_NO_COPIES_OF_CLS(ImmortalString); + const char* str; + public: + ImmortalString(const char* const str) : + ImmortalObject(str ? Require(PyUnicode_InternFromString(str)) : nullptr) + { + this->str = str; + } + + inline ImmortalString& operator=(const char* const str) + { + if (!this->p) { + this->p = Require(PyUnicode_InternFromString(str)); + this->str = str; + } + else { + assert(this->str == str); + } + return *this; + } + + inline operator std::string() const + { + return this->str; + } + + }; + + class ImmortalEventName : public ImmortalString + { + private: + G_NO_COPIES_OF_CLS(ImmortalEventName); + public: + ImmortalEventName(const char* const str) : ImmortalString(str) + {} + }; + + class ImmortalException : public ImmortalObject + { + private: + G_NO_COPIES_OF_CLS(ImmortalException); + public: + ImmortalException(const char* const name, PyObject* base=nullptr) : + ImmortalObject(name + // Python 2.7 isn't const correct + ? 
Require(PyErr_NewException((char*)name, base, nullptr)) + : nullptr) + {} + + inline bool PyExceptionMatches() const + { + return PyErr_ExceptionMatches(this->p) > 0; + } + + }; + + template + inline OwnedObject PyObjectPointer::PyStr() const noexcept + { + if (!this->p) { + return OwnedObject(); + } + return OwnedObject::consuming(PyObject_Str(reinterpret_cast(this->p))); + } + + template + inline const std::string PyObjectPointer::as_str() const noexcept + { + // NOTE: This is not Python exception safe. + if (this->p) { + // The Python APIs return a cached char* value that's only valid + // as long as the original object stays around, and we're + // about to (probably) toss it. Hence the copy to std::string. + OwnedObject py_str = this->PyStr(); + if (!py_str) { + return "(nil)"; + } + return PyUnicode_AsUTF8(py_str.borrow()); + } + return "(nil)"; + } + + template + inline OwnedObject PyObjectPointer::PyGetAttr(const ImmortalObject& name) const noexcept + { + assert(this->p); + return OwnedObject::consuming(PyObject_GetAttr(reinterpret_cast(this->p), name)); + } + + template + inline OwnedObject PyObjectPointer::PyRequireAttr(const char* const name) const + { + assert(this->p); + return OwnedObject::consuming(Require(PyObject_GetAttrString(this->p, name), name)); + } + + template + inline OwnedObject PyObjectPointer::PyRequireAttr(const ImmortalString& name) const + { + assert(this->p); + return OwnedObject::consuming(Require( + PyObject_GetAttr( + reinterpret_cast(this->p), + name + ), + name + )); + } + + template + inline OwnedObject PyObjectPointer::PyCall(const BorrowedObject& arg) const + { + return this->PyCall(arg.borrow()); + } + + template + inline OwnedObject PyObjectPointer::PyCall(PyGreenlet* arg) const + { + return this->PyCall(reinterpret_cast(arg)); + } + + template + inline OwnedObject PyObjectPointer::PyCall(PyObject* arg) const + { + assert(this->p); + return OwnedObject::consuming(PyObject_CallFunctionObjArgs(this->p, arg, NULL)); + } + + template + inline OwnedObject PyObjectPointer::PyCall(const BorrowedObject args, + const BorrowedObject kwargs) const + { + assert(this->p); + return OwnedObject::consuming(PyObject_Call(this->p, args, kwargs)); + } + + template + inline OwnedObject PyObjectPointer::PyCall(const OwnedObject& args, + const OwnedObject& kwargs) const + { + assert(this->p); + return OwnedObject::consuming(PyObject_Call(this->p, args.borrow(), kwargs.borrow())); + } + + inline void + ListChecker(void * p) + { + if (!p) { + return; + } + if (!PyList_Check(p)) { + throw TypeError("Expected a list"); + } + } + + class OwnedList : public OwnedReference + { + private: + G_NO_ASSIGNMENT_OF_CLS(OwnedList); + public: + // TODO: Would like to use move. + explicit OwnedList(const OwnedObject& other) + : OwnedReference(other) + { + } + + OwnedList& operator=(const OwnedObject& other) + { + if (other && PyList_Check(other.p)) { + // Valid list. Own a new reference to it, discard the + // reference to what we did own. + PyObject* new_ptr = other.p; + Py_INCREF(new_ptr); + Py_XDECREF(this->p); + this->p = new_ptr; + } + else { + // Either the other object was NULL (an error) or it + // wasn't a list. Either way, we're now invalidated. 
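+                    // (Callers must re-assign a valid list before using
+                    // the accessors below, which assume a non-null ``p``.)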
+ Py_XDECREF(this->p); + this->p = nullptr; + } + return *this; + } + + inline bool empty() const + { + return PyList_GET_SIZE(p) == 0; + } + + inline Py_ssize_t size() const + { + return PyList_GET_SIZE(p); + } + + inline BorrowedObject at(const Py_ssize_t index) const + { + return PyList_GET_ITEM(p, index); + } + + inline void clear() + { + PyList_SetSlice(p, 0, PyList_GET_SIZE(p), NULL); + } + }; + + // Use this to represent the module object used at module init + // time. + // This could either be a borrowed (Py2) or new (Py3) reference; + // either way, we don't want to do any memory management + // on it here, Python itself will handle that. + // XXX: Actually, that's not quite right. On Python 3, if an + // exception occurs before we return to the interpreter, this will + // leak; but all previous versions also had that problem. + class CreatedModule : public PyObjectPointer<> + { + private: + G_NO_COPIES_OF_CLS(CreatedModule); + public: + CreatedModule(PyModuleDef& mod_def) : PyObjectPointer<>( + Require(PyModule_Create(&mod_def))) + { + } + + // PyAddObject(): Add a reference to the object to the module. + // On return, the reference count of the object is unchanged. + // + // The docs warn that PyModule_AddObject only steals the + // reference on success, so if it fails after we've incref'd + // or allocated, we're responsible for the decref. + void PyAddObject(const char* name, const long new_bool) + { + OwnedObject p = OwnedObject::consuming(Require(PyBool_FromLong(new_bool))); + this->PyAddObject(name, p); + } + + void PyAddObject(const char* name, const OwnedObject& new_object) + { + // The caller already owns a reference they will decref + // when their variable goes out of scope, we still need to + // incref/decref. + this->PyAddObject(name, new_object.borrow()); + } + + void PyAddObject(const char* name, const ImmortalObject& new_object) + { + this->PyAddObject(name, new_object.borrow()); + } + + void PyAddObject(const char* name, PyTypeObject& type) + { + this->PyAddObject(name, reinterpret_cast(&type)); + } + + void PyAddObject(const char* name, PyObject* new_object) + { + Py_INCREF(new_object); + try { + Require(PyModule_AddObject(this->p, name, new_object)); + } + catch (const PyErrOccurred&) { + Py_DECREF(p); + throw; + } + } + }; + + class PyErrFetchParam : public PyObjectPointer<> + { + // Not an owned object, because we can't be initialized with + // one, and we only sometimes acquire ownership. + private: + G_NO_COPIES_OF_CLS(PyErrFetchParam); + public: + // To allow declaring these and passing them to + // PyErr_Fetch we implement the empty constructor, + // and the address operator. + PyErrFetchParam() : PyObjectPointer<>(nullptr) + { + } + + PyObject** operator&() + { + return &this->p; + } + + // This allows us to pass one directly without the &, + // BUT it has higher precedence than the bool operator + // if it's not explicit. + operator PyObject**() + { + return &this->p; + } + + // We don't want to be able to pass these to Py_DECREF and + // such so we don't have the implicit PyObject* conversion. + + inline PyObject* relinquish_ownership() + { + PyObject* result = this->p; + this->p = nullptr; + return result; + } + + ~PyErrFetchParam() + { + Py_XDECREF(p); + } + }; + + class OwnedErrPiece : public OwnedObject + { + private: + + public: + // Unlike OwnedObject, this increments the refcount. 
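+            // (The extra incref means callers may safely pass in
+            // borrowed pointers, e.g. ones PyArg_ParseTuple produced.)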
+ OwnedErrPiece(PyObject* p=nullptr) : OwnedObject(p) + { + this->acquire(); + } + + PyObject** operator&() + { + return &this->p; + } + + inline operator PyObject*() const + { + return this->p; + } + + operator PyTypeObject*() const + { + return reinterpret_cast(this->p); + } + }; + + class PyErrPieces + { + private: + OwnedErrPiece type; + OwnedErrPiece instance; + OwnedErrPiece traceback; + bool restored; + public: + // Takes new references; if we're destroyed before + // restoring the error, we drop the references. + PyErrPieces(PyObject* t, PyObject* v, PyObject* tb) : + type(t), + instance(v), + traceback(tb), + restored(0) + { + this->normalize(); + } + + PyErrPieces() : + restored(0) + { + // PyErr_Fetch transfers ownership to us, so + // we don't actually need to INCREF; but we *do* + // need to DECREF if we're not restored. + PyErrFetchParam t, v, tb; + PyErr_Fetch(&t, &v, &tb); + type.steal(t.relinquish_ownership()); + instance.steal(v.relinquish_ownership()); + traceback.steal(tb.relinquish_ownership()); + } + + void PyErrRestore() + { + // can only do this once + assert(!this->restored); + this->restored = true; + PyErr_Restore( + this->type.relinquish_ownership(), + this->instance.relinquish_ownership(), + this->traceback.relinquish_ownership()); + assert(!this->type && !this->instance && !this->traceback); + } + + private: + void normalize() + { + // First, check the traceback argument, replacing None, + // with NULL + if (traceback.is_None()) { + traceback = nullptr; + } + + if (traceback && !PyTraceBack_Check(traceback.borrow())) { + throw PyErrOccurred(PyExc_TypeError, + "throw() third argument must be a traceback object"); + } + + if (PyExceptionClass_Check(type)) { + // If we just had a type, we'll now have a type and + // instance. + // The type's refcount will have gone up by one + // because of the instance and the instance will have + // a refcount of one. Either way, we owned, and still + // do own, exactly one reference. + PyErr_NormalizeException(&type, &instance, &traceback); + + } + else if (PyExceptionInstance_Check(type)) { + /* Raising an instance --- usually that means an + object that is a subclass of BaseException, but on + Python 2, that can also mean an arbitrary old-style + object. The value should be a dummy. */ + if (instance && !instance.is_None()) { + throw PyErrOccurred( + PyExc_TypeError, + "instance exception may not have a separate value"); + } + /* Normalize to raise , */ + this->instance = this->type; + this->type = PyExceptionInstance_Class(instance.borrow()); + + /* + It would be tempting to do this: + + Py_ssize_t type_count = Py_REFCNT(Py_TYPE(instance.borrow())); + this->type = PyExceptionInstance_Class(instance.borrow()); + assert(this->type.REFCNT() == type_count + 1); + + But that doesn't work on Python 2 in the case of + old-style instances: The result of Py_TYPE is going to + be the global shared that all + old-style classes have, while the return of Instance_Class() + will be the Python-level class object. The two are unrelated. + */ + } + else { + /* Not something you can raise. throw() fails. */ + PyErr_Format(PyExc_TypeError, + "exceptions must be classes, or instances, not %s", + Py_TYPE(type.borrow())->tp_name); + throw PyErrOccurred(); + } + } + }; + + // PyArg_Parse's O argument returns a borrowed reference. 
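+        // Sketch of the intended use (``args`` being whatever argument
+        // tuple the caller received):
+        //   PyArgParseParam obj;
+        //   if (!PyArg_ParseTuple(args, "O", &obj)) { /* error */ }
+        // The operator& below exposes the PyObject** slot that
+        // PyArg_ParseTuple fills with the borrowed pointer.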
+ class PyArgParseParam : public BorrowedObject + { + private: + G_NO_COPIES_OF_CLS(PyArgParseParam); + public: + explicit PyArgParseParam(PyObject* p=nullptr) : BorrowedObject(p) + { + } + + inline PyObject** operator&() + { + return &this->p; + } + }; + +};}; + +#endif diff --git a/venv/Lib/site-packages/greenlet/greenlet_slp_switch.hpp b/venv/Lib/site-packages/greenlet/greenlet_slp_switch.hpp new file mode 100644 index 00000000..bd4b7ae1 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/greenlet_slp_switch.hpp @@ -0,0 +1,99 @@ +#ifndef GREENLET_SLP_SWITCH_HPP +#define GREENLET_SLP_SWITCH_HPP + +#include "greenlet_compiler_compat.hpp" +#include "greenlet_refs.hpp" + +/* + * the following macros are spliced into the OS/compiler + * specific code, in order to simplify maintenance. + */ +// We can save about 10% of the time it takes to switch greenlets if +// we thread the thread state through the slp_save_state() and the +// following slp_restore_state() calls from +// slp_switch()->g_switchstack() (which already needs to access it). +// +// However: +// +// that requires changing the prototypes and implementations of the +// switching functions. If we just change the prototype of +// slp_switch() to accept the argument and update the macros, without +// changing the implementation of slp_switch(), we get crashes on +// 64-bit Linux and 32-bit x86 (for reasons that aren't 100% clear); +// on the other hand, 64-bit macOS seems to be fine. Also, 64-bit +// windows is an issue because slp_switch is written fully in assembly +// and currently ignores its argument so some code would have to be +// adjusted there to pass the argument on to the +// ``slp_save_state_asm()`` function (but interestingly, because of +// the calling convention, the extra argument is just ignored and +// things function fine, albeit slower, if we just modify +// ``slp_save_state_asm`()` to fetch the pointer to pass to the +// macro.) +// +// Our compromise is to use a *glabal*, untracked, weak, pointer +// to the necessary thread state during the process of switching only. +// This is safe because we're protected by the GIL, and if we're +// running this code, the thread isn't exiting. This also nets us a +// 10-12% speed improvement. + +static greenlet::Greenlet* volatile switching_thread_state = nullptr; + + +extern "C" { +static int GREENLET_NOINLINE(slp_save_state_trampoline)(char* stackref); +static void GREENLET_NOINLINE(slp_restore_state_trampoline)(); +} + + +#define SLP_SAVE_STATE(stackref, stsizediff) \ +do { \ + assert(switching_thread_state); \ + stackref += STACK_MAGIC; \ + if (slp_save_state_trampoline((char*)stackref)) \ + return -1; \ + if (!switching_thread_state->active()) \ + return 1; \ + stsizediff = switching_thread_state->stack_start() - (char*)stackref; \ +} while (0) + +#define SLP_RESTORE_STATE() slp_restore_state_trampoline() + +#define SLP_EVAL +extern "C" { +#define slp_switch GREENLET_NOINLINE(slp_switch) +#include "slp_platformselect.h" +} +#undef slp_switch + +#ifndef STACK_MAGIC +# error \ + "greenlet needs to be ported to this platform, or taught how to detect your compiler properly." +#endif /* !STACK_MAGIC */ + + + +#ifdef EXTERNAL_ASM +/* CCP addition: Make these functions, to be called from assembler. + * The token include file for the given platform should enable the + * EXTERNAL_ASM define so that this is included. 
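+ * The trampolines below simply forward to the SLP_SAVE_STATE and
+ * SLP_RESTORE_STATE macros, giving the platform assembly a plain C
+ * calling convention to reach them through.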
+ */ +extern "C" { +intptr_t +slp_save_state_asm(intptr_t* ref) +{ + intptr_t diff; + SLP_SAVE_STATE(ref, diff); + return diff; +} + +void +slp_restore_state_asm(void) +{ + SLP_RESTORE_STATE(); +} + +extern int slp_switch(void); +}; +#endif + +#endif diff --git a/venv/Lib/site-packages/greenlet/greenlet_thread_support.hpp b/venv/Lib/site-packages/greenlet/greenlet_thread_support.hpp new file mode 100644 index 00000000..3ded7d2b --- /dev/null +++ b/venv/Lib/site-packages/greenlet/greenlet_thread_support.hpp @@ -0,0 +1,31 @@ +#ifndef GREENLET_THREAD_SUPPORT_HPP +#define GREENLET_THREAD_SUPPORT_HPP + +/** + * Defines various utility functions to help greenlet integrate well + * with threads. This used to be needed when we supported Python + * 2.7 on Windows, which used a very old compiler. We wrote an + * alternative implementation using Python APIs and POSIX or Windows + * APIs, but that's no longer needed. So this file is a shadow of its + * former self --- but may be needed in the future. + */ + +#include +#include +#include + +#include "greenlet_compiler_compat.hpp" + +namespace greenlet { + typedef std::mutex Mutex; + typedef std::lock_guard LockGuard; + class LockInitError : public std::runtime_error + { + public: + LockInitError(const char* what) : std::runtime_error(what) + {}; + }; +}; + + +#endif /* GREENLET_THREAD_SUPPORT_HPP */ diff --git a/venv/Lib/site-packages/greenlet/platform/__init__.py b/venv/Lib/site-packages/greenlet/platform/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/greenlet/platform/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/greenlet/platform/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0a0b59b5 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/platform/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/platform/setup_switch_x64_masm.cmd b/venv/Lib/site-packages/greenlet/platform/setup_switch_x64_masm.cmd new file mode 100644 index 00000000..038ced29 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/setup_switch_x64_masm.cmd @@ -0,0 +1,2 @@ +call "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\vcvarsall.bat" amd64 +ml64 /nologo /c /Fo switch_x64_masm.obj switch_x64_masm.asm diff --git a/venv/Lib/site-packages/greenlet/platform/switch_aarch64_gcc.h b/venv/Lib/site-packages/greenlet/platform/switch_aarch64_gcc.h new file mode 100644 index 00000000..058617c4 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_aarch64_gcc.h @@ -0,0 +1,124 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 07-Sep-16 Add clang support using x register naming. Fredrik Fornwall + * 13-Apr-13 Add support for strange GCC caller-save decisions + * 08-Apr-13 File creation. Michael Matz + * + * NOTES + * + * Simply save all callee saved registers + * + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL +#define STACK_MAGIC 0 +#define REGS_TO_SAVE "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", \ + "x27", "x28", "x30" /* aka lr */, \ + "v8", "v9", "v10", "v11", \ + "v12", "v13", "v14", "v15" + +/* + * Recall: + asm asm-qualifiers ( AssemblerTemplate + : OutputOperands + [ : InputOperands + [ : Clobbers ] ]) + + or (if asm-qualifiers contains 'goto') + + asm asm-qualifiers ( AssemblerTemplate + : OutputOperands + : InputOperands + : Clobbers + : GotoLabels) + + and OutputOperands are + + [ [asmSymbolicName] ] constraint (cvariablename) + + When a name is given, refer to it as ``%[the name]``. 
+ When not given, ``%i`` where ``i`` is the zero-based index. + + constraints starting with ``=`` means only writing; ``+`` means + reading and writing. + + This is followed by ``r`` (must be register) or ``m`` (must be memory) + and these can be combined. + + The ``cvariablename`` is actually an lvalue expression. + + In AArch65, 31 general purpose registers. If named X0... they are + 64-bit. If named W0... they are the bottom 32 bits of the + corresponding 64 bit register. + + XZR and WZR are hardcoded to 0, and ignore writes. + + Arguments are in X0..X7. C++ uses X0 for ``this``. X0 holds simple return + values (?) + + Whenever a W register is written, the top half of the X register is zeroed. + */ + +static int +slp_switch(void) +{ + int err; + void *fp; + /* Windowz uses a 32-bit long on a 64-bit platform, unlike the rest of + the world, and in theory we can be compiled with GCC/llvm on 64-bit + windows. So we need a fixed-width type. + */ + int64_t *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("str x29, %0" : "=m"(fp) : : ); + __asm__ ("mov %0, sp" : "=r" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "add sp,sp,%0\n" + "add x29,x29,%0\n" + : + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + /* SLP_SAVE_STATE macro contains some return statements + (of -1 and 1). It falls through only when + the return value of slp_save_state() is zero, which + is placed in x0. + In that case we (slp_switch) also want to return zero + (also in x0 of course). + Now, some GCC versions (seen with 4.8) think it's a + good idea to save/restore x0 around the call to + slp_restore_state(), instead of simply zeroing it + at the return below. But slp_restore_state + writes random values to the stack slot used for this + save/restore (from when it once was saved above in + SLP_SAVE_STATE, when it was still uninitialized), so + "restoring" that precious zero actually makes us + return random values. There are some ways to make + GCC not use that zero value in the normal return path + (e.g. making err volatile, but that costs a little + stack space), and the simplest is to call a function + that returns an unknown value (which happens to be zero), + so the saved/restored value is unused. + + Thus, this line stores a 0 into the ``err`` variable + (which must be held in a register for this instruction, + of course). The ``w`` qualifier causes the instruction + to use W0 instead of X0, otherwise we get a warning + about a value size mismatch (because err is an int, + and aarch64 platforms are LP64: 32-bit int, 64 bit long + and pointer). 
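+
+       For reference, the same zeroing instruction written with a
+       named operand (using the syntax recalled at the top of this
+       comment) would be:
+
+           __asm__ volatile ("mov %w[err], #0" : [err] "=r" (err));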
+ */ + __asm__ volatile ("mov %w0, #0" : "=r" (err)); + } + __asm__ volatile ("ldr x29, %0" : : "m" (fp) :); + __asm__ volatile ("" : : : REGS_TO_SAVE); + return err; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_alpha_unix.h b/venv/Lib/site-packages/greenlet/platform/switch_alpha_unix.h new file mode 100644 index 00000000..7e07abfc --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_alpha_unix.h @@ -0,0 +1,30 @@ +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "$9", "$10", "$11", "$12", "$13", "$14", "$15", \ + "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9" + +static int +slp_switch(void) +{ + int ret; + long *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("mov $30, %0" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "addq $30, %0, $30\n\t" + : /* no outputs */ + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("mov $31, %0" : "=r" (ret) : ); + return ret; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_amd64_unix.h b/venv/Lib/site-packages/greenlet/platform/switch_amd64_unix.h new file mode 100644 index 00000000..d4701105 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_amd64_unix.h @@ -0,0 +1,87 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 3-May-13 Ralf Schmitt + * Add support for strange GCC caller-save decisions + * (ported from switch_aarch64_gcc.h) + * 18-Aug-11 Alexey Borzenkov + * Correctly save rbp, csr and cw + * 01-Apr-04 Hye-Shik Chang + * Ported from i386 to amd64. + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for spark + * 31-Avr-02 Armin Rigo + * Added ebx, esi and edi register-saves. + * 01-Mar-02 Samual M. Rushing + * Ported from i386. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +/* #define STACK_MAGIC 3 */ +/* the above works fine with gcc 2.96, but 2.95.3 wants this */ +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "r12", "r13", "r14", "r15" + +static int +slp_switch(void) +{ + int err; + void* rbp; + void* rbx; + unsigned int csr; + unsigned short cw; + /* This used to be declared 'register', but that does nothing in + modern compilers and is explicitly forbidden in some new + standards. 
*/ + long *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("fstcw %0" : "=m" (cw)); + __asm__ volatile ("stmxcsr %0" : "=m" (csr)); + __asm__ volatile ("movq %%rbp, %0" : "=m" (rbp)); + __asm__ volatile ("movq %%rbx, %0" : "=m" (rbx)); + __asm__ ("movq %%rsp, %0" : "=g" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "addq %0, %%rsp\n" + "addq %0, %%rbp\n" + : + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + __asm__ volatile ("xorq %%rax, %%rax" : "=a" (err)); + } + __asm__ volatile ("movq %0, %%rbx" : : "m" (rbx)); + __asm__ volatile ("movq %0, %%rbp" : : "m" (rbp)); + __asm__ volatile ("ldmxcsr %0" : : "m" (csr)); + __asm__ volatile ("fldcw %0" : : "m" (cw)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/venv/Lib/site-packages/greenlet/platform/switch_arm32_gcc.h b/venv/Lib/site-packages/greenlet/platform/switch_arm32_gcc.h new file mode 100644 index 00000000..655003aa --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_arm32_gcc.h @@ -0,0 +1,79 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 14-Aug-06 File creation. Ported from Arm Thumb. Sylvain Baro + * 3-Sep-06 Commented out saving of r1-r3 (r4 already commented out) as I + * read that these do not need to be saved. Also added notes and + * errors related to the frame pointer. Richard Tew. + * + * NOTES + * + * It is not possible to detect if fp is used or not, so the supplied + * switch function needs to support it, so that you can remove it if + * it does not apply to you. + * + * POSSIBLE ERRORS + * + * "fp cannot be used in asm here" + * + * - Try commenting out "fp" in REGS_TO_SAVE. 
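+ *   (GCC refuses to let the frame-pointer register appear in an asm
+ *    clobber list while the function actually needs a frame pointer;
+ *    hence the "no-omit-frame-pointer" attribute on slp_switch below.)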
+ * + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL +#define STACK_MAGIC 0 +#define REG_SP "sp" +#define REG_SPSP "sp,sp" +#ifdef __thumb__ +#define REG_FP "r7" +#define REG_FPFP "r7,r7" +#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r8", "r9", "r10", "r11", "lr" +#else +#define REG_FP "fp" +#define REG_FPFP "fp,fp" +#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r7", "r8", "r9", "r10", "lr" +#endif +#if defined(__SOFTFP__) +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL +#elif defined(__VFP_FP__) +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "d8", "d9", "d10", "d11", \ + "d12", "d13", "d14", "d15" +#elif defined(__MAVERICK__) +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "mvf4", "mvf5", "mvf6", "mvf7", \ + "mvf8", "mvf9", "mvf10", "mvf11", \ + "mvf12", "mvf13", "mvf14", "mvf15" +#else +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "f4", "f5", "f6", "f7" +#endif + +static int +#ifdef __GNUC__ +__attribute__((optimize("no-omit-frame-pointer"))) +#endif +slp_switch(void) +{ + void *fp; + int *stackref, stsizediff; + int result; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("mov r0," REG_FP "\n\tstr r0,%0" : "=m" (fp) : : "r0"); + __asm__ ("mov %0," REG_SP : "=r" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "add " REG_SPSP ",%0\n" + "add " REG_FPFP ",%0\n" + : + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("ldr r0,%1\n\tmov " REG_FP ",r0\n\tmov %0, #0" : "=r" (result) : "m" (fp) : "r0"); + __asm__ volatile ("" : : : REGS_TO_SAVE); + return result; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_arm32_ios.h b/venv/Lib/site-packages/greenlet/platform/switch_arm32_ios.h new file mode 100644 index 00000000..9e640e15 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_arm32_ios.h @@ -0,0 +1,67 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 31-May-15 iOS support. Ported from arm32. Proton + * + * NOTES + * + * It is not possible to detect if fp is used or not, so the supplied + * switch function needs to support it, so that you can remove it if + * it does not apply to you. + * + * POSSIBLE ERRORS + * + * "fp cannot be used in asm here" + * + * - Try commenting out "fp" in REGS_TO_SAVE. 
+ * + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 0 +#define REG_SP "sp" +#define REG_SPSP "sp,sp" +#define REG_FP "r7" +#define REG_FPFP "r7,r7" +#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r8", "r10", "r11", "lr" +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "d8", "d9", "d10", "d11", \ + "d12", "d13", "d14", "d15" + +static int +#ifdef __GNUC__ +__attribute__((optimize("no-omit-frame-pointer"))) +#endif +slp_switch(void) +{ + void *fp; + int *stackref, stsizediff, result; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("str " REG_FP ",%0" : "=m" (fp)); + __asm__ ("mov %0," REG_SP : "=r" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "add " REG_SPSP ",%0\n" + "add " REG_FPFP ",%0\n" + : + : "r" (stsizediff) + : REGS_TO_SAVE /* Clobber registers, force compiler to + * recalculate address of void *fp from REG_SP or REG_FP */ + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ( + "ldr " REG_FP ", %1\n\t" + "mov %0, #0" + : "=r" (result) + : "m" (fp) + : REGS_TO_SAVE /* Force compiler to restore saved registers after this */ + ); + return result; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_arm64_masm.asm b/venv/Lib/site-packages/greenlet/platform/switch_arm64_masm.asm new file mode 100644 index 00000000..29f9c225 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_arm64_masm.asm @@ -0,0 +1,53 @@ + AREA switch_arm64_masm, CODE, READONLY; + GLOBAL slp_switch [FUNC] + EXTERN slp_save_state_asm + EXTERN slp_restore_state_asm + +slp_switch + ; push callee saved registers to stack + stp x19, x20, [sp, #-16]! + stp x21, x22, [sp, #-16]! + stp x23, x24, [sp, #-16]! + stp x25, x26, [sp, #-16]! + stp x27, x28, [sp, #-16]! + stp x29, x30, [sp, #-16]! + stp d8, d9, [sp, #-16]! + stp d10, d11, [sp, #-16]! + stp d12, d13, [sp, #-16]! + stp d14, d15, [sp, #-16]! + + ; call slp_save_state_asm with stack pointer + mov x0, sp + bl slp_save_state_asm + + ; early return for return value of 1 and -1 + cmp x0, #-1 + b.eq RETURN + cmp x0, #1 + b.eq RETURN + + ; increment stack and frame pointer + add sp, sp, x0 + add x29, x29, x0 + + bl slp_restore_state_asm + + ; store return value for successful completion of routine + mov x0, #0 + +RETURN + ; pop registers from stack + ldp d14, d15, [sp], #16 + ldp d12, d13, [sp], #16 + ldp d10, d11, [sp], #16 + ldp d8, d9, [sp], #16 + ldp x29, x30, [sp], #16 + ldp x27, x28, [sp], #16 + ldp x25, x26, [sp], #16 + ldp x23, x24, [sp], #16 + ldp x21, x22, [sp], #16 + ldp x19, x20, [sp], #16 + + ret + + END diff --git a/venv/Lib/site-packages/greenlet/platform/switch_arm64_masm.obj b/venv/Lib/site-packages/greenlet/platform/switch_arm64_masm.obj new file mode 100644 index 00000000..f6f220e4 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/platform/switch_arm64_masm.obj differ diff --git a/venv/Lib/site-packages/greenlet/platform/switch_arm64_msvc.h b/venv/Lib/site-packages/greenlet/platform/switch_arm64_msvc.h new file mode 100644 index 00000000..7ab7f45b --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_arm64_msvc.h @@ -0,0 +1,17 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 21-Oct-21 Niyas Sait + * First version to enable win/arm64 support. + */ + +#define STACK_REFPLUS 1 +#define STACK_MAGIC 0 + +/* Use the generic support for an external assembly language slp_switch function. */ +#define EXTERNAL_ASM + +#ifdef SLP_EVAL +/* This always uses the external masm assembly file. 
*/ +#endif \ No newline at end of file diff --git a/venv/Lib/site-packages/greenlet/platform/switch_csky_gcc.h b/venv/Lib/site-packages/greenlet/platform/switch_csky_gcc.h new file mode 100644 index 00000000..ac469d3a --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_csky_gcc.h @@ -0,0 +1,48 @@ +#ifdef SLP_EVAL +#define STACK_MAGIC 0 +#define REG_FP "r8" +#ifdef __CSKYABIV2__ +#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r7", "r9", "r10", "r11", "r15",\ + "r16", "r17", "r18", "r19", "r20", "r21", "r22",\ + "r23", "r24", "r25" + +#if defined (__CSKY_HARD_FLOAT__) || (__CSKY_VDSP__) +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "vr8", "vr9", "vr10", "vr11", "vr12",\ + "vr13", "vr14", "vr15" +#else +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL +#endif +#else +#define REGS_TO_SAVE "r9", "r10", "r11", "r12", "r13", "r15" +#endif + + +static int +#ifdef __GNUC__ +__attribute__((optimize("no-omit-frame-pointer"))) +#endif +slp_switch(void) +{ + int *stackref, stsizediff; + int result; + + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ ("mov %0, sp" : "=r" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "addu sp,%0\n" + "addu "REG_FP",%0\n" + : + : "r" (stsizediff) + ); + + SLP_RESTORE_STATE(); + } + __asm__ volatile ("movi %0, 0" : "=r" (result)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + + return result; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_loongarch64_linux.h b/venv/Lib/site-packages/greenlet/platform/switch_loongarch64_linux.h new file mode 100644 index 00000000..9eaf34ef --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_loongarch64_linux.h @@ -0,0 +1,31 @@ +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "s0", "s1", "s2", "s3", "s4", "s5", \ + "s6", "s7", "s8", "fp", \ + "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" + +static int +slp_switch(void) +{ + int ret; + long *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("move %0, $sp" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "add.d $sp, $sp, %0\n\t" + : /* no outputs */ + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("move %0, $zero" : "=r" (ret) : ); + return ret; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_m68k_gcc.h b/venv/Lib/site-packages/greenlet/platform/switch_m68k_gcc.h new file mode 100644 index 00000000..da761c2d --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_m68k_gcc.h @@ -0,0 +1,38 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 2014-01-06 Andreas Schwab + * File created. 
+ */ + +#ifdef SLP_EVAL + +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "%d2", "%d3", "%d4", "%d5", "%d6", "%d7", \ + "%a2", "%a3", "%a4" + +static int +slp_switch(void) +{ + int err; + int *stackref, stsizediff; + void *fp, *a5; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("move.l %%fp, %0" : "=m"(fp)); + __asm__ volatile ("move.l %%a5, %0" : "=m"(a5)); + __asm__ ("move.l %%sp, %0" : "=r"(stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ("add.l %0, %%sp; add.l %0, %%fp" : : "r"(stsizediff)); + SLP_RESTORE_STATE(); + __asm__ volatile ("clr.l %0" : "=g" (err)); + } + __asm__ volatile ("move.l %0, %%a5" : : "m"(a5)); + __asm__ volatile ("move.l %0, %%fp" : : "m"(fp)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + return err; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_mips_unix.h b/venv/Lib/site-packages/greenlet/platform/switch_mips_unix.h new file mode 100644 index 00000000..b9003e94 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_mips_unix.h @@ -0,0 +1,64 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 20-Sep-14 Matt Madison + * Re-code the saving of the gp register for MIPS64. + * 05-Jan-08 Thiemo Seufer + * Ported from ppc. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "$16", "$17", "$18", "$19", "$20", "$21", "$22", \ + "$23", "$30" +static int +slp_switch(void) +{ + int err; + int *stackref, stsizediff; +#ifdef __mips64 + uint64_t gpsave; +#endif + __asm__ __volatile__ ("" : : : REGS_TO_SAVE); +#ifdef __mips64 + __asm__ __volatile__ ("sd $28,%0" : "=m" (gpsave) : : ); +#endif + __asm__ ("move %0, $29" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ __volatile__ ( +#ifdef __mips64 + "daddu $29, %0\n" +#else + "addu $29, %0\n" +#endif + : /* no outputs */ + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } +#ifdef __mips64 + __asm__ __volatile__ ("ld $28,%0" : : "m" (gpsave) : ); +#endif + __asm__ __volatile__ ("" : : : REGS_TO_SAVE); + __asm__ __volatile__ ("move %0, $0" : "=r" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/venv/Lib/site-packages/greenlet/platform/switch_ppc64_aix.h b/venv/Lib/site-packages/greenlet/platform/switch_ppc64_aix.h new file mode 100644 index 00000000..e7e0b877 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_ppc64_aix.h @@ -0,0 +1,103 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 16-Oct-20 Jesse Gorzinski + * Copied from Linux PPC64 implementation + * 04-Sep-18 Alexey Borzenkov + * Workaround a gcc bug using manual save/restore of r30 + * 21-Mar-18 Tulio Magno Quites Machado Filho + * Added r30 to the list of saved registers in order to fully comply with + * both ppc64 ELFv1 ABI and the ppc64le ELFv2 ABI, that classify this + * register as a nonvolatile register used for local variables. + * 21-Mar-18 Laszlo Boszormenyi + * Save r2 (TOC pointer) manually. + * 10-Dec-13 Ulrich Weigand + * Support ELFv2 ABI. Save float/vector registers. + * 09-Mar-12 Michael Ellerman + * 64-bit implementation, copied from 32-bit. + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! 
+ * It means that this file can no longer be compiled statically! + * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 04-Oct-02 Gustavo Niemeyer + * Ported from MacOS version. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. + * This seems to be now fully functional! + * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + * 31-Jul-12 Trevor Bowen + * Changed memory constraints to register only. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 6 + +#if defined(__ALTIVEC__) +#define ALTIVEC_REGS \ + "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", \ + "v28", "v29", "v30", "v31", +#else +#define ALTIVEC_REGS +#endif + +#define REGS_TO_SAVE "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "r31", \ + "fr14", "fr15", "fr16", "fr17", "fr18", "fr19", "fr20", "fr21", \ + "fr22", "fr23", "fr24", "fr25", "fr26", "fr27", "fr28", "fr29", \ + "fr30", "fr31", \ + ALTIVEC_REGS \ + "cr2", "cr3", "cr4" + +static int +slp_switch(void) +{ + int err; + long *stackref, stsizediff; + void * toc; + void * r30; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("std 2, %0" : "=m" (toc)); + __asm__ volatile ("std 30, %0" : "=m" (r30)); + __asm__ ("mr %0, 1" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "mr 11, %0\n" + "add 1, 1, 11\n" + : /* no outputs */ + : "r" (stsizediff) + : "11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("ld 30, %0" : : "m" (r30)); + __asm__ volatile ("ld 2, %0" : : "m" (toc)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_ppc64_linux.h b/venv/Lib/site-packages/greenlet/platform/switch_ppc64_linux.h new file mode 100644 index 00000000..3c324d00 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_ppc64_linux.h @@ -0,0 +1,105 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 04-Sep-18 Alexey Borzenkov + * Workaround a gcc bug using manual save/restore of r30 + * 21-Mar-18 Tulio Magno Quites Machado Filho + * Added r30 to the list of saved registers in order to fully comply with + * both ppc64 ELFv1 ABI and the ppc64le ELFv2 ABI, that classify this + * register as a nonvolatile register used for local variables. + * 21-Mar-18 Laszlo Boszormenyi + * Save r2 (TOC pointer) manually. + * 10-Dec-13 Ulrich Weigand + * Support ELFv2 ABI. Save float/vector registers. + * 09-Mar-12 Michael Ellerman + * 64-bit implementation, copied from 32-bit. + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! + * It means that this file can no longer be compiled statically! 
+ * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 04-Oct-02 Gustavo Niemeyer + * Ported from MacOS version. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. + * This seems to be now fully functional! + * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + * 31-Jul-12 Trevor Bowen + * Changed memory constraints to register only. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#if _CALL_ELF == 2 +#define STACK_MAGIC 4 +#else +#define STACK_MAGIC 6 +#endif + +#if defined(__ALTIVEC__) +#define ALTIVEC_REGS \ + "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", \ + "v28", "v29", "v30", "v31", +#else +#define ALTIVEC_REGS +#endif + +#define REGS_TO_SAVE "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "r31", \ + "fr14", "fr15", "fr16", "fr17", "fr18", "fr19", "fr20", "fr21", \ + "fr22", "fr23", "fr24", "fr25", "fr26", "fr27", "fr28", "fr29", \ + "fr30", "fr31", \ + ALTIVEC_REGS \ + "cr2", "cr3", "cr4" + +static int +slp_switch(void) +{ + int err; + long *stackref, stsizediff; + void * toc; + void * r30; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("std 2, %0" : "=m" (toc)); + __asm__ volatile ("std 30, %0" : "=m" (r30)); + __asm__ ("mr %0, 1" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "mr 11, %0\n" + "add 1, 1, 11\n" + : /* no outputs */ + : "r" (stsizediff) + : "11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("ld 30, %0" : : "m" (r30)); + __asm__ volatile ("ld 2, %0" : : "m" (toc)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_ppc_aix.h b/venv/Lib/site-packages/greenlet/platform/switch_ppc_aix.h new file mode 100644 index 00000000..6d93c132 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_ppc_aix.h @@ -0,0 +1,87 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 07-Mar-11 Floris Bruynooghe + * Do not add stsizediff to general purpose + * register (GPR) 30 as this is a non-volatile and + * unused by the PowerOpen Environment, therefore + * this was modifying a user register instead of the + * frame pointer (which does not seem to exist). + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! + * It means that this file can no longer be compiled statically! + * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? 
+ * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 04-Oct-02 Gustavo Niemeyer + * Ported from MacOS version. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. + * This seems to be now fully functional! + * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 3 + +/* !!!!WARNING!!!! need to add "r31" in the next line if this header file + * is meant to be compiled non-dynamically! + */ +#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "cr2", "cr3", "cr4" +static int +slp_switch(void) +{ + int err; + int *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ ("mr %0, 1" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "mr 11, %0\n" + "add 1, 1, 11\n" + : /* no outputs */ + : "r" (stsizediff) + : "11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/venv/Lib/site-packages/greenlet/platform/switch_ppc_linux.h b/venv/Lib/site-packages/greenlet/platform/switch_ppc_linux.h new file mode 100644 index 00000000..e83ad70a --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_ppc_linux.h @@ -0,0 +1,84 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! + * It means that this file can no longer be compiled statically! + * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 04-Oct-02 Gustavo Niemeyer + * Ported from MacOS version. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. + * This seems to be now fully functional! + * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + * 31-Jul-12 Trevor Bowen + * Changed memory constraints to register only. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 3 + +/* !!!!WARNING!!!! 
need to add "r31" in the next line if this header file + * is meant to be compiled non-dynamically! + */ +#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "cr2", "cr3", "cr4" +static int +slp_switch(void) +{ + int err; + int *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ ("mr %0, 1" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "mr 11, %0\n" + "add 1, 1, 11\n" + "add 30, 30, 11\n" + : /* no outputs */ + : "r" (stsizediff) + : "11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/venv/Lib/site-packages/greenlet/platform/switch_ppc_macosx.h b/venv/Lib/site-packages/greenlet/platform/switch_ppc_macosx.h new file mode 100644 index 00000000..bd414c68 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_ppc_macosx.h @@ -0,0 +1,82 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! + * It means that this file can no longer be compiled statically! + * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. + * This seems to be now fully functional! + * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 3 + +/* !!!!WARNING!!!! need to add "r31" in the next line if this header file + * is meant to be compiled non-dynamically! + */ +#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "cr2", "cr3", "cr4" + +static int +slp_switch(void) +{ + int err; + int *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ ("; asm block 2\n\tmr %0, r1" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "; asm block 3\n" + "\tmr r11, %0\n" + "\tadd r1, r1, r11\n" + "\tadd r30, r30, r11\n" + : /* no outputs */ + : "r" (stsizediff) + : "r11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. 
+ * These features are highly experimental and not
+ * essential yet.
+ */
diff --git a/venv/Lib/site-packages/greenlet/platform/switch_ppc_unix.h b/venv/Lib/site-packages/greenlet/platform/switch_ppc_unix.h
new file mode 100644
index 00000000..bb188080
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/platform/switch_ppc_unix.h
@@ -0,0 +1,82 @@
+/*
+ * this is the internal transfer function.
+ *
+ * HISTORY
+ * 07-Sep-05 (py-dev mailing list discussion)
+ * removed 'r31' from the saved registers. !!!! WARNING !!!!
+ * It means that this file can no longer be compiled statically!
+ * It is now only suitable as part of a dynamic library!
+ * 14-Jan-04 Bob Ippolito
+ * added cr2-cr4 to the registers to be saved.
+ * Open questions: Should we save FP registers?
+ * What about vector registers?
+ * Differences between darwin and unix?
+ * 24-Nov-02 Christian Tismer
+ * needed to add another magic constant to ensure
+ * that f in slp_eval_frame(PyFrameObject *f)
+ * gets included into the saved stack area.
+ * STACK_REFPLUS will probably be 1 in most cases.
+ * 04-Oct-02 Gustavo Niemeyer
+ * Ported from MacOS version.
+ * 17-Sep-02 Christian Tismer
+ * after virtualizing stack save/restore, the
+ * stack size shrunk a bit. Needed to introduce
+ * an adjustment STACK_MAGIC per platform.
+ * 15-Sep-02 Gerd Woetzel
+ * slightly changed framework for sparc
+ * 29-Jun-02 Christian Tismer
+ * Added register 13-29, 31 saves. The same way as
+ * Armin Rigo did for the x86_unix version.
+ * This seems to be now fully functional!
+ * 04-Mar-02 Hye-Shik Chang
+ * Ported from i386.
+ */
+
+#define STACK_REFPLUS 1
+
+#ifdef SLP_EVAL
+
+#define STACK_MAGIC 3
+
+/* !!!!WARNING!!!! need to add "r31" in the next line if this header file
+ * is meant to be compiled non-dynamically!
+ */
+#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \
+ "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \
+ "cr2", "cr3", "cr4"
+static int
+slp_switch(void)
+{
+    int err;
+    int *stackref, stsizediff;
+    __asm__ volatile ("" : : : REGS_TO_SAVE);
+    __asm__ ("mr %0, 1" : "=r" (stackref) : );
+    {
+        SLP_SAVE_STATE(stackref, stsizediff);
+        __asm__ volatile (
+            "mr 11, %0\n"
+            "add 1, 1, 11\n"
+            "add 30, 30, 11\n"
+            : /* no outputs */
+            : "r" (stsizediff)
+            : "11"
+        );
+        SLP_RESTORE_STATE();
+    }
+    __asm__ volatile ("" : : : REGS_TO_SAVE);
+    __asm__ volatile ("li %0, 0" : "=r" (err));
+    return err;
+}
+
+#endif
+
+/*
+ * further self-processing support
+ */
+
+/*
+ * if you want to add self-inspection tools, place them
+ * here. See the x86_msvc for the necessary defines.
+ * These features are highly experimental and not
+ * essential yet.
+ */ diff --git a/venv/Lib/site-packages/greenlet/platform/switch_riscv_unix.h b/venv/Lib/site-packages/greenlet/platform/switch_riscv_unix.h new file mode 100644 index 00000000..87611222 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_riscv_unix.h @@ -0,0 +1,41 @@ +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "s1", "s2", "s3", "s4", "s5", \ + "s6", "s7", "s8", "s9", "s10", "s11", "fs0", "fs1", \ + "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", "fs8", "fs9", \ + "fs10", "fs11" + +static int +slp_switch(void) +{ + int ret; + long fp; + long *stackref, stsizediff; + + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("mv %0, fp" : "=r" (fp) : ); + __asm__ volatile ("mv %0, sp" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "add sp, sp, %0\n\t" + "add fp, fp, %0\n\t" + : /* no outputs */ + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); +#if __riscv_xlen == 32 + __asm__ volatile ("lw fp, %0" : : "m" (fp)); +#else + __asm__ volatile ("ld fp, %0" : : "m" (fp)); +#endif + __asm__ volatile ("mv %0, zero" : "=r" (ret) : ); + return ret; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_s390_unix.h b/venv/Lib/site-packages/greenlet/platform/switch_s390_unix.h new file mode 100644 index 00000000..9199367f --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_s390_unix.h @@ -0,0 +1,87 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 25-Jan-12 Alexey Borzenkov + * Fixed Linux/S390 port to work correctly with + * different optimization options both on 31-bit + * and 64-bit. Thanks to Stefan Raabe for lots + * of testing. + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 06-Oct-02 Gustavo Niemeyer + * Ported to Linux/S390. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#ifdef __s390x__ +#define STACK_MAGIC 20 /* 20 * 8 = 160 bytes of function call area */ +#else +#define STACK_MAGIC 24 /* 24 * 4 = 96 bytes of function call area */ +#endif + +/* Technically, r11-r13 also need saving, but function prolog starts + with stm(g) and since there are so many saved registers already + it won't be optimized, resulting in all r6-r15 being saved */ +#define REGS_TO_SAVE "r6", "r7", "r8", "r9", "r10", "r14", \ + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \ + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15" + +static int +slp_switch(void) +{ + int ret; + long *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); +#ifdef __s390x__ + __asm__ volatile ("lgr %0, 15" : "=r" (stackref) : ); +#else + __asm__ volatile ("lr %0, 15" : "=r" (stackref) : ); +#endif + { + SLP_SAVE_STATE(stackref, stsizediff); +/* N.B. + r11 may be used as the frame pointer, and in that case it cannot be + clobbered and needs offsetting just like the stack pointer (but in cases + where frame pointer isn't used we might clobber it accidentally). What's + scary is that r11 is 2nd (and even 1st when GOT is used) callee saved + register that gcc would chose for surviving function calls. However, + since r6-r10 are clobbered above, their cost for reuse is reduced, so + gcc IRA will chose them over r11 (not seeing r11 is implicitly saved), + making it relatively safe to offset in all cases. 
:) */ + __asm__ volatile ( +#ifdef __s390x__ + "agr 15, %0\n\t" + "agr 11, %0" +#else + "ar 15, %0\n\t" + "ar 11, %0" +#endif + : /* no outputs */ + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("lhi %0, 0" : "=r" (ret) : ); + return ret; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/venv/Lib/site-packages/greenlet/platform/switch_sh_gcc.h b/venv/Lib/site-packages/greenlet/platform/switch_sh_gcc.h new file mode 100644 index 00000000..5ecc3b39 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_sh_gcc.h @@ -0,0 +1,36 @@ +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL +#define STACK_MAGIC 0 +#define REGS_TO_SAVE "r8", "r9", "r10", "r11", "r13", \ + "fr12", "fr13", "fr14", "fr15" + +// r12 Global context pointer, GP +// r14 Frame pointer, FP +// r15 Stack pointer, SP + +static int +slp_switch(void) +{ + int err; + void* fp; + int *stackref, stsizediff; + __asm__ volatile("" : : : REGS_TO_SAVE); + __asm__ volatile("mov.l r14, %0" : "=m"(fp) : :); + __asm__("mov r15, %0" : "=r"(stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile( + "add %0, r15\n" + "add %0, r14\n" + : /* no outputs */ + : "r"(stsizediff)); + SLP_RESTORE_STATE(); + __asm__ volatile("mov r0, %0" : "=r"(err) : :); + } + __asm__ volatile("mov.l %0, r14" : : "m"(fp) :); + __asm__ volatile("" : : : REGS_TO_SAVE); + return err; +} + +#endif diff --git a/venv/Lib/site-packages/greenlet/platform/switch_sparc_sun_gcc.h b/venv/Lib/site-packages/greenlet/platform/switch_sparc_sun_gcc.h new file mode 100644 index 00000000..96990c39 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_sparc_sun_gcc.h @@ -0,0 +1,92 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 16-May-15 Alexey Borzenkov + * Move stack spilling code inside save/restore functions + * 30-Aug-13 Floris Bruynooghe + Clean the register windows again before returning. + This does not clobber the PIC register as it leaves + the current window intact and is required for multi- + threaded code to work correctly. + * 08-Mar-11 Floris Bruynooghe + * No need to set return value register explicitly + * before the stack and framepointer are adjusted + * as none of the other registers are influenced by + * this. Also don't needlessly clean the windows + * ('ta %0" :: "i" (ST_CLEAN_WINDOWS)') as that + * clobbers the gcc PIC register (%l7). + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. 
+ * 15-Sep-02 Gerd Woetzel + * added support for SunOS sparc with gcc + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + + +#define STACK_MAGIC 0 + + +#if defined(__sparcv9) +#define SLP_FLUSHW __asm__ volatile ("flushw") +#else +#define SLP_FLUSHW __asm__ volatile ("ta 3") /* ST_FLUSH_WINDOWS */ +#endif + +/* On sparc we need to spill register windows inside save/restore functions */ +#define SLP_BEFORE_SAVE_STATE() SLP_FLUSHW +#define SLP_BEFORE_RESTORE_STATE() SLP_FLUSHW + + +static int +slp_switch(void) +{ + int err; + int *stackref, stsizediff; + + /* Put current stack pointer into stackref. + * Register spilling is done in save/restore. + */ + __asm__ volatile ("mov %%sp, %0" : "=r" (stackref)); + + { + /* Thou shalt put SLP_SAVE_STATE into a local block */ + /* Copy the current stack onto the heap */ + SLP_SAVE_STATE(stackref, stsizediff); + + /* Increment stack and frame pointer by stsizediff */ + __asm__ volatile ( + "add %0, %%sp, %%sp\n\t" + "add %0, %%fp, %%fp" + : : "r" (stsizediff)); + + /* Copy new stack from it's save store on the heap */ + SLP_RESTORE_STATE(); + + __asm__ volatile ("mov %1, %0" : "=r" (err) : "i" (0)); + return err; + } +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/venv/Lib/site-packages/greenlet/platform/switch_x32_unix.h b/venv/Lib/site-packages/greenlet/platform/switch_x32_unix.h new file mode 100644 index 00000000..893369c7 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_x32_unix.h @@ -0,0 +1,63 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 17-Aug-12 Fantix King + * Ported from amd64. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "r12", "r13", "r14", "r15" + + +static int +slp_switch(void) +{ + void* ebp; + void* ebx; + unsigned int csr; + unsigned short cw; + int err; + int *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("fstcw %0" : "=m" (cw)); + __asm__ volatile ("stmxcsr %0" : "=m" (csr)); + __asm__ volatile ("movl %%ebp, %0" : "=m" (ebp)); + __asm__ volatile ("movl %%ebx, %0" : "=m" (ebx)); + __asm__ ("movl %%esp, %0" : "=g" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "addl %0, %%esp\n" + "addl %0, %%ebp\n" + : + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("movl %0, %%ebx" : : "m" (ebx)); + __asm__ volatile ("movl %0, %%ebp" : : "m" (ebp)); + __asm__ volatile ("ldmxcsr %0" : : "m" (csr)); + __asm__ volatile ("fldcw %0" : : "m" (cw)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("xorl %%eax, %%eax" : "=a" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. 
+ */ diff --git a/venv/Lib/site-packages/greenlet/platform/switch_x64_masm.asm b/venv/Lib/site-packages/greenlet/platform/switch_x64_masm.asm new file mode 100644 index 00000000..f5c72a27 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_x64_masm.asm @@ -0,0 +1,111 @@ +; +; stack switching code for MASM on x641 +; Kristjan Valur Jonsson, sept 2005 +; + + +;prototypes for our calls +slp_save_state_asm PROTO +slp_restore_state_asm PROTO + + +pushxmm MACRO reg + sub rsp, 16 + .allocstack 16 + movaps [rsp], reg ; faster than movups, but we must be aligned + ; .savexmm128 reg, offset (don't know what offset is, no documentation) +ENDM +popxmm MACRO reg + movaps reg, [rsp] ; faster than movups, but we must be aligned + add rsp, 16 +ENDM + +pushreg MACRO reg + push reg + .pushreg reg +ENDM +popreg MACRO reg + pop reg +ENDM + + +.code +slp_switch PROC FRAME + ;realign stack to 16 bytes after return address push, makes the following faster + sub rsp,8 + .allocstack 8 + + pushxmm xmm15 + pushxmm xmm14 + pushxmm xmm13 + pushxmm xmm12 + pushxmm xmm11 + pushxmm xmm10 + pushxmm xmm9 + pushxmm xmm8 + pushxmm xmm7 + pushxmm xmm6 + + pushreg r15 + pushreg r14 + pushreg r13 + pushreg r12 + + pushreg rbp + pushreg rbx + pushreg rdi + pushreg rsi + + sub rsp, 10h ;allocate the singlefunction argument (must be multiple of 16) + .allocstack 10h +.endprolog + + lea rcx, [rsp+10h] ;load stack base that we are saving + call slp_save_state_asm ;pass stackpointer, return offset in eax + cmp rax, 1 + je EXIT1 + cmp rax, -1 + je EXIT2 + ;actual stack switch: + add rsp, rax + call slp_restore_state_asm + xor rax, rax ;return 0 + +EXIT: + + add rsp, 10h + popreg rsi + popreg rdi + popreg rbx + popreg rbp + + popreg r12 + popreg r13 + popreg r14 + popreg r15 + + popxmm xmm6 + popxmm xmm7 + popxmm xmm8 + popxmm xmm9 + popxmm xmm10 + popxmm xmm11 + popxmm xmm12 + popxmm xmm13 + popxmm xmm14 + popxmm xmm15 + + add rsp, 8 + ret + +EXIT1: + mov rax, 1 + jmp EXIT + +EXIT2: + sar rax, 1 + jmp EXIT + +slp_switch ENDP + +END \ No newline at end of file diff --git a/venv/Lib/site-packages/greenlet/platform/switch_x64_masm.obj b/venv/Lib/site-packages/greenlet/platform/switch_x64_masm.obj new file mode 100644 index 00000000..64e3e6b8 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/platform/switch_x64_masm.obj differ diff --git a/venv/Lib/site-packages/greenlet/platform/switch_x64_msvc.h b/venv/Lib/site-packages/greenlet/platform/switch_x64_msvc.h new file mode 100644 index 00000000..601ea560 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_x64_msvc.h @@ -0,0 +1,60 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 26-Sep-02 Christian Tismer + * again as a result of virtualized stack access, + * the compiler used less registers. Needed to + * explicit mention registers in order to get them saved. + * Thanks to Jeff Senn for pointing this out and help. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 01-Mar-02 Christian Tismer + * Initial final version after lots of iterations for i386. 
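+ */
+
+/*
+ * Editor's note (a sketch, not text from the original header): the MASM
+ * file above provides slp_switch and, per its PROTO declarations, expects
+ * greenlet's generic EXTERNAL_ASM support to supply two helper routines.
+ * Their assumed C-side shape is roughly:
+ */
+#if 0 /* illustrative only, never compiled */
+intptr_t slp_save_state_asm(intptr_t *ref); /* returns the offset the asm adds to rsp */
+void slp_restore_state_asm(void);
+int slp_switch(void);                       /* implemented in switch_x64_masm.asm */
+#endif
+/*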
+ */ + +/* Avoid alloca redefined warning on mingw64 */ +#ifndef alloca +#define alloca _alloca +#endif + +#define STACK_REFPLUS 1 +#define STACK_MAGIC 0 + +/* Use the generic support for an external assembly language slp_switch function. */ +#define EXTERNAL_ASM + +#ifdef SLP_EVAL +/* This always uses the external masm assembly file. */ +#endif + +/* + * further self-processing support + */ + +/* we have IsBadReadPtr available, so we can peek at objects */ +/* +#define STACKLESS_SPY + +#ifdef IMPLEMENT_STACKLESSMODULE +#include "Windows.h" +#define CANNOT_READ_MEM(p, bytes) IsBadReadPtr(p, bytes) + +static int IS_ON_STACK(void*p) +{ + int stackref; + intptr_t stackbase = ((intptr_t)&stackref) & 0xfffff000; + return (intptr_t)p >= stackbase && (intptr_t)p < stackbase + 0x00100000; +} + +#endif +*/ \ No newline at end of file diff --git a/venv/Lib/site-packages/greenlet/platform/switch_x86_msvc.h b/venv/Lib/site-packages/greenlet/platform/switch_x86_msvc.h new file mode 100644 index 00000000..0f3a59f5 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/platform/switch_x86_msvc.h @@ -0,0 +1,326 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 26-Sep-02 Christian Tismer + * again as a result of virtualized stack access, + * the compiler used less registers. Needed to + * explicit mention registers in order to get them saved. + * Thanks to Jeff Senn for pointing this out and help. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 01-Mar-02 Christian Tismer + * Initial final version after lots of iterations for i386. + */ + +#define alloca _alloca + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 0 + +/* Some magic to quell warnings and keep slp_switch() from crashing when built + with VC90. Disable global optimizations, and the warning: frame pointer + register 'ebp' modified by inline assembly code. + + We used to just disable global optimizations ("g") but upstream stackless + Python, as well as stackman, turn off all optimizations. + +References: +https://github.com/stackless-dev/stackman/blob/dbc72fe5207a2055e658c819fdeab9731dee78b9/stackman/platforms/switch_x86_msvc.h +https://github.com/stackless-dev/stackless/blob/main-slp/Stackless/platf/switch_x86_msvc.h +*/ +#define WIN32_LEAN_AND_MEAN +#include + +#pragma optimize("", off) /* so that autos are stored on the stack */ +#pragma warning(disable:4731) +#pragma warning(disable:4733) /* disable warning about modifying FS[0] */ + +/** + * Most modern compilers and environments handle C++ exceptions without any + * special help from us. MSVC on 32-bit windows is an exception. There, C++ + * exceptions are dealt with using Windows' Structured Exception Handling + * (SEH). + * + * SEH is implemented as a singly linked list of nodes. The + * head of this list is stored in the Thread Information Block, which itself + * is pointed to from the FS register. It's the first field in the structure, + * or offset 0, so we can access it using assembly FS:[0], or the compiler + * intrinsics and field offset information from the headers (as we do below). 
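+ *
+ * As an editor's illustration (an assumption, not part of the original
+ * header), reading the head of that list through the TIB uses the same
+ * intrinsics this file relies on below:
+ */
+#if 0 /* illustrative only, never compiled */
+#include <windows.h>
+static void*
+seh_chain_head(void)
+{
+    /* NT_TIB.ExceptionList is the first TIB field, i.e. FS:[0] */
+    return (void*)__readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList));
+}
+#endif
+/*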
+ * Somewhat unusually, the tail of the list doesn't have prev == NULL, it has
+ * prev == 0xFFFFFFFF.
+ *
+ * SEH was designed for C, and traditionally uses the MSVC compiler
+ * intrinsics __try{}/__except{}. It is also utilized for C++ exceptions by
+ * MSVC; there, every throw of a C++ exception raises a SEH error with the
+ * ExceptionCode 0xE06D7363; the SEH handler list is then traversed to
+ * deal with the exception.
+ *
+ * If the SEH list is corrupt, then when a C++ exception is thrown the program
+ * will abruptly exit with exit code 1. This does not use std::terminate(), so
+ * std::set_terminate() is useless to debug this.
+ *
+ * The SEH list is closely tied to the call stack; entering a function that
+ * uses __try{} or most C++ functions will push a new handler onto the front
+ * of the list. Returning from the function will remove the handler. Saving
+ * and restoring the head node of the SEH list (FS:[0]) per-greenlet is NOT
+ * ENOUGH to make SEH or exceptions work.
+ *
+ * Stack switching breaks SEH because the call stack no longer necessarily
+ * matches the SEH list. For example, given greenlet A that switches to
+ * greenlet B, at the moment of entering greenlet B, we will have any SEH
+ * handlers from greenlet A on the SEH list; greenlet B can then add its own
+ * handlers to the SEH list. When greenlet B switches back to greenlet A,
+ * greenlet B's handlers would still be on the SEH stack, but when switch()
+ * returns control to greenlet A, we have replaced the contents of the stack
+ * in memory, so all the addresses that greenlet B added to the SEH list are
+ * now invalid: part of the call stack has been unwound, but the SEH list was
+ * out of sync with the call stack. The net effect is that exception handling
+ * stops working.
+ *
+ * Thus, when switching greenlets, we need to be sure that the SEH list
+ * matches the effective call stack, "cutting out" any handlers that were
+ * pushed by the greenlet that switched out and which are no longer valid.
+ *
+ * The easiest way to do this is to capture the SEH list at the time the main
+ * greenlet for a thread is created, and, when initially starting a greenlet,
+ * start a new SEH list for it, which contains nothing but the handler
+ * established for the new greenlet itself, with the tail being the handlers
+ * for the main greenlet. If we then save and restore the SEH state
+ * per-greenlet, they won't interfere with each other's SEH lists. (No
+ * greenlet can unwind the call stack past the handlers established by the
+ * main greenlet.)
+ *
+ * By observation, a new thread starts with three SEH handlers on the list. By
+ * the time we get around to creating the main greenlet, though, there can be
+ * many more, established by transient calls that lead to the creation of the
+ * main greenlet. Therefore, 3 is a magic constant telling us when to perform
+ * the initial slice.
+ *
+ * All of this can be debugged using a vectored exception handler, which
+ * operates independently of the SEH handler list, and is called first.
+ * Walking the SEH list at key points can also be helpful.
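+ *
+ * A minimal sketch of such a hook (editor's illustration; the fuller
+ * GreenletVectorHandler in the debug section below is what this file
+ * actually ships):
+ */
+#if 0 /* illustrative only, never compiled */
+static LONG WINAPI
+DebugVectorHandler(PEXCEPTION_POINTERS info)
+{
+    /* runs before any SEH frame handler; observe, then keep searching */
+    return EXCEPTION_CONTINUE_SEARCH;
+}
+/* installed via AddVectoredExceptionHandler(1, DebugVectorHandler); */
+#endif
+/*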
+ * + * References: + * https://en.wikipedia.org/wiki/Win32_Thread_Information_Block + * https://devblogs.microsoft.com/oldnewthing/20100730-00/?p=13273 + * https://docs.microsoft.com/en-us/cpp/cpp/try-except-statement?view=msvc-160 + * https://docs.microsoft.com/en-us/cpp/cpp/structured-exception-handling-c-cpp?view=msvc-160 + * https://docs.microsoft.com/en-us/windows/win32/debug/structured-exception-handling + * https://docs.microsoft.com/en-us/windows/win32/debug/using-a-vectored-exception-handler + * https://bytepointer.com/resources/pietrek_crash_course_depths_of_win32_seh.htm + */ +#define GREENLET_NEEDS_EXCEPTION_STATE_SAVED + + +typedef struct _GExceptionRegistration { + struct _GExceptionRegistration* prev; + void* handler_f; +} GExceptionRegistration; + +static void +slp_set_exception_state(const void *const seh_state) +{ + // Because the stack from from which we do this is ALSO a handler, and + // that one we want to keep, we need to relink the current SEH handler + // frame to point to this one, cutting out the middle men, as it were. + // + // Entering a try block doesn't change the SEH frame, but entering a + // function containing a try block does. + GExceptionRegistration* current_seh_state = (GExceptionRegistration*)__readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList)); + current_seh_state->prev = (GExceptionRegistration*)seh_state; +} + + +static GExceptionRegistration* +x86_slp_get_third_oldest_handler() +{ + GExceptionRegistration* a = NULL; /* Closest to the top */ + GExceptionRegistration* b = NULL; /* second */ + GExceptionRegistration* c = NULL; + GExceptionRegistration* seh_state = (GExceptionRegistration*)__readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList)); + a = b = c = seh_state; + + while (seh_state && seh_state != (GExceptionRegistration*)0xFFFFFFFF) { + if ((void*)seh_state->prev < (void*)100) { + fprintf(stderr, "\tERROR: Broken SEH chain.\n"); + return NULL; + } + a = b; + b = c; + c = seh_state; + + seh_state = seh_state->prev; + } + return a ? a : (b ? b : c); +} + + +static void* +slp_get_exception_state() +{ + // XXX: There appear to be three SEH handlers on the stack already at the + // start of the thread. Is that a guarantee? Almost certainly not. Yet in + // all observed cases it has been three. This is consistent with + // faulthandler off or on, and optimizations off or on. It may not be + // consistent with other operating system versions, though: we only have + // CI on one or two versions (don't ask what there are). + // In theory we could capture the number of handlers on the chain when + // PyInit__greenlet is called: there are probably only the default + // handlers at that point (unless we're embedded and people have used + // __try/__except or a C++ handler)? + return x86_slp_get_third_oldest_handler(); +} + +static int +slp_switch(void) +{ + /* MASM syntax is typically reversed from other assemblers. + It is usually + */ + int *stackref, stsizediff; + /* store the structured exception state for this stack */ + DWORD seh_state = __readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList)); + __asm mov stackref, esp; + /* modify EBX, ESI and EDI in order to get them preserved */ + __asm mov ebx, ebx; + __asm xchg esi, edi; + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm { + mov eax, stsizediff + add esp, eax + add ebp, eax + } + SLP_RESTORE_STATE(); + } + __writefsdword(FIELD_OFFSET(NT_TIB, ExceptionList), seh_state); + return 0; +} + +/* re-enable ebp warning and global optimizations. 
*/ +#pragma optimize("", on) +#pragma warning(default:4731) +#pragma warning(default:4733) /* disable warning about modifying FS[0] */ + + +#endif + +/* + * further self-processing support + */ + +/* we have IsBadReadPtr available, so we can peek at objects */ +#define STACKLESS_SPY + +#ifdef GREENLET_DEBUG + +#define CANNOT_READ_MEM(p, bytes) IsBadReadPtr(p, bytes) + +static int IS_ON_STACK(void*p) +{ + int stackref; + int stackbase = ((int)&stackref) & 0xfffff000; + return (int)p >= stackbase && (int)p < stackbase + 0x00100000; +} + +static void +x86_slp_show_seh_chain() +{ + GExceptionRegistration* seh_state = (GExceptionRegistration*)__readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList)); + fprintf(stderr, "====== SEH Chain ======\n"); + while (seh_state && seh_state != (GExceptionRegistration*)0xFFFFFFFF) { + fprintf(stderr, "\tSEH_chain addr: %p handler: %p prev: %p\n", + seh_state, + seh_state->handler_f, seh_state->prev); + if ((void*)seh_state->prev < (void*)100) { + fprintf(stderr, "\tERROR: Broken chain.\n"); + break; + } + seh_state = seh_state->prev; + } + fprintf(stderr, "====== End SEH Chain ======\n"); + fflush(NULL); + return; +} + +//addVectoredExceptionHandler constants: +//CALL_FIRST means call this exception handler first; +//CALL_LAST means call this exception handler last +#define CALL_FIRST 1 +#define CALL_LAST 0 + +LONG WINAPI +GreenletVectorHandler(PEXCEPTION_POINTERS ExceptionInfo) +{ + // We get one of these for every C++ exception, with code + // E06D7363 + // This is a special value that means "C++ exception from MSVC" + // https://devblogs.microsoft.com/oldnewthing/20100730-00/?p=13273 + // + // Install in the module init function with: + // AddVectoredExceptionHandler(CALL_FIRST, GreenletVectorHandler); + PEXCEPTION_RECORD ExceptionRecord = ExceptionInfo->ExceptionRecord; + + fprintf(stderr, + "GOT VECTORED EXCEPTION:\n" + "\tExceptionCode : %p\n" + "\tExceptionFlags : %p\n" + "\tExceptionAddr : %p\n" + "\tNumberparams : %ld\n", + ExceptionRecord->ExceptionCode, + ExceptionRecord->ExceptionFlags, + ExceptionRecord->ExceptionAddress, + ExceptionRecord->NumberParameters + ); + if (ExceptionRecord->ExceptionFlags & 1) { + fprintf(stderr, "\t\tEH_NONCONTINUABLE\n" ); + } + if (ExceptionRecord->ExceptionFlags & 2) { + fprintf(stderr, "\t\tEH_UNWINDING\n" ); + } + if (ExceptionRecord->ExceptionFlags & 4) { + fprintf(stderr, "\t\tEH_EXIT_UNWIND\n" ); + } + if (ExceptionRecord->ExceptionFlags & 8) { + fprintf(stderr, "\t\tEH_STACK_INVALID\n" ); + } + if (ExceptionRecord->ExceptionFlags & 0x10) { + fprintf(stderr, "\t\tEH_NESTED_CALL\n" ); + } + if (ExceptionRecord->ExceptionFlags & 0x20) { + fprintf(stderr, "\t\tEH_TARGET_UNWIND\n" ); + } + if (ExceptionRecord->ExceptionFlags & 0x40) { + fprintf(stderr, "\t\tEH_COLLIDED_UNWIND\n" ); + } + fprintf(stderr, "\n"); + fflush(NULL); + for(DWORD i = 0; i < ExceptionRecord->NumberParameters; i++) { + fprintf(stderr, "\t\t\tParam %ld: %lX\n", i, ExceptionRecord->ExceptionInformation[i]); + } + + if (ExceptionRecord->NumberParameters == 3) { + fprintf(stderr, "\tAbout to traverse SEH chain\n"); + // C++ Exception records have 3 params. 
+            x86_slp_show_seh_chain();
+    }
+
+    return EXCEPTION_CONTINUE_SEARCH;
+}
+
+
+
+
+#endif
diff --git a/venv/Lib/site-packages/greenlet/platform/switch_x86_unix.h b/venv/Lib/site-packages/greenlet/platform/switch_x86_unix.h
new file mode 100644
index 00000000..493fa6ba
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/platform/switch_x86_unix.h
@@ -0,0 +1,105 @@
+/*
+ * this is the internal transfer function.
+ *
+ * HISTORY
+ * 3-May-13 Ralf Schmitt
+ * Add support for strange GCC caller-save decisions
+ * (ported from switch_aarch64_gcc.h)
+ * 19-Aug-11 Alexey Borzenkov
+ * Correctly save ebp, ebx and cw
+ * 07-Sep-05 (py-dev mailing list discussion)
+ * removed 'ebx' from the saved registers. !!!! WARNING !!!!
+ * It means that this file can no longer be compiled statically!
+ * It is now only suitable as part of a dynamic library!
+ * 24-Nov-02 Christian Tismer
+ * needed to add another magic constant to ensure
+ * that f in slp_eval_frame(PyFrameObject *f)
+ * gets included into the saved stack area.
+ * STACK_REFPLUS will probably be 1 in most cases.
+ * 17-Sep-02 Christian Tismer
+ * after virtualizing stack save/restore, the
+ * stack size shrunk a bit. Needed to introduce
+ * an adjustment STACK_MAGIC per platform.
+ * 15-Sep-02 Gerd Woetzel
+ * slightly changed framework for sparc
+ * 31-Apr-02 Armin Rigo
+ * Added ebx, esi and edi register-saves.
+ * 01-Mar-02 Samual M. Rushing
+ * Ported from i386.
+ */
+
+#define STACK_REFPLUS 1
+
+#ifdef SLP_EVAL
+
+/* #define STACK_MAGIC 3 */
+/* the above works fine with gcc 2.96, but 2.95.3 wants this */
+#define STACK_MAGIC 0
+
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+# define ATTR_NOCLONE __attribute__((noclone))
+#else
+# define ATTR_NOCLONE
+#endif
+
+static int
+slp_switch(void)
+{
+    int err;
+#ifdef _WIN32
+    void *seh;
+#endif
+    void *ebp, *ebx;
+    unsigned short cw;
+    int *stackref, stsizediff;
+    __asm__ volatile ("" : : : "esi", "edi");
+    __asm__ volatile ("fstcw %0" : "=m" (cw));
+    __asm__ volatile ("movl %%ebp, %0" : "=m" (ebp));
+    __asm__ volatile ("movl %%ebx, %0" : "=m" (ebx));
+#ifdef _WIN32
+    __asm__ volatile (
+        "movl %%fs:0x0, %%eax\n"
+        "movl %%eax, %0\n"
+        : "=m" (seh)
+        :
+        : "eax");
+#endif
+    __asm__ ("movl %%esp, %0" : "=g" (stackref));
+    {
+        SLP_SAVE_STATE(stackref, stsizediff);
+        __asm__ volatile (
+            "addl %0, %%esp\n"
+            "addl %0, %%ebp\n"
+            :
+            : "r" (stsizediff)
+            );
+        SLP_RESTORE_STATE();
+        __asm__ volatile ("xorl %%eax, %%eax" : "=a" (err));
+    }
+#ifdef _WIN32
+    __asm__ volatile (
+        "movl %0, %%eax\n"
+        "movl %%eax, %%fs:0x0\n"
+        :
+        : "m" (seh)
+        : "eax");
+#endif
+    __asm__ volatile ("movl %0, %%ebx" : : "m" (ebx));
+    __asm__ volatile ("movl %0, %%ebp" : : "m" (ebp));
+    __asm__ volatile ("fldcw %0" : : "m" (cw));
+    __asm__ volatile ("" : : : "esi", "edi");
+    return err;
+}
+
+#endif
+
+/*
+ * further self-processing support
+ */
+
+/*
+ * if you want to add self-inspection tools, place them
+ * here. See the x86_msvc for the necessary defines.
+ * These features are highly experimental and not
+ * essential yet.
+ */ diff --git a/venv/Lib/site-packages/greenlet/slp_platformselect.h b/venv/Lib/site-packages/greenlet/slp_platformselect.h new file mode 100644 index 00000000..225c67ba --- /dev/null +++ b/venv/Lib/site-packages/greenlet/slp_platformselect.h @@ -0,0 +1,75 @@ +/* + * Platform Selection for Stackless Python + */ +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(MS_WIN32) && !defined(MS_WIN64) && defined(_M_IX86) && defined(_MSC_VER) +# include "platform/switch_x86_msvc.h" /* MS Visual Studio on X86 */ +#elif defined(MS_WIN64) && defined(_M_X64) && defined(_MSC_VER) || defined(__MINGW64__) +# include "platform/switch_x64_msvc.h" /* MS Visual Studio on X64 */ +#elif defined(MS_WIN64) && defined(_M_ARM64) +# include "platform/switch_arm64_msvc.h" /* MS Visual Studio on ARM64 */ +#elif defined(__GNUC__) && defined(__amd64__) && defined(__ILP32__) +# include "platform/switch_x32_unix.h" /* gcc on amd64 with x32 ABI */ +#elif defined(__GNUC__) && defined(__amd64__) +# include "platform/switch_amd64_unix.h" /* gcc on amd64 */ +#elif defined(__GNUC__) && defined(__i386__) +# include "platform/switch_x86_unix.h" /* gcc on X86 */ +#elif defined(__GNUC__) && defined(__powerpc64__) && (defined(__linux__) || defined(__FreeBSD__)) +# include "platform/switch_ppc64_linux.h" /* gcc on PowerPC 64-bit */ +#elif defined(__GNUC__) && defined(__PPC__) && (defined(__linux__) || defined(__FreeBSD__)) +# include "platform/switch_ppc_linux.h" /* gcc on PowerPC */ +#elif defined(__GNUC__) && defined(__POWERPC__) && defined(__APPLE__) +# include "platform/switch_ppc_macosx.h" /* Apple MacOS X on 32-bit PowerPC */ +#elif defined(__GNUC__) && defined(__powerpc64__) && defined(_AIX) +# include "platform/switch_ppc64_aix.h" /* gcc on AIX/PowerPC 64-bit */ +#elif defined(__GNUC__) && defined(_ARCH_PPC) && defined(_AIX) +# include "platform/switch_ppc_aix.h" /* gcc on AIX/PowerPC */ +#elif defined(__GNUC__) && defined(__powerpc__) && defined(__NetBSD__) +#include "platform/switch_ppc_unix.h" /* gcc on NetBSD/powerpc */ +#elif defined(__GNUC__) && defined(sparc) +# include "platform/switch_sparc_sun_gcc.h" /* SunOS sparc with gcc */ +#elif defined(__SUNPRO_C) && defined(sparc) && defined(sun) +# include "platform/switch_sparc_sun_gcc.h" /* SunStudio on amd64 */ +#elif defined(__SUNPRO_C) && defined(__amd64__) && defined(sun) +# include "platform/switch_amd64_unix.h" /* SunStudio on amd64 */ +#elif defined(__SUNPRO_C) && defined(__i386__) && defined(sun) +# include "platform/switch_x86_unix.h" /* SunStudio on x86 */ +#elif defined(__GNUC__) && defined(__s390__) && defined(__linux__) +# include "platform/switch_s390_unix.h" /* Linux/S390 */ +#elif defined(__GNUC__) && defined(__s390x__) && defined(__linux__) +# include "platform/switch_s390_unix.h" /* Linux/S390 zSeries (64-bit) */ +#elif defined(__GNUC__) && defined(__arm__) +# ifdef __APPLE__ +# include +# endif +# if TARGET_OS_IPHONE +# include "platform/switch_arm32_ios.h" /* iPhone OS on arm32 */ +# else +# include "platform/switch_arm32_gcc.h" /* gcc using arm32 */ +# endif +#elif defined(__GNUC__) && defined(__mips__) && defined(__linux__) +# include "platform/switch_mips_unix.h" /* Linux/MIPS */ +#elif defined(__GNUC__) && defined(__aarch64__) +# include "platform/switch_aarch64_gcc.h" /* Aarch64 ABI */ +#elif defined(__GNUC__) && defined(__mc68000__) +# include "platform/switch_m68k_gcc.h" /* gcc on m68k */ +#elif defined(__GNUC__) && defined(__csky__) +#include "platform/switch_csky_gcc.h" /* gcc on csky */ +# elif defined(__GNUC__) && defined(__riscv) +# 
include "platform/switch_riscv_unix.h" /* gcc on RISC-V */ +#elif defined(__GNUC__) && defined(__alpha__) +# include "platform/switch_alpha_unix.h" /* gcc on DEC Alpha */ +#elif defined(MS_WIN32) && defined(__llvm__) && defined(__aarch64__) +# include "platform/switch_aarch64_gcc.h" /* LLVM Aarch64 ABI for Windows */ +#elif defined(__GNUC__) && defined(__loongarch64) && defined(__linux__) +# include "platform/switch_loongarch64_linux.h" /* LoongArch64 */ +#elif defined(__GNUC__) && defined(__sh__) +# include "platform/switch_sh_gcc.h" /* SuperH */ +#endif + +#ifdef __cplusplus +}; +#endif diff --git a/venv/Lib/site-packages/greenlet/tests/__init__.py b/venv/Lib/site-packages/greenlet/tests/__init__.py new file mode 100644 index 00000000..5929f2a7 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/__init__.py @@ -0,0 +1,240 @@ +# -*- coding: utf-8 -*- +""" +Tests for greenlet. + +""" +import os +import sys +import unittest + +from gc import collect +from gc import get_objects +from threading import active_count as active_thread_count +from time import sleep +from time import time + +import psutil + +from greenlet import greenlet as RawGreenlet +from greenlet import getcurrent + +from greenlet._greenlet import get_pending_cleanup_count +from greenlet._greenlet import get_total_main_greenlets + +from . import leakcheck + +PY312 = sys.version_info[:2] >= (3, 12) +PY313 = sys.version_info[:2] >= (3, 13) +# XXX: First tested on 3.14a7. Revisit all uses of this on later versions to ensure they +# are still valid. +PY314 = sys.version_info[:2] >= (3, 14) + +WIN = sys.platform.startswith("win") +RUNNING_ON_GITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS') +RUNNING_ON_TRAVIS = os.environ.get('TRAVIS') or RUNNING_ON_GITHUB_ACTIONS +RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR') +RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR +RUNNING_ON_MANYLINUX = os.environ.get('GREENLET_MANYLINUX') + +class TestCaseMetaClass(type): + # wrap each test method with + # a) leak checks + def __new__(cls, classname, bases, classDict): + # pylint and pep8 fight over what this should be called (mcs or cls). + # pylint gets it right, but we can't scope disable pep8, so we go with + # its convention. + # pylint: disable=bad-mcs-classmethod-argument + check_totalrefcount = True + + # Python 3: must copy, we mutate the classDict. Interestingly enough, + # it doesn't actually error out, but under 3.6 we wind up wrapping + # and re-wrapping the same items over and over and over. + for key, value in list(classDict.items()): + if key.startswith('test') and callable(value): + classDict.pop(key) + if check_totalrefcount: + value = leakcheck.wrap_refcount(value) + classDict[key] = value + return type.__new__(cls, classname, bases, classDict) + + +class TestCase(unittest.TestCase, metaclass=TestCaseMetaClass): + + cleanup_attempt_sleep_duration = 0.001 + cleanup_max_sleep_seconds = 1 + + def wait_for_pending_cleanups(self, + initial_active_threads=None, + initial_main_greenlets=None): + initial_active_threads = initial_active_threads or self.threads_before_test + initial_main_greenlets = initial_main_greenlets or self.main_greenlets_before_test + sleep_time = self.cleanup_attempt_sleep_duration + # NOTE: This is racy! A Python-level thread object may be dead + # and gone, but the C thread may not yet have fired its + # destructors and added to the queue. There's no particular + # way to know that's about to happen. We try to watch the + # Python threads to make sure they, at least, have gone away. 
+ # Counting the main greenlets, which we can easily do deterministically, + # also helps. + + # Always sleep at least once to let other threads run + sleep(sleep_time) + quit_after = time() + self.cleanup_max_sleep_seconds + # TODO: We could add an API that calls us back when a particular main greenlet is deleted? + # It would have to drop the GIL + while ( + get_pending_cleanup_count() + or active_thread_count() > initial_active_threads + or (not self.expect_greenlet_leak + and get_total_main_greenlets() > initial_main_greenlets)): + sleep(sleep_time) + if time() > quit_after: + print("Time limit exceeded.") + print("Threads: Waiting for only", initial_active_threads, + "-->", active_thread_count()) + print("MGlets : Waiting for only", initial_main_greenlets, + "-->", get_total_main_greenlets()) + break + collect() + + def count_objects(self, kind=list, exact_kind=True): + # pylint:disable=unidiomatic-typecheck + # Collect the garbage. + for _ in range(3): + collect() + if exact_kind: + return sum( + 1 + for x in get_objects() + if type(x) is kind + ) + # instances + return sum( + 1 + for x in get_objects() + if isinstance(x, kind) + ) + + greenlets_before_test = 0 + threads_before_test = 0 + main_greenlets_before_test = 0 + expect_greenlet_leak = False + + def count_greenlets(self): + """ + Find all the greenlets and subclasses tracked by the GC. + """ + return self.count_objects(RawGreenlet, False) + + def setUp(self): + # Ensure the main greenlet exists, otherwise the first test + # gets a false positive leak + super().setUp() + getcurrent() + self.threads_before_test = active_thread_count() + self.main_greenlets_before_test = get_total_main_greenlets() + self.wait_for_pending_cleanups(self.threads_before_test, self.main_greenlets_before_test) + self.greenlets_before_test = self.count_greenlets() + + def tearDown(self): + if getattr(self, 'skipTearDown', False): + return + + self.wait_for_pending_cleanups(self.threads_before_test, self.main_greenlets_before_test) + super().tearDown() + + def get_expected_returncodes_for_aborted_process(self): + import signal + # The child should be aborted in an unusual way. On POSIX + # platforms, this is done with abort() and signal.SIGABRT, + # which is reflected in a negative return value; however, on + # Windows, even though we observe the child print "Fatal + # Python error: Aborted" and in older versions of the C + # runtime "This application has requested the Runtime to + # terminate it in an unusual way," it always has an exit code + # of 3. This is interesting because 3 is the error code for + # ERROR_PATH_NOT_FOUND; BUT: the C runtime abort() function + # also uses this code. + # + # If we link to the static C library on Windows, the error + # code changes to '0xc0000409' (hex(3221226505)), which + # apparently is STATUS_STACK_BUFFER_OVERRUN; but "What this + # means is that nowadays when you get a + # STATUS_STACK_BUFFER_OVERRUN, it doesn’t actually mean that + # there is a stack buffer overrun. It just means that the + # application decided to terminate itself with great haste." + # + # + # On windows, we've also seen '0xc0000005' (hex(3221225477)). 
+ # That's "Access Violation" + # + # See + # https://devblogs.microsoft.com/oldnewthing/20110519-00/?p=10623 + # and + # https://docs.microsoft.com/en-us/previous-versions/k089yyh0(v=vs.140)?redirectedfrom=MSDN + # and + # https://devblogs.microsoft.com/oldnewthing/20190108-00/?p=100655 + expected_exit = ( + -signal.SIGABRT, + # But beginning on Python 3.11, the faulthandler + # that prints the C backtraces sometimes segfaults after + # reporting the exception but before printing the stack. + # This has only been seen on linux/gcc. + -signal.SIGSEGV, + ) if not WIN else ( + 3, + 0xc0000409, + 0xc0000005, + ) + return expected_exit + + def get_process_uss(self): + """ + Return the current process's USS in bytes. + + uss is available on Linux, macOS, Windows. Also known as + "Unique Set Size", this is the memory which is unique to a + process and which would be freed if the process was terminated + right now. + + If this is not supported by ``psutil``, this raises the + :exc:`unittest.SkipTest` exception. + """ + try: + return psutil.Process().memory_full_info().uss + except AttributeError as e: + raise unittest.SkipTest("uss not supported") from e + + def run_script(self, script_name, show_output=True): + import subprocess + script = os.path.join( + os.path.dirname(__file__), + script_name, + ) + + try: + return subprocess.check_output([sys.executable, script], + encoding='utf-8', + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as ex: + if show_output: + print('-----') + print('Failed to run script', script) + print('~~~~~') + print(ex.output) + print('------') + raise + + + def assertScriptRaises(self, script_name, exitcodes=None): + import subprocess + with self.assertRaises(subprocess.CalledProcessError) as exc: + output = self.run_script(script_name, show_output=False) + __traceback_info__ = output + # We're going to fail the assertion if we get here, at least + # preserve the output in the traceback. 
+ + if exitcodes is None: + exitcodes = self.get_expected_returncodes_for_aborted_process() + self.assertIn(exc.exception.returncode, exitcodes) + return exc.exception diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..7dba28d1 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_clearing_run_switches.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_clearing_run_switches.cpython-312.pyc new file mode 100644 index 00000000..0354b2e4 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_clearing_run_switches.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_cpp_exception.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_cpp_exception.cpython-312.pyc new file mode 100644 index 00000000..99299e98 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_cpp_exception.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_initialstub_already_started.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_initialstub_already_started.cpython-312.pyc new file mode 100644 index 00000000..3216a544 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_initialstub_already_started.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_slp_switch.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_slp_switch.cpython-312.pyc new file mode 100644 index 00000000..d6378ec9 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_slp_switch.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_switch_three_greenlets.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_switch_three_greenlets.cpython-312.pyc new file mode 100644 index 00000000..4de72505 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_switch_three_greenlets.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_switch_three_greenlets2.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_switch_three_greenlets2.cpython-312.pyc new file mode 100644 index 00000000..2fd930bf Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_switch_three_greenlets2.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_switch_two_greenlets.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_switch_two_greenlets.cpython-312.pyc new file mode 100644 index 00000000..a24de99d Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/fail_switch_two_greenlets.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/leakcheck.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/leakcheck.cpython-312.pyc new file mode 100644 index 00000000..abd6de3f Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/leakcheck.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_contextvars.cpython-312.pyc 
b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_contextvars.cpython-312.pyc new file mode 100644 index 00000000..e1f59a95 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_contextvars.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_cpp.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_cpp.cpython-312.pyc new file mode 100644 index 00000000..b89bdd24 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_cpp.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_extension_interface.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_extension_interface.cpython-312.pyc new file mode 100644 index 00000000..42a690cf Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_extension_interface.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_gc.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_gc.cpython-312.pyc new file mode 100644 index 00000000..9840159f Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_gc.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_generator.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_generator.cpython-312.pyc new file mode 100644 index 00000000..ed9fd019 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_generator.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_generator_nested.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_generator_nested.cpython-312.pyc new file mode 100644 index 00000000..28e06e11 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_generator_nested.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_greenlet.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_greenlet.cpython-312.pyc new file mode 100644 index 00000000..b9bc9a86 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_greenlet.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_greenlet_trash.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_greenlet_trash.cpython-312.pyc new file mode 100644 index 00000000..9d0e0be9 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_greenlet_trash.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_leaks.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_leaks.cpython-312.pyc new file mode 100644 index 00000000..ec344b3b Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_leaks.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_stack_saved.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_stack_saved.cpython-312.pyc new file mode 100644 index 00000000..becf7db1 Binary files /dev/null and b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_stack_saved.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/greenlet/tests/__pycache__/test_throw.cpython-312.pyc b/venv/Lib/site-packages/greenlet/tests/__pycache__/test_throw.cpython-312.pyc new file mode 100644 index 00000000..9a057cf3 Binary 
diff --git a/venv/Lib/site-packages/greenlet/tests/_test_extension.c b/venv/Lib/site-packages/greenlet/tests/_test_extension.c
new file mode 100644
index 00000000..05e81c03
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/tests/_test_extension.c
@@ -0,0 +1,231 @@
+/* This is a set of functions used by test_extension_interface.py to test the
+ * Greenlet C API.
+ */
+
+#include "../greenlet.h"
+
+#ifndef Py_RETURN_NONE
+#    define Py_RETURN_NONE return Py_INCREF(Py_None), Py_None
+#endif
+
+#define TEST_MODULE_NAME "_test_extension"
+
+static PyObject*
+test_switch(PyObject* self, PyObject* greenlet)
+{
+    PyObject* result = NULL;
+
+    if (greenlet == NULL || !PyGreenlet_Check(greenlet)) {
+        PyErr_BadArgument();
+        return NULL;
+    }
+
+    result = PyGreenlet_Switch((PyGreenlet*)greenlet, NULL, NULL);
+    if (result == NULL) {
+        if (!PyErr_Occurred()) {
+            PyErr_SetString(PyExc_AssertionError,
+                            "greenlet.switch() failed for some reason.");
+        }
+        return NULL;
+    }
+    Py_INCREF(result);
+    return result;
+}
+
+static PyObject*
+test_switch_kwargs(PyObject* self, PyObject* args, PyObject* kwargs)
+{
+    PyGreenlet* g = NULL;
+    PyObject* result = NULL;
+
+    PyArg_ParseTuple(args, "O!", &PyGreenlet_Type, &g);
+
+    if (g == NULL || !PyGreenlet_Check(g)) {
+        PyErr_BadArgument();
+        return NULL;
+    }
+
+    result = PyGreenlet_Switch(g, NULL, kwargs);
+    if (result == NULL) {
+        if (!PyErr_Occurred()) {
+            PyErr_SetString(PyExc_AssertionError,
+                            "greenlet.switch() failed for some reason.");
+        }
+        return NULL;
+    }
+    Py_XINCREF(result);
+    return result;
+}
+
+static PyObject*
+test_getcurrent(PyObject* self)
+{
+    PyGreenlet* g = PyGreenlet_GetCurrent();
+    if (g == NULL || !PyGreenlet_Check(g) || !PyGreenlet_ACTIVE(g)) {
+        PyErr_SetString(PyExc_AssertionError,
+                        "getcurrent() returned an invalid greenlet");
+        Py_XDECREF(g);
+        return NULL;
+    }
+    Py_DECREF(g);
+    Py_RETURN_NONE;
+}
+
+static PyObject*
+test_setparent(PyObject* self, PyObject* arg)
+{
+    PyGreenlet* current;
+    PyGreenlet* greenlet = NULL;
+
+    if (arg == NULL || !PyGreenlet_Check(arg)) {
+        PyErr_BadArgument();
+        return NULL;
+    }
+    if ((current = PyGreenlet_GetCurrent()) == NULL) {
+        return NULL;
+    }
+    greenlet = (PyGreenlet*)arg;
+    if (PyGreenlet_SetParent(greenlet, current)) {
+        Py_DECREF(current);
+        return NULL;
+    }
+    Py_DECREF(current);
+    if (PyGreenlet_Switch(greenlet, NULL, NULL) == NULL) {
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+
+static PyObject*
+test_new_greenlet(PyObject* self, PyObject* callable)
+{
+    PyObject* result = NULL;
+    PyGreenlet* greenlet = PyGreenlet_New(callable, NULL);
+
+    if (!greenlet) {
+        return NULL;
+    }
+
+    result = PyGreenlet_Switch(greenlet, NULL, NULL);
+    Py_CLEAR(greenlet);
+    if (result == NULL) {
+        return NULL;
+    }
+
+    Py_INCREF(result);
+    return result;
+}
+
+static PyObject*
+test_raise_dead_greenlet(PyObject* self)
+{
+    PyErr_SetString(PyExc_GreenletExit, "test GreenletExit exception.");
+    return NULL;
+}
+
+static PyObject*
+test_raise_greenlet_error(PyObject* self)
+{
+    PyErr_SetString(PyExc_GreenletError, "test greenlet.error exception");
+    return NULL;
+}
+
+static PyObject*
+test_throw(PyObject* self, PyGreenlet* g)
+{
+    const char msg[] = "take that sucka!";
+    PyObject* msg_obj = Py_BuildValue("s", msg);
+    PyGreenlet_Throw(g, PyExc_ValueError, msg_obj, NULL);
+    Py_DECREF(msg_obj);
+    if (PyErr_Occurred()) {
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+
+static PyObject*
+test_throw_exact(PyObject* self, PyObject* args)
+{
+    PyGreenlet* g = NULL;
+    PyObject* typ = NULL;
+    PyObject* val = NULL;
+    PyObject* tb = NULL;
+
+    if (!PyArg_ParseTuple(args, "OOOO:throw", &g, &typ, &val, &tb)) {
+        return NULL;
+    }
+
+    PyGreenlet_Throw(g, typ, val, tb);
+    if (PyErr_Occurred()) {
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+
+static PyMethodDef test_methods[] = {
+    {"test_switch",
+     (PyCFunction)test_switch,
+     METH_O,
+     "Switch to the provided greenlet sending provided arguments, and \n"
+     "return the results."},
+    {"test_switch_kwargs",
+     (PyCFunction)test_switch_kwargs,
+     METH_VARARGS | METH_KEYWORDS,
+     "Switch to the provided greenlet sending the provided keyword args."},
+    {"test_getcurrent",
+     (PyCFunction)test_getcurrent,
+     METH_NOARGS,
+     "Test PyGreenlet_GetCurrent()"},
+    {"test_setparent",
+     (PyCFunction)test_setparent,
+     METH_O,
+     "Set the parent of the provided greenlet and switch to it."},
+    {"test_new_greenlet",
+     (PyCFunction)test_new_greenlet,
+     METH_O,
+     "Test PyGreenlet_New()"},
+    {"test_raise_dead_greenlet",
+     (PyCFunction)test_raise_dead_greenlet,
+     METH_NOARGS,
+     "Just raise greenlet.GreenletExit"},
+    {"test_raise_greenlet_error",
+     (PyCFunction)test_raise_greenlet_error,
+     METH_NOARGS,
+     "Just raise greenlet.error"},
+    {"test_throw",
+     (PyCFunction)test_throw,
+     METH_O,
+     "Throw a ValueError at the provided greenlet"},
+    {"test_throw_exact",
+     (PyCFunction)test_throw_exact,
+     METH_VARARGS,
+     "Throw exactly the arguments given at the provided greenlet"},
+    {NULL, NULL, 0, NULL}
+};
+
+
+#define INITERROR return NULL
+
+static struct PyModuleDef moduledef = {PyModuleDef_HEAD_INIT,
+                                       TEST_MODULE_NAME,
+                                       NULL,
+                                       0,
+                                       test_methods,
+                                       NULL,
+                                       NULL,
+                                       NULL,
+                                       NULL};
+
+PyMODINIT_FUNC
+PyInit__test_extension(void)
+{
+    PyObject* module = NULL;
+    module = PyModule_Create(&moduledef);
+
+    if (module == NULL) {
+        return NULL;
+    }
+
+    PyGreenlet_Import();
+    return module;
+}
[Binary additions of the compiled test modules _test_extension.cp312-win_amd64.pyd and _test_extension_cpp.cp312-win_amd64.pyd omitted.]
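The extension above is driven from Python by `test_extension_interface.py` later in this diff. As a quick illustration of the round trip (a sketch; it assumes the compiled `_test_extension` module is importable):

```python
import greenlet
from greenlet.tests import _test_extension

# PyGreenlet_New + PyGreenlet_Switch: the C helper wraps the callable in a
# new greenlet, switches to it, and returns what the greenlet returns.
assert _test_extension.test_new_greenlet(lambda: 42) == 42

# PyGreenlet_Switch on a greenlet created at the Python level.
g = greenlet.greenlet(lambda: "hello")
assert _test_extension.test_switch(g) == "hello"
```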
diff --git a/venv/Lib/site-packages/greenlet/tests/_test_extension_cpp.cpp b/venv/Lib/site-packages/greenlet/tests/_test_extension_cpp.cpp
new file mode 100644
index 00000000..5cbe6a76
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/tests/_test_extension_cpp.cpp
@@ -0,0 +1,226 @@
+/* This is a set of functions used to test that C++ exceptions are not
+ * broken during greenlet switches
+ */
+
+#include "../greenlet.h"
+#include "../greenlet_compiler_compat.hpp"
+#include <stdexcept>
+#include <stdio.h>
+
+struct exception_t {
+    int depth;
+    exception_t(int depth) : depth(depth) {}
+};
+
+/* Functions are called via pointers to prevent inlining */
+static void (*p_test_exception_throw_nonstd)(int depth);
+static void (*p_test_exception_throw_std)();
+static PyObject* (*p_test_exception_switch_recurse)(int depth, int left);
+
+static void
+test_exception_throw_nonstd(int depth)
+{
+    throw exception_t(depth);
+}
+
+static void
+test_exception_throw_std()
+{
+    throw std::runtime_error("Thrown from an extension.");
+}
+
+static PyObject*
+test_exception_switch_recurse(int depth, int left)
+{
+    if (left > 0) {
+        return p_test_exception_switch_recurse(depth, left - 1);
+    }
+
+    PyObject* result = NULL;
+    PyGreenlet* self = PyGreenlet_GetCurrent();
+    if (self == NULL)
+        return NULL;
+
+    try {
+        if (PyGreenlet_Switch(PyGreenlet_GET_PARENT(self), NULL, NULL) == NULL) {
+            Py_DECREF(self);
+            return NULL;
+        }
+        p_test_exception_throw_nonstd(depth);
+        PyErr_SetString(PyExc_RuntimeError,
+                        "throwing C++ exception didn't work");
+    }
+    catch (const exception_t& e) {
+        if (e.depth != depth)
+            PyErr_SetString(PyExc_AssertionError, "depth mismatch");
+        else
+            result = PyLong_FromLong(depth);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "unexpected C++ exception");
+    }
+
+    Py_DECREF(self);
+    return result;
+}
+
+/* test_exception_switch(int depth)
+ * - recurses depth times
+ * - switches to parent inside try/catch block
+ * - throws an exception (expected to be caught in the same function)
+ * - verifies depth matches (exceptions shouldn't be caught in other greenlets)
+ */
+static PyObject*
+test_exception_switch(PyObject* UNUSED(self), PyObject* args)
+{
+    int depth;
+    if (!PyArg_ParseTuple(args, "i", &depth))
+        return NULL;
+    return p_test_exception_switch_recurse(depth, depth);
+}
+
+
+static PyObject*
+py_test_exception_throw_nonstd(PyObject* self, PyObject* args)
+{
+    if (!PyArg_ParseTuple(args, ""))
+        return NULL;
+    p_test_exception_throw_nonstd(0);
+    PyErr_SetString(PyExc_AssertionError, "unreachable code running after throw");
+    return NULL;
+}
+
+static PyObject*
+py_test_exception_throw_std(PyObject* self, PyObject* args)
+{
+    if (!PyArg_ParseTuple(args, ""))
+        return NULL;
+    p_test_exception_throw_std();
+    PyErr_SetString(PyExc_AssertionError, "unreachable code running after throw");
+    return NULL;
+}
+
+static PyObject*
+py_test_call(PyObject* self, PyObject* arg)
+{
+    PyObject* noargs = PyTuple_New(0);
+    PyObject* ret = PyObject_Call(arg, noargs, nullptr);
+    Py_DECREF(noargs);
+    return ret;
+}
+
+
+
+/* test_exception_switch_and_do_in_g2(g2func)
+ * - creates new greenlet g2 to run g2func
+ * - switches to g2 inside try/catch block
+ * - verifies that no exception has been caught
+ *
+ * it is used together with test_exception_throw to verify that unhandled
+ * exceptions thrown in one greenlet do not propagate to other greenlets nor
+ * segfault the process.
+ */
+static PyObject*
+test_exception_switch_and_do_in_g2(PyObject* self, PyObject* args)
+{
+    PyObject* g2func = NULL;
+    PyObject* result = NULL;
+
+    if (!PyArg_ParseTuple(args, "O", &g2func))
+        return NULL;
+    PyGreenlet* g2 = PyGreenlet_New(g2func, NULL);
+    if (!g2) {
+        return NULL;
+    }
+
+    try {
+        result = PyGreenlet_Switch(g2, NULL, NULL);
+        if (!result) {
+            return NULL;
+        }
+    }
+    catch (const exception_t& e) {
+        /* if we are here the memory can be already corrupted and the program
+         * might crash before the py-level exception below gets printed.
+         * -> print something to stderr to make it clear that we had entered
+         * this catch block.
+         * See comments in inner_bootstrap()
+         */
+#if defined(WIN32) || defined(_WIN32)
+        fprintf(stderr, "C++ exception unexpectedly caught in g1\n");
+        PyErr_SetString(PyExc_AssertionError, "C++ exception unexpectedly caught in g1");
+        Py_XDECREF(result);
+        return NULL;
+#else
+        throw;
+#endif
+    }
+
+    Py_XDECREF(result);
+    Py_RETURN_NONE;
+}
+
+static PyMethodDef test_methods[] = {
+    {"test_exception_switch",
+     (PyCFunction)&test_exception_switch,
+     METH_VARARGS,
+     "Switches to parent twice, to test exception handling and greenlet "
+     "switching."},
+    {"test_exception_switch_and_do_in_g2",
+     (PyCFunction)&test_exception_switch_and_do_in_g2,
+     METH_VARARGS,
+     "Creates new greenlet g2 to run g2func and switches to it inside try/catch "
+     "block. Used together with test_exception_throw to verify that unhandled "
+     "C++ exceptions thrown in a greenlet do not corrupt memory."},
+    {"test_exception_throw_nonstd",
+     (PyCFunction)&py_test_exception_throw_nonstd,
+     METH_VARARGS,
+     "Throws a non-standard C++ exception. Calling this function directly should abort the process."
+    },
+    {"test_exception_throw_std",
+     (PyCFunction)&py_test_exception_throw_std,
+     METH_VARARGS,
+     "Throws a standard C++ exception. Calling this function directly should abort the process."
+    },
+    {"test_call",
+     (PyCFunction)&py_test_call,
+     METH_O,
+     "Call the given callable. Unlike calling it directly, this creates a "
+     "new C-level stack frame, which may be helpful in testing."
+    },
+    {NULL, NULL, 0, NULL}
+};
+
+
+static struct PyModuleDef moduledef = {PyModuleDef_HEAD_INIT,
+                                       "greenlet.tests._test_extension_cpp",
+                                       NULL,
+                                       0,
+                                       test_methods,
+                                       NULL,
+                                       NULL,
+                                       NULL,
+                                       NULL};
+
+PyMODINIT_FUNC
+PyInit__test_extension_cpp(void)
+{
+    PyObject* module = NULL;
+
+    module = PyModule_Create(&moduledef);
+
+    if (module == NULL) {
+        return NULL;
+    }
+
+    PyGreenlet_Import();
+    if (_PyGreenlet_API == NULL) {
+        return NULL;
+    }
+
+    p_test_exception_throw_nonstd = test_exception_throw_nonstd;
+    p_test_exception_throw_std = test_exception_throw_std;
+    p_test_exception_switch_recurse = test_exception_switch_recurse;
+
+    return module;
+}
diff --git a/venv/Lib/site-packages/greenlet/tests/fail_clearing_run_switches.py b/venv/Lib/site-packages/greenlet/tests/fail_clearing_run_switches.py
new file mode 100644
index 00000000..6dd1492f
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/tests/fail_clearing_run_switches.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+"""
+If we have a run callable passed to the constructor or set as an
+attribute, but we don't actually use that (because ``__getattribute__``
+or the like interferes), then when we clear the callable before beginning
+to run, there's an opportunity for Python code to run.
+ +""" +import greenlet + +g = None +main = greenlet.getcurrent() + +results = [] + +class RunCallable: + + def __del__(self): + results.append(('RunCallable', '__del__')) + main.switch('from RunCallable') + + +class G(greenlet.greenlet): + + def __getattribute__(self, name): + if name == 'run': + results.append(('G.__getattribute__', 'run')) + return run_func + return object.__getattribute__(self, name) + + +def run_func(): + results.append(('run_func', 'enter')) + + +g = G(RunCallable()) +# Try to start G. It will get to the point where it deletes +# its run callable C++ variable in inner_bootstrap. That triggers +# the __del__ method, which switches back to main before g +# actually even starts running. +x = g.switch() +results.append(('main: g.switch()', x)) +# In the C++ code, this results in g->g_switch() appearing to return, even though +# it has yet to run. +print('In main with', x, flush=True) +g.switch() +print('RESULTS', results) diff --git a/venv/Lib/site-packages/greenlet/tests/fail_cpp_exception.py b/venv/Lib/site-packages/greenlet/tests/fail_cpp_exception.py new file mode 100644 index 00000000..fa4dc2eb --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/fail_cpp_exception.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +""" +Helper for testing a C++ exception throw aborts the process. + +Takes one argument, the name of the function in :mod:`_test_extension_cpp` to call. +""" +import sys +import greenlet +from greenlet.tests import _test_extension_cpp +print('fail_cpp_exception is running') + +def run_unhandled_exception_in_greenlet_aborts(): + def _(): + _test_extension_cpp.test_exception_switch_and_do_in_g2( + _test_extension_cpp.test_exception_throw_nonstd + ) + g1 = greenlet.greenlet(_) + g1.switch() + + +func_name = sys.argv[1] +try: + func = getattr(_test_extension_cpp, func_name) +except AttributeError: + if func_name == run_unhandled_exception_in_greenlet_aborts.__name__: + func = run_unhandled_exception_in_greenlet_aborts + elif func_name == 'run_as_greenlet_target': + g = greenlet.greenlet(_test_extension_cpp.test_exception_throw_std) + func = g.switch + else: + raise +print('raising', func, flush=True) +func() diff --git a/venv/Lib/site-packages/greenlet/tests/fail_initialstub_already_started.py b/venv/Lib/site-packages/greenlet/tests/fail_initialstub_already_started.py new file mode 100644 index 00000000..c1a44efd --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/fail_initialstub_already_started.py @@ -0,0 +1,78 @@ +""" +Testing initialstub throwing an already started exception. +""" + +import greenlet + +a = None +b = None +c = None +main = greenlet.getcurrent() + +# If we switch into a dead greenlet, +# we go looking for its parents. +# if a parent is not yet started, we start it. 
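+# (Editorial sketch of the rule described above, not part of the vendored
+# script; it assumes only the documented greenlet API. If ``g`` is dead and
+# its parent ``p`` has never run, switching through ``g`` starts ``p``:
+#
+#     p = greenlet.greenlet(lambda *args: 'parent ran')
+#     g = greenlet.greenlet(lambda: None, parent=p)
+#     g.switch()    # g runs and dies; control falls through into the
+#     # never-started p, which is started on the way out.)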
+ +results = [] + +def a_run(*args): + #results.append('A') + results.append(('Begin A', args)) + + +def c_run(): + results.append('Begin C') + b.switch('From C') + results.append('C done') + +class A(greenlet.greenlet): pass + +class B(greenlet.greenlet): + doing_it = False + def __getattribute__(self, name): + if name == 'run' and not self.doing_it: + assert greenlet.getcurrent() is c + self.doing_it = True + results.append('Switch to b from B.__getattribute__ in ' + + type(greenlet.getcurrent()).__name__) + b.switch() + results.append('B.__getattribute__ back from main in ' + + type(greenlet.getcurrent()).__name__) + if name == 'run': + name = '_B_run' + return object.__getattribute__(self, name) + + def _B_run(self, *arg): + results.append(('Begin B', arg)) + results.append('_B_run switching to main') + main.switch('From B') + +class C(greenlet.greenlet): + pass +a = A(a_run) +b = B(parent=a) +c = C(c_run, b) + +# Start a child; while running, it will start B, +# but starting B will ALSO start B. +result = c.switch() +results.append(('main from c', result)) + +# Switch back to C, which was in the middle of switching +# already. This will throw the ``GreenletStartedWhileInPython`` +# exception, which results in parent A getting started (B is finished) +c.switch() + +results.append(('A dead?', a.dead, 'B dead?', b.dead, 'C dead?', c.dead)) + +# A and B should both be dead now. +assert a.dead +assert b.dead +assert not c.dead + +result = c.switch() +results.append(('main from c.2', result)) +# Now C is dead +assert c.dead + +print("RESULTS:", results) diff --git a/venv/Lib/site-packages/greenlet/tests/fail_slp_switch.py b/venv/Lib/site-packages/greenlet/tests/fail_slp_switch.py new file mode 100644 index 00000000..09905269 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/fail_slp_switch.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +""" +A test helper for seeing what happens when slp_switch() +fails. +""" +# pragma: no cover + +import greenlet + + +print('fail_slp_switch is running', flush=True) + +runs = [] +def func(): + runs.append(1) + greenlet.getcurrent().parent.switch() + runs.append(2) + greenlet.getcurrent().parent.switch() + runs.append(3) + +g = greenlet._greenlet.UnswitchableGreenlet(func) +g.switch() +assert runs == [1] +g.switch() +assert runs == [1, 2] +g.force_slp_switch_error = True + +# This should crash. +g.switch() diff --git a/venv/Lib/site-packages/greenlet/tests/fail_switch_three_greenlets.py b/venv/Lib/site-packages/greenlet/tests/fail_switch_three_greenlets.py new file mode 100644 index 00000000..e151b19a --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/fail_switch_three_greenlets.py @@ -0,0 +1,44 @@ +""" +Uses a trace function to switch greenlets at unexpected times. + +In the trace function, we switch from the current greenlet to another +greenlet, which switches +""" +import greenlet + +g1 = None +g2 = None + +switch_to_g2 = False + +def tracefunc(*args): + print('TRACE', *args) + global switch_to_g2 + if switch_to_g2: + switch_to_g2 = False + g2.switch() + print('\tLEAVE TRACE', *args) + +def g1_run(): + print('In g1_run') + global switch_to_g2 + switch_to_g2 = True + from_parent = greenlet.getcurrent().parent.switch() + print('Return to g1_run') + print('From parent', from_parent) + +def g2_run(): + #g1.switch() + greenlet.getcurrent().parent.switch() + +greenlet.settrace(tracefunc) + +g1 = greenlet.greenlet(g1_run) +g2 = greenlet.greenlet(g2_run) + +# This switch didn't actually finish! 
+# And if it did, it would raise TypeError +# because g1_run() doesn't take any arguments. +g1.switch(1) +print('Back in main') +g1.switch(2) diff --git a/venv/Lib/site-packages/greenlet/tests/fail_switch_three_greenlets2.py b/venv/Lib/site-packages/greenlet/tests/fail_switch_three_greenlets2.py new file mode 100644 index 00000000..1f6b66bc --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/fail_switch_three_greenlets2.py @@ -0,0 +1,55 @@ +""" +Like fail_switch_three_greenlets, but the call into g1_run would actually be +valid. +""" +import greenlet + +g1 = None +g2 = None + +switch_to_g2 = True + +results = [] + +def tracefunc(*args): + results.append(('trace', args[0])) + print('TRACE', *args) + global switch_to_g2 + if switch_to_g2: + switch_to_g2 = False + g2.switch('g2 from tracefunc') + print('\tLEAVE TRACE', *args) + +def g1_run(arg): + results.append(('g1 arg', arg)) + print('In g1_run') + from_parent = greenlet.getcurrent().parent.switch('from g1_run') + results.append(('g1 from parent', from_parent)) + return 'g1 done' + +def g2_run(arg): + #g1.switch() + results.append(('g2 arg', arg)) + parent = greenlet.getcurrent().parent.switch('from g2_run') + global switch_to_g2 + switch_to_g2 = False + results.append(('g2 from parent', parent)) + return 'g2 done' + + +greenlet.settrace(tracefunc) + +g1 = greenlet.greenlet(g1_run) +g2 = greenlet.greenlet(g2_run) + +x = g1.switch('g1 from main') +results.append(('main g1', x)) +print('Back in main', x) +x = g1.switch('g2 from main') +results.append(('main g2', x)) +print('back in amain again', x) +x = g1.switch('g1 from main 2') +results.append(('main g1.2', x)) +x = g2.switch() +results.append(('main g2.2', x)) +print("RESULTS:", results) diff --git a/venv/Lib/site-packages/greenlet/tests/fail_switch_two_greenlets.py b/venv/Lib/site-packages/greenlet/tests/fail_switch_two_greenlets.py new file mode 100644 index 00000000..3e52345a --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/fail_switch_two_greenlets.py @@ -0,0 +1,41 @@ +""" +Uses a trace function to switch greenlets at unexpected times. + +In the trace function, we switch from the current greenlet to another +greenlet, which switches +""" +import greenlet + +g1 = None +g2 = None + +switch_to_g2 = False + +def tracefunc(*args): + print('TRACE', *args) + global switch_to_g2 + if switch_to_g2: + switch_to_g2 = False + g2.switch() + print('\tLEAVE TRACE', *args) + +def g1_run(): + print('In g1_run') + global switch_to_g2 + switch_to_g2 = True + greenlet.getcurrent().parent.switch() + print('Return to g1_run') + print('Falling off end of g1_run') + +def g2_run(): + g1.switch() + print('Falling off end of g2') + +greenlet.settrace(tracefunc) + +g1 = greenlet.greenlet(g1_run) +g2 = greenlet.greenlet(g2_run) + +g1.switch() +print('Falling off end of main') +g2.switch() diff --git a/venv/Lib/site-packages/greenlet/tests/leakcheck.py b/venv/Lib/site-packages/greenlet/tests/leakcheck.py new file mode 100644 index 00000000..a5152fb2 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/leakcheck.py @@ -0,0 +1,319 @@ +# Copyright (c) 2018 gevent community +# Copyright (c) 2021 greenlet community +# +# This was originally part of gevent's test suite. The main author +# (Jason Madden) vendored a copy of it into greenlet. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +from __future__ import print_function + +import os +import sys +import gc + +from functools import wraps +import unittest + + +import objgraph + +# graphviz 0.18 (Nov 7 2021), available only on Python 3.6 and newer, +# has added type hints (sigh). It wants to use ``typing.Literal`` for +# some stuff, but that's only available on Python 3.9+. If that's not +# found, it creates a ``unittest.mock.MagicMock`` object and annotates +# with that. These are GC'able objects, and doing almost *anything* +# with them results in an explosion of objects. For example, trying to +# compare them for equality creates new objects. This causes our +# leakchecks to fail, with reports like: +# +# greenlet.tests.leakcheck.LeakCheckError: refcount increased by [337, 1333, 343, 430, 530, 643, 769] +# _Call 1820 +546 +# dict 4094 +76 +# MagicProxy 585 +73 +# tuple 2693 +66 +# _CallList 24 +3 +# weakref 1441 +1 +# function 5996 +1 +# type 736 +1 +# cell 592 +1 +# MagicMock 8 +1 +# +# To avoid this, we *could* filter this type of object out early. In +# principle it could leak, but we don't use mocks in greenlet, so it +# doesn't leak from us. However, a further issue is that ``MagicMock`` +# objects have subobjects that are also GC'able, like ``_Call``, and +# those create new mocks of their own too. So we'd have to filter them +# as well, and they're not public. That's OK, we can workaround the +# problem by being very careful to never compare by equality or other +# user-defined operators, only using object identity or other builtin +# functions. + +RUNNING_ON_GITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS') +RUNNING_ON_TRAVIS = os.environ.get('TRAVIS') or RUNNING_ON_GITHUB_ACTIONS +RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR') +RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR +RUNNING_ON_MANYLINUX = os.environ.get('GREENLET_MANYLINUX') +SKIP_LEAKCHECKS = RUNNING_ON_MANYLINUX or os.environ.get('GREENLET_SKIP_LEAKCHECKS') +SKIP_FAILING_LEAKCHECKS = os.environ.get('GREENLET_SKIP_FAILING_LEAKCHECKS') +ONLY_FAILING_LEAKCHECKS = os.environ.get('GREENLET_ONLY_FAILING_LEAKCHECKS') + +def ignores_leakcheck(func): + """ + Ignore the given object during leakchecks. + + Can be applied to a method, in which case the method will run, but + will not be subject to leak checks. + + If applied to a class, the entire class will be skipped during leakchecks. 
This + is intended to be used for classes that are very slow and cause problems such as + test timeouts; typically it will be used for classes that are subclasses of a base + class and specify variants of behaviour (such as pool sizes). + """ + func.ignore_leakcheck = True + return func + +def fails_leakcheck(func): + """ + Mark that the function is known to leak. + """ + func.fails_leakcheck = True + if SKIP_FAILING_LEAKCHECKS: + func = unittest.skip("Skipping known failures")(func) + return func + +class LeakCheckError(AssertionError): + pass + +if hasattr(sys, 'getobjects'): + # In a Python build with ``--with-trace-refs``, make objgraph + # trace *all* the objects, not just those that are tracked by the + # GC + class _MockGC(object): + def get_objects(self): + return sys.getobjects(0) # pylint:disable=no-member + def __getattr__(self, name): + return getattr(gc, name) + objgraph.gc = _MockGC() + fails_strict_leakcheck = fails_leakcheck +else: + def fails_strict_leakcheck(func): + """ + Decorator for a function that is known to fail when running + strict (``sys.getobjects()``) leakchecks. + + This type of leakcheck finds all objects, even those, such as + strings, which are not tracked by the garbage collector. + """ + return func + +class ignores_types_in_strict_leakcheck(object): + def __init__(self, types): + self.types = types + def __call__(self, func): + func.leakcheck_ignore_types = self.types + return func + +class _RefCountChecker(object): + + # Some builtin things that we ignore + # XXX: Those things were ignored by gevent, but they're important here, + # presumably. + IGNORED_TYPES = () #(tuple, dict, types.FrameType, types.TracebackType) + + def __init__(self, testcase, function): + self.testcase = testcase + self.function = function + self.deltas = [] + self.peak_stats = {} + self.ignored_types = () + + # The very first time we are called, we have already been + # self.setUp() by the test runner, so we don't need to do it again. + self.needs_setUp = False + + def _include_object_p(self, obj): + # pylint:disable=too-many-return-statements + # + # See the comment block at the top. We must be careful to + # avoid invoking user-defined operations. + if obj is self: + return False + kind = type(obj) + # ``self._include_object_p == obj`` returns NotImplemented + # for non-function objects, which causes the interpreter + # to try to reverse the order of arguments...which leads + # to the explosion of mock objects. We don't want that, so we implement + # the check manually. + if kind == type(self._include_object_p): + try: + # pylint:disable=not-callable + exact_method_equals = self._include_object_p.__eq__(obj) + except AttributeError: + # Python 2.7 methods may only have __cmp__, and that raises a + # TypeError for non-method arguments + # pylint:disable=no-member + exact_method_equals = self._include_object_p.__cmp__(obj) == 0 + + if exact_method_equals is not NotImplemented and exact_method_equals: + return False + + # Similarly, we need to check identity in our __dict__ to avoid mock explosions. 
+ for x in self.__dict__.values(): + if obj is x: + return False + + + if kind in self.ignored_types or kind in self.IGNORED_TYPES: + return False + + return True + + def _growth(self): + return objgraph.growth(limit=None, peak_stats=self.peak_stats, + filter=self._include_object_p) + + def _report_diff(self, growth): + if not growth: + return "" + + lines = [] + width = max(len(name) for name, _, _ in growth) + for name, count, delta in growth: + lines.append('%-*s%9d %+9d' % (width, name, count, delta)) + + diff = '\n'.join(lines) + return diff + + + def _run_test(self, args, kwargs): + gc_enabled = gc.isenabled() + gc.disable() + + if self.needs_setUp: + self.testcase.setUp() + self.testcase.skipTearDown = False + try: + self.function(self.testcase, *args, **kwargs) + finally: + self.testcase.tearDown() + self.testcase.doCleanups() + self.testcase.skipTearDown = True + self.needs_setUp = True + if gc_enabled: + gc.enable() + + def _growth_after(self): + # Grab post snapshot + # pylint:disable=no-member + if 'urlparse' in sys.modules: + sys.modules['urlparse'].clear_cache() + if 'urllib.parse' in sys.modules: + sys.modules['urllib.parse'].clear_cache() + + return self._growth() + + def _check_deltas(self, growth): + # Return false when we have decided there is no leak, + # true if we should keep looping, raises an assertion + # if we have decided there is a leak. + + deltas = self.deltas + if not deltas: + # We haven't run yet, no data, keep looping + return True + + if gc.garbage: + raise LeakCheckError("Generated uncollectable garbage %r" % (gc.garbage,)) + + + # the following configurations are classified as "no leak" + # [0, 0] + # [x, 0, 0] + # [... a, b, c, d] where a+b+c+d = 0 + # + # the following configurations are classified as "leak" + # [... z, z, z] where z > 0 + + if deltas[-2:] == [0, 0] and len(deltas) in (2, 3): + return False + + if deltas[-3:] == [0, 0, 0]: + return False + + if len(deltas) >= 4 and sum(deltas[-4:]) == 0: + return False + + if len(deltas) >= 3 and deltas[-1] > 0 and deltas[-1] == deltas[-2] and deltas[-2] == deltas[-3]: + diff = self._report_diff(growth) + raise LeakCheckError('refcount increased by %r\n%s' % (deltas, diff)) + + # OK, we don't know for sure yet. Let's search for more + if sum(deltas[-3:]) <= 0 or sum(deltas[-4:]) <= 0 or deltas[-4:].count(0) >= 2: + # this is suspicious, so give a few more runs + limit = 11 + else: + limit = 7 + if len(deltas) >= limit: + raise LeakCheckError('refcount increased by %r\n%s' + % (deltas, + self._report_diff(growth))) + + # We couldn't decide yet, keep going + return True + + def __call__(self, args, kwargs): + for _ in range(3): + gc.collect() + + expect_failure = getattr(self.function, 'fails_leakcheck', False) + if expect_failure: + self.testcase.expect_greenlet_leak = True + self.ignored_types = getattr(self.function, "leakcheck_ignore_types", ()) + + # Capture state before; the incremental will be + # updated by each call to _growth_after + growth = self._growth() + + try: + while self._check_deltas(growth): + self._run_test(args, kwargs) + + growth = self._growth_after() + + self.deltas.append(sum((stat[2] for stat in growth))) + except LeakCheckError: + if not expect_failure: + raise + else: + if expect_failure: + raise LeakCheckError("Expected %s to leak but it did not." 
% (self.function,)) + +def wrap_refcount(method): + if getattr(method, 'ignore_leakcheck', False) or SKIP_LEAKCHECKS: + return method + + @wraps(method) + def wrapper(self, *args, **kwargs): # pylint:disable=too-many-branches + if getattr(self, 'ignore_leakcheck', False): + raise unittest.SkipTest("This class ignored during leakchecks") + if ONLY_FAILING_LEAKCHECKS and not getattr(method, 'fails_leakcheck', False): + raise unittest.SkipTest("Only running tests that fail leakchecks.") + return _RefCountChecker(self, method)(args, kwargs) + + return wrapper diff --git a/venv/Lib/site-packages/greenlet/tests/test_contextvars.py b/venv/Lib/site-packages/greenlet/tests/test_contextvars.py new file mode 100644 index 00000000..b0d1ccf3 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_contextvars.py @@ -0,0 +1,312 @@ +from __future__ import print_function + +import gc +import sys +import unittest + +from functools import partial +from unittest import skipUnless +from unittest import skipIf + +from greenlet import greenlet +from greenlet import getcurrent +from . import TestCase +from . import PY314 + +try: + from contextvars import Context + from contextvars import ContextVar + from contextvars import copy_context + # From the documentation: + # + # Important: Context Variables should be created at the top module + # level and never in closures. Context objects hold strong + # references to context variables which prevents context variables + # from being properly garbage collected. + ID_VAR = ContextVar("id", default=None) + VAR_VAR = ContextVar("var", default=None) + ContextVar = None +except ImportError: + Context = ContextVar = copy_context = None + +# We don't support testing if greenlet's built-in context var support is disabled. +@skipUnless(Context is not None, "ContextVar not supported") +class ContextVarsTests(TestCase): + def _new_ctx_run(self, *args, **kwargs): + return copy_context().run(*args, **kwargs) + + def _increment(self, greenlet_id, callback, counts, expect): + ctx_var = ID_VAR + if expect is None: + self.assertIsNone(ctx_var.get()) + else: + self.assertEqual(ctx_var.get(), expect) + ctx_var.set(greenlet_id) + for _ in range(2): + counts[ctx_var.get()] += 1 + callback() + + def _test_context(self, propagate_by): + # pylint:disable=too-many-branches + ID_VAR.set(0) + + callback = getcurrent().switch + counts = dict((i, 0) for i in range(5)) + + lets = [ + greenlet(partial( + partial( + copy_context().run, + self._increment + ) if propagate_by == "run" else self._increment, + greenlet_id=i, + callback=callback, + counts=counts, + expect=( + i - 1 if propagate_by == "share" else + 0 if propagate_by in ("set", "run") else None + ) + )) + for i in range(1, 5) + ] + + for let in lets: + if propagate_by == "set": + let.gr_context = copy_context() + elif propagate_by == "share": + let.gr_context = getcurrent().gr_context + + for i in range(2): + counts[ID_VAR.get()] += 1 + for let in lets: + let.switch() + + if propagate_by == "run": + # Must leave each context.run() in reverse order of entry + for let in reversed(lets): + let.switch() + else: + # No context.run(), so fine to exit in any order. + for let in lets: + let.switch() + + for let in lets: + self.assertTrue(let.dead) + # When using run(), we leave the run() as the greenlet dies, + # and there's no context "underneath". When not using run(), + # gr_context still reflects the context the greenlet was + # running in. 
+ if propagate_by == 'run': + self.assertIsNone(let.gr_context) + else: + self.assertIsNotNone(let.gr_context) + + + if propagate_by == "share": + self.assertEqual(counts, {0: 1, 1: 1, 2: 1, 3: 1, 4: 6}) + else: + self.assertEqual(set(counts.values()), set([2])) + + def test_context_propagated_by_context_run(self): + self._new_ctx_run(self._test_context, "run") + + def test_context_propagated_by_setting_attribute(self): + self._new_ctx_run(self._test_context, "set") + + def test_context_not_propagated(self): + self._new_ctx_run(self._test_context, None) + + def test_context_shared(self): + self._new_ctx_run(self._test_context, "share") + + def test_break_ctxvars(self): + let1 = greenlet(copy_context().run) + let2 = greenlet(copy_context().run) + let1.switch(getcurrent().switch) + let2.switch(getcurrent().switch) + # Since let2 entered the current context and let1 exits its own, the + # interpreter emits: + # RuntimeError: cannot exit context: thread state references a different context object + let1.switch() + + def test_not_broken_if_using_attribute_instead_of_context_run(self): + let1 = greenlet(getcurrent().switch) + let2 = greenlet(getcurrent().switch) + let1.gr_context = copy_context() + let2.gr_context = copy_context() + let1.switch() + let2.switch() + let1.switch() + let2.switch() + + def test_context_assignment_while_running(self): + # pylint:disable=too-many-statements + ID_VAR.set(None) + + def target(): + self.assertIsNone(ID_VAR.get()) + self.assertIsNone(gr.gr_context) + + # Context is created on first use + ID_VAR.set(1) + self.assertIsInstance(gr.gr_context, Context) + self.assertEqual(ID_VAR.get(), 1) + self.assertEqual(gr.gr_context[ID_VAR], 1) + + # Clearing the context makes it get re-created as another + # empty context when next used + old_context = gr.gr_context + gr.gr_context = None # assign None while running + self.assertIsNone(ID_VAR.get()) + self.assertIsNone(gr.gr_context) + ID_VAR.set(2) + self.assertIsInstance(gr.gr_context, Context) + self.assertEqual(ID_VAR.get(), 2) + self.assertEqual(gr.gr_context[ID_VAR], 2) + + new_context = gr.gr_context + getcurrent().parent.switch((old_context, new_context)) + # parent switches us back to old_context + + self.assertEqual(ID_VAR.get(), 1) + gr.gr_context = new_context # assign non-None while running + self.assertEqual(ID_VAR.get(), 2) + + getcurrent().parent.switch() + # parent switches us back to no context + self.assertIsNone(ID_VAR.get()) + self.assertIsNone(gr.gr_context) + gr.gr_context = old_context + self.assertEqual(ID_VAR.get(), 1) + + getcurrent().parent.switch() + # parent switches us back to no context + self.assertIsNone(ID_VAR.get()) + self.assertIsNone(gr.gr_context) + + gr = greenlet(target) + + with self.assertRaisesRegex(AttributeError, "can't delete context attribute"): + del gr.gr_context + + self.assertIsNone(gr.gr_context) + old_context, new_context = gr.switch() + self.assertIs(new_context, gr.gr_context) + self.assertEqual(old_context[ID_VAR], 1) + self.assertEqual(new_context[ID_VAR], 2) + self.assertEqual(new_context.run(ID_VAR.get), 2) + gr.gr_context = old_context # assign non-None while suspended + gr.switch() + self.assertIs(gr.gr_context, new_context) + gr.gr_context = None # assign None while suspended + gr.switch() + self.assertIs(gr.gr_context, old_context) + gr.gr_context = None + gr.switch() + self.assertIsNone(gr.gr_context) + + # Make sure there are no reference leaks + gr = None + gc.collect() + # Python 3.14 elides reference counting operations + # in some cases. 
See https://github.com/python/cpython/pull/130708 + self.assertEqual(sys.getrefcount(old_context), 2 if not PY314 else 1) + self.assertEqual(sys.getrefcount(new_context), 2 if not PY314 else 1) + + def test_context_assignment_different_thread(self): + import threading + VAR_VAR.set(None) + ctx = Context() + + is_running = threading.Event() + should_suspend = threading.Event() + did_suspend = threading.Event() + should_exit = threading.Event() + holder = [] + + def greenlet_in_thread_fn(): + VAR_VAR.set(1) + is_running.set() + should_suspend.wait(10) + VAR_VAR.set(2) + getcurrent().parent.switch() + holder.append(VAR_VAR.get()) + + def thread_fn(): + gr = greenlet(greenlet_in_thread_fn) + gr.gr_context = ctx + holder.append(gr) + gr.switch() + did_suspend.set() + should_exit.wait(10) + gr.switch() + del gr + greenlet() # trigger cleanup + + thread = threading.Thread(target=thread_fn, daemon=True) + thread.start() + is_running.wait(10) + gr = holder[0] + + # Can't access or modify context if the greenlet is running + # in a different thread + with self.assertRaisesRegex(ValueError, "running in a different"): + getattr(gr, 'gr_context') + with self.assertRaisesRegex(ValueError, "running in a different"): + gr.gr_context = None + + should_suspend.set() + did_suspend.wait(10) + + # OK to access and modify context if greenlet is suspended + self.assertIs(gr.gr_context, ctx) + self.assertEqual(gr.gr_context[VAR_VAR], 2) + gr.gr_context = None + + should_exit.set() + thread.join(10) + + self.assertEqual(holder, [gr, None]) + + # Context can still be accessed/modified when greenlet is dead: + self.assertIsNone(gr.gr_context) + gr.gr_context = ctx + self.assertIs(gr.gr_context, ctx) + + # Otherwise we leak greenlets on some platforms. + # XXX: Should be able to do this automatically + del holder[:] + gr = None + thread = None + + def test_context_assignment_wrong_type(self): + g = greenlet() + with self.assertRaisesRegex(TypeError, + "greenlet context must be a contextvars.Context or None"): + g.gr_context = self + + +@skipIf(Context is not None, "ContextVar supported") +class NoContextVarsTests(TestCase): + def test_contextvars_errors(self): + let1 = greenlet(getcurrent().switch) + self.assertFalse(hasattr(let1, 'gr_context')) + with self.assertRaises(AttributeError): + getattr(let1, 'gr_context') + + with self.assertRaises(AttributeError): + let1.gr_context = None + + let1.switch() + + with self.assertRaises(AttributeError): + getattr(let1, 'gr_context') + + with self.assertRaises(AttributeError): + let1.gr_context = None + + del let1 + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/Lib/site-packages/greenlet/tests/test_cpp.py b/venv/Lib/site-packages/greenlet/tests/test_cpp.py new file mode 100644 index 00000000..2d0cc9c9 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_cpp.py @@ -0,0 +1,73 @@ +from __future__ import print_function +from __future__ import absolute_import + +import subprocess +import unittest + +import greenlet +from . import _test_extension_cpp +from . import TestCase +from . 
import WIN + +class CPPTests(TestCase): + def test_exception_switch(self): + greenlets = [] + for i in range(4): + g = greenlet.greenlet(_test_extension_cpp.test_exception_switch) + g.switch(i) + greenlets.append(g) + for i, g in enumerate(greenlets): + self.assertEqual(g.switch(), i) + + def _do_test_unhandled_exception(self, target): + import os + import sys + script = os.path.join( + os.path.dirname(__file__), + 'fail_cpp_exception.py', + ) + args = [sys.executable, script, target.__name__ if not isinstance(target, str) else target] + __traceback_info__ = args + with self.assertRaises(subprocess.CalledProcessError) as exc: + subprocess.check_output( + args, + encoding='utf-8', + stderr=subprocess.STDOUT + ) + + ex = exc.exception + expected_exit = self.get_expected_returncodes_for_aborted_process() + self.assertIn(ex.returncode, expected_exit) + self.assertIn('fail_cpp_exception is running', ex.output) + return ex.output + + + def test_unhandled_nonstd_exception_aborts(self): + # verify that plain unhandled throw aborts + self._do_test_unhandled_exception(_test_extension_cpp.test_exception_throw_nonstd) + + def test_unhandled_std_exception_aborts(self): + # verify that plain unhandled throw aborts + self._do_test_unhandled_exception(_test_extension_cpp.test_exception_throw_std) + + @unittest.skipIf(WIN, "XXX: This does not crash on Windows") + # Meaning the exception is getting lost somewhere... + def test_unhandled_std_exception_as_greenlet_function_aborts(self): + # verify that plain unhandled throw aborts + output = self._do_test_unhandled_exception('run_as_greenlet_target') + self.assertIn( + # We really expect this to be prefixed with "greenlet: Unhandled C++ exception:" + # as added by our handler for std::exception (see TUserGreenlet.cpp), but + # that's not correct everywhere --- our handler never runs before std::terminate + # gets called (for example, on arm32). + 'Thrown from an extension.', + output + ) + + def test_unhandled_exception_in_greenlet_aborts(self): + # verify that unhandled throw called in greenlet aborts too + self._do_test_unhandled_exception('run_unhandled_exception_in_greenlet_aborts') + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/Lib/site-packages/greenlet/tests/test_extension_interface.py b/venv/Lib/site-packages/greenlet/tests/test_extension_interface.py new file mode 100644 index 00000000..34b66567 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_extension_interface.py @@ -0,0 +1,115 @@ +from __future__ import print_function +from __future__ import absolute_import + +import sys + +import greenlet +from . import _test_extension +from . import TestCase + +# pylint:disable=c-extension-no-member + +class CAPITests(TestCase): + def test_switch(self): + self.assertEqual( + 50, _test_extension.test_switch(greenlet.greenlet(lambda: 50))) + + def test_switch_kwargs(self): + def adder(x, y): + return x * y + g = greenlet.greenlet(adder) + self.assertEqual(6, _test_extension.test_switch_kwargs(g, x=3, y=2)) + + def test_setparent(self): + # pylint:disable=disallowed-name + def foo(): + def bar(): + greenlet.getcurrent().parent.switch() + + # This final switch should go back to the main greenlet, since + # the test_setparent() function in the C extension should have + # reparented this greenlet. 
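+                # (Editorial aside, not in the vendored test: after
+                #     PyGreenlet_SetParent(child, new_parent)
+                # a ``parent.switch()`` made from inside ``child`` now goes
+                # to ``new_parent``, which is what the next line relies on.)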
+ greenlet.getcurrent().parent.switch() + raise AssertionError("Should never have reached this code") + child = greenlet.greenlet(bar) + child.switch() + greenlet.getcurrent().parent.switch(child) + greenlet.getcurrent().parent.throw( + AssertionError("Should never reach this code")) + foo_child = greenlet.greenlet(foo).switch() + self.assertEqual(None, _test_extension.test_setparent(foo_child)) + + def test_getcurrent(self): + _test_extension.test_getcurrent() + + def test_new_greenlet(self): + self.assertEqual(-15, _test_extension.test_new_greenlet(lambda: -15)) + + def test_raise_greenlet_dead(self): + self.assertRaises( + greenlet.GreenletExit, _test_extension.test_raise_dead_greenlet) + + def test_raise_greenlet_error(self): + self.assertRaises( + greenlet.error, _test_extension.test_raise_greenlet_error) + + def test_throw(self): + seen = [] + + def foo(): # pylint:disable=disallowed-name + try: + greenlet.getcurrent().parent.switch() + except ValueError: + seen.append(sys.exc_info()[1]) + except greenlet.GreenletExit: + raise AssertionError + g = greenlet.greenlet(foo) + g.switch() + _test_extension.test_throw(g) + self.assertEqual(len(seen), 1) + self.assertTrue( + isinstance(seen[0], ValueError), + "ValueError was not raised in foo()") + self.assertEqual( + str(seen[0]), + 'take that sucka!', + "message doesn't match") + + def test_non_traceback_param(self): + with self.assertRaises(TypeError) as exc: + _test_extension.test_throw_exact( + greenlet.getcurrent(), + Exception, + Exception(), + self + ) + self.assertEqual(str(exc.exception), + "throw() third argument must be a traceback object") + + def test_instance_of_wrong_type(self): + with self.assertRaises(TypeError) as exc: + _test_extension.test_throw_exact( + greenlet.getcurrent(), + Exception(), + BaseException(), + None, + ) + + self.assertEqual(str(exc.exception), + "instance exception may not have a separate value") + + def test_not_throwable(self): + with self.assertRaises(TypeError) as exc: + _test_extension.test_throw_exact( + greenlet.getcurrent(), + "abc", + None, + None, + ) + self.assertEqual(str(exc.exception), + "exceptions must be classes, or instances, not str") + + +if __name__ == '__main__': + import unittest + unittest.main() diff --git a/venv/Lib/site-packages/greenlet/tests/test_gc.py b/venv/Lib/site-packages/greenlet/tests/test_gc.py new file mode 100644 index 00000000..994addb9 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_gc.py @@ -0,0 +1,86 @@ +import gc + +import weakref + +import greenlet + + +from . import TestCase +from .leakcheck import fails_leakcheck +# These only work with greenlet gc support +# which is no longer optional. 
+assert greenlet.GREENLET_USE_GC + +class GCTests(TestCase): + def test_dead_circular_ref(self): + o = weakref.ref(greenlet.greenlet(greenlet.getcurrent).switch()) + gc.collect() + if o() is not None: + import sys + print("O IS NOT NONE.", sys.getrefcount(o())) + self.assertIsNone(o()) + self.assertFalse(gc.garbage, gc.garbage) + + def test_circular_greenlet(self): + class circular_greenlet(greenlet.greenlet): + self = None + o = circular_greenlet() + o.self = o + o = weakref.ref(o) + gc.collect() + self.assertIsNone(o()) + self.assertFalse(gc.garbage, gc.garbage) + + def test_inactive_ref(self): + class inactive_greenlet(greenlet.greenlet): + def __init__(self): + greenlet.greenlet.__init__(self, run=self.run) + + def run(self): + pass + o = inactive_greenlet() + o = weakref.ref(o) + gc.collect() + self.assertIsNone(o()) + self.assertFalse(gc.garbage, gc.garbage) + + @fails_leakcheck + def test_finalizer_crash(self): + # This test is designed to crash when active greenlets + # are made garbage collectable, until the underlying + # problem is resolved. How does it work: + # - order of object creation is important + # - array is created first, so it is moved to unreachable first + # - we create a cycle between a greenlet and this array + # - we create an object that participates in gc, is only + # referenced by a greenlet, and would corrupt gc lists + # on destruction, the easiest is to use an object with + # a finalizer + # - because array is the first object in unreachable it is + # cleared first, which causes all references to greenlet + # to disappear and causes greenlet to be destroyed, but since + # it is still live it causes a switch during gc, which causes + # an object with finalizer to be destroyed, which causes stack + # corruption and then a crash + + class object_with_finalizer(object): + def __del__(self): + pass + array = [] + parent = greenlet.getcurrent() + def greenlet_body(): + greenlet.getcurrent().object = object_with_finalizer() + try: + parent.switch() + except greenlet.GreenletExit: + print("Got greenlet exit!") + finally: + del greenlet.getcurrent().object + g = greenlet.greenlet(greenlet_body) + g.array = array + array.append(g) + g.switch() + del array + del g + greenlet.getcurrent() + gc.collect() diff --git a/venv/Lib/site-packages/greenlet/tests/test_generator.py b/venv/Lib/site-packages/greenlet/tests/test_generator.py new file mode 100644 index 00000000..ca4a644b --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_generator.py @@ -0,0 +1,59 @@ + +from greenlet import greenlet + +from . 
import TestCase + +class genlet(greenlet): + parent = None + def __init__(self, *args, **kwds): + self.args = args + self.kwds = kwds + + def run(self): + fn, = self.fn + fn(*self.args, **self.kwds) + + def __iter__(self): + return self + + def __next__(self): + self.parent = greenlet.getcurrent() + result = self.switch() + if self: + return result + + raise StopIteration + + next = __next__ + + +def Yield(value): + g = greenlet.getcurrent() + while not isinstance(g, genlet): + if g is None: + raise RuntimeError('yield outside a genlet') + g = g.parent + g.parent.switch(value) + + +def generator(func): + class Generator(genlet): + fn = (func,) + return Generator + +# ____________________________________________________________ + + +class GeneratorTests(TestCase): + def test_generator(self): + seen = [] + + def g(n): + for i in range(n): + seen.append(i) + Yield(i) + g = generator(g) + for _ in range(3): + for j in g(5): + seen.append(j) + self.assertEqual(seen, 3 * [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]) diff --git a/venv/Lib/site-packages/greenlet/tests/test_generator_nested.py b/venv/Lib/site-packages/greenlet/tests/test_generator_nested.py new file mode 100644 index 00000000..8d752a63 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_generator_nested.py @@ -0,0 +1,168 @@ + +from greenlet import greenlet +from . import TestCase +from .leakcheck import fails_leakcheck + +class genlet(greenlet): + parent = None + def __init__(self, *args, **kwds): + self.args = args + self.kwds = kwds + self.child = None + + def run(self): + # Note the function is packed in a tuple + # to avoid creating a bound method for it. + fn, = self.fn + fn(*self.args, **self.kwds) + + def __iter__(self): + return self + + def set_child(self, child): + self.child = child + + def __next__(self): + if self.child: + child = self.child + while child.child: + tmp = child + child = child.child + tmp.child = None + + result = child.switch() + else: + self.parent = greenlet.getcurrent() + result = self.switch() + + if self: + return result + + raise StopIteration + + next = __next__ + +def Yield(value, level=1): + g = greenlet.getcurrent() + + while level != 0: + if not isinstance(g, genlet): + raise RuntimeError('yield outside a genlet') + if level > 1: + g.parent.set_child(g) + g = g.parent + level -= 1 + + g.switch(value) + + +def Genlet(func): + class TheGenlet(genlet): + fn = (func,) + return TheGenlet + +# ____________________________________________________________ + + +def g1(n, seen): + for i in range(n): + seen.append(i + 1) + yield i + + +def g2(n, seen): + for i in range(n): + seen.append(i + 1) + Yield(i) + +g2 = Genlet(g2) + + +def nested(i): + Yield(i) + + +def g3(n, seen): + for i in range(n): + seen.append(i + 1) + nested(i) +g3 = Genlet(g3) + + +def a(n): + if n == 0: + return + for ii in ax(n - 1): + Yield(ii) + Yield(n) +ax = Genlet(a) + + +def perms(l): + if len(l) > 1: + for e in l: + # No syntactical sugar for generator expressions + x = [Yield([e] + p) for p in perms([x for x in l if x != e])] + assert x + else: + Yield(l) +perms = Genlet(perms) + + +def gr1(n): + for ii in range(1, n): + Yield(ii) + Yield(ii * ii, 2) + +gr1 = Genlet(gr1) + + +def gr2(n, seen): + for ii in gr1(n): + seen.append(ii) + +gr2 = Genlet(gr2) + + +class NestedGeneratorTests(TestCase): + def test_layered_genlets(self): + seen = [] + for ii in gr2(5, seen): + seen.append(ii) + self.assertEqual(seen, [1, 1, 2, 4, 3, 9, 4, 16]) + + @fails_leakcheck + def test_permutations(self): + gen_perms = perms(list(range(4))) + 
permutations = list(gen_perms) + self.assertEqual(len(permutations), 4 * 3 * 2 * 1) + self.assertIn([0, 1, 2, 3], permutations) + self.assertIn([3, 2, 1, 0], permutations) + res = [] + for ii in zip(perms(list(range(4))), perms(list(range(3)))): + res.append(ii) + self.assertEqual( + res, + [([0, 1, 2, 3], [0, 1, 2]), ([0, 1, 3, 2], [0, 2, 1]), + ([0, 2, 1, 3], [1, 0, 2]), ([0, 2, 3, 1], [1, 2, 0]), + ([0, 3, 1, 2], [2, 0, 1]), ([0, 3, 2, 1], [2, 1, 0])]) + # XXX Test to make sure we are working as a generator expression + + def test_genlet_simple(self): + for g in g1, g2, g3: + seen = [] + for _ in range(3): + for j in g(5, seen): + seen.append(j) + self.assertEqual(seen, 3 * [1, 0, 2, 1, 3, 2, 4, 3, 5, 4]) + + def test_genlet_bad(self): + try: + Yield(10) + except RuntimeError: + pass + + def test_nested_genlets(self): + seen = [] + for ii in ax(5): + seen.append(ii) diff --git a/venv/Lib/site-packages/greenlet/tests/test_greenlet.py b/venv/Lib/site-packages/greenlet/tests/test_greenlet.py new file mode 100644 index 00000000..fd05c0db --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_greenlet.py @@ -0,0 +1,1327 @@ +import gc +import sys +import time +import threading +import unittest + +from abc import ABCMeta +from abc import abstractmethod + +import greenlet +from greenlet import greenlet as RawGreenlet +from . import TestCase +from . import RUNNING_ON_MANYLINUX +from . import PY313 +from . import PY314 +from .leakcheck import fails_leakcheck + + +# We manually manage locks in many tests +# pylint:disable=consider-using-with +# pylint:disable=too-many-public-methods +# This module is quite large. +# TODO: Refactor into separate test files. For example, +# put all the regression tests that used to produce +# crashes in test_greenlet_no_crash; put tests that DO deliberately crash +# the interpreter into test_greenlet_crash. +# pylint:disable=too-many-lines + +class SomeError(Exception): + pass + + +def fmain(seen): + try: + greenlet.getcurrent().parent.switch() + except: + seen.append(sys.exc_info()[0]) + raise + raise SomeError + + +def send_exception(g, exc): + # note: send_exception(g, exc) can be now done with g.throw(exc). + # the purpose of this test is to explicitly check the propagation rules. 
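+    # (Editorial aside, not in the vendored test: the modern spelling is
+    #     g.throw(SomeError)
+    # which raises the exception inside ``g`` at its last switch point. The
+    # helper below instead raises inside a child whose parent is ``g``, so
+    # the error must propagate along the parent chain to reach ``g``.)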
+ def crasher(exc): + raise exc + g1 = RawGreenlet(crasher, parent=g) + g1.switch(exc) + + +class TestGreenlet(TestCase): + + def _do_simple_test(self): + lst = [] + + def f(): + lst.append(1) + greenlet.getcurrent().parent.switch() + lst.append(3) + g = RawGreenlet(f) + lst.append(0) + g.switch() + lst.append(2) + g.switch() + lst.append(4) + self.assertEqual(lst, list(range(5))) + + def test_simple(self): + self._do_simple_test() + + def test_switch_no_run_raises_AttributeError(self): + g = RawGreenlet() + with self.assertRaises(AttributeError) as exc: + g.switch() + + self.assertIn("run", str(exc.exception)) + + def test_throw_no_run_raises_AttributeError(self): + g = RawGreenlet() + with self.assertRaises(AttributeError) as exc: + g.throw(SomeError) + + self.assertIn("run", str(exc.exception)) + + def test_parent_equals_None(self): + g = RawGreenlet(parent=None) + self.assertIsNotNone(g) + self.assertIs(g.parent, greenlet.getcurrent()) + + def test_run_equals_None(self): + g = RawGreenlet(run=None) + self.assertIsNotNone(g) + self.assertIsNone(g.run) + + def test_two_children(self): + lst = [] + + def f(): + lst.append(1) + greenlet.getcurrent().parent.switch() + lst.extend([1, 1]) + g = RawGreenlet(f) + h = RawGreenlet(f) + g.switch() + self.assertEqual(len(lst), 1) + h.switch() + self.assertEqual(len(lst), 2) + h.switch() + self.assertEqual(len(lst), 4) + self.assertEqual(h.dead, True) + g.switch() + self.assertEqual(len(lst), 6) + self.assertEqual(g.dead, True) + + def test_two_recursive_children(self): + lst = [] + + def f(): + lst.append('b') + greenlet.getcurrent().parent.switch() + + def g(): + lst.append('a') + g = RawGreenlet(f) + g.switch() + lst.append('c') + self.assertEqual(sys.getrefcount(g), 2 if not PY314 else 1) + g = RawGreenlet(g) + # Python 3.14 elides reference counting operations + # in some cases. See https://github.com/python/cpython/pull/130708 + self.assertEqual(sys.getrefcount(g), 2 if not PY314 else 1) + g.switch() + self.assertEqual(lst, ['a', 'b', 'c']) + # Just the one in this frame, plus the one on the stack we pass to the function + self.assertEqual(sys.getrefcount(g), 2 if not PY314 else 1) + + def test_threads(self): + success = [] + + def f(): + self._do_simple_test() + success.append(True) + ths = [threading.Thread(target=f) for i in range(10)] + for th in ths: + th.start() + for th in ths: + th.join(10) + self.assertEqual(len(success), len(ths)) + + def test_exception(self): + seen = [] + g1 = RawGreenlet(fmain) + g2 = RawGreenlet(fmain) + g1.switch(seen) + g2.switch(seen) + g2.parent = g1 + + self.assertEqual(seen, []) + #with self.assertRaises(SomeError): + # p("***Switching back") + # g2.switch() + # Creating this as a bound method can reveal bugs that + # are hidden on newer versions of Python that avoid creating + # bound methods for direct expressions; IOW, don't use the `with` + # form! 
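(As background for the assertion that follows.) An exception that escapes a greenlet's `run` is re-raised in the greenlet that resumes control, ultimately the parent, which is exactly what `assertRaises(SomeError, g2.switch)` relies on. A minimal sketch of the propagation rule, with hypothetical names:

```python
from greenlet import greenlet

class SomeError(Exception):
    pass

def boom():
    raise SomeError("escaped the greenlet")

g = greenlet(boom)
try:
    g.switch()          # the exception surfaces here, in the switcher
except SomeError as e:
    print("caught:", e)
print(g.dead)           # True: the greenlet ran to (abnormal) completion
```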
+ self.assertRaises(SomeError, g2.switch) + self.assertEqual(seen, [SomeError]) + + value = g2.switch() + self.assertEqual(value, ()) + self.assertEqual(seen, [SomeError]) + + value = g2.switch(25) + self.assertEqual(value, 25) + self.assertEqual(seen, [SomeError]) + + + def test_send_exception(self): + seen = [] + g1 = RawGreenlet(fmain) + g1.switch(seen) + self.assertRaises(KeyError, send_exception, g1, KeyError) + self.assertEqual(seen, [KeyError]) + + def test_dealloc(self): + seen = [] + g1 = RawGreenlet(fmain) + g2 = RawGreenlet(fmain) + g1.switch(seen) + g2.switch(seen) + self.assertEqual(seen, []) + del g1 + gc.collect() + self.assertEqual(seen, [greenlet.GreenletExit]) + del g2 + gc.collect() + self.assertEqual(seen, [greenlet.GreenletExit, greenlet.GreenletExit]) + + def test_dealloc_catches_GreenletExit_throws_other(self): + def run(): + try: + greenlet.getcurrent().parent.switch() + except greenlet.GreenletExit: + raise SomeError from None + + g = RawGreenlet(run) + g.switch() + # Destroying the only reference to the greenlet causes it + # to get GreenletExit; when it in turn raises, even though we're the parent + # we don't get the exception, it just gets printed. + # When we run on 3.8 only, we can use sys.unraisablehook + oldstderr = sys.stderr + from io import StringIO + stderr = sys.stderr = StringIO() + try: + del g + finally: + sys.stderr = oldstderr + + v = stderr.getvalue() + self.assertIn("Exception", v) + self.assertIn('ignored', v) + self.assertIn("SomeError", v) + + + @unittest.skipIf( + PY313 and RUNNING_ON_MANYLINUX, + "Sometimes flaky (getting one GreenletExit in the second list)" + # Probably due to funky timing interactions? + # TODO: FIXME Make that work. + ) + + def test_dealloc_other_thread(self): + seen = [] + someref = [] + + bg_glet_created_running_and_no_longer_ref_in_bg = threading.Event() + fg_ref_released = threading.Event() + bg_should_be_clear = threading.Event() + ok_to_exit_bg_thread = threading.Event() + + def f(): + g1 = RawGreenlet(fmain) + g1.switch(seen) + someref.append(g1) + del g1 + gc.collect() + + bg_glet_created_running_and_no_longer_ref_in_bg.set() + fg_ref_released.wait(3) + + RawGreenlet() # trigger release + bg_should_be_clear.set() + ok_to_exit_bg_thread.wait(3) + RawGreenlet() # One more time + + t = threading.Thread(target=f) + t.start() + bg_glet_created_running_and_no_longer_ref_in_bg.wait(10) + + self.assertEqual(seen, []) + self.assertEqual(len(someref), 1) + del someref[:] + gc.collect() + # g1 is not released immediately because it's from another thread + self.assertEqual(seen, []) + fg_ref_released.set() + bg_should_be_clear.wait(3) + try: + self.assertEqual(seen, [greenlet.GreenletExit]) + finally: + ok_to_exit_bg_thread.set() + t.join(10) + del seen[:] + del someref[:] + + def test_frame(self): + def f1(): + f = sys._getframe(0) # pylint:disable=protected-access + self.assertEqual(f.f_back, None) + greenlet.getcurrent().parent.switch(f) + return "meaning of life" + g = RawGreenlet(f1) + frame = g.switch() + self.assertTrue(frame is g.gr_frame) + self.assertTrue(g) + + from_g = g.switch() + self.assertFalse(g) + self.assertEqual(from_g, 'meaning of life') + self.assertEqual(g.gr_frame, None) + + def test_thread_bug(self): + def runner(x): + g = RawGreenlet(lambda: time.sleep(x)) + g.switch() + t1 = threading.Thread(target=runner, args=(0.2,)) + t2 = threading.Thread(target=runner, args=(0.3,)) + t1.start() + t2.start() + t1.join(10) + t2.join(10) + + def test_switch_kwargs(self): + def run(a, b): + self.assertEqual(a, 4) 
+ self.assertEqual(b, 2) + return 42 + x = RawGreenlet(run).switch(a=4, b=2) + self.assertEqual(x, 42) + + def test_switch_kwargs_to_parent(self): + def run(x): + greenlet.getcurrent().parent.switch(x=x) + greenlet.getcurrent().parent.switch(2, x=3) + return x, x ** 2 + g = RawGreenlet(run) + self.assertEqual({'x': 3}, g.switch(3)) + self.assertEqual(((2,), {'x': 3}), g.switch()) + self.assertEqual((3, 9), g.switch()) + + def test_switch_to_another_thread(self): + data = {} + created_event = threading.Event() + done_event = threading.Event() + + def run(): + data['g'] = RawGreenlet(lambda: None) + created_event.set() + done_event.wait(10) + thread = threading.Thread(target=run) + thread.start() + created_event.wait(10) + with self.assertRaises(greenlet.error): + data['g'].switch() + done_event.set() + thread.join(10) + # XXX: Should handle this automatically + data.clear() + + def test_exc_state(self): + def f(): + try: + raise ValueError('fun') + except: # pylint:disable=bare-except + exc_info = sys.exc_info() + RawGreenlet(h).switch() + self.assertEqual(exc_info, sys.exc_info()) + + def h(): + self.assertEqual(sys.exc_info(), (None, None, None)) + + RawGreenlet(f).switch() + + def test_instance_dict(self): + def f(): + greenlet.getcurrent().test = 42 + def deldict(g): + del g.__dict__ + def setdict(g, value): + g.__dict__ = value + g = RawGreenlet(f) + self.assertEqual(g.__dict__, {}) + g.switch() + self.assertEqual(g.test, 42) + self.assertEqual(g.__dict__, {'test': 42}) + g.__dict__ = g.__dict__ + self.assertEqual(g.__dict__, {'test': 42}) + self.assertRaises(TypeError, deldict, g) + self.assertRaises(TypeError, setdict, g, 42) + + def test_running_greenlet_has_no_run(self): + has_run = [] + def func(): + has_run.append( + hasattr(greenlet.getcurrent(), 'run') + ) + + g = RawGreenlet(func) + g.switch() + self.assertEqual(has_run, [False]) + + def test_deepcopy(self): + import copy + self.assertRaises(TypeError, copy.copy, RawGreenlet()) + self.assertRaises(TypeError, copy.deepcopy, RawGreenlet()) + + def test_parent_restored_on_kill(self): + hub = RawGreenlet(lambda: None) + main = greenlet.getcurrent() + result = [] + def worker(): + try: + # Wait to be killed by going back to the test. + main.switch() + except greenlet.GreenletExit: + # Resurrect and switch to parent + result.append(greenlet.getcurrent().parent) + result.append(greenlet.getcurrent()) + hub.switch() + g = RawGreenlet(worker, parent=hub) + g.switch() + # delete the only reference, thereby raising GreenletExit + del g + self.assertTrue(result) + self.assertIs(result[0], main) + self.assertIs(result[1].parent, hub) + # Delete them, thereby breaking the cycle between the greenlet + # and the frame, which otherwise would never be collectable + # XXX: We should be able to automatically fix this. 
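(The cleanup `del` statements for `test_parent_restored_on_kill` continue below.) The kill-on-dealloc behavior that test depends on can be shown in isolation: dropping the last reference to an active greenlet raises `GreenletExit` into it. A sketch:

```python
from greenlet import greenlet, getcurrent, GreenletExit

log = []

def worker():
    try:
        getcurrent().parent.switch()   # park here until we're killed
    except GreenletExit:
        log.append("killed")           # raised when the last reference dies

g = greenlet(worker)
g.switch()                             # start the worker; it parks itself
del g                                  # last reference gone -> GreenletExit
print(log)                             # ['killed']
```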
+ del result[:] + hub = None + main = None + + def test_parent_return_failure(self): + # No run causes AttributeError on switch + g1 = RawGreenlet() + # Greenlet that implicitly switches to parent + g2 = RawGreenlet(lambda: None, parent=g1) + # AttributeError should propagate to us, no fatal errors + with self.assertRaises(AttributeError): + g2.switch() + + def test_throw_exception_not_lost(self): + class mygreenlet(RawGreenlet): + def __getattribute__(self, name): + try: + raise Exception # pylint:disable=broad-exception-raised + except: # pylint:disable=bare-except + pass + return RawGreenlet.__getattribute__(self, name) + g = mygreenlet(lambda: None) + self.assertRaises(SomeError, g.throw, SomeError()) + + @fails_leakcheck + def _do_test_throw_to_dead_thread_doesnt_crash(self, wait_for_cleanup=False): + result = [] + def worker(): + greenlet.getcurrent().parent.switch() + + def creator(): + g = RawGreenlet(worker) + g.switch() + result.append(g) + if wait_for_cleanup: + # Let this greenlet eventually be cleaned up. + g.switch() + greenlet.getcurrent() + t = threading.Thread(target=creator) + t.start() + t.join(10) + del t + # But, depending on the operating system, the thread + # deallocator may not actually have run yet! So we can't be + # sure about the error message unless we wait. + if wait_for_cleanup: + self.wait_for_pending_cleanups() + with self.assertRaises(greenlet.error) as exc: + result[0].throw(SomeError) + + if not wait_for_cleanup: + s = str(exc.exception) + self.assertTrue( + s == "cannot switch to a different thread (which happens to have exited)" + or 'Cannot switch' in s + ) + else: + self.assertEqual( + str(exc.exception), + "cannot switch to a different thread (which happens to have exited)", + ) + + if hasattr(result[0].gr_frame, 'clear'): + # The frame is actually executing (it thinks), we can't clear it. + with self.assertRaises(RuntimeError): + result[0].gr_frame.clear() + # Unfortunately, this doesn't actually clear the references, they're in the + # fast local array. + if not wait_for_cleanup: + # f_locals has no clear method in Python 3.13 + if hasattr(result[0].gr_frame.f_locals, 'clear'): + result[0].gr_frame.f_locals.clear() + else: + self.assertIsNone(result[0].gr_frame) + + del creator + worker = None + del result[:] + # XXX: we ought to be able to automatically fix this. 
+ # See issue 252 + self.expect_greenlet_leak = True # direct us not to wait for it to go away + + @fails_leakcheck + def test_throw_to_dead_thread_doesnt_crash(self): + self._do_test_throw_to_dead_thread_doesnt_crash() + + def test_throw_to_dead_thread_doesnt_crash_wait(self): + self._do_test_throw_to_dead_thread_doesnt_crash(True) + + @fails_leakcheck + def test_recursive_startup(self): + class convoluted(RawGreenlet): + def __init__(self): + RawGreenlet.__init__(self) + self.count = 0 + def __getattribute__(self, name): + if name == 'run' and self.count == 0: + self.count = 1 + self.switch(43) + return RawGreenlet.__getattribute__(self, name) + def run(self, value): + while True: + self.parent.switch(value) + g = convoluted() + self.assertEqual(g.switch(42), 43) + # Exits the running greenlet, otherwise it leaks + # XXX: We should be able to automatically fix this + #g.throw(greenlet.GreenletExit) + #del g + self.expect_greenlet_leak = True + + def test_threaded_updatecurrent(self): + # released when main thread should execute + lock1 = threading.Lock() + lock1.acquire() + # released when another thread should execute + lock2 = threading.Lock() + lock2.acquire() + class finalized(object): + def __del__(self): + # happens while in green_updatecurrent() in main greenlet + # should be very careful not to accidentally call it again + # at the same time we must make sure another thread executes + lock2.release() + lock1.acquire() + # now ts_current belongs to another thread + def deallocator(): + greenlet.getcurrent().parent.switch() + def fthread(): + lock2.acquire() + greenlet.getcurrent() + del g[0] + lock1.release() + lock2.acquire() + greenlet.getcurrent() + lock1.release() + main = greenlet.getcurrent() + g = [RawGreenlet(deallocator)] + g[0].bomb = finalized() + g[0].switch() + t = threading.Thread(target=fthread) + t.start() + # let another thread grab ts_current and deallocate g[0] + lock2.release() + lock1.acquire() + # this is the corner stone + # getcurrent() will notice that ts_current belongs to another thread + # and start the update process, which would notice that g[0] should + # be deallocated, and that will execute an object's finalizer. Now, + # that object will let another thread run so it can grab ts_current + # again, which would likely crash the interpreter if there's no + # check for this case at the end of green_updatecurrent(). This test + # passes if getcurrent() returns correct result, but it's likely + # to randomly crash if it's not anyway. + self.assertEqual(greenlet.getcurrent(), main) + # wait for another thread to complete, just in case + t.join(10) + + def test_dealloc_switch_args_not_lost(self): + seen = [] + def worker(): + # wait for the value + value = greenlet.getcurrent().parent.switch() + # delete all references to ourself + del worker[0] + initiator.parent = greenlet.getcurrent().parent + # switch to main with the value, but because + # ts_current is the last reference to us we + # return here immediately, where we resurrect ourself. 
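(`test_dealloc_switch_args_not_lost` continues below.) Several of the tests above hinge on greenlet's thread affinity: a greenlet may only be switched to from the thread that created it. A distilled sketch of the `greenlet.error` this produces:

```python
import threading
import greenlet   # module form, for greenlet.error and greenlet.greenlet

holder = {}
created = threading.Event()
release = threading.Event()

def bg():
    holder['g'] = greenlet.greenlet(lambda: None)  # owned by this thread
    created.set()
    release.wait(10)

t = threading.Thread(target=bg)
t.start()
created.wait(10)
try:
    holder['g'].switch()        # switching across threads is refused
except greenlet.error as e:
    print("refused:", e)
release.set()
t.join(10)
```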
+ try: + greenlet.getcurrent().parent.switch(value) + finally: + seen.append(greenlet.getcurrent()) + def initiator(): + return 42 # implicitly falls thru to parent + + worker = [RawGreenlet(worker)] + + worker[0].switch() # prime worker + initiator = RawGreenlet(initiator, worker[0]) + value = initiator.switch() + self.assertTrue(seen) + self.assertEqual(value, 42) + + def test_tuple_subclass(self): + # The point of this test is to see what happens when a custom + # tuple subclass is used as an object passed directly to the C + # function ``green_switch``; part of ``green_switch`` checks + # the ``len()`` of the ``args`` tuple, and that can call back + # into Python. Here, when it calls back into Python, we + # recursively enter ``green_switch`` again. + + # This test is really only relevant on Python 2. The builtin + # `apply` function directly passes the given args tuple object + # to the underlying function, whereas the Python 3 version + # unpacks and repacks into an actual tuple. This could still + # happen using the C API on Python 3 though. We should write a + # builtin version of apply() ourself. + def _apply(func, a, k): + func(*a, **k) + + class mytuple(tuple): + def __len__(self): + greenlet.getcurrent().switch() + return tuple.__len__(self) + args = mytuple() + kwargs = dict(a=42) + def switchapply(): + _apply(greenlet.getcurrent().parent.switch, args, kwargs) + g = RawGreenlet(switchapply) + self.assertEqual(g.switch(), kwargs) + + def test_abstract_subclasses(self): + AbstractSubclass = ABCMeta( + 'AbstractSubclass', + (RawGreenlet,), + {'run': abstractmethod(lambda self: None)}) + + class BadSubclass(AbstractSubclass): + pass + + class GoodSubclass(AbstractSubclass): + def run(self): + pass + + GoodSubclass() # should not raise + self.assertRaises(TypeError, BadSubclass) + + def test_implicit_parent_with_threads(self): + if not gc.isenabled(): + return # cannot test with disabled gc + N = gc.get_threshold()[0] + if N < 50: + return # cannot test with such a small N + def attempt(): + lock1 = threading.Lock() + lock1.acquire() + lock2 = threading.Lock() + lock2.acquire() + recycled = [False] + def another_thread(): + lock1.acquire() # wait for gc + greenlet.getcurrent() # update ts_current + lock2.release() # release gc + t = threading.Thread(target=another_thread) + t.start() + class gc_callback(object): + def __del__(self): + lock1.release() + lock2.acquire() + recycled[0] = True + class garbage(object): + def __init__(self): + self.cycle = self + self.callback = gc_callback() + l = [] + x = range(N*2) + current = greenlet.getcurrent() + g = garbage() + for _ in x: + g = None # lose reference to garbage + if recycled[0]: + # gc callback called prematurely + t.join(10) + return False + last = RawGreenlet() + if recycled[0]: + break # yes! gc called in green_new + l.append(last) # increase allocation counter + else: + # gc callback not called when expected + gc.collect() + if recycled[0]: + t.join(10) + return False + self.assertEqual(last.parent, current) + for g in l: + self.assertEqual(g.parent, current) + return True + for _ in range(5): + if attempt(): + break + + def test_issue_245_reference_counting_subclass_no_threads(self): + # https://github.com/python-greenlet/greenlet/issues/245 + # Before the fix, this crashed pretty reliably on + # Python 3.10, at least on macOS; but much less reliably on other + # interpreters (memory layout must have changed). + # The threaded test crashed more reliably on more interpreters. 
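(The body of the issue-245 test follows below.) One capability the preceding tests exercise, shown standalone: greenlet cooperates with `abc`, so abstract subclasses refuse instantiation until `run` is provided. A sketch mirroring `test_abstract_subclasses`:

```python
from abc import ABCMeta, abstractmethod
import greenlet

# Instantiation is refused until the abstract run() is overridden.
AbstractGreenlet = ABCMeta(
    'AbstractGreenlet',
    (greenlet.greenlet,),
    {'run': abstractmethod(lambda self: None)})

class Concrete(AbstractGreenlet):
    def run(self):
        return None

Concrete().switch()             # fine: run is concrete
try:
    AbstractGreenlet()
except TypeError as e:
    print("refused:", e)        # can't instantiate abstract class
```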
+ from greenlet import getcurrent + from greenlet import GreenletExit + + class Greenlet(RawGreenlet): + pass + + initial_refs = sys.getrefcount(Greenlet) + # This has to be an instance variable because + # Python 2 raises a SyntaxError if we delete a local + # variable referenced in an inner scope. + self.glets = [] # pylint:disable=attribute-defined-outside-init + + def greenlet_main(): + try: + getcurrent().parent.switch() + except GreenletExit: + self.glets.append(getcurrent()) + + # Before the + for _ in range(10): + Greenlet(greenlet_main).switch() + + del self.glets + self.assertEqual(sys.getrefcount(Greenlet), initial_refs) + + @unittest.skipIf( + PY313 and RUNNING_ON_MANYLINUX, + "The manylinux images appear to hang on this test on 3.13rc2" + # Or perhaps I just got tired of waiting for the 450s timeout. + # Still, it shouldn't take anywhere near that long. Does not reproduce in + # Ubuntu images, on macOS or Windows. + ) + def test_issue_245_reference_counting_subclass_threads(self): + # https://github.com/python-greenlet/greenlet/issues/245 + from threading import Thread + from threading import Event + + from greenlet import getcurrent + + class MyGreenlet(RawGreenlet): + pass + + glets = [] + ref_cleared = Event() + + def greenlet_main(): + getcurrent().parent.switch() + + def thread_main(greenlet_running_event): + mine = MyGreenlet(greenlet_main) + glets.append(mine) + # The greenlets being deleted must be active + mine.switch() + # Don't keep any reference to it in this thread + del mine + # Let main know we published our greenlet. + greenlet_running_event.set() + # Wait for main to let us know the references are + # gone and the greenlet objects no longer reachable + ref_cleared.wait(10) + # The creating thread must call getcurrent() (or a few other + # greenlet APIs) because that's when the thread-local list of dead + # greenlets gets cleared. + getcurrent() + + # We start with 3 references to the subclass: + # - This module + # - Its __mro__ + # - The __subclassess__ attribute of greenlet + # - (If we call gc.get_referents(), we find four entries, including + # some other tuple ``(greenlet)`` that I'm not sure about but must be part + # of the machinery.) + # + # On Python 3.10 it's often enough to just run 3 threads; on Python 2.7, + # more threads are needed, and the results are still + # non-deterministic. Presumably the memory layouts are different + initial_refs = sys.getrefcount(MyGreenlet) + thread_ready_events = [] + for _ in range( + initial_refs + 45 + ): + event = Event() + thread = Thread(target=thread_main, args=(event,)) + thread_ready_events.append(event) + thread.start() + + + for done_event in thread_ready_events: + done_event.wait(10) + + + del glets[:] + ref_cleared.set() + # Let any other thread run; it will crash the interpreter + # if not fixed (or silently corrupt memory and we possibly crash + # later). 
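(The test's final assertions follow below.) The invariant both issue-245 tests check is narrow: once its instances are collected, a greenlet subclass should be left with exactly the references it started with. Distilled into a single-threaded sketch:

```python
import gc
import sys
import greenlet

class MyGreenlet(greenlet.greenlet):
    pass

before = sys.getrefcount(MyGreenlet)
for _ in range(10):
    MyGreenlet(lambda: None).switch()   # run to completion, then drop it
gc.collect()
print(sys.getrefcount(MyGreenlet) == before)   # True once instances are gone
```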
+        self.wait_for_pending_cleanups()
+        self.assertEqual(sys.getrefcount(MyGreenlet), initial_refs)
+
+    def test_falling_off_end_switches_to_unstarted_parent_raises_error(self):
+        def no_args():
+            return 13
+
+        parent_never_started = RawGreenlet(no_args)
+
+        def leaf():
+            return 42
+
+        child = RawGreenlet(leaf, parent_never_started)
+
+        # Because the run function takes no arguments
+        with self.assertRaises(TypeError):
+            child.switch()
+
+    def test_falling_off_end_switches_to_unstarted_parent_works(self):
+        def one_arg(x):
+            return (x, 24)
+
+        parent_never_started = RawGreenlet(one_arg)
+
+        def leaf():
+            return 42
+
+        child = RawGreenlet(leaf, parent_never_started)
+
+        result = child.switch()
+        self.assertEqual(result, (42, 24))
+
+    def test_switch_to_dead_greenlet_with_unstarted_perverse_parent(self):
+        class Parent(RawGreenlet):
+            def __getattribute__(self, name):
+                if name == 'run':
+                    raise SomeError
+
+
+        parent_never_started = Parent()
+        seen = []
+        child = RawGreenlet(lambda: seen.append(42), parent_never_started)
+        # Because we automatically start the parent when the child is
+        # finished
+        with self.assertRaises(SomeError):
+            child.switch()
+
+        self.assertEqual(seen, [42])
+
+        with self.assertRaises(SomeError):
+            child.switch()
+        self.assertEqual(seen, [42])
+
+    def test_switch_to_dead_greenlet_reparent(self):
+        seen = []
+        parent_never_started = RawGreenlet(lambda: seen.append(24))
+        child = RawGreenlet(lambda: seen.append(42))
+
+        child.switch()
+        self.assertEqual(seen, [42])
+
+        child.parent = parent_never_started
+        # This actually is the same as switching to the parent.
+        result = child.switch()
+        self.assertIsNone(result)
+        self.assertEqual(seen, [42, 24])
+
+    def test_can_access_f_back_of_suspended_greenlet(self):
+        # This tests our frame rewriting to work around Python 3.12+ having
+        # some interpreter frames on the C stack. It will crash in the absence
+        # of that logic.
+        main = greenlet.getcurrent()
+
+        def outer():
+            inner()
+
+        def inner():
+            main.switch(sys._getframe(0))
+
+        hub = RawGreenlet(outer)
+        # start it
+        hub.switch()
+
+        # start another greenlet to make sure we aren't relying on
+        # anything in `hub` still being on the C stack
+        unrelated = RawGreenlet(lambda: None)
+        unrelated.switch()
+
+        # now it is suspended
+        self.assertIsNotNone(hub.gr_frame)
+        self.assertEqual(hub.gr_frame.f_code.co_name, "inner")
+        self.assertIsNotNone(hub.gr_frame.f_back)
+        self.assertEqual(hub.gr_frame.f_back.f_code.co_name, "outer")
+        # The next line is what would crash
+        self.assertIsNone(hub.gr_frame.f_back.f_back)
+
+    def test_get_stack_with_nested_c_calls(self):
+        from functools import partial
+        from . import _test_extension_cpp
+
+        def recurse(v):
+            if v > 0:
+                return v * _test_extension_cpp.test_call(partial(recurse, v - 1))
+            return greenlet.getcurrent().parent.switch()
+
+        gr = RawGreenlet(recurse)
+        gr.switch(5)
+        frame = gr.gr_frame
+        for i in range(5):
+            self.assertEqual(frame.f_locals["v"], i)
+            frame = frame.f_back
+        self.assertEqual(frame.f_locals["v"], 5)
+        self.assertIsNone(frame.f_back)
+        self.assertEqual(gr.switch(10), 1200)  # 1200 = 5! * 10
+
+    def test_frames_always_exposed(self):
+        # On Python 3.12 this will crash if we don't set the
+        # gr_frames_always_exposed attribute.
More background: + # https://github.com/python-greenlet/greenlet/issues/388 + main = greenlet.getcurrent() + + def outer(): + inner(sys._getframe(0)) + + def inner(frame): + main.switch(frame) + + gr = RawGreenlet(outer) + frame = gr.switch() + + # Do something else to clobber the part of the C stack used by `gr`, + # so we can't skate by on "it just happened to still be there" + unrelated = RawGreenlet(lambda: None) + unrelated.switch() + + self.assertEqual(frame.f_code.co_name, "outer") + # The next line crashes on 3.12 if we haven't exposed the frames. + self.assertIsNone(frame.f_back) + + +class TestGreenletSetParentErrors(TestCase): + def test_threaded_reparent(self): + data = {} + created_event = threading.Event() + done_event = threading.Event() + + def run(): + data['g'] = RawGreenlet(lambda: None) + created_event.set() + done_event.wait(10) + + def blank(): + greenlet.getcurrent().parent.switch() + + thread = threading.Thread(target=run) + thread.start() + created_event.wait(10) + g = RawGreenlet(blank) + g.switch() + with self.assertRaises(ValueError) as exc: + g.parent = data['g'] + done_event.set() + thread.join(10) + + self.assertEqual(str(exc.exception), "parent cannot be on a different thread") + + def test_unexpected_reparenting(self): + another = [] + def worker(): + g = RawGreenlet(lambda: None) + another.append(g) + g.switch() + t = threading.Thread(target=worker) + t.start() + t.join(10) + # The first time we switch (running g_initialstub(), which is + # when we look up the run attribute) we attempt to change the + # parent to one from another thread (which also happens to be + # dead). ``g_initialstub()`` should detect this and raise a + # greenlet error. + # + # EXCEPT: With the fix for #252, this is actually detected + # sooner, when setting the parent itself. Prior to that fix, + # the main greenlet from the background thread kept a valid + # value for ``run_info``, and appeared to be a valid parent + # until we actually started the greenlet. But now that it's + # cleared, this test is catching whether ``green_setparent`` + # can detect the dead thread. + # + # Further refactoring once again changes this back to a greenlet.error + # + # We need to wait for the cleanup to happen, but we're + # deliberately leaking a main greenlet here. + self.wait_for_pending_cleanups(initial_main_greenlets=self.main_greenlets_before_test + 1) + + class convoluted(RawGreenlet): + def __getattribute__(self, name): + if name == 'run': + self.parent = another[0] # pylint:disable=attribute-defined-outside-init + return RawGreenlet.__getattribute__(self, name) + g = convoluted(lambda: None) + with self.assertRaises(greenlet.error) as exc: + g.switch() + self.assertEqual(str(exc.exception), + "cannot switch to a different thread (which happens to have exited)") + del another[:] + + def test_unexpected_reparenting_thread_running(self): + # Like ``test_unexpected_reparenting``, except the background thread is + # actually still alive. 
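(The body of `test_unexpected_reparenting_thread_running` follows below.) The same thread-affinity rule applies to the `parent` attribute: reassigning it across threads is rejected outright, which is the `ValueError` that `test_threaded_reparent` above expects. A sketch:

```python
import threading
import greenlet

other = {}
made = threading.Event()
done = threading.Event()

def bg():
    other['g'] = greenlet.greenlet(lambda: None)   # lives on this thread
    made.set()
    done.wait(10)

t = threading.Thread(target=bg)
t.start()
made.wait(10)

def parked():
    greenlet.getcurrent().parent.switch()          # sit suspended

g = greenlet.greenlet(parked)
g.switch()                     # start it; it parks and control returns
try:
    g.parent = other['g']      # cross-thread reparenting is refused
except ValueError as e:
    print(e)                   # 'parent cannot be on a different thread'
done.set()
t.join(10)
g.switch()                     # let the parked greenlet finish cleanly
```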
+ another = [] + switched_to_greenlet = threading.Event() + keep_main_alive = threading.Event() + def worker(): + g = RawGreenlet(lambda: None) + another.append(g) + g.switch() + switched_to_greenlet.set() + keep_main_alive.wait(10) + class convoluted(RawGreenlet): + def __getattribute__(self, name): + if name == 'run': + self.parent = another[0] # pylint:disable=attribute-defined-outside-init + return RawGreenlet.__getattribute__(self, name) + + t = threading.Thread(target=worker) + t.start() + + switched_to_greenlet.wait(10) + try: + g = convoluted(lambda: None) + + with self.assertRaises(greenlet.error) as exc: + g.switch() + self.assertIn("Cannot switch to a different thread", str(exc.exception)) + self.assertIn("Expected", str(exc.exception)) + self.assertIn("Current", str(exc.exception)) + finally: + keep_main_alive.set() + t.join(10) + # XXX: Should handle this automatically. + del another[:] + + def test_cannot_delete_parent(self): + worker = RawGreenlet(lambda: None) + self.assertIs(worker.parent, greenlet.getcurrent()) + + with self.assertRaises(AttributeError) as exc: + del worker.parent + self.assertEqual(str(exc.exception), "can't delete attribute") + + def test_cannot_delete_parent_of_main(self): + with self.assertRaises(AttributeError) as exc: + del greenlet.getcurrent().parent + self.assertEqual(str(exc.exception), "can't delete attribute") + + + def test_main_greenlet_parent_is_none(self): + # assuming we're in a main greenlet here. + self.assertIsNone(greenlet.getcurrent().parent) + + def test_set_parent_wrong_types(self): + def bg(): + # Go back to main. + greenlet.getcurrent().parent.switch() + + def check(glet): + for p in None, 1, self, "42": + with self.assertRaises(TypeError) as exc: + glet.parent = p + + self.assertEqual( + str(exc.exception), + "GreenletChecker: Expected any type of greenlet, not " + type(p).__name__) + + # First, not running + g = RawGreenlet(bg) + self.assertFalse(g) + check(g) + + # Then when running. + g.switch() + self.assertTrue(g) + check(g) + + # Let it finish + g.switch() + + + def test_trivial_cycle(self): + glet = RawGreenlet(lambda: None) + with self.assertRaises(ValueError) as exc: + glet.parent = glet + self.assertEqual(str(exc.exception), "cyclic parent chain") + + def test_trivial_cycle_main(self): + # This used to produce a ValueError, but we catch it earlier than that now. 
+ with self.assertRaises(AttributeError) as exc: + greenlet.getcurrent().parent = greenlet.getcurrent() + self.assertEqual(str(exc.exception), "cannot set the parent of a main greenlet") + + def test_deeper_cycle(self): + g1 = RawGreenlet(lambda: None) + g2 = RawGreenlet(lambda: None) + g3 = RawGreenlet(lambda: None) + + g1.parent = g2 + g2.parent = g3 + with self.assertRaises(ValueError) as exc: + g3.parent = g1 + self.assertEqual(str(exc.exception), "cyclic parent chain") + + +class TestRepr(TestCase): + + def assertEndsWith(self, got, suffix): + self.assertTrue(got.endswith(suffix), (got, suffix)) + + def test_main_while_running(self): + r = repr(greenlet.getcurrent()) + self.assertEndsWith(r, " current active started main>") + + def test_main_in_background(self): + main = greenlet.getcurrent() + def run(): + return repr(main) + + g = RawGreenlet(run) + r = g.switch() + self.assertEndsWith(r, ' suspended active started main>') + + def test_initial(self): + r = repr(RawGreenlet()) + self.assertEndsWith(r, ' pending>') + + def test_main_from_other_thread(self): + main = greenlet.getcurrent() + + class T(threading.Thread): + original_main = thread_main = None + main_glet = None + def run(self): + self.original_main = repr(main) + self.main_glet = greenlet.getcurrent() + self.thread_main = repr(self.main_glet) + + t = T() + t.start() + t.join(10) + + self.assertEndsWith(t.original_main, ' suspended active started main>') + self.assertEndsWith(t.thread_main, ' current active started main>') + # give the machinery time to notice the death of the thread, + # and clean it up. Note that we don't use + # ``expect_greenlet_leak`` or wait_for_pending_cleanups, + # because at this point we know we have an extra greenlet + # still reachable. + for _ in range(3): + time.sleep(0.001) + + # In the past, main greenlets, even from dead threads, never + # really appear dead. We have fixed that, and we also report + # that the thread is dead in the repr. (Do this multiple times + # to make sure that we don't self-modify and forget our state + # in the C++ code). + for _ in range(3): + self.assertTrue(t.main_glet.dead) + r = repr(t.main_glet) + self.assertEndsWith(r, ' (thread exited) dead>') + + def test_dead(self): + g = RawGreenlet(lambda: None) + g.switch() + self.assertEndsWith(repr(g), ' dead>') + self.assertNotIn('suspended', repr(g)) + self.assertNotIn('started', repr(g)) + self.assertNotIn('active', repr(g)) + + def test_formatting_produces_native_str(self): + # https://github.com/python-greenlet/greenlet/issues/218 + # %s formatting on Python 2 was producing unicode, not str. + + g_dead = RawGreenlet(lambda: None) + g_not_started = RawGreenlet(lambda: None) + g_cur = greenlet.getcurrent() + + for g in g_dead, g_not_started, g_cur: + + self.assertIsInstance( + '%s' % (g,), + str + ) + self.assertIsInstance( + '%r' % (g,), + str, + ) + + +class TestMainGreenlet(TestCase): + # Tests some implementation details, and relies on some + # implementation details. 
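(TestMainGreenlet's helpers follow below.) The state flags that `TestRepr` above asserts piecewise can be observed directly: the repr encodes whether a greenlet is started, active, current or suspended, dead, and whether it is a main greenlet. A quick sketch:

```python
import greenlet

g = greenlet.greenlet(lambda: None)
print(repr(g))                        # '<greenlet.greenlet ... pending>'
g.switch()                            # runs to completion
print(repr(g))                        # '<greenlet.greenlet ... dead>'
print(repr(greenlet.getcurrent()))    # '... current active started main>'
```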
+ + def _check_current_is_main(self): + # implementation detail + assert 'main' in repr(greenlet.getcurrent()) + + t = type(greenlet.getcurrent()) + assert 'main' not in repr(t) + return t + + def test_main_greenlet_type_can_be_subclassed(self): + main_type = self._check_current_is_main() + subclass = type('subclass', (main_type,), {}) + self.assertIsNotNone(subclass) + + def test_main_greenlet_is_greenlet(self): + self._check_current_is_main() + self.assertIsInstance(greenlet.getcurrent(), RawGreenlet) + + + +class TestBrokenGreenlets(TestCase): + # Tests for things that used to, or still do, terminate the interpreter. + # This often means doing unsavory things. + + def test_failed_to_initialstub(self): + def func(): + raise AssertionError("Never get here") + + + g = greenlet._greenlet.UnswitchableGreenlet(func) + g.force_switch_error = True + + with self.assertRaisesRegex(SystemError, + "Failed to switch stacks into a greenlet for the first time."): + g.switch() + + def test_failed_to_switch_into_running(self): + runs = [] + def func(): + runs.append(1) + greenlet.getcurrent().parent.switch() + runs.append(2) + greenlet.getcurrent().parent.switch() + runs.append(3) # pragma: no cover + + g = greenlet._greenlet.UnswitchableGreenlet(func) + g.switch() + self.assertEqual(runs, [1]) + g.switch() + self.assertEqual(runs, [1, 2]) + g.force_switch_error = True + + with self.assertRaisesRegex(SystemError, + "Failed to switch stacks into a running greenlet."): + g.switch() + + # If we stopped here, we would fail the leakcheck, because we've left + # the ``inner_bootstrap()`` C frame and its descendents hanging around, + # which have a bunch of Python references. They'll never get cleaned up + # if we don't let the greenlet finish. + g.force_switch_error = False + g.switch() + self.assertEqual(runs, [1, 2, 3]) + + def test_failed_to_slp_switch_into_running(self): + ex = self.assertScriptRaises('fail_slp_switch.py') + + self.assertIn('fail_slp_switch is running', ex.output) + self.assertIn(ex.returncode, self.get_expected_returncodes_for_aborted_process()) + + def test_reentrant_switch_two_greenlets(self): + # Before we started capturing the arguments in g_switch_finish, this could crash. + output = self.run_script('fail_switch_two_greenlets.py') + self.assertIn('In g1_run', output) + self.assertIn('TRACE', output) + self.assertIn('LEAVE TRACE', output) + self.assertIn('Falling off end of main', output) + self.assertIn('Falling off end of g1_run', output) + self.assertIn('Falling off end of g2', output) + + def test_reentrant_switch_three_greenlets(self): + # On debug builds of greenlet, this used to crash with an assertion error; + # on non-debug versions, it ran fine (which it should not do!). + # Now it always crashes correctly with a TypeError + ex = self.assertScriptRaises('fail_switch_three_greenlets.py', exitcodes=(1,)) + + self.assertIn('TypeError', ex.output) + self.assertIn('positional arguments', ex.output) + + def test_reentrant_switch_three_greenlets2(self): + # This actually passed on debug and non-debug builds. It + # should probably have been triggering some debug assertions + # but it didn't. + # + # I think the fixes for the above test also kicked in here. 
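(The assertion for `test_reentrant_switch_three_greenlets2` follows below.) The `run_script()`/`assertScriptRaises()` helpers used throughout `TestBrokenGreenlets` come from this suite's shared `TestCase`; the idea is simply to run a scenario that may hard-crash the interpreter in a child process, so the test runner survives. A generic sketch of the pattern, with an illustrative script name:

```python
import subprocess
import sys

def run_script(path):
    # Run the scenario in a child interpreter and capture its output;
    # a crash there cannot take this process down with it.
    return subprocess.run(
        [sys.executable, path],
        capture_output=True, text=True, check=False)

result = run_script('some_crashing_scenario.py')
print(result.returncode, result.stdout)
```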
+        output = self.run_script('fail_switch_three_greenlets2.py')
+        self.assertIn(
+            "RESULTS: [('trace', 'switch'), "
+            "('trace', 'switch'), ('g2 arg', 'g2 from tracefunc'), "
+            "('trace', 'switch'), ('main g1', 'from g2_run'), ('trace', 'switch'), "
+            "('g1 arg', 'g1 from main'), ('trace', 'switch'), ('main g2', 'from g1_run'), "
+            "('trace', 'switch'), ('g1 from parent', 'g1 from main 2'), ('trace', 'switch'), "
+            "('main g1.2', 'g1 done'), ('trace', 'switch'), ('g2 from parent', ()), "
+            "('trace', 'switch'), ('main g2.2', 'g2 done')]",
+            output
+        )
+
+    def test_reentrant_switch_GreenletAlreadyStartedInPython(self):
+        output = self.run_script('fail_initialstub_already_started.py')
+
+        self.assertIn(
+            "RESULTS: ['Begin C', 'Switch to b from B.__getattribute__ in C', "
+            "('Begin B', ()), '_B_run switching to main', ('main from c', 'From B'), "
+            "'B.__getattribute__ back from main in C', ('Begin A', (None,)), "
+            "('A dead?', True, 'B dead?', True, 'C dead?', False), "
+            "'C done', ('main from c.2', None)]",
+            output
+        )
+
+    def test_reentrant_switch_run_callable_has_del(self):
+        output = self.run_script('fail_clearing_run_switches.py')
+        self.assertIn(
+            "RESULTS ["
+            "('G.__getattribute__', 'run'), ('RunCallable', '__del__'), "
+            "('main: g.switch()', 'from RunCallable'), ('run_func', 'enter')"
+            "]",
+            output
+        )
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/venv/Lib/site-packages/greenlet/tests/test_greenlet_trash.py b/venv/Lib/site-packages/greenlet/tests/test_greenlet_trash.py
new file mode 100644
index 00000000..c1fc1374
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/tests/test_greenlet_trash.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+"""
+Tests for greenlets interacting with the CPython trash can API.
+
+The CPython trash can API is not designed to be re-entered from a
+single thread. But this can happen using greenlets, if something
+during the object deallocation process switches greenlets, and this second
+greenlet then causes the trash can to get entered again. Here, we do this
+very explicitly, but in other cases (like gevent) it could be arbitrarily more
+complicated: for example, a weakref callback might try to acquire a lock that's
+already held by another greenlet; that would allow a greenlet switch to occur.
+
+See https://github.com/gevent/gevent/issues/1909
+
+This test is fragile and relies on details of the CPython
+implementation (like most of the rest of this package):
+
+    - We enter the trashcan and deferred deallocation after
+      ``_PyTrash_UNWIND_LEVEL`` calls. This constant, defined in
+      CPython's object.c, is generally 50. That's basically how many objects are required to
+      get us into the deferred deallocation situation.
+
+    - The test fails by hitting an ``assert()`` in object.c; if the
+      build didn't enable assert, then we don't catch this.
+
+    - If the test fails in that way, the interpreter crashes.
+"""
+from __future__ import print_function, absolute_import, division
+
+import unittest
+
+
+class TestTrashCanReEnter(unittest.TestCase):
+
+    def test_it(self):
+        try:
+            # pylint:disable-next=no-name-in-module
+            from greenlet._greenlet import get_tstate_trash_delete_nesting # pylint:disable=unused-import
+        except ImportError:
+            import sys
+            # Python 3.13 no longer has "trash delete nesting" (it has "delete later" instead)
+            assert sys.version_info[:2] >= (3, 13)
+            self.skipTest("get_tstate_trash_delete_nesting is not available.")
+
+        # Try several times to trigger it, because it isn't 100%
+        # reliable.
+        for _ in range(10):
+            self.check_it()
+
+    def check_it(self): # pylint:disable=too-many-statements
+        import greenlet
+        from greenlet._greenlet import get_tstate_trash_delete_nesting # pylint:disable=no-name-in-module
+        main = greenlet.getcurrent()
+
+        assert get_tstate_trash_delete_nesting() == 0
+
+        # We expect to be in deferred deallocation after this many
+        # deallocations have occurred. TODO: I wish we had a better way to do
+        # this --- that was before get_tstate_trash_delete_nesting; perhaps
+        # we can use that API to do better?
+        TRASH_UNWIND_LEVEL = 50
+        # How many objects to put in a container; it's the container that
+        # queues objects for deferred deallocation.
+        OBJECTS_PER_CONTAINER = 500
+
+        class Dealloc: # define the class here because we alter class variables each time we run.
+            """
+            An object with a ``__del__`` method. When it starts getting deallocated
+            from a deferred trash can run, it switches greenlets, allocates more objects
+            which then also go in the trash can. If we don't save state appropriately,
+            nesting gets out of order and we can crash the interpreter.
+            """
+
+            #: Has our deallocation actually run and switched greenlets?
+            #: When it does, this will be set to the current greenlet. This should
+            #: be happening in the main greenlet, so we check that down below.
+            SPAWNED = False
+
+            #: Has the background greenlet run?
+            BG_RAN = False
+
+            BG_GLET = None
+
+            #: How many of these things have ever been allocated.
+            CREATED = 0
+
+            #: How many of these things have ever been deallocated.
+            DESTROYED = 0
+
+            #: How many were destroyed not in the main greenlet. There should always
+            #: be some.
+            #: If the test is broken or things change in the trashcan implementation,
+            #: this may not be correct.
+            DESTROYED_BG = 0
+
+            def __init__(self, sequence_number):
+                """
+                :param sequence_number: The ordinal of this object during
+                   one particular creation run. This is used to detect (guess, really)
+                   when we have entered the trash can's deferred deallocation.
+                """
+                self.i = sequence_number
+                Dealloc.CREATED += 1
+
+            def __del__(self):
+                if self.i == TRASH_UNWIND_LEVEL and not self.SPAWNED:
+                    Dealloc.SPAWNED = greenlet.getcurrent()
+                    other = Dealloc.BG_GLET = greenlet.greenlet(background_greenlet)
+                    x = other.switch()
+                    assert x == 42
+                    # It's important that we don't switch back to the greenlet,
+                    # we leave it hanging there in an incomplete state. But we don't let it
+                    # get collected, either. If we complete it now, while we're still
+                    # in the scope of the initial trash can, things work out and we
+                    # don't see the problem. We need this greenlet to complete
+                    # at some point in the future, after we've exited this trash can invocation.
+                    del other
+                elif self.i == 40 and greenlet.getcurrent() is not main:
+                    Dealloc.BG_RAN = True
+                    try:
+                        main.switch(42)
+                    except greenlet.GreenletExit as ex:
+                        # We expect this; all references to us go away
+                        # while we're still running, and we need to finish deleting
+                        # ourself.
+                        Dealloc.BG_RAN = type(ex)
+                        del ex
+
+                # Record the fact that we're dead last of all. This ensures that
+                # we actually get returned too.
+                Dealloc.DESTROYED += 1
+                if greenlet.getcurrent() is not main:
+                    Dealloc.DESTROYED_BG += 1
+
+
+        def background_greenlet():
+            # We direct through a second function, instead of
+            # directly calling ``make_some()``, so that we have complete
+            # control over when these objects are destroyed: we need them
+            # to be destroyed in the context of the background greenlet
+            t = make_some()
+            del t # Trigger deletion.
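(`check_it` continues below with `make_some()`.) The reason `make_some()` nests tuples rather than filling a flat list: deallocating the head of a deeply nested chain recurses through every level in C, which is what pushes CPython into the trashcan's deferred-deallocation mode. In miniature:

```python
# Build t -> (999, (998, (997, ...))); tearing this down is deeply
# recursive in C, which is exactly the situation the trashcan exists for.
t = ()
for i in range(1000):
    t = (i, t)
del t
```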
+        def make_some():
+            t = ()
+            i = OBJECTS_PER_CONTAINER
+            while i:
+                # Nest the tuples; it's the recursion that gets us
+                # into trash.
+                t = (Dealloc(i), t)
+                i -= 1
+            return t
+
+
+        some = make_some()
+        self.assertEqual(Dealloc.CREATED, OBJECTS_PER_CONTAINER)
+        self.assertEqual(Dealloc.DESTROYED, 0)
+
+        # If we're going to crash, it should be on the following line.
+        # We only crash if ``assert()`` is enabled, of course.
+        del some
+
+        # For non-debug builds of CPython, we won't crash. The best we can do is check
+        # the nesting level explicitly.
+        self.assertEqual(0, get_tstate_trash_delete_nesting())
+
+        # Discard this, raising GreenletExit into where it is waiting.
+        Dealloc.BG_GLET = None
+        # The same nesting level is maintained.
+        self.assertEqual(0, get_tstate_trash_delete_nesting())
+
+        # We definitely cleaned some up in the background
+        self.assertGreater(Dealloc.DESTROYED_BG, 0)
+
+        # Make sure all the cleanups happened.
+        self.assertIs(Dealloc.SPAWNED, main)
+        self.assertTrue(Dealloc.BG_RAN)
+        self.assertEqual(Dealloc.BG_RAN, greenlet.GreenletExit)
+        self.assertEqual(Dealloc.CREATED, Dealloc.DESTROYED)
+        self.assertEqual(Dealloc.CREATED, OBJECTS_PER_CONTAINER * 2)
+
+        import gc
+        gc.collect()
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/venv/Lib/site-packages/greenlet/tests/test_leaks.py b/venv/Lib/site-packages/greenlet/tests/test_leaks.py
new file mode 100644
index 00000000..99da4ebe
--- /dev/null
+++ b/venv/Lib/site-packages/greenlet/tests/test_leaks.py
@@ -0,0 +1,447 @@
+# -*- coding: utf-8 -*-
+"""
+Testing scenarios that may have leaked.
+"""
+from __future__ import print_function, absolute_import, division
+
+import sys
+import gc
+
+import time
+import weakref
+import threading
+
+
+import greenlet
+from . import TestCase
+from . import PY314
+from .leakcheck import fails_leakcheck
+from .leakcheck import ignores_leakcheck
+from .leakcheck import RUNNING_ON_MANYLINUX
+
+# pylint:disable=protected-access
+
+assert greenlet.GREENLET_USE_GC # Option to disable this was removed in 1.0
+
+class HasFinalizerTracksInstances(object):
+    EXTANT_INSTANCES = set()
+    def __init__(self, msg):
+        self.msg = sys.intern(msg)
+        self.EXTANT_INSTANCES.add(id(self))
+    def __del__(self):
+        self.EXTANT_INSTANCES.remove(id(self))
+    def __repr__(self):
+        return "<HasFinalizerTracksInstances at 0x%x %r>" % (
+            id(self), self.msg
+        )
+    @classmethod
+    def reset(cls):
+        cls.EXTANT_INSTANCES.clear()
+
+
+class TestLeaks(TestCase):
+
+    def test_arg_refs(self):
+        args = ('a', 'b', 'c')
+        refcount_before = sys.getrefcount(args)
+        # pylint:disable=unnecessary-lambda
+        g = greenlet.greenlet(
+            lambda *args: greenlet.getcurrent().parent.switch(*args))
+        for _ in range(100):
+            g.switch(*args)
+        self.assertEqual(sys.getrefcount(args), refcount_before)
+
+    def test_kwarg_refs(self):
+        kwargs = {}
+        self.assertEqual(sys.getrefcount(kwargs), 2 if not PY314 else 1)
+        # pylint:disable=unnecessary-lambda
+        g = greenlet.greenlet(
+            lambda **gkwargs: greenlet.getcurrent().parent.switch(**gkwargs))
+        for _ in range(100):
+            g.switch(**kwargs)
+        # Python 3.14 elides reference counting operations
+        # in some cases. See https://github.com/python/cpython/pull/130708
+        self.assertEqual(sys.getrefcount(kwargs), 2 if not PY314 else 1)
+
+
+    @staticmethod
+    def __recycle_threads():
+        # By introducing a thread that does sleep we allow other threads,
+        # that have triggered their __block condition, but did not have a
+        # chance to deallocate their thread state yet, to finally do so.
+ # The way it works is by requiring a GIL switch (different thread), + # which does a GIL release (sleep), which might do a GIL switch + # to finished threads and allow them to clean up. + def worker(): + time.sleep(0.001) + t = threading.Thread(target=worker) + t.start() + time.sleep(0.001) + t.join(10) + + def test_threaded_leak(self): + gg = [] + def worker(): + # only main greenlet present + gg.append(weakref.ref(greenlet.getcurrent())) + for _ in range(2): + t = threading.Thread(target=worker) + t.start() + t.join(10) + del t + greenlet.getcurrent() # update ts_current + self.__recycle_threads() + greenlet.getcurrent() # update ts_current + gc.collect() + greenlet.getcurrent() # update ts_current + for g in gg: + self.assertIsNone(g()) + + def test_threaded_adv_leak(self): + gg = [] + def worker(): + # main and additional *finished* greenlets + ll = greenlet.getcurrent().ll = [] + def additional(): + ll.append(greenlet.getcurrent()) + for _ in range(2): + greenlet.greenlet(additional).switch() + gg.append(weakref.ref(greenlet.getcurrent())) + for _ in range(2): + t = threading.Thread(target=worker) + t.start() + t.join(10) + del t + greenlet.getcurrent() # update ts_current + self.__recycle_threads() + greenlet.getcurrent() # update ts_current + gc.collect() + greenlet.getcurrent() # update ts_current + for g in gg: + self.assertIsNone(g()) + + def assertClocksUsed(self): + used = greenlet._greenlet.get_clocks_used_doing_optional_cleanup() + self.assertGreaterEqual(used, 0) + # we don't lose the value + greenlet._greenlet.enable_optional_cleanup(True) + used2 = greenlet._greenlet.get_clocks_used_doing_optional_cleanup() + self.assertEqual(used, used2) + self.assertGreater(greenlet._greenlet.CLOCKS_PER_SEC, 1) + + def _check_issue251(self, + manually_collect_background=True, + explicit_reference_to_switch=False): + # See https://github.com/python-greenlet/greenlet/issues/251 + # Killing a greenlet (probably not the main one) + # in one thread from another thread would + # result in leaking a list (the ts_delkey list). + # We no longer use lists to hold that stuff, though. + + # For the test to be valid, even empty lists have to be tracked by the + # GC + + assert gc.is_tracked([]) + HasFinalizerTracksInstances.reset() + greenlet.getcurrent() + greenlets_before = self.count_objects(greenlet.greenlet, exact_kind=False) + + background_glet_running = threading.Event() + background_glet_killed = threading.Event() + background_greenlets = [] + + # XXX: Switching this to a greenlet subclass that overrides + # run results in all callers failing the leaktest; that + # greenlet instance is leaked. There's a bound method for + # run() living on the stack of the greenlet in g_initialstub, + # and since we don't manually switch back to the background + # greenlet to let it "fall off the end" and exit the + # g_initialstub function, it never gets cleaned up. Making the + # garbage collector aware of this bound method (making it an + # attribute of the greenlet structure and traversing into it) + # doesn't help, for some reason. + def background_greenlet(): + # Throw control back to the main greenlet. 
+ jd = HasFinalizerTracksInstances("DELETING STACK OBJECT") + greenlet._greenlet.set_thread_local( + 'test_leaks_key', + HasFinalizerTracksInstances("DELETING THREAD STATE")) + # Explicitly keeping 'switch' in a local variable + # breaks this test in all versions + if explicit_reference_to_switch: + s = greenlet.getcurrent().parent.switch + s([jd]) + else: + greenlet.getcurrent().parent.switch([jd]) + + bg_main_wrefs = [] + + def background_thread(): + glet = greenlet.greenlet(background_greenlet) + bg_main_wrefs.append(weakref.ref(glet.parent)) + + background_greenlets.append(glet) + glet.switch() # Be sure it's active. + # Control is ours again. + del glet # Delete one reference from the thread it runs in. + background_glet_running.set() + background_glet_killed.wait(10) + + # To trigger the background collection of the dead + # greenlet, thus clearing out the contents of the list, we + # need to run some APIs. See issue 252. + if manually_collect_background: + greenlet.getcurrent() + + + t = threading.Thread(target=background_thread) + t.start() + background_glet_running.wait(10) + greenlet.getcurrent() + lists_before = self.count_objects(list, exact_kind=True) + + assert len(background_greenlets) == 1 + self.assertFalse(background_greenlets[0].dead) + # Delete the last reference to the background greenlet + # from a different thread. This puts it in the background thread's + # ts_delkey list. + del background_greenlets[:] + background_glet_killed.set() + + # Now wait for the background thread to die. + t.join(10) + del t + # As part of the fix for 252, we need to cycle the ceval.c + # interpreter loop to be sure it has had a chance to process + # the pending call. + self.wait_for_pending_cleanups() + + lists_after = self.count_objects(list, exact_kind=True) + greenlets_after = self.count_objects(greenlet.greenlet, exact_kind=False) + + # On 2.7, we observe that lists_after is smaller than + # lists_before. No idea what lists got cleaned up. All the + # Python 3 versions match exactly. + self.assertLessEqual(lists_after, lists_before) + # On versions after 3.6, we've successfully cleaned up the + # greenlet references thanks to the internal "vectorcall" + # protocol; prior to that, there is a reference path through + # the ``greenlet.switch`` method still on the stack that we + # can't reach to clean up. The C code goes through terrific + # lengths to clean that up. + if not explicit_reference_to_switch \ + and greenlet._greenlet.get_clocks_used_doing_optional_cleanup() is not None: + # If cleanup was disabled, though, we may not find it. + self.assertEqual(greenlets_after, greenlets_before) + if manually_collect_background: + # TODO: Figure out how to make this work! + # The one on the stack is still leaking somehow + # in the non-manually-collect state. + self.assertEqual(HasFinalizerTracksInstances.EXTANT_INSTANCES, set()) + else: + # The explicit reference prevents us from collecting it + # and it isn't always found by the GC either for some + # reason. The entire frame is leaked somehow, on some + # platforms (e.g., MacPorts builds of Python (all + # versions!)), but not on other platforms (the linux and + # windows builds on GitHub actions and Appveyor). So we'd + # like to write a test that proves that the main greenlet + # sticks around, and we can on my machine (macOS 11.6, + # MacPorts builds of everything) but we can't write that + # same test on other platforms. However, hopefully iteration + # done by leakcheck will find it. 
+                pass
+
+        if greenlet._greenlet.get_clocks_used_doing_optional_cleanup() is not None:
+            self.assertClocksUsed()
+
+    def test_issue251_killing_cross_thread_leaks_list(self):
+        self._check_issue251()
+
+    def test_issue251_with_cleanup_disabled(self):
+        greenlet._greenlet.enable_optional_cleanup(False)
+        try:
+            self._check_issue251()
+        finally:
+            greenlet._greenlet.enable_optional_cleanup(True)
+
+    @fails_leakcheck
+    def test_issue251_issue252_need_to_collect_in_background(self):
+        # Between greenlet 1.1.2 and the next version, this was still
+        # failing because the leak of the list still exists when we
+        # don't call a greenlet API before exiting the thread. The
+        # proximate cause is that neither of the two greenlets from
+        # the background thread are actually being destroyed, even
+        # though the GC is in fact visiting both objects. It's not
+        # clear where that leak is? For some reason the thread-local
+        # dict holding it isn't being cleaned up.
+        #
+        # The leak, I think, is in the CPython internal function that
+        # calls into green_switch(). The argument tuple is still on
+        # the C stack somewhere and can't be reached? That doesn't
+        # make sense, because the tuple should be collectable when
+        # this object goes away.
+        #
+        # Note that this test sometimes spuriously passes on Linux,
+        # for some reason, but I've never seen it pass on macOS.
+        self._check_issue251(manually_collect_background=False)
+
+    @fails_leakcheck
+    def test_issue251_issue252_need_to_collect_in_background_cleanup_disabled(self):
+        self.expect_greenlet_leak = True
+        greenlet._greenlet.enable_optional_cleanup(False)
+        try:
+            self._check_issue251(manually_collect_background=False)
+        finally:
+            greenlet._greenlet.enable_optional_cleanup(True)
+
+    @fails_leakcheck
+    def test_issue251_issue252_explicit_reference_not_collectable(self):
+        self._check_issue251(
+            manually_collect_background=False,
+            explicit_reference_to_switch=True)
+
+    UNTRACK_ATTEMPTS = 100
+
+    def _only_test_some_versions(self):
+        # We're only looking for this problem specifically on 3.11,
+        # and this set of tests is relatively fragile, depending on
+        # OS and memory management details. So we want to run it on 3.11+
+        # (obviously) but not every older 3.x version in order to reduce
+        # false negatives. At the moment, those false results seem to have
+        # resolved, so we are actually running this on 3.8+
+        assert sys.version_info[0] >= 3
+        if sys.version_info[:2] < (3, 8):
+            self.skipTest('Only observed on 3.11')
+        if RUNNING_ON_MANYLINUX:
+            self.skipTest("Slow and not worth repeating here")
+
+    @ignores_leakcheck
+    # Because we're just trying to track raw memory, not objects, and running
+    # the leakcheck makes an already slow test slower.
+    def test_untracked_memory_doesnt_increase(self):
+        # See https://github.com/gevent/gevent/issues/1924
+        # and https://github.com/python-greenlet/greenlet/issues/328
+        self._only_test_some_versions()
+        def f():
+            return 1
+
+        ITER = 10000
+        def run_it():
+            for _ in range(ITER):
+                greenlet.greenlet(f).switch()
+
+        # Establish baseline
+        for _ in range(3):
+            run_it()
+
+        # uss: (Linux, macOS, Windows): aka "Unique Set Size", this is
+        # the memory which is unique to a process and which would be
+        # freed if the process was terminated right now.
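(The measurement loop resumes below.) `get_process_uss()` is a helper on this suite's shared `TestCase`; a sketch of what such a helper can look like, assuming the third-party `psutil` package is available and the platform reports USS:

```python
import psutil

def get_process_uss():
    # "USS" = Unique Set Size: the memory that would be freed
    # if this process exited right now.
    return psutil.Process().memory_full_info().uss

print(get_process_uss() > 0)
```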
+ uss_before = self.get_process_uss() + + for count in range(self.UNTRACK_ATTEMPTS): + uss_before = max(uss_before, self.get_process_uss()) + run_it() + + uss_after = self.get_process_uss() + if uss_after <= uss_before and count > 1: + break + + self.assertLessEqual(uss_after, uss_before) + + def _check_untracked_memory_thread(self, deallocate_in_thread=True): + self._only_test_some_versions() + # Like the above test, but what if there are a bunch of + # unfinished greenlets in a thread that dies? + # Does it matter if we deallocate in the thread or not? + EXIT_COUNT = [0] + + def f(): + try: + greenlet.getcurrent().parent.switch() + except greenlet.GreenletExit: + EXIT_COUNT[0] += 1 + raise + return 1 + + ITER = 10000 + def run_it(): + glets = [] + for _ in range(ITER): + # Greenlet starts, switches back to us. + # We keep a strong reference to the greenlet though so it doesn't + # get a GreenletExit exception. + g = greenlet.greenlet(f) + glets.append(g) + g.switch() + + return glets + + test = self + + class ThreadFunc: + uss_before = uss_after = 0 + glets = () + ITER = 2 + def __call__(self): + self.uss_before = test.get_process_uss() + + for _ in range(self.ITER): + self.glets += tuple(run_it()) + + for g in self.glets: + test.assertIn('suspended active', str(g)) + # Drop them. + if deallocate_in_thread: + self.glets = () + self.uss_after = test.get_process_uss() + + # Establish baseline + uss_before = uss_after = None + for count in range(self.UNTRACK_ATTEMPTS): + EXIT_COUNT[0] = 0 + thread_func = ThreadFunc() + t = threading.Thread(target=thread_func) + t.start() + t.join(30) + self.assertFalse(t.is_alive()) + + if uss_before is None: + uss_before = thread_func.uss_before + + uss_before = max(uss_before, thread_func.uss_before) + if deallocate_in_thread: + self.assertEqual(thread_func.glets, ()) + self.assertEqual(EXIT_COUNT[0], ITER * thread_func.ITER) + + del thread_func # Deallocate the greenlets; but this won't raise into them + del t + if not deallocate_in_thread: + self.assertEqual(EXIT_COUNT[0], 0) + if deallocate_in_thread: + self.wait_for_pending_cleanups() + + uss_after = self.get_process_uss() + # See if we achieve a non-growth state at some point. Break when we do. + if uss_after <= uss_before and count > 1: + break + + self.wait_for_pending_cleanups() + uss_after = self.get_process_uss() + self.assertLessEqual(uss_after, uss_before, "after attempts %d" % (count,)) + + @ignores_leakcheck + # Because we're just trying to track raw memory, not objects, and running + # the leakcheck makes an already slow test slower. + def test_untracked_memory_doesnt_increase_unfinished_thread_dealloc_in_thread(self): + self._check_untracked_memory_thread(deallocate_in_thread=True) + + @ignores_leakcheck + # Because the main greenlets from the background threads do not exit in a timely fashion, + # we fail the object-based leakchecks. + def test_untracked_memory_doesnt_increase_unfinished_thread_dealloc_in_main(self): + self._check_untracked_memory_thread(deallocate_in_thread=False) + +if __name__ == '__main__': + __import__('unittest').main() diff --git a/venv/Lib/site-packages/greenlet/tests/test_stack_saved.py b/venv/Lib/site-packages/greenlet/tests/test_stack_saved.py new file mode 100644 index 00000000..b362bf95 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_stack_saved.py @@ -0,0 +1,19 @@ +import greenlet +from . 
import TestCase + + +class Test(TestCase): + + def test_stack_saved(self): + main = greenlet.getcurrent() + self.assertEqual(main._stack_saved, 0) + + def func(): + main.switch(main._stack_saved) + + g = greenlet.greenlet(func) + x = g.switch() + self.assertGreater(x, 0) + self.assertGreater(g._stack_saved, 0) + g.switch() + self.assertEqual(g._stack_saved, 0) diff --git a/venv/Lib/site-packages/greenlet/tests/test_throw.py b/venv/Lib/site-packages/greenlet/tests/test_throw.py new file mode 100644 index 00000000..f4f9a140 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_throw.py @@ -0,0 +1,128 @@ +import sys + + +from greenlet import greenlet +from . import TestCase + +def switch(*args): + return greenlet.getcurrent().parent.switch(*args) + + +class ThrowTests(TestCase): + def test_class(self): + def f(): + try: + switch("ok") + except RuntimeError: + switch("ok") + return + switch("fail") + g = greenlet(f) + res = g.switch() + self.assertEqual(res, "ok") + res = g.throw(RuntimeError) + self.assertEqual(res, "ok") + + def test_val(self): + def f(): + try: + switch("ok") + except RuntimeError: + val = sys.exc_info()[1] + if str(val) == "ciao": + switch("ok") + return + switch("fail") + + g = greenlet(f) + res = g.switch() + self.assertEqual(res, "ok") + res = g.throw(RuntimeError("ciao")) + self.assertEqual(res, "ok") + + g = greenlet(f) + res = g.switch() + self.assertEqual(res, "ok") + res = g.throw(RuntimeError, "ciao") + self.assertEqual(res, "ok") + + def test_kill(self): + def f(): + switch("ok") + switch("fail") + g = greenlet(f) + res = g.switch() + self.assertEqual(res, "ok") + res = g.throw() + self.assertTrue(isinstance(res, greenlet.GreenletExit)) + self.assertTrue(g.dead) + res = g.throw() # immediately eaten by the already-dead greenlet + self.assertTrue(isinstance(res, greenlet.GreenletExit)) + + def test_throw_goes_to_original_parent(self): + main = greenlet.getcurrent() + + def f1(): + try: + main.switch("f1 ready to catch") + except IndexError: + return "caught" + return "normal exit" + + def f2(): + main.switch("from f2") + + g1 = greenlet(f1) + g2 = greenlet(f2, parent=g1) + with self.assertRaises(IndexError): + g2.throw(IndexError) + self.assertTrue(g2.dead) + self.assertTrue(g1.dead) + + g1 = greenlet(f1) + g2 = greenlet(f2, parent=g1) + res = g1.switch() + self.assertEqual(res, "f1 ready to catch") + res = g2.throw(IndexError) + self.assertEqual(res, "caught") + self.assertTrue(g2.dead) + self.assertTrue(g1.dead) + + g1 = greenlet(f1) + g2 = greenlet(f2, parent=g1) + res = g1.switch() + self.assertEqual(res, "f1 ready to catch") + res = g2.switch() + self.assertEqual(res, "from f2") + res = g2.throw(IndexError) + self.assertEqual(res, "caught") + self.assertTrue(g2.dead) + self.assertTrue(g1.dead) + + def test_non_traceback_param(self): + with self.assertRaises(TypeError) as exc: + greenlet.getcurrent().throw( + Exception, + Exception(), + self + ) + self.assertEqual(str(exc.exception), + "throw() third argument must be a traceback object") + + def test_instance_of_wrong_type(self): + with self.assertRaises(TypeError) as exc: + greenlet.getcurrent().throw( + Exception(), + BaseException() + ) + + self.assertEqual(str(exc.exception), + "instance exception may not have a separate value") + + def test_not_throwable(self): + with self.assertRaises(TypeError) as exc: + greenlet.getcurrent().throw( + "abc" + ) + self.assertEqual(str(exc.exception), + "exceptions must be classes, or instances, not str") diff --git 
a/venv/Lib/site-packages/greenlet/tests/test_tracing.py b/venv/Lib/site-packages/greenlet/tests/test_tracing.py new file mode 100644 index 00000000..c044d4b6 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_tracing.py @@ -0,0 +1,291 @@ +from __future__ import print_function +import sys +import greenlet +import unittest + +from . import TestCase +from . import PY312 + +# https://discuss.python.org/t/cpython-3-12-greenlet-and-tracing-profiling-how-to-not-crash-and-get-correct-results/33144/2 +DEBUG_BUILD_PY312 = ( + PY312 and hasattr(sys, 'gettotalrefcount'), + "Broken on debug builds of Python 3.12" +) + +class SomeError(Exception): + pass + +class GreenletTracer(object): + oldtrace = None + + def __init__(self, error_on_trace=False): + self.actions = [] + self.error_on_trace = error_on_trace + + def __call__(self, *args): + self.actions.append(args) + if self.error_on_trace: + raise SomeError + + def __enter__(self): + self.oldtrace = greenlet.settrace(self) + return self.actions + + def __exit__(self, *args): + greenlet.settrace(self.oldtrace) + + +class TestGreenletTracing(TestCase): + """ + Tests of ``greenlet.settrace()`` + """ + + def test_a_greenlet_tracing(self): + main = greenlet.getcurrent() + def dummy(): + pass + def dummyexc(): + raise SomeError() + + with GreenletTracer() as actions: + g1 = greenlet.greenlet(dummy) + g1.switch() + g2 = greenlet.greenlet(dummyexc) + self.assertRaises(SomeError, g2.switch) + + self.assertEqual(actions, [ + ('switch', (main, g1)), + ('switch', (g1, main)), + ('switch', (main, g2)), + ('throw', (g2, main)), + ]) + + def test_b_exception_disables_tracing(self): + main = greenlet.getcurrent() + def dummy(): + main.switch() + g = greenlet.greenlet(dummy) + g.switch() + with GreenletTracer(error_on_trace=True) as actions: + self.assertRaises(SomeError, g.switch) + self.assertEqual(greenlet.gettrace(), None) + + self.assertEqual(actions, [ + ('switch', (main, g)), + ]) + + def test_set_same_tracer_twice(self): + # https://github.com/python-greenlet/greenlet/issues/332 + # Our logic in asserting that the tracefunction should + # gain a reference was incorrect if the same tracefunction was set + # twice. + tracer = GreenletTracer() + with tracer: + greenlet.settrace(tracer) + + +class PythonTracer(object): + oldtrace = None + + def __init__(self): + self.actions = [] + + def __call__(self, frame, event, arg): + # Record the co_name so we have an idea what function we're in. + self.actions.append((event, frame.f_code.co_name)) + + def __enter__(self): + self.oldtrace = sys.setprofile(self) + return self.actions + + def __exit__(self, *args): + sys.setprofile(self.oldtrace) + +def tpt_callback(): + return 42 + +class TestPythonTracing(TestCase): + """ + Tests of the interaction of ``sys.settrace()`` + with greenlet facilities. + + NOTE: Most of this is probably CPython specific. 
+ """ + + maxDiff = None + + def test_trace_events_trivial(self): + with PythonTracer() as actions: + tpt_callback() + # If we use the sys.settrace instead of setprofile, we get + # this: + + # self.assertEqual(actions, [ + # ('call', 'tpt_callback'), + # ('call', '__exit__'), + # ]) + + self.assertEqual(actions, [ + ('return', '__enter__'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('call', '__exit__'), + ('c_call', '__exit__'), + ]) + + def _trace_switch(self, glet): + with PythonTracer() as actions: + glet.switch() + return actions + + def _check_trace_events_func_already_set(self, glet): + actions = self._trace_switch(glet) + self.assertEqual(actions, [ + ('return', '__enter__'), + ('c_call', '_trace_switch'), + ('call', 'run'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('return', 'run'), + ('c_return', '_trace_switch'), + ('call', '__exit__'), + ('c_call', '__exit__'), + ]) + + def test_trace_events_into_greenlet_func_already_set(self): + def run(): + return tpt_callback() + + self._check_trace_events_func_already_set(greenlet.greenlet(run)) + + def test_trace_events_into_greenlet_subclass_already_set(self): + class X(greenlet.greenlet): + def run(self): + return tpt_callback() + self._check_trace_events_func_already_set(X()) + + def _check_trace_events_from_greenlet_sets_profiler(self, g, tracer): + g.switch() + tpt_callback() + tracer.__exit__() + self.assertEqual(tracer.actions, [ + ('return', '__enter__'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('return', 'run'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('call', '__exit__'), + ('c_call', '__exit__'), + ]) + + + def test_trace_events_from_greenlet_func_sets_profiler(self): + tracer = PythonTracer() + def run(): + tracer.__enter__() + return tpt_callback() + + self._check_trace_events_from_greenlet_sets_profiler(greenlet.greenlet(run), + tracer) + + def test_trace_events_from_greenlet_subclass_sets_profiler(self): + tracer = PythonTracer() + class X(greenlet.greenlet): + def run(self): + tracer.__enter__() + return tpt_callback() + + self._check_trace_events_from_greenlet_sets_profiler(X(), tracer) + + @unittest.skipIf(*DEBUG_BUILD_PY312) + def test_trace_events_multiple_greenlets_switching(self): + tracer = PythonTracer() + + g1 = None + g2 = None + + def g1_run(): + tracer.__enter__() + tpt_callback() + g2.switch() + tpt_callback() + return 42 + + def g2_run(): + tpt_callback() + tracer.__exit__() + tpt_callback() + g1.switch() + + g1 = greenlet.greenlet(g1_run) + g2 = greenlet.greenlet(g2_run) + + x = g1.switch() + self.assertEqual(x, 42) + tpt_callback() # ensure not in the trace + self.assertEqual(tracer.actions, [ + ('return', '__enter__'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('c_call', 'g1_run'), + ('call', 'g2_run'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('call', '__exit__'), + ('c_call', '__exit__'), + ]) + + @unittest.skipIf(*DEBUG_BUILD_PY312) + def test_trace_events_multiple_greenlets_switching_siblings(self): + # Like the first version, but get both greenlets running first + # as "siblings" and then establish the tracing. 
+ tracer = PythonTracer() + + g1 = None + g2 = None + + def g1_run(): + greenlet.getcurrent().parent.switch() + tracer.__enter__() + tpt_callback() + g2.switch() + tpt_callback() + return 42 + + def g2_run(): + greenlet.getcurrent().parent.switch() + + tpt_callback() + tracer.__exit__() + tpt_callback() + g1.switch() + + g1 = greenlet.greenlet(g1_run) + g2 = greenlet.greenlet(g2_run) + + # Start g1 + g1.switch() + # And it immediately returns control to us. + # Start g2 + g2.switch() + # Which also returns. Now kick off the real part of the + # test. + x = g1.switch() + self.assertEqual(x, 42) + + tpt_callback() # ensure not in the trace + self.assertEqual(tracer.actions, [ + ('return', '__enter__'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('c_call', 'g1_run'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('call', '__exit__'), + ('c_call', '__exit__'), + ]) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/Lib/site-packages/greenlet/tests/test_version.py b/venv/Lib/site-packages/greenlet/tests/test_version.py new file mode 100644 index 00000000..96c17cf1 --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_version.py @@ -0,0 +1,41 @@ +#! /usr/bin/env python +from __future__ import absolute_import +from __future__ import print_function + +import sys +import os +from unittest import TestCase as NonLeakingTestCase + +import greenlet + +# No reason to run this multiple times under leakchecks, +# it doesn't do anything. +class VersionTests(NonLeakingTestCase): + def test_version(self): + def find_dominating_file(name): + if os.path.exists(name): + return name + + tried = [] + here = os.path.abspath(os.path.dirname(__file__)) + for i in range(10): + up = ['..'] * i + path = [here] + up + [name] + fname = os.path.join(*path) + fname = os.path.abspath(fname) + tried.append(fname) + if os.path.exists(fname): + return fname + raise AssertionError("Could not find file " + name + "; checked " + str(tried)) + + try: + setup_py = find_dominating_file('setup.py') + except AssertionError as e: + self.skipTest("Unable to find setup.py; must be out of tree. " + str(e)) + + + invoke_setup = "%s %s --version" % (sys.executable, setup_py) + with os.popen(invoke_setup) as f: + sversion = f.read().strip() + + self.assertEqual(sversion, greenlet.__version__) diff --git a/venv/Lib/site-packages/greenlet/tests/test_weakref.py b/venv/Lib/site-packages/greenlet/tests/test_weakref.py new file mode 100644 index 00000000..05a38a7f --- /dev/null +++ b/venv/Lib/site-packages/greenlet/tests/test_weakref.py @@ -0,0 +1,35 @@ +import gc +import weakref + + +import greenlet +from . 
import TestCase + +class WeakRefTests(TestCase): + def test_dead_weakref(self): + def _dead_greenlet(): + g = greenlet.greenlet(lambda: None) + g.switch() + return g + o = weakref.ref(_dead_greenlet()) + gc.collect() + self.assertEqual(o(), None) + + def test_inactive_weakref(self): + o = weakref.ref(greenlet.greenlet()) + gc.collect() + self.assertEqual(o(), None) + + def test_dealloc_weakref(self): + seen = [] + def worker(): + try: + greenlet.getcurrent().parent.switch() + finally: + seen.append(g()) + g = greenlet.greenlet(worker) + g.switch() + g2 = greenlet.greenlet(lambda: None, g) + g = weakref.ref(g2) + g2 = None + self.assertEqual(seen, [None]) diff --git a/venv/Lib/site-packages/h11-0.16.0.dist-info/INSTALLER b/venv/Lib/site-packages/h11-0.16.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/h11-0.16.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/h11-0.16.0.dist-info/METADATA b/venv/Lib/site-packages/h11-0.16.0.dist-info/METADATA new file mode 100644 index 00000000..8a2f6390 --- /dev/null +++ b/venv/Lib/site-packages/h11-0.16.0.dist-info/METADATA @@ -0,0 +1,202 @@ +Metadata-Version: 2.4 +Name: h11 +Version: 0.16.0 +Summary: A pure-Python, bring-your-own-I/O implementation of HTTP/1.1 +Home-page: https://github.com/python-hyper/h11 +Author: Nathaniel J. Smith +Author-email: njs@pobox.com +License: MIT +Classifier: Development Status :: 3 - Alpha +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: System :: Networking +Requires-Python: >=3.8 +License-File: LICENSE.txt +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: home-page +Dynamic: license +Dynamic: license-file +Dynamic: requires-python +Dynamic: summary + +h11 +=== + +.. image:: https://travis-ci.org/python-hyper/h11.svg?branch=master + :target: https://travis-ci.org/python-hyper/h11 + :alt: Automated test status + +.. image:: https://codecov.io/gh/python-hyper/h11/branch/master/graph/badge.svg + :target: https://codecov.io/gh/python-hyper/h11 + :alt: Test coverage + +.. image:: https://readthedocs.org/projects/h11/badge/?version=latest + :target: http://h11.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +This is a little HTTP/1.1 library written from scratch in Python, +heavily inspired by `hyper-h2 `_. + +It's a "bring-your-own-I/O" library; h11 contains no IO code +whatsoever. This means you can hook h11 up to your favorite network +API, and that could be anything you want: synchronous, threaded, +asynchronous, or your own implementation of `RFC 6214 +`_ -- h11 won't judge you. +(Compare this to the current state of the art, where every time a `new +network API `_ comes along then someone +gets to start over reimplementing the entire HTTP protocol from +scratch.) 
Cory Benfield made an `excellent blog post describing the +benefits of this approach +`_, or if you like video +then here's his `PyCon 2016 talk on the same theme +`_. + +This also means that h11 is not immediately useful out of the box: +it's a toolkit for building programs that speak HTTP, not something +that could directly replace ``requests`` or ``twisted.web`` or +whatever. But h11 makes it much easier to implement something like +``requests`` or ``twisted.web``. + +At a high level, working with h11 goes like this: + +1) First, create an ``h11.Connection`` object to track the state of a + single HTTP/1.1 connection. + +2) When you read data off the network, pass it to + ``conn.receive_data(...)``; you'll get back a list of objects + representing high-level HTTP "events". + +3) When you want to send a high-level HTTP event, create the + corresponding "event" object and pass it to ``conn.send(...)``; + this will give you back some bytes that you can then push out + through the network. + +For example, a client might instantiate and then send a +``h11.Request`` object, then zero or more ``h11.Data`` objects for the +request body (e.g., if this is a POST), and then a +``h11.EndOfMessage`` to indicate the end of the message. The +server would then send back a ``h11.Response``, some ``h11.Data``, and +its own ``h11.EndOfMessage``. If either side violates the protocol, +you'll get a ``h11.ProtocolError`` exception. + +h11 is suitable for implementing both servers and clients, and has a +pleasantly symmetric API: the events you send as a client are exactly +the ones that you receive as a server and vice-versa. + +`Here's an example of a tiny HTTP client +`_ + +It also has `a fine manual `_. + +FAQ +--- + +*Whyyyyy?* + +I wanted to play with HTTP in `Curio +`__ and `Trio +`__, which at the time didn't have any +HTTP libraries. So I thought, no big deal, Python has, like, a dozen +different implementations of HTTP, surely I can find one that's +reusable. I didn't find one, but I did find Cory's call-to-arms +blog-post. So I figured, well, fine, if I have to implement HTTP from +scratch, at least I can make sure no-one *else* has to ever again. + +*Should I use it?* + +Maybe. You should be aware that it's a very young project. But, it's +feature complete and has an exhaustive test-suite and complete docs, +so the next step is for people to try using it and see how it goes +:-). If you do then please let us know -- if nothing else we'll want +to talk to you before making any incompatible changes! + +*What are the features/limitations?* + +Roughly speaking, it's trying to be a robust, complete, and non-hacky +implementation of the first "chapter" of the HTTP/1.1 spec: `RFC 7230: +HTTP/1.1 Message Syntax and Routing +`_. That is, it mostly focuses on +implementing HTTP at the level of taking bytes on and off the wire, +and the headers related to that, and tries to be anal about spec +conformance. It doesn't know about higher-level concerns like URL +routing, conditional GETs, cross-origin cookie policies, or content +negotiation. But it does know how to take care of framing, +cross-version differences in keep-alive handling, and the "obsolete +line folding" rule, so you can focus your energies on the hard / +interesting parts for your application, and it tries to support the +full specification in the sense that any useful HTTP/1.1 conformant +application should be able to use h11. + +It's pure Python, and has no dependencies outside of the standard +library. 
+ +It has a test suite with 100.0% coverage for both statements and +branches. + +Currently it supports Python 3 (testing on 3.8-3.12) and PyPy 3. +The last Python 2-compatible version was h11 0.11.x. +(Originally it had a Cython wrapper for `http-parser +`_ and a beautiful nested state +machine implemented with ``yield from`` to postprocess the output. But +I had to take these out -- the new *parser* needs fewer lines-of-code +than the old *parser wrapper*, is written in pure Python, uses no +exotic language syntax, and has more features. It's sad, really; that +old state machine was really slick. I just need a few sentences here +to mourn that.) + +I don't know how fast it is. I haven't benchmarked or profiled it yet, +so it's probably got a few pointless hot spots, and I've been trying +to err on the side of simplicity and robustness instead of +micro-optimization. But at the architectural level I tried hard to +avoid fundamentally bad decisions, e.g., I believe that all the +parsing algorithms remain linear-time even in the face of pathological +input like slowloris, and there are no byte-by-byte loops. (I also +believe that it maintains bounded memory usage in the face of +arbitrary/pathological input.) + +The whole library is ~800 lines-of-code. You can read and understand +the whole thing in less than an hour. Most of the energy invested in +this so far has been spent on trying to keep things simple by +minimizing special-cases and ad hoc state manipulation; even though it +is now quite small and simple, I'm still annoyed that I haven't +figured out how to make it even smaller and simpler. (Unfortunately, +HTTP does not lend itself to simplicity.) + +The API is ~feature complete and I don't expect the general outlines +to change much, but you can't judge an API's ergonomics until you +actually document and use it, so I'd expect some changes in the +details. + +*How do I try it?* + +.. code-block:: sh + + $ pip install h11 + $ git clone git@github.com:python-hyper/h11 + $ cd h11/examples + $ python basic-client.py + +and go from there. + +*License?* + +MIT + +*Code of conduct?* + +Contributors are requested to follow our `code of conduct +`_ in +all project spaces. 
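The high-level flow described in the README above maps directly onto a short client loop. Here is a minimal sketch, not taken from the package itself, assuming an already-connected blocking socket ``sock`` and a ``host`` string (both hypothetical names):

```python
import h11

def fetch(sock, host):
    # Track one HTTP/1.1 connection's state, playing the client role.
    conn = h11.Connection(our_role=h11.CLIENT)
    # Turn event objects into bytes with send() and push them out ourselves:
    # h11 never touches the network.
    sock.sendall(conn.send(h11.Request(
        method="GET", target="/", headers=[("Host", host)])))
    sock.sendall(conn.send(h11.EndOfMessage()))  # a GET has no body
    # Feed raw bytes in with receive_data(), pull events out with next_event().
    body = b""
    while True:
        event = conn.next_event()
        if event is h11.NEED_DATA:
            conn.receive_data(sock.recv(4096))  # b"" tells h11 the peer closed
        elif isinstance(event, h11.Data):
            body += bytes(event.data)
        elif isinstance(event, h11.EndOfMessage):
            return body  # the Response event itself is simply skipped here
```

On a keep-alive connection, calling ``conn.start_next_cycle()`` after the final ``EndOfMessage`` would reset both roles to IDLE for the next request, per the lifecycle rules in ``_connection.py`` below.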
diff --git a/venv/Lib/site-packages/h11-0.16.0.dist-info/RECORD b/venv/Lib/site-packages/h11-0.16.0.dist-info/RECORD new file mode 100644 index 00000000..fa60234d --- /dev/null +++ b/venv/Lib/site-packages/h11-0.16.0.dist-info/RECORD @@ -0,0 +1,29 @@ +h11-0.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +h11-0.16.0.dist-info/METADATA,sha256=KPMmCYrAn8unm48YD5YIfIQf4kViFct7hyqcfVzRnWQ,8348 +h11-0.16.0.dist-info/RECORD,, +h11-0.16.0.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91 +h11-0.16.0.dist-info/licenses/LICENSE.txt,sha256=N9tbuFkm2yikJ6JYZ_ELEjIAOuob5pzLhRE4rbjm82E,1124 +h11-0.16.0.dist-info/top_level.txt,sha256=F7dC4jl3zeh8TGHEPaWJrMbeuoWbS379Gwdi-Yvdcis,4 +h11/__init__.py,sha256=iO1KzkSO42yZ6ffg-VMgbx_ZVTWGUY00nRYEWn-s3kY,1507 +h11/__pycache__/__init__.cpython-312.pyc,, +h11/__pycache__/_abnf.cpython-312.pyc,, +h11/__pycache__/_connection.cpython-312.pyc,, +h11/__pycache__/_events.cpython-312.pyc,, +h11/__pycache__/_headers.cpython-312.pyc,, +h11/__pycache__/_readers.cpython-312.pyc,, +h11/__pycache__/_receivebuffer.cpython-312.pyc,, +h11/__pycache__/_state.cpython-312.pyc,, +h11/__pycache__/_util.cpython-312.pyc,, +h11/__pycache__/_version.cpython-312.pyc,, +h11/__pycache__/_writers.cpython-312.pyc,, +h11/_abnf.py,sha256=ybixr0xsupnkA6GFAyMubuXF6Tc1lb_hF890NgCsfNc,4815 +h11/_connection.py,sha256=k9YRVf6koZqbttBW36xSWaJpWdZwa-xQVU9AHEo9DuI,26863 +h11/_events.py,sha256=I97aXoal1Wu7dkL548BANBUCkOIbe-x5CioYA9IBY14,11792 +h11/_headers.py,sha256=P7D-lBNxHwdLZPLimmYwrPG-9ZkjElvvJZJdZAgSP-4,10412 +h11/_readers.py,sha256=a4RypORUCC3d0q_kxPuBIM7jTD8iLt5X91TH0FsduN4,8590 +h11/_receivebuffer.py,sha256=xrspsdsNgWFxRfQcTXxR8RrdjRXXTK0Io5cQYWpJ1Ws,5252 +h11/_state.py,sha256=_5LG_BGR8FCcFQeBPH-TMHgm_-B-EUcWCnQof_9XjFE,13231 +h11/_util.py,sha256=LWkkjXyJaFlAy6Lt39w73UStklFT5ovcvo0TkY7RYuk,4888 +h11/_version.py,sha256=GVSsbPSPDcOuF6ptfIiXnVJoaEm3ygXbMnqlr_Giahw,686 +h11/_writers.py,sha256=oFKm6PtjeHfbj4RLX7VB7KDc1gIY53gXG3_HR9ltmTA,5081 +h11/py.typed,sha256=sow9soTwP9T_gEAQSVh7Gb8855h04Nwmhs2We-JRgZM,7 diff --git a/venv/Lib/site-packages/h11-0.16.0.dist-info/WHEEL b/venv/Lib/site-packages/h11-0.16.0.dist-info/WHEEL new file mode 100644 index 00000000..1eb3c49d --- /dev/null +++ b/venv/Lib/site-packages/h11-0.16.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (78.1.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/h11-0.16.0.dist-info/licenses/LICENSE.txt b/venv/Lib/site-packages/h11-0.16.0.dist-info/licenses/LICENSE.txt new file mode 100644 index 00000000..8f080eae --- /dev/null +++ b/venv/Lib/site-packages/h11-0.16.0.dist-info/licenses/LICENSE.txt @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Nathaniel J. Smith and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/venv/Lib/site-packages/h11-0.16.0.dist-info/top_level.txt b/venv/Lib/site-packages/h11-0.16.0.dist-info/top_level.txt new file mode 100644 index 00000000..0d24def7 --- /dev/null +++ b/venv/Lib/site-packages/h11-0.16.0.dist-info/top_level.txt @@ -0,0 +1 @@ +h11 diff --git a/venv/Lib/site-packages/h11/__init__.py b/venv/Lib/site-packages/h11/__init__.py new file mode 100644 index 00000000..989e92c3 --- /dev/null +++ b/venv/Lib/site-packages/h11/__init__.py @@ -0,0 +1,62 @@ +# A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230), +# containing no networking code at all, loosely modelled on hyper-h2's generic +# implementation of HTTP/2 (and in particular the h2.connection.H2Connection +# class). There's still a bunch of subtle details you need to get right if you +# want to make this actually useful, because it doesn't implement all the +# semantics to check that what you're asking to write to the wire is sensible, +# but at least it gets you out of dealing with the wire itself. + +from h11._connection import Connection, NEED_DATA, PAUSED +from h11._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from h11._state import ( + CLIENT, + CLOSED, + DONE, + ERROR, + IDLE, + MIGHT_SWITCH_PROTOCOL, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, + SWITCHED_PROTOCOL, +) +from h11._util import LocalProtocolError, ProtocolError, RemoteProtocolError +from h11._version import __version__ + +PRODUCT_ID = "python-h11/" + __version__ + + +__all__ = ( + "Connection", + "NEED_DATA", + "PAUSED", + "ConnectionClosed", + "Data", + "EndOfMessage", + "Event", + "InformationalResponse", + "Request", + "Response", + "CLIENT", + "CLOSED", + "DONE", + "ERROR", + "IDLE", + "MUST_CLOSE", + "SEND_BODY", + "SEND_RESPONSE", + "SERVER", + "SWITCHED_PROTOCOL", + "ProtocolError", + "LocalProtocolError", + "RemoteProtocolError", +) diff --git a/venv/Lib/site-packages/h11/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a3d2f785 Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/h11/__pycache__/_abnf.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/_abnf.cpython-312.pyc new file mode 100644 index 00000000..cd4b5c1f Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/_abnf.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/h11/__pycache__/_connection.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/_connection.cpython-312.pyc new file mode 100644 index 00000000..7c34fdb6 Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/_connection.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/h11/__pycache__/_events.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/_events.cpython-312.pyc new file mode 100644 index 00000000..3bebb62f Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/_events.cpython-312.pyc 
differ diff --git a/venv/Lib/site-packages/h11/__pycache__/_headers.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/_headers.cpython-312.pyc new file mode 100644 index 00000000..8df6eed8 Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/_headers.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/h11/__pycache__/_readers.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/_readers.cpython-312.pyc new file mode 100644 index 00000000..b5c15cfd Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/_readers.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/h11/__pycache__/_receivebuffer.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/_receivebuffer.cpython-312.pyc new file mode 100644 index 00000000..504c5503 Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/_receivebuffer.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/h11/__pycache__/_state.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/_state.cpython-312.pyc new file mode 100644 index 00000000..fcabfbf3 Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/_state.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/h11/__pycache__/_util.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/_util.cpython-312.pyc new file mode 100644 index 00000000..1a97a2fb Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/_util.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/h11/__pycache__/_version.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/_version.cpython-312.pyc new file mode 100644 index 00000000..a4255f3b Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/_version.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/h11/__pycache__/_writers.cpython-312.pyc b/venv/Lib/site-packages/h11/__pycache__/_writers.cpython-312.pyc new file mode 100644 index 00000000..0bdb09b7 Binary files /dev/null and b/venv/Lib/site-packages/h11/__pycache__/_writers.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/h11/_abnf.py b/venv/Lib/site-packages/h11/_abnf.py new file mode 100644 index 00000000..933587fb --- /dev/null +++ b/venv/Lib/site-packages/h11/_abnf.py @@ -0,0 +1,132 @@ +# We use native strings for all the re patterns, to take advantage of string +# formatting, and then convert to bytestrings when compiling the final re +# objects. + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#whitespace +# OWS = *( SP / HTAB ) +# ; optional whitespace +OWS = r"[ \t]*" + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.token.separators +# token = 1*tchar +# +# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" +# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" +# / DIGIT / ALPHA +# ; any VCHAR, except delimiters +token = r"[-!#$%&'*+.^_`|~0-9a-zA-Z]+" + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#header.fields +# field-name = token +field_name = token + +# The standard says: +# +# field-value = *( field-content / obs-fold ) +# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +# field-vchar = VCHAR / obs-text +# obs-fold = CRLF 1*( SP / HTAB ) +# ; obsolete line folding +# ; see Section 3.2.4 +# +# https://tools.ietf.org/html/rfc5234#appendix-B.1 +# +# VCHAR = %x21-7E +# ; visible (printing) characters +# +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.quoted-string +# obs-text = %x80-FF +# +# However, the standard definition of field-content is WRONG! 
It disallows +# fields containing a single visible character surrounded by whitespace, +# e.g. "foo a bar". +# +# See: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 +# +# So our definition of field_content attempts to fix it up... +# +# Also, we allow lots of control characters, because apparently people assume +# that they're legal in practice (e.g., google analytics makes cookies with +# \x01 in them!): +# https://github.com/python-hyper/h11/issues/57 +# We still don't allow NUL or whitespace, because those are often treated as +# meta-characters and letting them through can lead to nasty issues like SSRF. +vchar = r"[\x21-\x7e]" +vchar_or_obs_text = r"[^\x00\s]" +field_vchar = vchar_or_obs_text +field_content = r"{field_vchar}+(?:[ \t]+{field_vchar}+)*".format(**globals()) + +# We handle obs-fold at a different level, and our fixed-up field_content +# already grows to swallow the whole value, so ? instead of * +field_value = r"({field_content})?".format(**globals()) + +# header-field = field-name ":" OWS field-value OWS +header_field = ( + r"(?P<field_name>{field_name})" + r":" + r"{OWS}" + r"(?P<field_value>{field_value})" + r"{OWS}".format(**globals()) +) + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#request.line +# +# request-line = method SP request-target SP HTTP-version CRLF +# method = token +# HTTP-version = HTTP-name "/" DIGIT "." DIGIT +# HTTP-name = %x48.54.54.50 ; "HTTP", case-sensitive +# +# request-target is complicated (see RFC 7230 sec 5.3) -- could be path, full +# URL, host+port (for connect), or even "*", but in any case we are guaranteed +# that it consists of the visible printing characters. +method = token +request_target = r"{vchar}+".format(**globals()) +http_version = r"HTTP/(?P<http_version>[0-9]\.[0-9])" +request_line = ( + r"(?P<method>{method})" + r" " + r"(?P<target>{request_target})" + r" " + r"{http_version}".format(**globals()) +) + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#status.line +# +# status-line = HTTP-version SP status-code SP reason-phrase CRLF +# status-code = 3DIGIT +# reason-phrase = *( HTAB / SP / VCHAR / obs-text ) +status_code = r"[0-9]{3}" +reason_phrase = r"([ \t]|{vchar_or_obs_text})*".format(**globals()) +status_line = ( + r"{http_version}" + r" " + r"(?P<status_code>{status_code})" + # However, there are apparently a few too many servers out there that just + # leave out the reason phrase: + # https://github.com/scrapy/scrapy/issues/345#issuecomment-281756036 + # https://github.com/seanmonstar/httparse/issues/29 + # so make it optional. ?: is a non-capturing group. + r"(?: (?P<reason>{reason_phrase}))?".format(**globals()) +) + +HEXDIG = r"[0-9A-Fa-f]" +# Actually +# +# chunk-size = 1*HEXDIG +# +# but we impose an upper-limit to avoid ridiculosity. len(str(2**64)) == 20 +chunk_size = r"({HEXDIG}){{1,20}}".format(**globals()) +# Actually +# +# chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] ) +# +# but we aren't parsing the things so we don't really care. +chunk_ext = r";.*" +chunk_header = ( + r"(?P<chunk_size>{chunk_size})" + r"(?P<chunk_ext>{chunk_ext})?" + r"{OWS}\r\n".format( + **globals() + ) # Even though the specification does not allow for extra whitespace, + # we are lenient with trailing whitespace because some servers in the wild use it. +) diff --git a/venv/Lib/site-packages/h11/_connection.py b/venv/Lib/site-packages/h11/_connection.py new file mode 100644 index 00000000..e37d82a8 --- /dev/null +++ b/venv/Lib/site-packages/h11/_connection.py @@ -0,0 +1,659 @@ +# This contains the main Connection class. Everything in h11 revolves around +# this. 
+from typing import ( + Any, + Callable, + cast, + Dict, + List, + Optional, + overload, + Tuple, + Type, + Union, +) + +from ._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from ._headers import get_comma_header, has_expect_100_continue, set_comma_header +from ._readers import READERS, ReadersType +from ._receivebuffer import ReceiveBuffer +from ._state import ( + _SWITCH_CONNECT, + _SWITCH_UPGRADE, + CLIENT, + ConnectionState, + DONE, + ERROR, + MIGHT_SWITCH_PROTOCOL, + SEND_BODY, + SERVER, + SWITCHED_PROTOCOL, +) +from ._util import ( # Import the internal things we need + LocalProtocolError, + RemoteProtocolError, + Sentinel, +) +from ._writers import WRITERS, WritersType + +# Everything in __all__ gets re-exported as part of the h11 public API. +__all__ = ["Connection", "NEED_DATA", "PAUSED"] + + +class NEED_DATA(Sentinel, metaclass=Sentinel): + pass + + +class PAUSED(Sentinel, metaclass=Sentinel): + pass + + +# If we ever have this much buffered without it making a complete parseable +# event, we error out. The only time we really buffer is when reading the +# request/response line + headers together, so this is effectively the limit on +# the size of that. +# +# Some precedents for defaults: +# - node.js: 80 * 1024 +# - tomcat: 8 * 1024 +# - IIS: 16 * 1024 +# - Apache: <8 KiB per line> +DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024 + + +# RFC 7230's rules for connection lifecycles: +# - If either side says they want to close the connection, then the connection +# must close. +# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close +# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive +# (and even this is a mess -- e.g. if you're implementing a proxy then +# sending Connection: keep-alive is forbidden). +# +# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So +# our rule is: +# - If someone says Connection: close, we will close +# - If someone uses HTTP/1.0, we will close. +def _keep_alive(event: Union[Request, Response]) -> bool: + connection = get_comma_header(event.headers, b"connection") + if b"close" in connection: + return False + if getattr(event, "http_version", b"1.1") < b"1.1": + return False + return True + + +def _body_framing( + request_method: bytes, event: Union[Request, Response] +) -> Tuple[str, Union[Tuple[()], Tuple[int]]]: + # Called when we enter SEND_BODY to figure out framing information for + # this body. + # + # These are the only two events that can trigger a SEND_BODY state: + assert type(event) in (Request, Response) + # Returns one of: + # + # ("content-length", count) + # ("chunked", ()) + # ("http/1.0", ()) + # + # which are (lookup key, *args) for constructing body reader/writer + # objects. + # + # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3 + # + # Step 1: some responses always have an empty body, regardless of what the + # headers say. + if type(event) is Response: + if ( + event.status_code in (204, 304) + or request_method == b"HEAD" + or (request_method == b"CONNECT" and 200 <= event.status_code < 300) + ): + return ("content-length", (0,)) + # Section 3.3.3 also lists another case -- responses with status_code + # < 200. For us these are InformationalResponses, not Responses, so + # they can't get into this function in the first place. 
+ assert event.status_code >= 200 + + # Step 2: check for Transfer-Encoding (T-E beats C-L): + transfer_encodings = get_comma_header(event.headers, b"transfer-encoding") + if transfer_encodings: + assert transfer_encodings == [b"chunked"] + return ("chunked", ()) + + # Step 3: check for Content-Length + content_lengths = get_comma_header(event.headers, b"content-length") + if content_lengths: + return ("content-length", (int(content_lengths[0]),)) + + # Step 4: no applicable headers; fallback/default depends on type + if type(event) is Request: + return ("content-length", (0,)) + else: + return ("http/1.0", ()) + + +################################################################ +# +# The main Connection class +# +################################################################ + + +class Connection: + """An object encapsulating the state of an HTTP connection. + + Args: + our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If + you're implementing a server, pass :data:`h11.SERVER`. + + max_incomplete_event_size (int): + The maximum number of bytes we're willing to buffer of an + incomplete event. In practice this mostly sets a limit on the + maximum size of the request/response line + headers. If this is + exceeded, then :meth:`next_event` will raise + :exc:`RemoteProtocolError`. + + """ + + def __init__( + self, + our_role: Type[Sentinel], + max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE, + ) -> None: + self._max_incomplete_event_size = max_incomplete_event_size + # State and role tracking + if our_role not in (CLIENT, SERVER): + raise ValueError(f"expected CLIENT or SERVER, not {our_role!r}") + self.our_role = our_role + self.their_role: Type[Sentinel] + if our_role is CLIENT: + self.their_role = SERVER + else: + self.their_role = CLIENT + self._cstate = ConnectionState() + + # Callables for converting data->events or vice-versa given the + # current state + self._writer = self._get_io_object(self.our_role, None, WRITERS) + self._reader = self._get_io_object(self.their_role, None, READERS) + + # Holds any unprocessed received data + self._receive_buffer = ReceiveBuffer() + # If this is true, then it indicates that the incoming connection was + # closed *after* the end of whatever's in self._receive_buffer: + self._receive_buffer_closed = False + + # Extra bits of state that don't fit into the state machine. + # + # These two are only used to interpret framing headers for figuring + # out how to read/write response bodies. their_http_version is also + # made available as a convenient public API. + self.their_http_version: Optional[bytes] = None + self._request_method: Optional[bytes] = None + # This is pure flow-control and doesn't at all affect the set of legal + # transitions, so no need to bother ConnectionState with it: + self.client_is_waiting_for_100_continue = False + + @property + def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]: + """A dictionary like:: + + {CLIENT: <client state>, SERVER: <server state>} + + See :ref:`state-machine` for details. + + """ + return dict(self._cstate.states) + + @property + def our_state(self) -> Type[Sentinel]: + """The current state of whichever role we are playing. See + :ref:`state-machine` for details. + """ + return self._cstate.states[self.our_role] + + @property + def their_state(self) -> Type[Sentinel]: + """The current state of whichever role we are NOT playing. See + :ref:`state-machine` for details. 
+ """ + return self._cstate.states[self.their_role] + + @property + def they_are_waiting_for_100_continue(self) -> bool: + return self.their_role is CLIENT and self.client_is_waiting_for_100_continue + + def start_next_cycle(self) -> None: + """Attempt to reset our connection state for a new request/response + cycle. + + If both client and server are in :data:`DONE` state, then resets them + both to :data:`IDLE` state in preparation for a new request/response + cycle on this same connection. Otherwise, raises a + :exc:`LocalProtocolError`. + + See :ref:`keepalive-and-pipelining`. + + """ + old_states = dict(self._cstate.states) + self._cstate.start_next_cycle() + self._request_method = None + # self.their_http_version gets left alone, since it presumably lasts + # beyond a single request/response cycle + assert not self.client_is_waiting_for_100_continue + self._respond_to_state_changes(old_states) + + def _process_error(self, role: Type[Sentinel]) -> None: + old_states = dict(self._cstate.states) + self._cstate.process_error(role) + self._respond_to_state_changes(old_states) + + def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]: + if type(event) is InformationalResponse and event.status_code == 101: + return _SWITCH_UPGRADE + if type(event) is Response: + if ( + _SWITCH_CONNECT in self._cstate.pending_switch_proposals + and 200 <= event.status_code < 300 + ): + return _SWITCH_CONNECT + return None + + # All events go through here + def _process_event(self, role: Type[Sentinel], event: Event) -> None: + # First, pass the event through the state machine to make sure it + # succeeds. + old_states = dict(self._cstate.states) + if role is CLIENT and type(event) is Request: + if event.method == b"CONNECT": + self._cstate.process_client_switch_proposal(_SWITCH_CONNECT) + if get_comma_header(event.headers, b"upgrade"): + self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE) + server_switch_event = None + if role is SERVER: + server_switch_event = self._server_switch_event(event) + self._cstate.process_event(role, type(event), server_switch_event) + + # Then perform the updates triggered by it. + + if type(event) is Request: + self._request_method = event.method + + if role is self.their_role and type(event) in ( + Request, + Response, + InformationalResponse, + ): + event = cast(Union[Request, Response, InformationalResponse], event) + self.their_http_version = event.http_version + + # Keep alive handling + # + # RFC 7230 doesn't really say what one should do if Connection: close + # shows up on a 1xx InformationalResponse. I think the idea is that + # this is not supposed to happen. In any case, if it does happen, we + # ignore it. 
+ if type(event) in (Request, Response) and not _keep_alive( + cast(Union[Request, Response], event) + ): + self._cstate.process_keep_alive_disabled() + + # 100-continue + if type(event) is Request and has_expect_100_continue(event): + self.client_is_waiting_for_100_continue = True + if type(event) in (InformationalResponse, Response): + self.client_is_waiting_for_100_continue = False + if role is CLIENT and type(event) in (Data, EndOfMessage): + self.client_is_waiting_for_100_continue = False + + self._respond_to_state_changes(old_states, event) + + def _get_io_object( + self, + role: Type[Sentinel], + event: Optional[Event], + io_dict: Union[ReadersType, WritersType], + ) -> Optional[Callable[..., Any]]: + # event may be None; it's only used when entering SEND_BODY + state = self._cstate.states[role] + if state is SEND_BODY: + # Special case: the io_dict has a dict of reader/writer factories + # that depend on the request/response framing. + framing_type, args = _body_framing( + cast(bytes, self._request_method), cast(Union[Request, Response], event) + ) + return io_dict[SEND_BODY][framing_type](*args) # type: ignore[index] + else: + # General case: the io_dict just has the appropriate reader/writer + # for this state + return io_dict.get((role, state)) # type: ignore[return-value] + + # This must be called after any action that might have caused + # self._cstate.states to change. + def _respond_to_state_changes( + self, + old_states: Dict[Type[Sentinel], Type[Sentinel]], + event: Optional[Event] = None, + ) -> None: + # Update reader/writer + if self.our_state != old_states[self.our_role]: + self._writer = self._get_io_object(self.our_role, event, WRITERS) + if self.their_state != old_states[self.their_role]: + self._reader = self._get_io_object(self.their_role, event, READERS) + + @property + def trailing_data(self) -> Tuple[bytes, bool]: + """Data that has been received, but not yet processed, represented as + a tuple with two elements, where the first is a byte-string containing + the unprocessed data itself, and the second is a bool that is True if + the receive connection was closed. + + See :ref:`switching-protocols` for discussion of why you'd want this. + """ + return (bytes(self._receive_buffer), self._receive_buffer_closed) + + def receive_data(self, data: bytes) -> None: + """Add data to our internal receive buffer. + + This does not actually do any processing on the data, just stores + it. To trigger processing, you have to call :meth:`next_event`. + + Args: + data (:term:`bytes-like object`): + The new data that was just received. + + Special case: If *data* is an empty byte-string like ``b""``, + then this indicates that the remote side has closed the + connection (end of file). Normally this is convenient, because + standard Python APIs like :meth:`file.read` or + :meth:`socket.recv` use ``b""`` to indicate end-of-file, while + other failures to read are indicated using other mechanisms + like raising :exc:`TimeoutError`. When using such an API you + can just blindly pass through whatever you get from ``read`` + to :meth:`receive_data`, and everything will work. + + But, if you have an API where reading an empty string is a + valid non-EOF condition, then you need to be aware of this and + make sure to check for such strings and avoid passing them to + :meth:`receive_data`. + + Returns: + Nothing, but after calling this you should call :meth:`next_event` + to parse the newly received data. 
+ + Raises: + RuntimeError: + Raised if you pass an empty *data*, indicating EOF, and then + pass a non-empty *data*, indicating more data that somehow + arrived after the EOF. + + (Calling ``receive_data(b"")`` multiple times is fine, + and equivalent to calling it once.) + + """ + if data: + if self._receive_buffer_closed: + raise RuntimeError("received close, then received more data?") + self._receive_buffer += data + else: + self._receive_buffer_closed = True + + def _extract_next_receive_event( + self, + ) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: + state = self.their_state + # We don't pause immediately when they enter DONE, because even in + # DONE state we can still process a ConnectionClosed() event. But + # if we have data in our buffer, then we definitely aren't getting + # a ConnectionClosed() immediately and we need to pause. + if state is DONE and self._receive_buffer: + return PAUSED + if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL: + return PAUSED + assert self._reader is not None + event = self._reader(self._receive_buffer) + if event is None: + if not self._receive_buffer and self._receive_buffer_closed: + # In some unusual cases (basically just HTTP/1.0 bodies), EOF + # triggers an actual protocol event; in that case, we want to + # return that event, and then the state will change and we'll + # get called again to generate the actual ConnectionClosed(). + if hasattr(self._reader, "read_eof"): + event = self._reader.read_eof() + else: + event = ConnectionClosed() + if event is None: + event = NEED_DATA + return event # type: ignore[no-any-return] + + def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: + """Parse the next event out of our receive buffer, update our internal + state, and return it. + + This is a mutating operation -- think of it like calling :func:`next` + on an iterator. + + Returns: + : One of three things: + + 1) An event object -- see :ref:`events`. + + 2) The special constant :data:`NEED_DATA`, which indicates that + you need to read more data from your socket and pass it to + :meth:`receive_data` before this method will be able to return + any more events. + + 3) The special constant :data:`PAUSED`, which indicates that we + are not in a state where we can process incoming data (usually + because the peer has finished their part of the current + request/response cycle, and you have not yet called + :meth:`start_next_cycle`). See :ref:`flow-control` for details. + + Raises: + RemoteProtocolError: + The peer has misbehaved. You should close the connection + (possibly after sending some kind of 4xx response). + + Once this method returns :class:`ConnectionClosed` once, then all + subsequent calls will also return :class:`ConnectionClosed`. + + If this method raises any exception besides :exc:`RemoteProtocolError` + then that's a bug -- if it happens please file a bug report! + + If this method raises any exception then it also sets + :attr:`Connection.their_state` to :data:`ERROR` -- see + :ref:`error-handling` for discussion. 
+ + """ + + if self.their_state is ERROR: + raise RemoteProtocolError("Can't receive data when peer state is ERROR") + try: + event = self._extract_next_receive_event() + if event not in [NEED_DATA, PAUSED]: + self._process_event(self.their_role, cast(Event, event)) + if event is NEED_DATA: + if len(self._receive_buffer) > self._max_incomplete_event_size: + # 431 is "Request header fields too large" which is pretty + # much the only situation where we can get here + raise RemoteProtocolError( + "Receive buffer too long", error_status_hint=431 + ) + if self._receive_buffer_closed: + # We're still trying to complete some event, but that's + # never going to happen because no more data is coming + raise RemoteProtocolError("peer unexpectedly closed connection") + return event + except BaseException as exc: + self._process_error(self.their_role) + if isinstance(exc, LocalProtocolError): + exc._reraise_as_remote_protocol_error() + else: + raise + + @overload + def send(self, event: ConnectionClosed) -> None: + ... + + @overload + def send( + self, event: Union[Request, InformationalResponse, Response, Data, EndOfMessage] + ) -> bytes: + ... + + @overload + def send(self, event: Event) -> Optional[bytes]: + ... + + def send(self, event: Event) -> Optional[bytes]: + """Convert a high-level event into bytes that can be sent to the peer, + while updating our internal state machine. + + Args: + event: The :ref:`event ` to send. + + Returns: + If ``type(event) is ConnectionClosed``, then returns + ``None``. Otherwise, returns a :term:`bytes-like object`. + + Raises: + LocalProtocolError: + Sending this event at this time would violate our + understanding of the HTTP/1.1 protocol. + + If this method raises any exception then it also sets + :attr:`Connection.our_state` to :data:`ERROR` -- see + :ref:`error-handling` for discussion. + + """ + data_list = self.send_with_data_passthrough(event) + if data_list is None: + return None + else: + return b"".join(data_list) + + def send_with_data_passthrough(self, event: Event) -> Optional[List[bytes]]: + """Identical to :meth:`send`, except that in situations where + :meth:`send` returns a single :term:`bytes-like object`, this instead + returns a list of them -- and when sending a :class:`Data` event, this + list is guaranteed to contain the exact object you passed in as + :attr:`Data.data`. See :ref:`sendfile` for discussion. + + """ + if self.our_state is ERROR: + raise LocalProtocolError("Can't send data when our state is ERROR") + try: + if type(event) is Response: + event = self._clean_up_response_headers_for_sending(event) + # We want to call _process_event before calling the writer, + # because if someone tries to do something invalid then this will + # give a sensible error message, while our writers all just assume + # they will only receive valid events. But, _process_event might + # change self._writer. So we have to do a little dance: + writer = self._writer + self._process_event(self.our_role, event) + if type(event) is ConnectionClosed: + return None + else: + # In any situation where writer is None, process_event should + # have raised ProtocolError + assert writer is not None + data_list: List[bytes] = [] + writer(event, data_list.append) + return data_list + except: + self._process_error(self.our_role) + raise + + def send_failed(self) -> None: + """Notify the state machine that we failed to send the data it gave + us. 
+ + This causes :attr:`Connection.our_state` to immediately become + :data:`ERROR` -- see :ref:`error-handling` for discussion. + + """ + self._process_error(self.our_role) + + # When sending a Response, we take responsibility for a few things: + # + # - Sometimes you MUST set Connection: close. We take care of those + # times. (You can also set it yourself if you want, and if you do then + # we'll respect that and close the connection at the right time. But you + # don't have to worry about that unless you want to.) + # + # - The user has to set Content-Length if they want it. Otherwise, for + # responses that have bodies (e.g. not HEAD), then we will automatically + # select the right mechanism for streaming a body of unknown length, + # which depends on the peer's HTTP version. + # + # This function's *only* responsibility is making sure headers are set up + # right -- everything downstream just looks at the headers. There are no + # side channels. + def _clean_up_response_headers_for_sending(self, response: Response) -> Response: + assert type(response) is Response + + headers = response.headers + need_close = False + + # HEAD requests need some special handling: they always act like they + # have Content-Length: 0, and that's how _body_framing treats + # them. But their headers are supposed to match what we would send if + # the request was a GET. (Technically there is one deviation allowed: + # we're allowed to leave out the framing headers -- see + # https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as + # easy to get them right.) + method_for_choosing_headers = cast(bytes, self._request_method) + if method_for_choosing_headers == b"HEAD": + method_for_choosing_headers = b"GET" + framing_type, _ = _body_framing(method_for_choosing_headers, response) + if framing_type in ("chunked", "http/1.0"): + # This response has a body of unknown length. + # If our peer is HTTP/1.1, we use Transfer-Encoding: chunked + # If our peer is HTTP/1.0, we use no framing headers, and close the + # connection afterwards. + # + # Make sure to clear Content-Length (in principle user could have + # set both and then we ignored Content-Length b/c + # Transfer-Encoding overwrote it -- this would be naughty of them, + # but the HTTP spec says that if our peer does this then we have + # to fix it instead of erroring out, so we'll accord the user the + # same respect). + headers = set_comma_header(headers, b"content-length", []) + if self.their_http_version is None or self.their_http_version < b"1.1": + # Either we never got a valid request and are sending back an + # error (their_http_version is None), so we assume the worst; + # or else we did get a valid HTTP/1.0 request, so we know that + # they don't understand chunked encoding. + headers = set_comma_header(headers, b"transfer-encoding", []) + # This is actually redundant ATM, since currently we + # unconditionally disable keep-alive when talking to HTTP/1.0 + # peers. 
+                # Connection: keep-alive support later:
+                if self._request_method != b"HEAD":
+                    need_close = True
+            else:
+                headers = set_comma_header(headers, b"transfer-encoding", [b"chunked"])
+
+        if not self._cstate.keep_alive or need_close:
+            # Make sure Connection: close is set
+            connection = set(get_comma_header(headers, b"connection"))
+            connection.discard(b"keep-alive")
+            connection.add(b"close")
+            headers = set_comma_header(headers, b"connection", sorted(connection))
+
+        return Response(
+            headers=headers,
+            status_code=response.status_code,
+            http_version=response.http_version,
+            reason=response.reason,
+        )
diff --git a/venv/Lib/site-packages/h11/_events.py b/venv/Lib/site-packages/h11/_events.py
new file mode 100644
index 00000000..ca1c3adb
--- /dev/null
+++ b/venv/Lib/site-packages/h11/_events.py
@@ -0,0 +1,369 @@
+# High level events that make up HTTP/1.1 conversations. Loosely inspired by
+# the corresponding events in hyper-h2:
+#
+#     http://python-hyper.org/h2/en/stable/api.html#events
+#
+# Don't subclass these. Stuff will break.
+
+import re
+from abc import ABC
+from dataclasses import dataclass
+from typing import List, Tuple, Union
+
+from ._abnf import method, request_target
+from ._headers import Headers, normalize_and_validate
+from ._util import bytesify, LocalProtocolError, validate
+
+# Everything in __all__ gets re-exported as part of the h11 public API.
+__all__ = [
+    "Event",
+    "Request",
+    "InformationalResponse",
+    "Response",
+    "Data",
+    "EndOfMessage",
+    "ConnectionClosed",
+]
+
+method_re = re.compile(method.encode("ascii"))
+request_target_re = re.compile(request_target.encode("ascii"))
+
+
+class Event(ABC):
+    """
+    Base class for h11 events.
+    """
+
+    __slots__ = ()
+
+
+@dataclass(init=False, frozen=True)
+class Request(Event):
+    """The beginning of an HTTP request.
+
+    Fields:
+
+    .. attribute:: method
+
+       An HTTP method, e.g. ``b"GET"`` or ``b"POST"``. Always a byte
+       string. :term:`Bytes-like objects <bytes-like object>` and native
+       strings containing only ascii characters will be automatically
+       converted to byte strings.
+
+    .. attribute:: target
+
+       The target of an HTTP request, e.g. ``b"/index.html"``, or one of the
+       more exotic formats described in `RFC 7230, section 5.3
+       <https://tools.ietf.org/html/rfc7230#section-5.3>`_. Always a byte
+       string. :term:`Bytes-like objects <bytes-like object>` and native
+       strings containing only ascii characters will be automatically
+       converted to byte strings.
+
+    .. attribute:: headers
+
+       Request headers, represented as a list of (name, value) pairs. See
+       :ref:`the header normalization rules <headers-format>` for details.
+
+    .. attribute:: http_version
+
+       The HTTP protocol version, represented as a byte string like
+       ``b"1.1"``. See :ref:`the HTTP version normalization rules
+       <http_version-format>` for details.
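+
+    For example, a minimal request event (the mandatory ``Host`` header
+    included, since HTTP/1.1 requires it) can be constructed as::
+
+        Request(
+            method="GET",
+            target="/",
+            headers=[("Host", "example.com")],
+        )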
+ + """ + + __slots__ = ("method", "headers", "target", "http_version") + + method: bytes + headers: Headers + target: bytes + http_version: bytes + + def __init__( + self, + *, + method: Union[bytes, str], + headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]], + target: Union[bytes, str], + http_version: Union[bytes, str] = b"1.1", + _parsed: bool = False, + ) -> None: + super().__init__() + if isinstance(headers, Headers): + object.__setattr__(self, "headers", headers) + else: + object.__setattr__( + self, "headers", normalize_and_validate(headers, _parsed=_parsed) + ) + if not _parsed: + object.__setattr__(self, "method", bytesify(method)) + object.__setattr__(self, "target", bytesify(target)) + object.__setattr__(self, "http_version", bytesify(http_version)) + else: + object.__setattr__(self, "method", method) + object.__setattr__(self, "target", target) + object.__setattr__(self, "http_version", http_version) + + # "A server MUST respond with a 400 (Bad Request) status code to any + # HTTP/1.1 request message that lacks a Host header field and to any + # request message that contains more than one Host header field or a + # Host header field with an invalid field-value." + # -- https://tools.ietf.org/html/rfc7230#section-5.4 + host_count = 0 + for name, value in self.headers: + if name == b"host": + host_count += 1 + if self.http_version == b"1.1" and host_count == 0: + raise LocalProtocolError("Missing mandatory Host: header") + if host_count > 1: + raise LocalProtocolError("Found multiple Host: headers") + + validate(method_re, self.method, "Illegal method characters") + validate(request_target_re, self.target, "Illegal target characters") + + # This is an unhashable type. + __hash__ = None # type: ignore + + +@dataclass(init=False, frozen=True) +class _ResponseBase(Event): + __slots__ = ("headers", "http_version", "reason", "status_code") + + headers: Headers + http_version: bytes + reason: bytes + status_code: int + + def __init__( + self, + *, + headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]], + status_code: int, + http_version: Union[bytes, str] = b"1.1", + reason: Union[bytes, str] = b"", + _parsed: bool = False, + ) -> None: + super().__init__() + if isinstance(headers, Headers): + object.__setattr__(self, "headers", headers) + else: + object.__setattr__( + self, "headers", normalize_and_validate(headers, _parsed=_parsed) + ) + if not _parsed: + object.__setattr__(self, "reason", bytesify(reason)) + object.__setattr__(self, "http_version", bytesify(http_version)) + if not isinstance(status_code, int): + raise LocalProtocolError("status code must be integer") + # Because IntEnum objects are instances of int, but aren't + # duck-compatible (sigh), see gh-72. + object.__setattr__(self, "status_code", int(status_code)) + else: + object.__setattr__(self, "reason", reason) + object.__setattr__(self, "http_version", http_version) + object.__setattr__(self, "status_code", status_code) + + self.__post_init__() + + def __post_init__(self) -> None: + pass + + # This is an unhashable type. + __hash__ = None # type: ignore + + +@dataclass(init=False, frozen=True) +class InformationalResponse(_ResponseBase): + """An HTTP informational response. + + Fields: + + .. attribute:: status_code + + The status code of this response, as an integer. For an + :class:`InformationalResponse`, this is always in the range [100, + 200). + + .. attribute:: headers + + Request headers, represented as a list of (name, value) pairs. 
See
+       :ref:`the header normalization rules <headers-format>` for
+       details.
+
+    .. attribute:: http_version
+
+       The HTTP protocol version, represented as a byte string like
+       ``b"1.1"``. See :ref:`the HTTP version normalization rules
+       <http_version-format>` for details.
+
+    .. attribute:: reason
+
+       The reason phrase of this response, as a byte string. For example:
+       ``b"OK"``, or ``b"Not Found"``.
+
+    """
+
+    def __post_init__(self) -> None:
+        if not (100 <= self.status_code < 200):
+            raise LocalProtocolError(
+                "InformationalResponse status_code should be in range "
+                "[100, 200), not {}".format(self.status_code)
+            )
+
+    # This is an unhashable type.
+    __hash__ = None  # type: ignore
+
+
+@dataclass(init=False, frozen=True)
+class Response(_ResponseBase):
+    """The beginning of an HTTP response.
+
+    Fields:
+
+    .. attribute:: status_code
+
+       The status code of this response, as an integer. For a
+       :class:`Response`, this is always in the range [200,
+       1000).
+
+    .. attribute:: headers
+
+       Request headers, represented as a list of (name, value) pairs. See
+       :ref:`the header normalization rules <headers-format>` for details.
+
+    .. attribute:: http_version
+
+       The HTTP protocol version, represented as a byte string like
+       ``b"1.1"``. See :ref:`the HTTP version normalization rules
+       <http_version-format>` for details.
+
+    .. attribute:: reason
+
+       The reason phrase of this response, as a byte string. For example:
+       ``b"OK"``, or ``b"Not Found"``.
+
+    """
+
+    def __post_init__(self) -> None:
+        if not (200 <= self.status_code < 1000):
+            raise LocalProtocolError(
+                "Response status_code should be in range [200, 1000), not {}".format(
+                    self.status_code
+                )
+            )
+
+    # This is an unhashable type.
+    __hash__ = None  # type: ignore
+
+
+@dataclass(init=False, frozen=True)
+class Data(Event):
+    """Part of an HTTP message body.
+
+    Fields:
+
+    .. attribute:: data
+
+       A :term:`bytes-like object` containing part of a message body. Or, if
+       using the ``combine=False`` argument to :meth:`Connection.send`, then
+       any object that your socket writing code knows what to do with, and for
+       which calling :func:`len` returns the number of bytes that will be
+       written -- see :ref:`sendfile` for details.
+
+    .. attribute:: chunk_start
+
+       A marker that indicates whether this data object is from the start of a
+       chunked transfer encoding chunk. This field is ignored when a Data
+       event is provided to :meth:`Connection.send`: it is only valid on
+       events emitted from :meth:`Connection.next_event`. You probably
+       shouldn't use this attribute at all; see
+       :ref:`chunk-delimiters-are-bad` for details.
+
+    .. attribute:: chunk_end
+
+       A marker that indicates whether this data object is the last for a
+       given chunked transfer encoding chunk. This field is ignored when
+       a Data event is provided to :meth:`Connection.send`: it is only valid
+       on events emitted from :meth:`Connection.next_event`. You probably
+       shouldn't use this attribute at all; see
+       :ref:`chunk-delimiters-are-bad` for details.
+
+    """
+
+    __slots__ = ("data", "chunk_start", "chunk_end")
+
+    data: bytes
+    chunk_start: bool
+    chunk_end: bool
+
+    def __init__(
+        self, data: bytes, chunk_start: bool = False, chunk_end: bool = False
+    ) -> None:
+        object.__setattr__(self, "data", data)
+        object.__setattr__(self, "chunk_start", chunk_start)
+        object.__setattr__(self, "chunk_end", chunk_end)
+
+    # This is an unhashable type.
+    __hash__ = None  # type: ignore
+
+
+# XX FIXME: "A recipient MUST ignore (or consider as an error) any fields that
+# are forbidden to be sent in a trailer, since processing them as if they were
+# present in the header section might bypass external security filters."
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#chunked.trailer.part
+# Unfortunately, the list of forbidden fields is long and vague :-/
+@dataclass(init=False, frozen=True)
+class EndOfMessage(Event):
+    """The end of an HTTP message.
+
+    Fields:
+
+    .. attribute:: headers
+
+       Default value: ``[]``
+
+       Any trailing headers attached to this message, represented as a list of
+       (name, value) pairs. See :ref:`the header normalization rules
+       <headers-format>` for details.
+
+       Must be empty unless ``Transfer-Encoding: chunked`` is in use.
+
+    """
+
+    __slots__ = ("headers",)
+
+    headers: Headers
+
+    def __init__(
+        self,
+        *,
+        headers: Union[
+            Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]], None
+        ] = None,
+        _parsed: bool = False,
+    ) -> None:
+        super().__init__()
+        if headers is None:
+            headers = Headers([])
+        elif not isinstance(headers, Headers):
+            headers = normalize_and_validate(headers, _parsed=_parsed)
+
+        object.__setattr__(self, "headers", headers)
+
+    # This is an unhashable type.
+    __hash__ = None  # type: ignore
+
+
+@dataclass(frozen=True)
+class ConnectionClosed(Event):
+    """This event indicates that the sender has closed their outgoing
+    connection.
+
+    Note that this does not necessarily mean that they can't *receive* further
+    data, because TCP connections are composed of two one-way channels which
+    can be closed independently. See :ref:`closing` for details.
+
+    No fields.
+    """
+
+    pass
diff --git a/venv/Lib/site-packages/h11/_headers.py b/venv/Lib/site-packages/h11/_headers.py
new file mode 100644
index 00000000..31da3e2b
--- /dev/null
+++ b/venv/Lib/site-packages/h11/_headers.py
@@ -0,0 +1,282 @@
+import re
+from typing import AnyStr, cast, List, overload, Sequence, Tuple, TYPE_CHECKING, Union
+
+from ._abnf import field_name, field_value
+from ._util import bytesify, LocalProtocolError, validate
+
+if TYPE_CHECKING:
+    from ._events import Request
+
+try:
+    from typing import Literal
+except ImportError:
+    from typing_extensions import Literal  # type: ignore
+
+CONTENT_LENGTH_MAX_DIGITS = 20  # allow up to 1 billion TB - 1
+
+
+# Facts
+# -----
+#
+# Headers are:
+#   keys: case-insensitive ascii
+#   values: mixture of ascii and raw bytes
+#
+# "Historically, HTTP has allowed field content with text in the ISO-8859-1
+# charset [ISO-8859-1], supporting other charsets only through use of
+# [RFC2047] encoding. In practice, most HTTP header field values use only a
+# subset of the US-ASCII charset [USASCII]. Newly defined header fields SHOULD
+# limit their field values to US-ASCII octets. A recipient SHOULD treat other
+# octets in field content (obs-text) as opaque data."
+# And it deprecates all non-ascii values
+#
+# Leading/trailing whitespace in header names is forbidden
+#
+# Values get leading/trailing whitespace stripped
+#
+# Content-Disposition actually needs to contain unicode semantically; to
+# accomplish this it has a terrifically weird way of encoding the filename
+# itself as ascii (and even this still has lots of cross-browser
+# incompatibilities)
+#
+# Order is important:
+# "a proxy MUST NOT change the order of these field values when forwarding a
+# message"
+# (and there are several headers where the order indicates a preference)
+#
+# Multiple occurrences of the same header:
+# "A sender MUST NOT generate multiple header fields with the same field name
+# in a message unless either the entire field value for that header field is
+# defined as a comma-separated list [or the header is Set-Cookie which gets a
+# special exception]" - RFC 7230. (cookies are in RFC 6265)
+#
+# So every header aside from Set-Cookie can be merged by b", ".join if it
+# occurs repeatedly. But, of course, they can't necessarily be split by
+# .split(b","), because quoting.
+#
+# Given all this mess (case insensitive, duplicates allowed, order is
+# important, ...), there doesn't appear to be any standard way to handle
+# headers in Python -- they're almost like dicts, but... actually just
+# aren't. For now we punt and just use a super simple representation: headers
+# are a list of pairs
+#
+#   [(name1, value1), (name2, value2), ...]
+#
+# where all entries are bytestrings, names are lowercase and have no
+# leading/trailing whitespace, and values are bytestrings with no
+# leading/trailing whitespace. Searching and updating are done via naive O(n)
+# methods.
+#
+# Maybe a dict-of-lists would be better?
+
+_content_length_re = re.compile(rb"[0-9]+")
+_field_name_re = re.compile(field_name.encode("ascii"))
+_field_value_re = re.compile(field_value.encode("ascii"))
+
+
+class Headers(Sequence[Tuple[bytes, bytes]]):
+    """
+    A list-like interface that allows iterating over headers as byte-pairs
+    of (lowercased-name, value).
+
+    Internally we actually store the representation as three-tuples,
+    including both the raw original casing, in order to preserve casing
+    over-the-wire, and the lowercased name, for case-insensitive comparisons.
+
+    r = Request(
+        method="GET",
+        target="/",
+        headers=[("Host", "example.org"), ("Connection", "keep-alive")],
+        http_version="1.1",
+    )
+    assert r.headers == [
+        (b"host", b"example.org"),
+        (b"connection", b"keep-alive")
+    ]
+    assert r.headers.raw_items() == [
+        (b"Host", b"example.org"),
+        (b"Connection", b"keep-alive")
+    ]
+    """
+
+    __slots__ = "_full_items"
+
+    def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
+        self._full_items = full_items
+
+    def __bool__(self) -> bool:
+        return bool(self._full_items)
+
+    def __eq__(self, other: object) -> bool:
+        return list(self) == list(other)  # type: ignore
+
+    def __len__(self) -> int:
+        return len(self._full_items)
+
+    def __repr__(self) -> str:
+        return "<Headers(%s)>" % repr(list(self))
+
+    def __getitem__(self, idx: int) -> Tuple[bytes, bytes]:  # type: ignore[override]
+        _, name, value = self._full_items[idx]
+        return (name, value)
+
+    def raw_items(self) -> List[Tuple[bytes, bytes]]:
+        return [(raw_name, value) for raw_name, _, value in self._full_items]
+
+
+HeaderTypes = Union[
+    List[Tuple[bytes, bytes]],
+    List[Tuple[bytes, str]],
+    List[Tuple[str, bytes]],
+    List[Tuple[str, str]],
+]
+
+
+@overload
+def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
+    ...
+
+
+@overload
+def normalize_and_validate(headers: HeaderTypes, _parsed: Literal[False]) -> Headers:
+    ...
+
+
+@overload
+def normalize_and_validate(
+    headers: Union[Headers, HeaderTypes], _parsed: bool = False
+) -> Headers:
+    ...
+
+
+def normalize_and_validate(
+    headers: Union[Headers, HeaderTypes], _parsed: bool = False
+) -> Headers:
+    new_headers = []
+    seen_content_length = None
+    saw_transfer_encoding = False
+    for name, value in headers:
+        # For headers coming out of the parser, we can safely skip some steps,
+        # because it always returns bytes and has already run these regexes
+        # over the data:
+        if not _parsed:
+            name = bytesify(name)
+            value = bytesify(value)
+            validate(_field_name_re, name, "Illegal header name {!r}", name)
+            validate(_field_value_re, value, "Illegal header value {!r}", value)
+        assert isinstance(name, bytes)
+        assert isinstance(value, bytes)
+
+        raw_name = name
+        name = name.lower()
+        if name == b"content-length":
+            lengths = {length.strip() for length in value.split(b",")}
+            if len(lengths) != 1:
+                raise LocalProtocolError("conflicting Content-Length headers")
+            value = lengths.pop()
+            validate(_content_length_re, value, "bad Content-Length")
+            if len(value) > CONTENT_LENGTH_MAX_DIGITS:
+                raise LocalProtocolError("bad Content-Length")
+            if seen_content_length is None:
+                seen_content_length = value
+                new_headers.append((raw_name, name, value))
+            elif seen_content_length != value:
+                raise LocalProtocolError("conflicting Content-Length headers")
+        elif name == b"transfer-encoding":
+            # "A server that receives a request message with a transfer coding
+            # it does not understand SHOULD respond with 501 (Not
+            # Implemented)."
+            # https://tools.ietf.org/html/rfc7230#section-3.3.1
+            if saw_transfer_encoding:
+                raise LocalProtocolError(
+                    "multiple Transfer-Encoding headers", error_status_hint=501
+                )
+            # "All transfer-coding names are case-insensitive"
+            # -- https://tools.ietf.org/html/rfc7230#section-4
+            value = value.lower()
+            if value != b"chunked":
+                raise LocalProtocolError(
+                    "Only Transfer-Encoding: chunked is supported",
+                    error_status_hint=501,
+                )
+            saw_transfer_encoding = True
+            new_headers.append((raw_name, name, value))
+        else:
+            new_headers.append((raw_name, name, value))
+    return Headers(new_headers)
+
+
+def get_comma_header(headers: Headers, name: bytes) -> List[bytes]:
+    # Should only be used for headers whose value is a list of
+    # comma-separated, case-insensitive values.
+    #
+    # The header name `name` is expected to be lower-case bytes.
+    #
+    # Connection: meets these criteria (including case insensitivity).
+    #
+    # Content-Length: technically is just a single value (1*DIGIT), but the
+    # standard makes reference to implementations that do multiple values, and
+    # using this doesn't hurt. Ditto, case insensitivity doesn't change things
+    # either way.
+    #
+    # Transfer-Encoding: is more complex (allows for quoted strings), so
+    # splitting on , is actually wrong. For example, this is legal:
+    #
+    #    Transfer-Encoding: foo; options="1,2", chunked
+    #
+    # and should be parsed as
+    #
+    #    foo; options="1,2"
+    #    chunked
+    #
+    # but this naive function will parse it as
+    #
+    #    foo; options="1
+    #    2"
+    #    chunked
+    #
+    # However, this is okay because the only thing we are going to do with
+    # any Transfer-Encoding is reject ones that aren't just "chunked", so
+    # both of these will be treated the same anyway.
+    #
+    # Expect: the only legal value is the literal string
+    # "100-continue". Splitting on commas is harmless. Case insensitive.
+    #
+    out: List[bytes] = []
+    for _, found_name, found_raw_value in headers._full_items:
+        if found_name == name:
+            found_raw_value = found_raw_value.lower()
+            for found_split_value in found_raw_value.split(b","):
+                found_split_value = found_split_value.strip()
+                if found_split_value:
+                    out.append(found_split_value)
+    return out
+
+
+def set_comma_header(headers: Headers, name: bytes, new_values: List[bytes]) -> Headers:
+    # The header name `name` is expected to be lower-case bytes.
+    #
+    # Note that when we store the header we use title casing for the header
+    # names, in order to match the conventional HTTP header style.
+    #
+    # Simply calling `.title()` is a blunt approach, but it's correct
+    # here given the cases where we're using `set_comma_header`...
+    #
+    # Connection, Content-Length, Transfer-Encoding.
+    new_headers: List[Tuple[bytes, bytes]] = []
+    for found_raw_name, found_name, found_raw_value in headers._full_items:
+        if found_name != name:
+            new_headers.append((found_raw_name, found_raw_value))
+    for new_value in new_values:
+        new_headers.append((name.title(), new_value))
+    return normalize_and_validate(new_headers)
+
+
+def has_expect_100_continue(request: "Request") -> bool:
+    # https://tools.ietf.org/html/rfc7231#section-5.1.1
+    # "A server that receives a 100-continue expectation in an HTTP/1.0 request
+    # MUST ignore that expectation."
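+    # For example: a request carrying "Expect: 100-continue" makes this
+    # function return True over HTTP/1.1, but False over HTTP/1.0, matching
+    # the RFC quote above.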
+ if request.http_version < b"1.1": + return False + expect = get_comma_header(request.headers, b"expect") + return b"100-continue" in expect diff --git a/venv/Lib/site-packages/h11/_readers.py b/venv/Lib/site-packages/h11/_readers.py new file mode 100644 index 00000000..576804cc --- /dev/null +++ b/venv/Lib/site-packages/h11/_readers.py @@ -0,0 +1,250 @@ +# Code to read HTTP data +# +# Strategy: each reader is a callable which takes a ReceiveBuffer object, and +# either: +# 1) consumes some of it and returns an Event +# 2) raises a LocalProtocolError (for consistency -- e.g. we call validate() +# and it might raise a LocalProtocolError, so simpler just to always use +# this) +# 3) returns None, meaning "I need more data" +# +# If they have a .read_eof attribute, then this will be called if an EOF is +# received -- but this is optional. Either way, the actual ConnectionClosed +# event will be generated afterwards. +# +# READERS is a dict describing how to pick a reader. It maps states to either: +# - a reader +# - or, for body readers, a dict of per-framing reader factories + +import re +from typing import Any, Callable, Dict, Iterable, NoReturn, Optional, Tuple, Type, Union + +from ._abnf import chunk_header, header_field, request_line, status_line +from ._events import Data, EndOfMessage, InformationalResponse, Request, Response +from ._receivebuffer import ReceiveBuffer +from ._state import ( + CLIENT, + CLOSED, + DONE, + IDLE, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, +) +from ._util import LocalProtocolError, RemoteProtocolError, Sentinel, validate + +__all__ = ["READERS"] + +header_field_re = re.compile(header_field.encode("ascii")) +obs_fold_re = re.compile(rb"[ \t]+") + + +def _obsolete_line_fold(lines: Iterable[bytes]) -> Iterable[bytes]: + it = iter(lines) + last: Optional[bytes] = None + for line in it: + match = obs_fold_re.match(line) + if match: + if last is None: + raise LocalProtocolError("continuation line at start of headers") + if not isinstance(last, bytearray): + # Cast to a mutable type, avoiding copy on append to ensure O(n) time + last = bytearray(last) + last += b" " + last += line[match.end() :] + else: + if last is not None: + yield last + last = line + if last is not None: + yield last + + +def _decode_header_lines( + lines: Iterable[bytes], +) -> Iterable[Tuple[bytes, bytes]]: + for line in _obsolete_line_fold(lines): + matches = validate(header_field_re, line, "illegal header line: {!r}", line) + yield (matches["field_name"], matches["field_value"]) + + +request_line_re = re.compile(request_line.encode("ascii")) + + +def maybe_read_from_IDLE_client(buf: ReceiveBuffer) -> Optional[Request]: + lines = buf.maybe_extract_lines() + if lines is None: + if buf.is_next_line_obviously_invalid_request_line(): + raise LocalProtocolError("illegal request line") + return None + if not lines: + raise LocalProtocolError("no request line received") + matches = validate( + request_line_re, lines[0], "illegal request line: {!r}", lines[0] + ) + return Request( + headers=list(_decode_header_lines(lines[1:])), _parsed=True, **matches + ) + + +status_line_re = re.compile(status_line.encode("ascii")) + + +def maybe_read_from_SEND_RESPONSE_server( + buf: ReceiveBuffer, +) -> Union[InformationalResponse, Response, None]: + lines = buf.maybe_extract_lines() + if lines is None: + if buf.is_next_line_obviously_invalid_request_line(): + raise LocalProtocolError("illegal request line") + return None + if not lines: + raise LocalProtocolError("no response line received") + 
matches = validate(status_line_re, lines[0], "illegal status line: {!r}", lines[0]) + http_version = ( + b"1.1" if matches["http_version"] is None else matches["http_version"] + ) + reason = b"" if matches["reason"] is None else matches["reason"] + status_code = int(matches["status_code"]) + class_: Union[Type[InformationalResponse], Type[Response]] = ( + InformationalResponse if status_code < 200 else Response + ) + return class_( + headers=list(_decode_header_lines(lines[1:])), + _parsed=True, + status_code=status_code, + reason=reason, + http_version=http_version, + ) + + +class ContentLengthReader: + def __init__(self, length: int) -> None: + self._length = length + self._remaining = length + + def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]: + if self._remaining == 0: + return EndOfMessage() + data = buf.maybe_extract_at_most(self._remaining) + if data is None: + return None + self._remaining -= len(data) + return Data(data=data) + + def read_eof(self) -> NoReturn: + raise RemoteProtocolError( + "peer closed connection without sending complete message body " + "(received {} bytes, expected {})".format( + self._length - self._remaining, self._length + ) + ) + + +chunk_header_re = re.compile(chunk_header.encode("ascii")) + + +class ChunkedReader: + def __init__(self) -> None: + self._bytes_in_chunk = 0 + # After reading a chunk, we have to throw away the trailing \r\n. + # This tracks the bytes that we need to match and throw away. + self._bytes_to_discard = b"" + self._reading_trailer = False + + def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]: + if self._reading_trailer: + lines = buf.maybe_extract_lines() + if lines is None: + return None + return EndOfMessage(headers=list(_decode_header_lines(lines))) + if self._bytes_to_discard: + data = buf.maybe_extract_at_most(len(self._bytes_to_discard)) + if data is None: + return None + if data != self._bytes_to_discard[: len(data)]: + raise LocalProtocolError( + f"malformed chunk footer: {data!r} (expected {self._bytes_to_discard!r})" + ) + self._bytes_to_discard = self._bytes_to_discard[len(data) :] + if self._bytes_to_discard: + return None + # else, fall through and read some more + assert self._bytes_to_discard == b"" + if self._bytes_in_chunk == 0: + # We need to refill our chunk count + chunk_header = buf.maybe_extract_next_line() + if chunk_header is None: + return None + matches = validate( + chunk_header_re, + chunk_header, + "illegal chunk header: {!r}", + chunk_header, + ) + # XX FIXME: we discard chunk extensions. Does anyone care? 
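+            # For example, a chunk header of b"1a4;foo=bar\r\n" parses to
+            # chunk_size=b"1a4", i.e. 0x1a4 == 420 bytes of chunk data follow
+            # (and the ";foo=bar" extension is the part being discarded).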
+ self._bytes_in_chunk = int(matches["chunk_size"], base=16) + if self._bytes_in_chunk == 0: + self._reading_trailer = True + return self(buf) + chunk_start = True + else: + chunk_start = False + assert self._bytes_in_chunk > 0 + data = buf.maybe_extract_at_most(self._bytes_in_chunk) + if data is None: + return None + self._bytes_in_chunk -= len(data) + if self._bytes_in_chunk == 0: + self._bytes_to_discard = b"\r\n" + chunk_end = True + else: + chunk_end = False + return Data(data=data, chunk_start=chunk_start, chunk_end=chunk_end) + + def read_eof(self) -> NoReturn: + raise RemoteProtocolError( + "peer closed connection without sending complete message body " + "(incomplete chunked read)" + ) + + +class Http10Reader: + def __call__(self, buf: ReceiveBuffer) -> Optional[Data]: + data = buf.maybe_extract_at_most(999999999) + if data is None: + return None + return Data(data=data) + + def read_eof(self) -> EndOfMessage: + return EndOfMessage() + + +def expect_nothing(buf: ReceiveBuffer) -> None: + if buf: + raise LocalProtocolError("Got data when expecting EOF") + return None + + +ReadersType = Dict[ + Union[Type[Sentinel], Tuple[Type[Sentinel], Type[Sentinel]]], + Union[Callable[..., Any], Dict[str, Callable[..., Any]]], +] + +READERS: ReadersType = { + (CLIENT, IDLE): maybe_read_from_IDLE_client, + (SERVER, IDLE): maybe_read_from_SEND_RESPONSE_server, + (SERVER, SEND_RESPONSE): maybe_read_from_SEND_RESPONSE_server, + (CLIENT, DONE): expect_nothing, + (CLIENT, MUST_CLOSE): expect_nothing, + (CLIENT, CLOSED): expect_nothing, + (SERVER, DONE): expect_nothing, + (SERVER, MUST_CLOSE): expect_nothing, + (SERVER, CLOSED): expect_nothing, + SEND_BODY: { + "chunked": ChunkedReader, + "content-length": ContentLengthReader, + "http/1.0": Http10Reader, + }, +} diff --git a/venv/Lib/site-packages/h11/_receivebuffer.py b/venv/Lib/site-packages/h11/_receivebuffer.py new file mode 100644 index 00000000..e5c4e08a --- /dev/null +++ b/venv/Lib/site-packages/h11/_receivebuffer.py @@ -0,0 +1,153 @@ +import re +import sys +from typing import List, Optional, Union + +__all__ = ["ReceiveBuffer"] + + +# Operations we want to support: +# - find next \r\n or \r\n\r\n (\n or \n\n are also acceptable), +# or wait until there is one +# - read at-most-N bytes +# Goals: +# - on average, do this fast +# - worst case, do this in O(n) where n is the number of bytes processed +# Plan: +# - store bytearray, offset, how far we've searched for a separator token +# - use the how-far-we've-searched data to avoid rescanning +# - while doing a stream of uninterrupted processing, advance offset instead +# of constantly copying +# WARNING: +# - I haven't benchmarked or profiled any of this yet. +# +# Note that starting in Python 3.4, deleting the initial n bytes from a +# bytearray is amortized O(n), thanks to some excellent work by Antoine +# Martin: +# +# https://bugs.python.org/issue19087 +# +# This means that if we only supported 3.4+, we could get rid of the code here +# involving self._start and self.compress, because it's doing exactly the same +# thing that bytearray now does internally. +# +# BUT unfortunately, we still support 2.7, and reading short segments out of a +# long buffer MUST be O(bytes read) to avoid DoS issues, so we can't actually +# delete this code. 
Yet: +# +# https://pythonclock.org/ +# +# (Two things to double-check first though: make sure PyPy also has the +# optimization, and benchmark to make sure it's a win, since we do have a +# slightly clever thing where we delay calling compress() until we've +# processed a whole event, which could in theory be slightly more efficient +# than the internal bytearray support.) +blank_line_regex = re.compile(b"\n\r?\n", re.MULTILINE) + + +class ReceiveBuffer: + def __init__(self) -> None: + self._data = bytearray() + self._next_line_search = 0 + self._multiple_lines_search = 0 + + def __iadd__(self, byteslike: Union[bytes, bytearray]) -> "ReceiveBuffer": + self._data += byteslike + return self + + def __bool__(self) -> bool: + return bool(len(self)) + + def __len__(self) -> int: + return len(self._data) + + # for @property unprocessed_data + def __bytes__(self) -> bytes: + return bytes(self._data) + + def _extract(self, count: int) -> bytearray: + # extracting an initial slice of the data buffer and return it + out = self._data[:count] + del self._data[:count] + + self._next_line_search = 0 + self._multiple_lines_search = 0 + + return out + + def maybe_extract_at_most(self, count: int) -> Optional[bytearray]: + """ + Extract a fixed number of bytes from the buffer. + """ + out = self._data[:count] + if not out: + return None + + return self._extract(count) + + def maybe_extract_next_line(self) -> Optional[bytearray]: + """ + Extract the first line, if it is completed in the buffer. + """ + # Only search in buffer space that we've not already looked at. + search_start_index = max(0, self._next_line_search - 1) + partial_idx = self._data.find(b"\r\n", search_start_index) + + if partial_idx == -1: + self._next_line_search = len(self._data) + return None + + # + 2 is to compensate len(b"\r\n") + idx = partial_idx + 2 + + return self._extract(idx) + + def maybe_extract_lines(self) -> Optional[List[bytearray]]: + """ + Extract everything up to the first blank line, and return a list of lines. + """ + # Handle the case where we have an immediate empty line. + if self._data[:1] == b"\n": + self._extract(1) + return [] + + if self._data[:2] == b"\r\n": + self._extract(2) + return [] + + # Only search in buffer space that we've not already looked at. + match = blank_line_regex.search(self._data, self._multiple_lines_search) + if match is None: + self._multiple_lines_search = max(0, len(self._data) - 2) + return None + + # Truncate the buffer and return it. + idx = match.span(0)[-1] + out = self._extract(idx) + lines = out.split(b"\n") + + for line in lines: + if line.endswith(b"\r"): + del line[-1] + + assert lines[-2] == lines[-1] == b"" + + del lines[-2:] + + return lines + + # In theory we should wait until `\r\n` before starting to validate + # incoming data. However it's interesting to detect (very) invalid data + # early given they might not even contain `\r\n` at all (hence only + # timeout will get rid of them). + # This is not a 100% effective detection but more of a cheap sanity check + # allowing for early abort in some useful cases. + # This is especially interesting when peer is messing up with HTTPS and + # sent us a TLS stream where we were expecting plain HTTP given all + # versions of TLS so far start handshake with a 0x16 message type code. 
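+    # For example, every TLS handshake so far begins with a ClientHello whose
+    # record-type byte is 0x16; that is below 0x21 (b"!"), so it can never
+    # start a valid request line and is rejected immediately by this check.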
+ def is_next_line_obviously_invalid_request_line(self) -> bool: + try: + # HTTP header line must not contain non-printable characters + # and should not start with a space + return self._data[0] < 0x21 + except IndexError: + return False diff --git a/venv/Lib/site-packages/h11/_state.py b/venv/Lib/site-packages/h11/_state.py new file mode 100644 index 00000000..3ad444b0 --- /dev/null +++ b/venv/Lib/site-packages/h11/_state.py @@ -0,0 +1,365 @@ +################################################################ +# The core state machine +################################################################ +# +# Rule 1: everything that affects the state machine and state transitions must +# live here in this file. As much as possible goes into the table-based +# representation, but for the bits that don't quite fit, the actual code and +# state must nonetheless live here. +# +# Rule 2: this file does not know about what role we're playing; it only knows +# about HTTP request/response cycles in the abstract. This ensures that we +# don't cheat and apply different rules to local and remote parties. +# +# +# Theory of operation +# =================== +# +# Possibly the simplest way to think about this is that we actually have 5 +# different state machines here. Yes, 5. These are: +# +# 1) The client state, with its complicated automaton (see the docs) +# 2) The server state, with its complicated automaton (see the docs) +# 3) The keep-alive state, with possible states {True, False} +# 4) The SWITCH_CONNECT state, with possible states {False, True} +# 5) The SWITCH_UPGRADE state, with possible states {False, True} +# +# For (3)-(5), the first state listed is the initial state. +# +# (1)-(3) are stored explicitly in member variables. The last +# two are stored implicitly in the pending_switch_proposals set as: +# (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals) +# (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals) +# +# And each of these machines has two different kinds of transitions: +# +# a) Event-triggered +# b) State-triggered +# +# Event triggered is the obvious thing that you'd think it is: some event +# happens, and if it's the right event at the right time then a transition +# happens. But there are somewhat complicated rules for which machines can +# "see" which events. (As a rule of thumb, if a machine "sees" an event, this +# means two things: the event can affect the machine, and if the machine is +# not in a state where it expects that event then it's an error.) These rules +# are: +# +# 1) The client machine sees all h11.events objects emitted by the client. +# +# 2) The server machine sees all h11.events objects emitted by the server. +# +# It also sees the client's Request event. +# +# And sometimes, server events are annotated with a _SWITCH_* event. For +# example, we can have a (Response, _SWITCH_CONNECT) event, which is +# different from a regular Response event. +# +# 3) The keep-alive machine sees the process_keep_alive_disabled() event +# (which is derived from Request/Response events), and this event +# transitions it from True -> False, or from False -> False. There's no way +# to transition back. +# +# 4&5) The _SWITCH_* machines transition from False->True when we get a +# Request that proposes the relevant type of switch (via +# process_client_switch_proposals), and they go from True->False when we +# get a Response that has no _SWITCH_* annotation. +# +# So that's event-triggered transitions. +# +# State-triggered transitions are less standard. 
What they do here is couple +# the machines together. The way this works is, when certain *joint* +# configurations of states are achieved, then we automatically transition to a +# new *joint* state. So, for example, if we're ever in a joint state with +# +# client: DONE +# keep-alive: False +# +# then the client state immediately transitions to: +# +# client: MUST_CLOSE +# +# This is fundamentally different from an event-based transition, because it +# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state +# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive +# transitioned True -> False. Either way, once this precondition is satisfied, +# this transition is immediately triggered. +# +# What if two conflicting state-based transitions get enabled at the same +# time? In practice there's only one case where this arises (client DONE -> +# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by +# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition. +# +# Implementation +# -------------- +# +# The event-triggered transitions for the server and client machines are all +# stored explicitly in a table. Ditto for the state-triggered transitions that +# involve just the server and client state. +# +# The transitions for the other machines, and the state-triggered transitions +# that involve the other machines, are written out as explicit Python code. +# +# It'd be nice if there were some cleaner way to do all this. This isn't +# *too* terrible, but I feel like it could probably be better. +# +# WARNING +# ------- +# +# The script that generates the state machine diagrams for the docs knows how +# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS +# tables. But it can't automatically read the transitions that are written +# directly in Python code. So if you touch those, you need to also update the +# script to keep it in sync! +from typing import cast, Dict, Optional, Set, Tuple, Type, Union + +from ._events import * +from ._util import LocalProtocolError, Sentinel + +# Everything in __all__ gets re-exported as part of the h11 public API. 
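+# For example, downstream code typically dispatches on these state sentinels
+# by identity; as a sketch (`conn` being an h11 Connection and `sock` the
+# caller's own socket, neither defined in this module):
+#
+#     if conn.our_state is MUST_CLOSE:
+#         sock.close()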
+__all__ = [ + "CLIENT", + "SERVER", + "IDLE", + "SEND_RESPONSE", + "SEND_BODY", + "DONE", + "MUST_CLOSE", + "CLOSED", + "MIGHT_SWITCH_PROTOCOL", + "SWITCHED_PROTOCOL", + "ERROR", +] + + +class CLIENT(Sentinel, metaclass=Sentinel): + pass + + +class SERVER(Sentinel, metaclass=Sentinel): + pass + + +# States +class IDLE(Sentinel, metaclass=Sentinel): + pass + + +class SEND_RESPONSE(Sentinel, metaclass=Sentinel): + pass + + +class SEND_BODY(Sentinel, metaclass=Sentinel): + pass + + +class DONE(Sentinel, metaclass=Sentinel): + pass + + +class MUST_CLOSE(Sentinel, metaclass=Sentinel): + pass + + +class CLOSED(Sentinel, metaclass=Sentinel): + pass + + +class ERROR(Sentinel, metaclass=Sentinel): + pass + + +# Switch types +class MIGHT_SWITCH_PROTOCOL(Sentinel, metaclass=Sentinel): + pass + + +class SWITCHED_PROTOCOL(Sentinel, metaclass=Sentinel): + pass + + +class _SWITCH_UPGRADE(Sentinel, metaclass=Sentinel): + pass + + +class _SWITCH_CONNECT(Sentinel, metaclass=Sentinel): + pass + + +EventTransitionType = Dict[ + Type[Sentinel], + Dict[ + Type[Sentinel], + Dict[Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], Type[Sentinel]], + ], +] + +EVENT_TRIGGERED_TRANSITIONS: EventTransitionType = { + CLIENT: { + IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED}, + SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE}, + DONE: {ConnectionClosed: CLOSED}, + MUST_CLOSE: {ConnectionClosed: CLOSED}, + CLOSED: {ConnectionClosed: CLOSED}, + MIGHT_SWITCH_PROTOCOL: {}, + SWITCHED_PROTOCOL: {}, + ERROR: {}, + }, + SERVER: { + IDLE: { + ConnectionClosed: CLOSED, + Response: SEND_BODY, + # Special case: server sees client Request events, in this form + (Request, CLIENT): SEND_RESPONSE, + }, + SEND_RESPONSE: { + InformationalResponse: SEND_RESPONSE, + Response: SEND_BODY, + (InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL, + (Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL, + }, + SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE}, + DONE: {ConnectionClosed: CLOSED}, + MUST_CLOSE: {ConnectionClosed: CLOSED}, + CLOSED: {ConnectionClosed: CLOSED}, + SWITCHED_PROTOCOL: {}, + ERROR: {}, + }, +} + +StateTransitionType = Dict[ + Tuple[Type[Sentinel], Type[Sentinel]], Dict[Type[Sentinel], Type[Sentinel]] +] + +# NB: there are also some special-case state-triggered transitions hard-coded +# into _fire_state_triggered_transitions below. +STATE_TRIGGERED_TRANSITIONS: StateTransitionType = { + # (Client state, Server state) -> new states + # Protocol negotiation + (MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL}, + # Socket shutdown + (CLOSED, DONE): {SERVER: MUST_CLOSE}, + (CLOSED, IDLE): {SERVER: MUST_CLOSE}, + (ERROR, DONE): {SERVER: MUST_CLOSE}, + (DONE, CLOSED): {CLIENT: MUST_CLOSE}, + (IDLE, CLOSED): {CLIENT: MUST_CLOSE}, + (DONE, ERROR): {CLIENT: MUST_CLOSE}, +} + + +class ConnectionState: + def __init__(self) -> None: + # Extra bits of state that don't quite fit into the state model. + + # If this is False then it enables the automatic DONE -> MUST_CLOSE + # transition. Don't set this directly; call .keep_alive_disabled() + self.keep_alive = True + + # This is a subset of {UPGRADE, CONNECT}, containing the proposals + # made by the client for switching protocols. 
+ self.pending_switch_proposals: Set[Type[Sentinel]] = set() + + self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE} + + def process_error(self, role: Type[Sentinel]) -> None: + self.states[role] = ERROR + self._fire_state_triggered_transitions() + + def process_keep_alive_disabled(self) -> None: + self.keep_alive = False + self._fire_state_triggered_transitions() + + def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None: + self.pending_switch_proposals.add(switch_event) + self._fire_state_triggered_transitions() + + def process_event( + self, + role: Type[Sentinel], + event_type: Type[Event], + server_switch_event: Optional[Type[Sentinel]] = None, + ) -> None: + _event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type + if server_switch_event is not None: + assert role is SERVER + if server_switch_event not in self.pending_switch_proposals: + raise LocalProtocolError( + "Received server _SWITCH_UPGRADE event without a pending proposal" + ) + _event_type = (event_type, server_switch_event) + if server_switch_event is None and _event_type is Response: + self.pending_switch_proposals = set() + self._fire_event_triggered_transitions(role, _event_type) + # Special case: the server state does get to see Request + # events. + if _event_type is Request: + assert role is CLIENT + self._fire_event_triggered_transitions(SERVER, (Request, CLIENT)) + self._fire_state_triggered_transitions() + + def _fire_event_triggered_transitions( + self, + role: Type[Sentinel], + event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], + ) -> None: + state = self.states[role] + try: + new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type] + except KeyError: + event_type = cast(Type[Event], event_type) + raise LocalProtocolError( + "can't handle event type {} when role={} and state={}".format( + event_type.__name__, role, self.states[role] + ) + ) from None + self.states[role] = new_state + + def _fire_state_triggered_transitions(self) -> None: + # We apply these rules repeatedly until converging on a fixed point + while True: + start_states = dict(self.states) + + # It could happen that both these special-case transitions are + # enabled at the same time: + # + # DONE -> MIGHT_SWITCH_PROTOCOL + # DONE -> MUST_CLOSE + # + # For example, this will always be true of a HTTP/1.0 client + # requesting CONNECT. If this happens, the protocol switch takes + # priority. From there the client will either go to + # SWITCHED_PROTOCOL, in which case it's none of our business when + # they close the connection, or else the server will deny the + # request, in which case the client will go back to DONE and then + # from there to MUST_CLOSE. + if self.pending_switch_proposals: + if self.states[CLIENT] is DONE: + self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL + + if not self.pending_switch_proposals: + if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL: + self.states[CLIENT] = DONE + + if not self.keep_alive: + for role in (CLIENT, SERVER): + if self.states[role] is DONE: + self.states[role] = MUST_CLOSE + + # Tabular state-triggered transitions + joint_state = (self.states[CLIENT], self.states[SERVER]) + changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {}) + self.states.update(changes) + + if self.states == start_states: + # Fixed point reached + return + + def start_next_cycle(self) -> None: + if self.states != {CLIENT: DONE, SERVER: DONE}: + raise LocalProtocolError( + f"not in a reusable state. 
self.states={self.states}"
+            )
+        # Can't reach DONE/DONE with any of these active, but still, let's be
+        # sure.
+        assert self.keep_alive
+        assert not self.pending_switch_proposals
+        self.states = {CLIENT: IDLE, SERVER: IDLE}
diff --git a/venv/Lib/site-packages/h11/_util.py b/venv/Lib/site-packages/h11/_util.py
new file mode 100644
index 00000000..67184452
--- /dev/null
+++ b/venv/Lib/site-packages/h11/_util.py
@@ -0,0 +1,135 @@
+from typing import Any, Dict, NoReturn, Pattern, Tuple, Type, TypeVar, Union
+
+__all__ = [
+    "ProtocolError",
+    "LocalProtocolError",
+    "RemoteProtocolError",
+    "validate",
+    "bytesify",
+]
+
+
+class ProtocolError(Exception):
+    """Exception indicating a violation of the HTTP/1.1 protocol.
+
+    This is an abstract base class, with two concrete subclasses:
+    :exc:`LocalProtocolError`, which indicates that you tried to do something
+    that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which
+    indicates that the remote peer tried to do something that HTTP/1.1 says is
+    illegal. See :ref:`error-handling` for details.
+
+    In addition to the normal :exc:`Exception` features, it has one attribute:
+
+    .. attribute:: error_status_hint
+
+       This gives a suggestion as to what status code a server might use if
+       this error occurred as part of a request.
+
+       For a :exc:`RemoteProtocolError`, this is useful as a suggestion for
+       how you might want to respond to a misbehaving peer, if you're
+       implementing a server.
+
+       For a :exc:`LocalProtocolError`, this can be taken as a suggestion for
+       how your peer might have responded to *you* if h11 had allowed you to
+       continue.
+
+       The default is 400 Bad Request, a generic catch-all for protocol
+       violations.
+
+    """
+
+    def __init__(self, msg: str, error_status_hint: int = 400) -> None:
+        if type(self) is ProtocolError:
+            raise TypeError("tried to directly instantiate ProtocolError")
+        Exception.__init__(self, msg)
+        self.error_status_hint = error_status_hint
+
+
+# Strategy: there are a number of public APIs where a LocalProtocolError can
+# be raised (send(), all the different event constructors, ...), and only one
+# public API where RemoteProtocolError can be raised
+# (receive_data()). Therefore we always raise LocalProtocolError internally,
+# and then receive_data will translate this into a RemoteProtocolError.
+#
+# Internally:
+#   LocalProtocolError is the generic "ProtocolError".
+# Externally:
+#   LocalProtocolError is for local errors and RemoteProtocolError is for
+#   remote errors.
+class LocalProtocolError(ProtocolError):
+    def _reraise_as_remote_protocol_error(self) -> NoReturn:
+        # After catching a LocalProtocolError, use this method to re-raise it
+        # as a RemoteProtocolError. This method must be called from inside an
+        # except: block.
+        #
+        # An easy way to get an equivalent RemoteProtocolError is just to
+        # modify 'self' in place.
+        self.__class__ = RemoteProtocolError  # type: ignore
+        # But the re-raising is somewhat non-trivial -- you might think that
+        # now that we've modified the in-flight exception object, that just
+        # doing 'raise' to re-raise it would be enough. But it turns out that
+        # this doesn't work, because Python tracks the exception type
+        # (exc_info[0]) separately from the exception object (exc_info[1]),
+        # and we only modified the latter. So we really do need to re-raise
+        # the new type explicitly.
+        # On py3, the traceback is part of the exception object, so our
+        # in-place modification preserved it and we can just re-raise:
+        raise self
+
+
+class RemoteProtocolError(ProtocolError):
+    pass
+
+
+def validate(
+    regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
+) -> Dict[str, bytes]:
+    match = regex.fullmatch(data)
+    if not match:
+        if format_args:
+            msg = msg.format(*format_args)
+        raise LocalProtocolError(msg)
+    return match.groupdict()
+
+
+# Sentinel values
+#
+# - Inherit identity-based comparison and hashing from object
+# - Have a nice repr
+# - Have a *bonus property*: type(sentinel) is sentinel
+#
+# The bonus property is useful if you want to take the return value from
+# next_event() and do some sort of dispatch based on type(event).
+
+_T_Sentinel = TypeVar("_T_Sentinel", bound="Sentinel")
+
+
+class Sentinel(type):
+    def __new__(
+        cls: Type[_T_Sentinel],
+        name: str,
+        bases: Tuple[type, ...],
+        namespace: Dict[str, Any],
+        **kwds: Any
+    ) -> _T_Sentinel:
+        assert bases == (Sentinel,)
+        v = super().__new__(cls, name, bases, namespace, **kwds)
+        v.__class__ = v  # type: ignore
+        return v
+
+    def __repr__(self) -> str:
+        return self.__name__
+
+
+# Used for methods, request targets, HTTP versions, header names, and header
+# values. Accepts ascii-strings, or bytes/bytearray/memoryview/..., and always
+# returns bytes.
+def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
+    # Fast-path:
+    if type(s) is bytes:
+        return s
+    if isinstance(s, str):
+        s = s.encode("ascii")
+    if isinstance(s, int):
+        raise TypeError("expected bytes-like object, not int")
+    return bytes(s)
diff --git a/venv/Lib/site-packages/h11/_version.py b/venv/Lib/site-packages/h11/_version.py
new file mode 100644
index 00000000..76e7327b
--- /dev/null
+++ b/venv/Lib/site-packages/h11/_version.py
@@ -0,0 +1,16 @@
+# This file must be kept very simple, because it is consumed from several
+# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.
+
+# We use a simple scheme:
+#   1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev
+# where the +dev versions are never released into the wild, they're just what
+# we stick into the VCS in between releases.
+#
+# This is compatible with PEP 440:
+#   http://legacy.python.org/dev/peps/pep-0440/
+# via the use of the "local suffix" "+dev", which is disallowed on index
+# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we
+# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*
+# 1.0.0.)
+
+__version__ = "0.16.0"
diff --git a/venv/Lib/site-packages/h11/_writers.py b/venv/Lib/site-packages/h11/_writers.py
new file mode 100644
index 00000000..939cdb91
--- /dev/null
+++ b/venv/Lib/site-packages/h11/_writers.py
@@ -0,0 +1,145 @@
+# Code to write HTTP data
+#
+# Strategy: each writer takes an event + a write-some-bytes function, which it
+# calls.
+#
+# WRITERS is a dict describing how to pick a writer.  It maps states to either:
+# - a writer
+# - or, for body writers, a dict of framing-dependent writer factories
+
+from typing import Any, Callable, Dict, List, Tuple, Type, Union
+
+from ._events import Data, EndOfMessage, Event, InformationalResponse, Request, Response
+from ._headers import Headers
+from ._state import CLIENT, IDLE, SEND_BODY, SEND_RESPONSE, SERVER
+from ._util import LocalProtocolError, Sentinel
+
+__all__ = ["WRITERS"]
+
+Writer = Callable[[bytes], Any]
+
+
+def write_headers(headers: Headers, write: Writer) -> None:
+    # "Since the Host field-value is critical information for handling a
+    # request, a user agent SHOULD generate Host as the first header field
+    # following the request-line." - RFC 7230
+    raw_items = headers._full_items
+    for raw_name, name, value in raw_items:
+        if name == b"host":
+            write(b"%s: %s\r\n" % (raw_name, value))
+    for raw_name, name, value in raw_items:
+        if name != b"host":
+            write(b"%s: %s\r\n" % (raw_name, value))
+    write(b"\r\n")
+
+
+def write_request(request: Request, write: Writer) -> None:
+    if request.http_version != b"1.1":
+        raise LocalProtocolError("I only send HTTP/1.1")
+    write(b"%s %s HTTP/1.1\r\n" % (request.method, request.target))
+    write_headers(request.headers, write)
+
+
+# Shared between InformationalResponse and Response
+def write_any_response(
+    response: Union[InformationalResponse, Response], write: Writer
+) -> None:
+    if response.http_version != b"1.1":
+        raise LocalProtocolError("I only send HTTP/1.1")
+    status_bytes = str(response.status_code).encode("ascii")
+    # We don't bother sending ascii status messages like "OK"; they're
+    # optional and ignored by the protocol. (But the space after the numeric
+    # status code is mandatory.)
+    #
+    # XX FIXME: could at least make an effort to pull out the status message
+    # from stdlib's http.HTTPStatus table. Or maybe just steal their enums
+    # (either by import or copy/paste). We already accept them as status codes
+    # since they're of type IntEnum < int.
+    write(b"HTTP/1.1 %s %s\r\n" % (status_bytes, response.reason))
+    write_headers(response.headers, write)
+
+
+class BodyWriter:
+    def __call__(self, event: Event, write: Writer) -> None:
+        if type(event) is Data:
+            self.send_data(event.data, write)
+        elif type(event) is EndOfMessage:
+            self.send_eom(event.headers, write)
+        else:  # pragma: no cover
+            assert False
+
+    def send_data(self, data: bytes, write: Writer) -> None:
+        pass
+
+    def send_eom(self, headers: Headers, write: Writer) -> None:
+        pass
+
+
+#
+# These are all careful not to do anything to 'data' except call len(data) and
+# write(data). This allows us to transparently pass-through funny objects,
+# like placeholder objects referring to files on disk that will be sent via
+# sendfile(2).
+#
+class ContentLengthWriter(BodyWriter):
+    def __init__(self, length: int) -> None:
+        self._length = length
+
+    def send_data(self, data: bytes, write: Writer) -> None:
+        self._length -= len(data)
+        if self._length < 0:
+            raise LocalProtocolError("Too much data for declared Content-Length")
+        write(data)
+
+    def send_eom(self, headers: Headers, write: Writer) -> None:
+        if self._length != 0:
+            raise LocalProtocolError("Too little data for declared Content-Length")
+        if headers:
+            raise LocalProtocolError("Content-Length and trailers don't mix")
+
+
+class ChunkedWriter(BodyWriter):
+    def send_data(self, data: bytes, write: Writer) -> None:
+        # if we encoded 0-length data in the naive way, it would look like an
+        # end-of-message.
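+        # For example, 420 bytes of data are framed as b"1a4\r\n" + data +
+        # b"\r\n"; the terminating zero-length chunk is the b"0\r\n" that
+        # send_eom() below writes.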
+ if not data: + return + write(b"%x\r\n" % len(data)) + write(data) + write(b"\r\n") + + def send_eom(self, headers: Headers, write: Writer) -> None: + write(b"0\r\n") + write_headers(headers, write) + + +class Http10Writer(BodyWriter): + def send_data(self, data: bytes, write: Writer) -> None: + write(data) + + def send_eom(self, headers: Headers, write: Writer) -> None: + if headers: + raise LocalProtocolError("can't send trailers to HTTP/1.0 client") + # no need to close the socket ourselves, that will be taken care of by + # Connection: close machinery + + +WritersType = Dict[ + Union[Tuple[Type[Sentinel], Type[Sentinel]], Type[Sentinel]], + Union[ + Dict[str, Type[BodyWriter]], + Callable[[Union[InformationalResponse, Response], Writer], None], + Callable[[Request, Writer], None], + ], +] + +WRITERS: WritersType = { + (CLIENT, IDLE): write_request, + (SERVER, IDLE): write_any_response, + (SERVER, SEND_RESPONSE): write_any_response, + SEND_BODY: { + "chunked": ChunkedWriter, + "content-length": ContentLengthWriter, + "http/1.0": Http10Writer, + }, +} diff --git a/venv/Lib/site-packages/h11/py.typed b/venv/Lib/site-packages/h11/py.typed new file mode 100644 index 00000000..f5642f79 --- /dev/null +++ b/venv/Lib/site-packages/h11/py.typed @@ -0,0 +1 @@ +Marker diff --git a/venv/Lib/site-packages/httpcore-1.0.9.dist-info/INSTALLER b/venv/Lib/site-packages/httpcore-1.0.9.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/httpcore-1.0.9.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/httpcore-1.0.9.dist-info/METADATA b/venv/Lib/site-packages/httpcore-1.0.9.dist-info/METADATA new file mode 100644 index 00000000..8056834e --- /dev/null +++ b/venv/Lib/site-packages/httpcore-1.0.9.dist-info/METADATA @@ -0,0 +1,625 @@ +Metadata-Version: 2.4 +Name: httpcore +Version: 1.0.9 +Summary: A minimal low-level HTTP client. 
+Project-URL: Documentation, https://www.encode.io/httpcore
+Project-URL: Homepage, https://www.encode.io/httpcore/
+Project-URL: Source, https://github.com/encode/httpcore
+Author-email: Tom Christie <tom@tomchristie.com>
+License-Expression: BSD-3-Clause
+License-File: LICENSE.md
+Classifier: Development Status :: 3 - Alpha
+Classifier: Environment :: Web Environment
+Classifier: Framework :: AsyncIO
+Classifier: Framework :: Trio
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Internet :: WWW/HTTP
+Requires-Python: >=3.8
+Requires-Dist: certifi
+Requires-Dist: h11>=0.16
+Provides-Extra: asyncio
+Requires-Dist: anyio<5.0,>=4.0; extra == 'asyncio'
+Provides-Extra: http2
+Requires-Dist: h2<5,>=3; extra == 'http2'
+Provides-Extra: socks
+Requires-Dist: socksio==1.*; extra == 'socks'
+Provides-Extra: trio
+Requires-Dist: trio<1.0,>=0.22.0; extra == 'trio'
+Description-Content-Type: text/markdown
+
+# HTTP Core
+
+[![Test Suite](https://github.com/encode/httpcore/workflows/Test%20Suite/badge.svg)](https://github.com/encode/httpcore/actions)
+[![Package version](https://badge.fury.io/py/httpcore.svg)](https://pypi.org/project/httpcore/)
+
+> *Do one thing, and do it well.*
+
+The HTTP Core package provides a minimal low-level HTTP client, which does
+one thing only. Sending HTTP requests.
+
+It does not provide any high level model abstractions over the API,
+does not handle redirects, multipart uploads, building authentication headers,
+transparent HTTP caching, URL parsing, session cookie handling,
+content or charset decoding, handling JSON, environment based configuration
+defaults, or any of that Jazz.
+
+Some things HTTP Core does do:
+
+* Sending HTTP requests.
+* Thread-safe / task-safe connection pooling.
+* HTTP(S) proxy & SOCKS proxy support.
+* Supports HTTP/1.1 and HTTP/2.
+* Provides both sync and async interfaces.
+* Async backend support for `asyncio` and `trio`.
+
+## Requirements
+
+Python 3.8+
+
+## Installation
+
+For HTTP/1.1 only support, install with:
+
+```shell
+$ pip install httpcore
+```
+
+There are also a number of optional extras available...
+
+```shell
+$ pip install httpcore['asyncio,trio,http2,socks']
+```
+
+## Sending requests
+
+Send an HTTP request:
+
+```python
+import httpcore
+
+response = httpcore.request("GET", "https://www.example.com/")
+
+print(response)
+# <Response [200]>
+print(response.status)
+# 200
+print(response.headers)
+# [(b'Accept-Ranges', b'bytes'), (b'Age', b'557328'), (b'Cache-Control', b'max-age=604800'), ...]
+print(response.content)
+# b'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>\n\n<meta charset="utf-8" />\n ...'
+```
+
+The top-level `httpcore.request()` function is provided for convenience. In practice whenever you're working with `httpcore` you'll want to use the connection pooling functionality that it provides.
+
+```python
+import httpcore
+
+http = httpcore.ConnectionPool()
+response = http.request("GET", "https://www.example.com/")
+```
+
+Once you're ready to get going, [head over to the documentation](https://www.encode.io/httpcore/).
+
+## Motivation
+
+You *probably* don't want to be using HTTP Core directly.
It might make sense if
+you're writing something like a proxy service in Python, and you just want
+something at the lowest possible level, but more typically you'll want to use
+a higher level client library, such as `httpx`.
+
+The motivation for `httpcore` is:
+
+* To provide a reusable low-level client library, that other packages can then build on top of.
+* To provide a *really clear interface split* between the networking code and client logic,
+  so that each is easier to understand and reason about in isolation.
+
+## Dependencies
+
+The `httpcore` package has the following dependencies...
+
+* `h11`
+* `certifi`
+
+And the following optional extras...
+
+* `anyio` - Required by `pip install httpcore['asyncio']`.
+* `trio` - Required by `pip install httpcore['trio']`.
+* `h2` - Required by `pip install httpcore['http2']`.
+* `socksio` - Required by `pip install httpcore['socks']`.
+
+## Versioning
+
+We use [SEMVER for our versioning policy](https://semver.org/).
+
+For changes between package versions please see our [project changelog](CHANGELOG.md).
+
+We recommend pinning your requirements to either the most current major version, or a more specific version range:
+
+```shell
+pip install 'httpcore==1.*'
+```
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+## Version 1.0.9 (April 24th, 2025)
+
+- Resolve https://github.com/advisories/GHSA-vqfr-h8mv-ghfj with h11 dependency update. (#1008)
+
+## Version 1.0.8 (April 11th, 2025)
+
+- Fix `AttributeError` when importing on Python 3.14. (#1005)
+
+## Version 1.0.7 (November 15th, 2024)
+
+- Support `proxy=…` configuration on `ConnectionPool()`. (#974)
+
+## Version 1.0.6 (October 1st, 2024)
+
+- Relax `trio` dependency pinning. (#956)
+- Handle `trio` raising `NotImplementedError` on unsupported platforms. (#955)
+- Handle mapping `ssl.SSLError` to `httpcore.ConnectError`. (#918)
+
+## 1.0.5 (March 27th, 2024)
+
+- Handle `EndOfStream` exception for anyio backend. (#899)
+- Allow trio `0.25.*` series in package dependencies. (#903)
+
+## 1.0.4 (February 21st, 2024)
+
+- Add `target` request extension. (#888)
+- Fix support for connection `Upgrade` and `CONNECT` when some data in the stream has been read. (#882)
+
+## 1.0.3 (February 13th, 2024)
+
+- Fix support for async cancellations. (#880)
+- Fix trace extension when used with socks proxy. (#849)
+- Fix SSL context for connections using the "wss" scheme (#869)
+
+## 1.0.2 (November 10th, 2023)
+
+- Fix `float("inf")` timeouts in `Event.wait` function. (#846)
+
+## 1.0.1 (November 3rd, 2023)
+
+- Fix pool timeout to account for the total time spent retrying. (#823)
+- Raise a neater RuntimeError when the correct async deps are not installed. (#826)
+- Add support for synchronous TLS-in-TLS streams. (#840)
+
+## 1.0.0 (October 6th, 2023)
+
+From version 1.0 our async support is now optional, as the package has minimal dependencies by default.
+
+For async support use either `pip install 'httpcore[asyncio]'` or `pip install 'httpcore[trio]'`.
+
+The project versioning policy is now explicitly governed by SEMVER. See https://semver.org/.
+
+- Async support becomes fully optional. (#809)
+- Add support for Python 3.12. (#807)
+
+## 0.18.0 (September 8th, 2023)
+
+- Add support for HTTPS proxies. (#745, #786)
+- Drop Python 3.7 support. (#727)
+- Handle `sni_hostname` extension with SOCKS proxy. (#774)
+- Handle HTTP/1.1 half-closed connections gracefully.
(#641)
+- Change the type of `Extensions` from `Mapping[str, Any]` to `MutableMapping[str, Any]`. (#762)
+
+## 0.17.3 (July 5th, 2023)
+
+- Support async cancellations, ensuring that the connection pool is left in a clean state when cancellations occur. (#726)
+- The networking backend interface has [been added to the public API](https://www.encode.io/httpcore/network-backends). Some classes which were previously private implementation detail are now part of the top-level public API. (#699)
+- Graceful handling of HTTP/2 GoAway frames, with requests being transparently retried on a new connection. (#730)
+- Add exceptions when a synchronous `trace callback` is passed to an asynchronous request or an asynchronous `trace callback` is passed to a synchronous request. (#717)
+- Drop Python 3.7 support. (#727)
+
+## 0.17.2 (May 23rd, 2023)
+
+- Add `socket_options` argument to `ConnectionPool` and `HTTPProxy` classes. (#668)
+- Improve logging with per-module logger names. (#690)
+- Add `sni_hostname` request extension. (#696)
+- Resolve race condition during import of `anyio` package. (#692)
+- Enable TCP_NODELAY for all synchronous sockets. (#651)
+
+## 0.17.1 (May 17th, 2023)
+
+- If 'retries' is set, then allow retries if an SSL handshake error occurs. (#669)
+- Improve correctness of tracebacks on network exceptions, by raising properly chained exceptions. (#678)
+- Prevent connection-hanging behaviour when HTTP/2 connections are closed by a server-sent 'GoAway' frame. (#679)
+- Fix edge-case exception when removing requests from the connection pool. (#680)
+- Fix pool timeout edge-case. (#688)
+
+## 0.17.0 (March 16th, 2023)
+
+- Add DEBUG level logging. (#648)
+- Respect HTTP/2 max concurrent streams when settings updates are sent by server. (#652)
+- Increase the allowable HTTP header size to 100kB. (#647)
+- Add `retries` option to SOCKS proxy classes. (#643)
+
+## 0.16.3 (December 20th, 2022)
+
+- Allow `ws` and `wss` schemes. Allows us to properly support websocket upgrade connections. (#625)
+- Forwarding HTTP proxies use a connection-per-remote-host. Required by some proxy implementations. (#637)
+- Don't raise `RuntimeError` when closing a connection pool with active connections. Removes some error cases when cancellations are used. (#631)
+- Lazy import `anyio`, so that it's no longer a hard dependency, and isn't imported if unused. (#639)
+
+## 0.16.2 (November 25th, 2022)
+
+- Revert 'Fix async cancellation behaviour', which introduced race conditions. (#627)
+- Raise `RuntimeError` if attempting to use UNIX domain sockets on Windows. (#619)
+
+## 0.16.1 (November 17th, 2022)
+
+- Fix HTTP/1.1 interim informational responses, such as "100 Continue". (#605)
+
+## 0.16.0 (October 11th, 2022)
+
+- Support HTTP/1.1 informational responses. (#581)
+- Fix async cancellation behaviour. (#580)
+- Support `h11` 0.14. (#579)
+
+## 0.15.0 (May 17th, 2022)
+
+- Drop Python 3.6 support (#535)
+- Ensure HTTP proxy CONNECT requests include `timeout` configuration. (#506)
+- Switch to explicit `typing.Optional` for type hints. (#513)
+- For `trio` map OSError exceptions to `ConnectError`. (#543)
+
+## 0.14.7 (February 4th, 2022)
+
+- Requests which raise a PoolTimeout need to be removed from the pool queue. (#502)
+- Fix `AttributeError` that happened when `Socks5Connection` instances were terminated. (#501)
+
+## 0.14.6 (February 1st, 2022)
+
+- Fix SOCKS support for `http://` URLs. (#492)
+- Resolve race condition around exceptions during streaming a response.
(#491)
+
+## 0.14.5 (January 18th, 2022)
+
+- SOCKS proxy support. (#478)
+- Add proxy_auth argument to HTTPProxy. (#481)
+- Improve error message on 'RemoteProtocolError' exception when server disconnects without sending a response. (#479)
+
+## 0.14.4 (January 5th, 2022)
+
+- Support HTTP/2 on HTTPS tunnelling proxies. (#468)
+- Fix proxy headers missing on HTTP forwarding. (#456)
+- Only instantiate SSL context if required. (#457)
+- More robust HTTP/2 handling. (#253, #439, #440, #441)
+
+## 0.14.3 (November 17th, 2021)
+
+- Fix race condition when removing closed connections from the pool. (#437)
+
+## 0.14.2 (November 16th, 2021)
+
+- Failed connections no longer remain in the pool. (Pull #433)
+
+## 0.14.1 (November 12th, 2021)
+
+- `max_connections` becomes optional. (Pull #429)
+- `certifi` is now included in the install dependencies. (Pull #428)
+- `h2` is now strictly optional. (Pull #428)
+
+## 0.14.0 (November 11th, 2021)
+
+The 0.14 release is a complete reworking of `httpcore`, comprehensively addressing some underlying issues in the connection pooling, as well as substantially redesigning the API to be more user friendly.
+
+Some of the lower-level API design also makes the components more easily testable in isolation, and the package now has 100% test coverage.
+
+See [discussion #419](https://github.com/encode/httpcore/discussions/419) for a little more background.
+
+There's some other neat bits in there too, such as the "trace" extension, which gives a hook into inspecting the internal events that occur during the request/response cycle. This extension is needed for the HTTPX cli, in order to...
+
+* Log the point at which the connection is established, and the IP/port on which it is made.
+* Determine if the outgoing request should log as HTTP/1.1 or HTTP/2, rather than having to assume it's HTTP/2 if the --http2 flag was passed. (Which may not actually be true.)
+* Log SSL version info / certificate info.
+
+Note that `curio` support is not currently available in 0.14.0. If you're using `httpcore` with `curio` please get in touch, so we can assess if we ought to prioritize it as a feature or not.
+
+## 0.13.7 (September 13th, 2021)
+
+- Fix broken error messaging when URL scheme is missing, or a non HTTP(S) scheme is used. (Pull #403)
+
+## 0.13.6 (June 15th, 2021)
+
+### Fixed
+
+- Close sockets when read or write timeouts occur. (Pull #365)
+
+## 0.13.5 (June 14th, 2021)
+
+### Fixed
+
+- Resolved niggles with AnyIO EOF behaviours. (Pull #358, #362)
+
+## 0.13.4 (June 9th, 2021)
+
+### Added
+
+- Improved error messaging when URL scheme is missing, or a non HTTP(S) scheme is used. (Pull #354)
+
+### Fixed
+
+- Switched to `anyio` as the default backend implementation when running with `asyncio`. Resolves some awkward [TLS timeout issues](https://github.com/encode/httpx/discussions/1511).
+
+## 0.13.3 (May 6th, 2021)
+
+### Added
+
+- Support HTTP/2 prior knowledge, using `httpcore.SyncConnectionPool(http1=False)`. (Pull #333)
+
+### Fixed
+
+- Handle cases where environment does not provide `select.poll` support. (Pull #331)
+
+## 0.13.2 (April 29th, 2021)
+
+### Added
+
+- Improve error message for specific case of `RemoteProtocolError` where server disconnects without sending a response. (Pull #313)
+
+## 0.13.1 (April 28th, 2021)
+
+### Fixed
+
+- More resilient testing for closed connections. (Pull #311)
+- Don't raise exceptions on ungraceful connection closes.
(Pull #310)
+
+## 0.13.0 (April 21st, 2021)
+
+The 0.13 release updates the core API in order to match the HTTPX Transport API,
+introduced in HTTPX 0.18 onwards.
+
+An example of making requests with the new interface is:
+
+```python
+with httpcore.SyncConnectionPool() as http:
+    status_code, headers, stream, extensions = http.handle_request(
+        method=b'GET',
+        url=(b'https', b'example.org', 443, b'/'),
+        headers=[(b'host', b'example.org'), (b'user-agent', b'httpcore')],
+        stream=httpcore.ByteStream(b''),
+        extensions={}
+    )
+    body = stream.read()
+    print(status_code, body)
+```
+
+### Changed
+
+- The `.request()` method is now `handle_request()`. (Pull #296)
+- The `.arequest()` method is now `.handle_async_request()`. (Pull #296)
+- The `headers` argument is no longer optional. (Pull #296)
+- The `stream` argument is no longer optional. (Pull #296)
+- The `ext` argument is now named `extensions`, and is no longer optional. (Pull #296)
+- The `"reason"` extension keyword is now named `"reason_phrase"`. (Pull #296)
+- The `"reason_phrase"` and `"http_version"` extensions now use byte strings for their values. (Pull #296)
+- The `httpcore.PlainByteStream()` class becomes `httpcore.ByteStream()`. (Pull #296)
+
+### Added
+
+- Streams now support a `.read()` interface. (Pull #296)
+
+### Fixed
+
+- Task cancellation no longer leaks connections from the connection pool. (Pull #305)
+
+## 0.12.3 (December 7th, 2020)
+
+### Fixed
+
+- Abort SSL connections on close rather than waiting for remote EOF when using `asyncio`. (Pull #167)
+- Fix exception raised in case of connect timeouts when using the `anyio` backend. (Pull #236)
+- Fix `Host` header precedence for `:authority` in HTTP/2. (Pull #241, #243)
+- Handle extra edge case when detecting for socket readability when using `asyncio`. (Pull #242, #244)
+- Fix `asyncio` SSL warning when using proxy tunneling. (Pull #249)
+
+## 0.12.2 (November 20th, 2020)
+
+### Fixed
+
+- Properly wrap connect errors on the asyncio backend. (Pull #235)
+- Fix `ImportError` occurring on Python 3.9 when using the HTTP/1.1 sync client in a multithreaded context. (Pull #237)
+
+## 0.12.1 (November 7th, 2020)
+
+### Added
+
+- Add connect retries. (Pull #221)
+
+### Fixed
+
+- Tweak detection of dropped connections, resolving an issue with open files limits on Linux. (Pull #185)
+- Avoid leaking connections when establishing an HTTP tunnel to a proxy has failed. (Pull #223)
+- Properly wrap OS errors when using `trio`. (Pull #225)
+
+## 0.12.0 (October 6th, 2020)
+
+### Changed
+
+- HTTP header casing is now preserved, rather than always sent in lowercase. (#216 and python-hyper/h11#104)
+
+### Added
+
+- Add Python 3.9 to officially supported versions.
+
+### Fixed
+
+- Gracefully handle a stdlib asyncio bug when a connection is closed while it is in a paused-for-reading state. (#201)
+
+## 0.11.1 (September 28th, 2020)
+
+### Fixed
+
+- Add await to async semaphore release() coroutine (#197)
+- Drop incorrect curio classifier (#192)
+
+## 0.11.0 (September 22nd, 2020)
+
+The Transport API with 0.11.0 has a couple of significant changes.
+
+Firstly, we've changed the request interface in order to allow extensions, which will later enable us to support features
+such as trailing headers, HTTP/2 server push, and CONNECT/Upgrade connections.
+
+The interface changes from:
+
+```python
+def request(method, url, headers, stream, timeout):
+    return (http_version, status_code, reason, headers, stream)
+```
+
+To instead include an optional dictionary of extensions on the request and response:
+
+```python
+def request(method, url, headers, stream, ext):
+    return (status_code, headers, stream, ext)
+```
+
+Having an open-ended extensions point will allow us to add later support for various optional features, that wouldn't otherwise be supported without these API changes.
+
+In particular:
+
+* Trailing headers support.
+* HTTP/2 Server Push.
+* sendfile.
+* Exposing raw connection on CONNECT, Upgrade, HTTP/2 bi-di streaming.
+* Exposing debug information out of the API, including template name, template context.
+
+Currently extensions are limited to:
+
+* request: `timeout` - Optional. Timeout dictionary.
+* response: `http_version` - Optional. Include the HTTP version used on the response.
+* response: `reason` - Optional. Include the reason phrase used on the response. Only valid with HTTP/1.*.
+
+See https://github.com/encode/httpx/issues/1274#issuecomment-694884553 for the history behind this.
+
+Secondly, the async version of `request` is now namespaced as `arequest`.
+
+This allows concrete transports to support both sync and async implementations on the same class.
+
+### Added
+
+- Add curio support. (Pull #168)
+- Add anyio support, with `backend="anyio"`. (Pull #169)
+
+### Changed
+
+- Update the Transport API to use 'ext' for optional extensions. (Pull #190)
+- Update the Transport API to use `.request` and `.arequest` so implementations can support both sync and async. (Pull #189)
+
+## 0.10.2 (August 20th, 2020)
+
+### Added
+
+- Added Unix Domain Socket support. (Pull #139)
+
+### Fixed
+
+- Always include the port on proxy CONNECT requests. (Pull #154)
+- Fix `max_keepalive_connections` configuration. (Pull #153)
+- Fixes behaviour in HTTP/1.1 where server disconnects can be used to signal the end of the response body. (Pull #164)
+
+## 0.10.1 (August 7th, 2020)
+
+- Include `max_keepalive_connections` on `AsyncHTTPProxy`/`SyncHTTPProxy` classes.
+
+## 0.10.0 (August 7th, 2020)
+
+The most notable change in the 0.10.0 release is that HTTP/2 support is now fully optional.
+
+Use either `pip install httpcore` for HTTP/1.1 support only, or `pip install httpcore[http2]` for HTTP/1.1 and HTTP/2 support.
+
+### Added
+
+- HTTP/2 support becomes optional. (Pull #121, #130)
+- Add `local_address=...` support. (Pull #100, #134)
+- Add `PlainByteStream`, `IteratorByteStream`, `AsyncIteratorByteStream`. The `AsyncByteStream` and `SyncByteStream` classes are now pure interface classes. (#133)
+- Add `LocalProtocolError`, `RemoteProtocolError` exceptions. (Pull #129)
+- Add `UnsupportedProtocol` exception. (Pull #128)
+- Add `.get_connection_info()` method. (Pull #102, #137)
+- Add better TRACE logs. (Pull #101)
+
+### Changed
+
+- `max_keepalive` is deprecated in favour of `max_keepalive_connections`. (Pull #140)
+
+### Fixed
+
+- Improve handling of server disconnects. (Pull #112)
+
+## 0.9.1 (May 27th, 2020)
+
+### Fixed
+
+- Proper host resolution for sync case, including IPv6 support. (Pull #97)
+- Close outstanding connections when connection pool is closed. (Pull #98)
+
+## 0.9.0 (May 21st, 2020)
+
+### Changed
+
+- URL port becomes an `Optional[int]` instead of `int`. (Pull #92)
+
+### Fixed
+
+- Honor HTTP/2 max concurrent streams settings. (Pull #89, #90)
+- Remove incorrect debug log.
(Pull #83)
+
+## 0.8.4 (May 11th, 2020)
+
+### Added
+
+- Logging via HTTPCORE_LOG_LEVEL and HTTPX_LOG_LEVEL environment variables
+and TRACE level logging. (Pull #79)
+
+### Fixed
+
+- Reuse of connections on HTTP/2 in close concurrency situations. (Pull #81)
+
+## 0.8.3 (May 6th, 2020)
+
+### Fixed
+
+- Include `Host` and `Accept` headers on proxy "CONNECT" requests.
+- De-duplicate any headers also contained in proxy_headers.
+- HTTP/2 flag not being passed down to proxy connections.
+
+## 0.8.2 (May 3rd, 2020)
+
+### Fixed
+
+- Fix connections using proxy forwarding requests not being added to the
+connection pool properly. (Pull #70)
+
+## 0.8.1 (April 30th, 2020)
+
+### Changed
+
+- Allow inheritance of both `httpcore.AsyncByteStream` and `httpcore.SyncByteStream` without type conflicts.
+
+## 0.8.0 (April 30th, 2020)
+
+### Fixed
+
+- Fixed tunnel proxy support.
+
+### Added
+
+- New `TimeoutException` base class.
+
+## 0.7.0 (March 5th, 2020)
+
+- First integration with HTTPX.
diff --git a/venv/Lib/site-packages/httpcore-1.0.9.dist-info/RECORD b/venv/Lib/site-packages/httpcore-1.0.9.dist-info/RECORD
new file mode 100644
index 00000000..3f37ea06
--- /dev/null
+++ b/venv/Lib/site-packages/httpcore-1.0.9.dist-info/RECORD
@@ -0,0 +1,68 @@
+httpcore-1.0.9.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+httpcore-1.0.9.dist-info/METADATA,sha256=_i1P2mGZEol4d54M8n88BFxTGGP83Zh-rMdPOhjUHCE,21529
+httpcore-1.0.9.dist-info/RECORD,,
+httpcore-1.0.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+httpcore-1.0.9.dist-info/licenses/LICENSE.md,sha256=_ctZFUx0y6uhahEkL3dAvqnyPW_rVUeRfYxflKgDkqU,1518
+httpcore/__init__.py,sha256=9kT_kqChCCJUTHww24ZmR_ezcdbpRYWksD-gYNzkZP8,3445
+httpcore/__pycache__/__init__.cpython-312.pyc,,
+httpcore/__pycache__/_api.cpython-312.pyc,,
+httpcore/__pycache__/_exceptions.cpython-312.pyc,,
+httpcore/__pycache__/_models.cpython-312.pyc,,
+httpcore/__pycache__/_ssl.cpython-312.pyc,,
+httpcore/__pycache__/_synchronization.cpython-312.pyc,,
+httpcore/__pycache__/_trace.cpython-312.pyc,,
+httpcore/__pycache__/_utils.cpython-312.pyc,,
+httpcore/_api.py,sha256=unZmeDschBWCGCPCwkS3Wot9euK6bg_kKxLtGTxw214,3146
+httpcore/_async/__init__.py,sha256=EWdl2v4thnAHzJpqjU4h2a8DUiGAvNiWrkii9pfhTf0,1221
+httpcore/_async/__pycache__/__init__.cpython-312.pyc,,
+httpcore/_async/__pycache__/connection.cpython-312.pyc,,
+httpcore/_async/__pycache__/connection_pool.cpython-312.pyc,,
+httpcore/_async/__pycache__/http11.cpython-312.pyc,,
+httpcore/_async/__pycache__/http2.cpython-312.pyc,,
+httpcore/_async/__pycache__/http_proxy.cpython-312.pyc,,
+httpcore/_async/__pycache__/interfaces.cpython-312.pyc,,
+httpcore/_async/__pycache__/socks_proxy.cpython-312.pyc,,
+httpcore/_async/connection.py,sha256=6OcPXqMEfc0BU38_-iHUNDd1vKSTc2UVT09XqNb_BOk,8449
+httpcore/_async/connection_pool.py,sha256=DOIQ2s2ZCf9qfwxhzMprTPLqCL8OxGXiKF6qRHxvVyY,17307
+httpcore/_async/http11.py,sha256=-qM9bV7PjSQF5vxs37-eUXOIFwbIjPcZbNliuX9TtBw,13880
+httpcore/_async/http2.py,sha256=azX1fcmtXaIwjputFlZ4vd92J8xwjGOa9ax9QIv4394,23936
+httpcore/_async/http_proxy.py,sha256=2zVkrlv-Ds-rWGaqaXlrhEJiAQFPo23BT3Gq_sWoBXU,14701
+httpcore/_async/interfaces.py,sha256=jTiaWL83pgpGC9ziv90ZfwaKNMmHwmOalzaKiuTxATo,4455
+httpcore/_async/socks_proxy.py,sha256=lLKgLlggPfhFlqi0ODeBkOWvt9CghBBUyqsnsU1tx6Q,13841
+httpcore/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+httpcore/_backends/__pycache__/__init__.cpython-312.pyc,,
+httpcore/_backends/__pycache__/anyio.cpython-312.pyc,, +httpcore/_backends/__pycache__/auto.cpython-312.pyc,, +httpcore/_backends/__pycache__/base.cpython-312.pyc,, +httpcore/_backends/__pycache__/mock.cpython-312.pyc,, +httpcore/_backends/__pycache__/sync.cpython-312.pyc,, +httpcore/_backends/__pycache__/trio.cpython-312.pyc,, +httpcore/_backends/anyio.py,sha256=x8PgEhXRC8bVqsdzk_YJx8Y6d9Tub06CuUSwnbmtqoY,5252 +httpcore/_backends/auto.py,sha256=zO136PKZmsaTDK-HRk84eA-MUg8_2wJf4NvmK432Aio,1662 +httpcore/_backends/base.py,sha256=aShgRdZnMmRhFWHetjumlM73f8Kz1YOAyCUP_4kHslA,3042 +httpcore/_backends/mock.py,sha256=er9T436uSe7NLrfiLa4x6Nuqg5ivQ693CxWYCWsgbH4,4077 +httpcore/_backends/sync.py,sha256=bhE4d9iK9Umxdsdsgm2EfKnXaBms2WggGYU-7jmUujU,7977 +httpcore/_backends/trio.py,sha256=LHu4_Mr5MswQmmT3yE4oLgf9b_JJfeVS4BjDxeJc7Ro,5996 +httpcore/_exceptions.py,sha256=looCKga3_YVYu3s-d3L9RMPRJyhsY7fiuuGxvkOD0c0,1184 +httpcore/_models.py,sha256=IO2CcXcdpovRcLTdGFGB6RyBZdEm2h_TOmoCc4rEKho,17623 +httpcore/_ssl.py,sha256=srqmSNU4iOUvWF-SrJvb8G_YEbHFELOXQOwdDIBTS9c,187 +httpcore/_sync/__init__.py,sha256=JBDIgXt5la1LCJ1sLQeKhjKFpLnpNr8Svs6z2ni3fgg,1141 +httpcore/_sync/__pycache__/__init__.cpython-312.pyc,, +httpcore/_sync/__pycache__/connection.cpython-312.pyc,, +httpcore/_sync/__pycache__/connection_pool.cpython-312.pyc,, +httpcore/_sync/__pycache__/http11.cpython-312.pyc,, +httpcore/_sync/__pycache__/http2.cpython-312.pyc,, +httpcore/_sync/__pycache__/http_proxy.cpython-312.pyc,, +httpcore/_sync/__pycache__/interfaces.cpython-312.pyc,, +httpcore/_sync/__pycache__/socks_proxy.cpython-312.pyc,, +httpcore/_sync/connection.py,sha256=9exGOb3PB-Mp2T1-sckSeL2t-tJ_9-NXomV8ihmWCgU,8238 +httpcore/_sync/connection_pool.py,sha256=a-T8LTsUxc7r0Ww1atfHSDoWPjQ0fA8Ul7S3-F0Mj70,16955 +httpcore/_sync/http11.py,sha256=IFobD1Md5JFlJGKWnh1_Q3epikUryI8qo09v8MiJIEA,13476 +httpcore/_sync/http2.py,sha256=AxU4yhcq68Bn5vqdJYtiXKYUj7nvhYbxz3v4rT4xnvA,23400 +httpcore/_sync/http_proxy.py,sha256=_al_6crKuEZu2wyvu493RZImJdBJnj5oGKNjLOJL2Zo,14463 +httpcore/_sync/interfaces.py,sha256=snXON42vUDHO5JBJvo8D4VWk2Wat44z2OXXHDrjbl94,4344 +httpcore/_sync/socks_proxy.py,sha256=zegZW9Snqj2_992DFJa8_CppOVBkVL4AgwduRkStakQ,13614 +httpcore/_synchronization.py,sha256=zSi13mAColBnknjZBknUC6hKNDQT4C6ijnezZ-r0T2s,9434 +httpcore/_trace.py,sha256=ck6ZoIzYTkdNAIfq5MGeKqBXDtqjOX-qfYwmZFbrGco,3952 +httpcore/_utils.py,sha256=_RLgXYOAYC350ikALV59GZ68IJrdocRZxPs9PjmzdFY,1537 +httpcore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/Lib/site-packages/httpcore-1.0.9.dist-info/WHEEL b/venv/Lib/site-packages/httpcore-1.0.9.dist-info/WHEEL new file mode 100644 index 00000000..12228d41 --- /dev/null +++ b/venv/Lib/site-packages/httpcore-1.0.9.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.27.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/Lib/site-packages/httpcore-1.0.9.dist-info/licenses/LICENSE.md b/venv/Lib/site-packages/httpcore-1.0.9.dist-info/licenses/LICENSE.md new file mode 100644 index 00000000..311b2b56 --- /dev/null +++ b/venv/Lib/site-packages/httpcore-1.0.9.dist-info/licenses/LICENSE.md @@ -0,0 +1,27 @@ +Copyright © 2020, [Encode OSS Ltd](https://www.encode.io/). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/Lib/site-packages/httpcore/__init__.py b/venv/Lib/site-packages/httpcore/__init__.py new file mode 100644 index 00000000..9a92dc4a --- /dev/null +++ b/venv/Lib/site-packages/httpcore/__init__.py @@ -0,0 +1,141 @@ +from ._api import request, stream +from ._async import ( + AsyncConnectionInterface, + AsyncConnectionPool, + AsyncHTTP2Connection, + AsyncHTTP11Connection, + AsyncHTTPConnection, + AsyncHTTPProxy, + AsyncSOCKSProxy, +) +from ._backends.base import ( + SOCKET_OPTION, + AsyncNetworkBackend, + AsyncNetworkStream, + NetworkBackend, + NetworkStream, +) +from ._backends.mock import AsyncMockBackend, AsyncMockStream, MockBackend, MockStream +from ._backends.sync import SyncBackend +from ._exceptions import ( + ConnectError, + ConnectionNotAvailable, + ConnectTimeout, + LocalProtocolError, + NetworkError, + PoolTimeout, + ProtocolError, + ProxyError, + ReadError, + ReadTimeout, + RemoteProtocolError, + TimeoutException, + UnsupportedProtocol, + WriteError, + WriteTimeout, +) +from ._models import URL, Origin, Proxy, Request, Response +from ._ssl import default_ssl_context +from ._sync import ( + ConnectionInterface, + ConnectionPool, + HTTP2Connection, + HTTP11Connection, + HTTPConnection, + HTTPProxy, + SOCKSProxy, +) + +# The 'httpcore.AnyIOBackend' class is conditional on 'anyio' being installed. +try: + from ._backends.anyio import AnyIOBackend +except ImportError: # pragma: nocover + + class AnyIOBackend: # type: ignore + def __init__(self, *args, **kwargs): # type: ignore + msg = ( + "Attempted to use 'httpcore.AnyIOBackend' but 'anyio' is not installed." + ) + raise RuntimeError(msg) + + +# The 'httpcore.TrioBackend' class is conditional on 'trio' being installed. +try: + from ._backends.trio import TrioBackend +except ImportError: # pragma: nocover + + class TrioBackend: # type: ignore + def __init__(self, *args, **kwargs): # type: ignore + msg = "Attempted to use 'httpcore.TrioBackend' but 'trio' is not installed." 
+ raise RuntimeError(msg) + + +__all__ = [ + # top-level requests + "request", + "stream", + # models + "Origin", + "URL", + "Request", + "Response", + "Proxy", + # async + "AsyncHTTPConnection", + "AsyncConnectionPool", + "AsyncHTTPProxy", + "AsyncHTTP11Connection", + "AsyncHTTP2Connection", + "AsyncConnectionInterface", + "AsyncSOCKSProxy", + # sync + "HTTPConnection", + "ConnectionPool", + "HTTPProxy", + "HTTP11Connection", + "HTTP2Connection", + "ConnectionInterface", + "SOCKSProxy", + # network backends, implementations + "SyncBackend", + "AnyIOBackend", + "TrioBackend", + # network backends, mock implementations + "AsyncMockBackend", + "AsyncMockStream", + "MockBackend", + "MockStream", + # network backends, interface + "AsyncNetworkStream", + "AsyncNetworkBackend", + "NetworkStream", + "NetworkBackend", + # util + "default_ssl_context", + "SOCKET_OPTION", + # exceptions + "ConnectionNotAvailable", + "ProxyError", + "ProtocolError", + "LocalProtocolError", + "RemoteProtocolError", + "UnsupportedProtocol", + "TimeoutException", + "PoolTimeout", + "ConnectTimeout", + "ReadTimeout", + "WriteTimeout", + "NetworkError", + "ConnectError", + "ReadError", + "WriteError", +] + +__version__ = "1.0.9" + + +__locals = locals() +for __name in __all__: + # Exclude SOCKET_OPTION, it causes AttributeError on Python 3.14 + if not __name.startswith(("__", "SOCKET_OPTION")): + setattr(__locals[__name], "__module__", "httpcore") # noqa diff --git a/venv/Lib/site-packages/httpcore/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/httpcore/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a5228302 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/__pycache__/_api.cpython-312.pyc b/venv/Lib/site-packages/httpcore/__pycache__/_api.cpython-312.pyc new file mode 100644 index 00000000..0259342e Binary files /dev/null and b/venv/Lib/site-packages/httpcore/__pycache__/_api.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/__pycache__/_exceptions.cpython-312.pyc b/venv/Lib/site-packages/httpcore/__pycache__/_exceptions.cpython-312.pyc new file mode 100644 index 00000000..f35fe8ce Binary files /dev/null and b/venv/Lib/site-packages/httpcore/__pycache__/_exceptions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/__pycache__/_models.cpython-312.pyc b/venv/Lib/site-packages/httpcore/__pycache__/_models.cpython-312.pyc new file mode 100644 index 00000000..20c09ffb Binary files /dev/null and b/venv/Lib/site-packages/httpcore/__pycache__/_models.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/__pycache__/_ssl.cpython-312.pyc b/venv/Lib/site-packages/httpcore/__pycache__/_ssl.cpython-312.pyc new file mode 100644 index 00000000..8d6319c3 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/__pycache__/_ssl.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/__pycache__/_synchronization.cpython-312.pyc b/venv/Lib/site-packages/httpcore/__pycache__/_synchronization.cpython-312.pyc new file mode 100644 index 00000000..4b75fc01 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/__pycache__/_synchronization.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/__pycache__/_trace.cpython-312.pyc b/venv/Lib/site-packages/httpcore/__pycache__/_trace.cpython-312.pyc new file mode 100644 index 00000000..6eb641d7 Binary files /dev/null and 
b/venv/Lib/site-packages/httpcore/__pycache__/_trace.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/httpcore/__pycache__/_utils.cpython-312.pyc b/venv/Lib/site-packages/httpcore/__pycache__/_utils.cpython-312.pyc
new file mode 100644
index 00000000..a9289044
Binary files /dev/null and b/venv/Lib/site-packages/httpcore/__pycache__/_utils.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/httpcore/_api.py b/venv/Lib/site-packages/httpcore/_api.py
new file mode 100644
index 00000000..38b961d1
--- /dev/null
+++ b/venv/Lib/site-packages/httpcore/_api.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+import contextlib
+import typing
+
+from ._models import URL, Extensions, HeaderTypes, Response
+from ._sync.connection_pool import ConnectionPool
+
+
+def request(
+    method: bytes | str,
+    url: URL | bytes | str,
+    *,
+    headers: HeaderTypes = None,
+    content: bytes | typing.Iterator[bytes] | None = None,
+    extensions: Extensions | None = None,
+) -> Response:
+    """
+    Sends an HTTP request, returning the response.
+
+    ```
+    response = httpcore.request("GET", "https://www.example.com/")
+    ```
+
+    Arguments:
+        method: The HTTP method for the request. Typically one of `"GET"`,
+            `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.
+        url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,
+            or as str/bytes.
+        headers: The HTTP request headers. Either as a dictionary of str/bytes,
+            or as a list of two-tuples of str/bytes.
+        content: The content of the request body. Either as bytes,
+            or as a bytes iterator.
+        extensions: A dictionary of optional extra information included on the request.
+            Possible keys include `"timeout"`.
+
+    Returns:
+        An instance of `httpcore.Response`.
+    """
+    with ConnectionPool() as pool:
+        return pool.request(
+            method=method,
+            url=url,
+            headers=headers,
+            content=content,
+            extensions=extensions,
+        )
+
+
+@contextlib.contextmanager
+def stream(
+    method: bytes | str,
+    url: URL | bytes | str,
+    *,
+    headers: HeaderTypes = None,
+    content: bytes | typing.Iterator[bytes] | None = None,
+    extensions: Extensions | None = None,
+) -> typing.Iterator[Response]:
+    """
+    Sends an HTTP request, returning the response within a context manager.
+
+    ```
+    with httpcore.stream("GET", "https://www.example.com/") as response:
+        ...
+    ```
+
+    When using the `stream()` function, the body of the response will not be
+    automatically read. If you want to access the response body you should
+    either use `content = response.read()`, or `for chunk in response.iter_stream()`.
+
+    Arguments:
+        method: The HTTP method for the request. Typically one of `"GET"`,
+            `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.
+        url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,
+            or as str/bytes.
+        headers: The HTTP request headers. Either as a dictionary of str/bytes,
+            or as a list of two-tuples of str/bytes.
+        content: The content of the request body. Either as bytes,
+            or as a bytes iterator.
+        extensions: A dictionary of optional extra information included on the request.
+            Possible keys include `"timeout"`.
+
+    Returns:
+        An instance of `httpcore.Response`.
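+
+    For example, iterating over a streamed response body (a brief usage
+    sketch; the URL is illustrative):
+
+    ```
+    with httpcore.stream("GET", "https://www.example.com/") as response:
+        for chunk in response.iter_stream():
+            print(chunk)
+    ```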
+ """ + with ConnectionPool() as pool: + with pool.stream( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) as response: + yield response diff --git a/venv/Lib/site-packages/httpcore/_async/__init__.py b/venv/Lib/site-packages/httpcore/_async/__init__.py new file mode 100644 index 00000000..88dc7f01 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_async/__init__.py @@ -0,0 +1,39 @@ +from .connection import AsyncHTTPConnection +from .connection_pool import AsyncConnectionPool +from .http11 import AsyncHTTP11Connection +from .http_proxy import AsyncHTTPProxy +from .interfaces import AsyncConnectionInterface + +try: + from .http2 import AsyncHTTP2Connection +except ImportError: # pragma: nocover + + class AsyncHTTP2Connection: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use http2 support, but the `h2` package is not " + "installed. Use 'pip install httpcore[http2]'." + ) + + +try: + from .socks_proxy import AsyncSOCKSProxy +except ImportError: # pragma: nocover + + class AsyncSOCKSProxy: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use SOCKS support, but the `socksio` package is not " + "installed. Use 'pip install httpcore[socks]'." + ) + + +__all__ = [ + "AsyncHTTPConnection", + "AsyncConnectionPool", + "AsyncHTTPProxy", + "AsyncHTTP11Connection", + "AsyncHTTP2Connection", + "AsyncConnectionInterface", + "AsyncSOCKSProxy", +] diff --git a/venv/Lib/site-packages/httpcore/_async/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_async/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..6ea156ad Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_async/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_async/__pycache__/connection.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_async/__pycache__/connection.cpython-312.pyc new file mode 100644 index 00000000..6c00ca88 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_async/__pycache__/connection.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-312.pyc new file mode 100644 index 00000000..7d54f785 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_async/__pycache__/http11.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_async/__pycache__/http11.cpython-312.pyc new file mode 100644 index 00000000..30edb7d8 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_async/__pycache__/http11.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_async/__pycache__/http2.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_async/__pycache__/http2.cpython-312.pyc new file mode 100644 index 00000000..8a23e33f Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_async/__pycache__/http2.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-312.pyc new file mode 100644 index 00000000..f977b7b9 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/httpcore/_async/__pycache__/interfaces.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_async/__pycache__/interfaces.cpython-312.pyc new file mode 100644 index 00000000..45762068 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_async/__pycache__/interfaces.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-312.pyc new file mode 100644 index 00000000..b01f0501 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_async/connection.py b/venv/Lib/site-packages/httpcore/_async/connection.py new file mode 100644 index 00000000..b42581df --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_async/connection.py @@ -0,0 +1,222 @@ +from __future__ import annotations + +import itertools +import logging +import ssl +import types +import typing + +from .._backends.auto import AutoBackend +from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream +from .._exceptions import ConnectError, ConnectTimeout +from .._models import Origin, Request, Response +from .._ssl import default_ssl_context +from .._synchronization import AsyncLock +from .._trace import Trace +from .http11 import AsyncHTTP11Connection +from .interfaces import AsyncConnectionInterface + +RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc. + + +logger = logging.getLogger("httpcore.connection") + + +def exponential_backoff(factor: float) -> typing.Iterator[float]: + """ + Generate a geometric sequence that has a ratio of 2 and starts with 0. + + For example: + - `factor = 2`: `0, 2, 4, 8, 16, 32, 64, ...` + - `factor = 3`: `0, 3, 6, 12, 24, 48, 96, ...` + """ + yield 0 + for n in itertools.count(): + yield factor * 2**n + + +class AsyncHTTPConnection(AsyncConnectionInterface): + def __init__( + self, + origin: Origin, + ssl_context: ssl.SSLContext | None = None, + keepalive_expiry: float | None = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: str | None = None, + uds: str | None = None, + network_backend: AsyncNetworkBackend | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> None: + self._origin = origin + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._retries = retries + self._local_address = local_address + self._uds = uds + + self._network_backend: AsyncNetworkBackend = ( + AutoBackend() if network_backend is None else network_backend + ) + self._connection: AsyncConnectionInterface | None = None + self._connect_failed: bool = False + self._request_lock = AsyncLock() + self._socket_options = socket_options + + async def handle_async_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection to {self._origin}" + ) + + try: + async with self._request_lock: + if self._connection is None: + stream = await self._connect(request) + + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import AsyncHTTP2Connection + + self._connection = AsyncHTTP2Connection( + origin=self._origin, + 
stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = AsyncHTTP11Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except BaseException as exc: + self._connect_failed = True + raise exc + + return await self._connection.handle_async_request(request) + + async def _connect(self, request: Request) -> AsyncNetworkStream: + timeouts = request.extensions.get("timeout", {}) + sni_hostname = request.extensions.get("sni_hostname", None) + timeout = timeouts.get("connect", None) + + retries_left = self._retries + delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) + + while True: + try: + if self._uds is None: + kwargs = { + "host": self._origin.host.decode("ascii"), + "port": self._origin.port, + "local_address": self._local_address, + "timeout": timeout, + "socket_options": self._socket_options, + } + async with Trace("connect_tcp", logger, request, kwargs) as trace: + stream = await self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + else: + kwargs = { + "path": self._uds, + "timeout": timeout, + "socket_options": self._socket_options, + } + async with Trace( + "connect_unix_socket", logger, request, kwargs + ) as trace: + stream = await self._network_backend.connect_unix_socket( + **kwargs + ) + trace.return_value = stream + + if self._origin.scheme in (b"https", b"wss"): + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": sni_hostname + or self._origin.host.decode("ascii"), + "timeout": timeout, + } + async with Trace("start_tls", logger, request, kwargs) as trace: + stream = await stream.start_tls(**kwargs) + trace.return_value = stream + return stream + except (ConnectError, ConnectTimeout): + if retries_left <= 0: + raise + retries_left -= 1 + delay = next(delays) + async with Trace("retry", logger, request, kwargs) as trace: + await self._network_backend.sleep(delay) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + async def aclose(self) -> None: + if self._connection is not None: + async with Trace("close", logger, None, {}): + await self._connection.aclose() + + def is_available(self) -> bool: + if self._connection is None: + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. 
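+            # (For example, with http2=True an 'https' origin may negotiate
+            # HTTP/2 via ALPN once connected, and a single HTTP/2 connection
+            # can multiplex many concurrent requests.)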
+ return ( + self._http2 + and (self._origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. + + async def __aenter__(self) -> AsyncHTTPConnection: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + await self.aclose() diff --git a/venv/Lib/site-packages/httpcore/_async/connection_pool.py b/venv/Lib/site-packages/httpcore/_async/connection_pool.py new file mode 100644 index 00000000..96e973d0 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_async/connection_pool.py @@ -0,0 +1,420 @@ +from __future__ import annotations + +import ssl +import sys +import types +import typing + +from .._backends.auto import AutoBackend +from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend +from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol +from .._models import Origin, Proxy, Request, Response +from .._synchronization import AsyncEvent, AsyncShieldCancellation, AsyncThreadLock +from .connection import AsyncHTTPConnection +from .interfaces import AsyncConnectionInterface, AsyncRequestInterface + + +class AsyncPoolRequest: + def __init__(self, request: Request) -> None: + self.request = request + self.connection: AsyncConnectionInterface | None = None + self._connection_acquired = AsyncEvent() + + def assign_to_connection(self, connection: AsyncConnectionInterface | None) -> None: + self.connection = connection + self._connection_acquired.set() + + def clear_connection(self) -> None: + self.connection = None + self._connection_acquired = AsyncEvent() + + async def wait_for_connection( + self, timeout: float | None = None + ) -> AsyncConnectionInterface: + if self.connection is None: + await self._connection_acquired.wait(timeout=timeout) + assert self.connection is not None + return self.connection + + def is_queued(self) -> bool: + return self.connection is None + + +class AsyncConnectionPool(AsyncRequestInterface): + """ + A connection pool for making HTTP requests. + """ + + def __init__( + self, + ssl_context: ssl.SSLContext | None = None, + proxy: Proxy | None = None, + max_connections: int | None = 10, + max_keepalive_connections: int | None = None, + keepalive_expiry: float | None = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: str | None = None, + uds: str | None = None, + network_backend: AsyncNetworkBackend | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + ssl_context: An SSL context to use for verifying connections. 
+                If not specified, the default `httpcore.default_ssl_context()`
+                will be used.
+            max_connections: The maximum number of concurrent HTTP connections that
+                the pool should allow. Any attempt to send a request on a pool that
+                would exceed this amount will block until a connection is available.
+            max_keepalive_connections: The maximum number of idle HTTP connections
+                that will be maintained in the pool.
+            keepalive_expiry: The duration in seconds that an idle HTTP connection
+                may be maintained for before being expired from the pool.
+            http1: A boolean indicating if HTTP/1.1 requests should be supported
+                by the connection pool. Defaults to True.
+            http2: A boolean indicating if HTTP/2 requests should be supported by
+                the connection pool. Defaults to False.
+            retries: The maximum number of retries when trying to establish a
+                connection.
+            local_address: Local address to connect from. Can also be used to connect
+                using a particular address family. Using `local_address="0.0.0.0"`
+                will connect using an `AF_INET` address (IPv4), while using
+                `local_address="::"` will connect using an `AF_INET6` address (IPv6).
+            uds: Path to a Unix Domain Socket to use instead of TCP sockets.
+            network_backend: A backend instance to use for handling network I/O.
+            socket_options: Socket options that have to be included
+                in the TCP socket when the connection is established.
+        """
+        self._ssl_context = ssl_context
+        self._proxy = proxy
+        self._max_connections = (
+            sys.maxsize if max_connections is None else max_connections
+        )
+        self._max_keepalive_connections = (
+            sys.maxsize
+            if max_keepalive_connections is None
+            else max_keepalive_connections
+        )
+        self._max_keepalive_connections = min(
+            self._max_connections, self._max_keepalive_connections
+        )
+
+        self._keepalive_expiry = keepalive_expiry
+        self._http1 = http1
+        self._http2 = http2
+        self._retries = retries
+        self._local_address = local_address
+        self._uds = uds
+
+        self._network_backend = (
+            AutoBackend() if network_backend is None else network_backend
+        )
+        self._socket_options = socket_options
+
+        # The mutable state on a connection pool is the queue of incoming requests,
+        # and the set of connections that are servicing those requests.
+        self._connections: list[AsyncConnectionInterface] = []
+        self._requests: list[AsyncPoolRequest] = []
+
+        # We only mutate the state of the connection pool within an 'optional_thread_lock'
+        # context. This holds a threading lock unless we're running in async mode,
+        # in which case it is a no-op.
+        self._optional_thread_lock = AsyncThreadLock()
+
+    def create_connection(self, origin: Origin) -> AsyncConnectionInterface:
+        if self._proxy is not None:
+            if self._proxy.url.scheme in (b"socks5", b"socks5h"):
+                from .socks_proxy import AsyncSocks5Connection
+
+                return AsyncSocks5Connection(
+                    proxy_origin=self._proxy.url.origin,
+                    proxy_auth=self._proxy.auth,
+                    remote_origin=origin,
+                    ssl_context=self._ssl_context,
+                    keepalive_expiry=self._keepalive_expiry,
+                    http1=self._http1,
+                    http2=self._http2,
+                    network_backend=self._network_backend,
+                )
+            elif origin.scheme == b"http":
+                from .http_proxy import AsyncForwardHTTPConnection
+
+                return AsyncForwardHTTPConnection(
+                    proxy_origin=self._proxy.url.origin,
+                    proxy_headers=self._proxy.headers,
+                    proxy_ssl_context=self._proxy.ssl_context,
+                    remote_origin=origin,
+                    keepalive_expiry=self._keepalive_expiry,
+                    network_backend=self._network_backend,
+                )
+            from .http_proxy import AsyncTunnelHTTPConnection
+
+            return AsyncTunnelHTTPConnection(
+                proxy_origin=self._proxy.url.origin,
+                proxy_headers=self._proxy.headers,
+                proxy_ssl_context=self._proxy.ssl_context,
+                remote_origin=origin,
+                ssl_context=self._ssl_context,
+                keepalive_expiry=self._keepalive_expiry,
+                http1=self._http1,
+                http2=self._http2,
+                network_backend=self._network_backend,
+            )
+
+        return AsyncHTTPConnection(
+            origin=origin,
+            ssl_context=self._ssl_context,
+            keepalive_expiry=self._keepalive_expiry,
+            http1=self._http1,
+            http2=self._http2,
+            retries=self._retries,
+            local_address=self._local_address,
+            uds=self._uds,
+            network_backend=self._network_backend,
+            socket_options=self._socket_options,
+        )
+
+    @property
+    def connections(self) -> list[AsyncConnectionInterface]:
+        """
+        Return a list of the connections currently in the pool.
+
+        For example:
+
+        ```python
+        >>> pool.connections
+        [
+            <AsyncHTTPConnection ['https://example.com:443', HTTP/1.1, ACTIVE, Request Count: 6]>,
+            <AsyncHTTPConnection ['https://example.com:443', HTTP/1.1, IDLE, Request Count: 9]>,
+            <AsyncHTTPConnection ['http://example.com:80', HTTP/1.1, IDLE, Request Count: 1]>,
+        ]
+        ```
+        """
+        return list(self._connections)
+
+    async def handle_async_request(self, request: Request) -> Response:
+        """
+        Send an HTTP request, and return an HTTP response.
+
+        This is the core implementation that is called into by `.request()` or `.stream()`.
+        """
+        scheme = request.url.scheme.decode()
+        if scheme == "":
+            raise UnsupportedProtocol(
+                "Request URL is missing an 'http://' or 'https://' protocol."
+            )
+        if scheme not in ("http", "https", "ws", "wss"):
+            raise UnsupportedProtocol(
+                f"Request URL has an unsupported protocol '{scheme}://'."
+            )
+
+        timeouts = request.extensions.get("timeout", {})
+        timeout = timeouts.get("pool", None)
+
+        with self._optional_thread_lock:
+            # Add the incoming request to our request queue.
+            pool_request = AsyncPoolRequest(request)
+            self._requests.append(pool_request)
+
+        try:
+            while True:
+                with self._optional_thread_lock:
+                    # Assign incoming requests to available connections,
+                    # closing or creating new connections as required.
+                    closing = self._assign_requests_to_connections()
+                await self._close_connections(closing)
+
+                # Wait until this request has an assigned connection.
+                connection = await pool_request.wait_for_connection(timeout=timeout)
+
+                try:
+                    # Send the request on the assigned connection.
+                    response = await connection.handle_async_request(
+                        pool_request.request
+                    )
+                except ConnectionNotAvailable:
+                    # In some cases a connection may initially be available to
+                    # handle a request, but then become unavailable.
+                    #
+                    # In this case we clear the connection and try again.
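+                    # (This can happen, for example, if an HTTP/2 connection
+                    # hits its maximum concurrent stream limit, or if a
+                    # keep-alive connection is closed by the server between
+                    # being selected and the request being sent.)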
+                    pool_request.clear_connection()
+                else:
+                    break  # pragma: nocover
+
+        except BaseException as exc:
+            with self._optional_thread_lock:
+                # For any exception or cancellation we remove the request from
+                # the queue, and then re-assign requests to connections.
+                self._requests.remove(pool_request)
+                closing = self._assign_requests_to_connections()
+
+            await self._close_connections(closing)
+            raise exc from None
+
+        # Return the response. Note that in this case we still have to manage
+        # the point at which the response is closed.
+        assert isinstance(response.stream, typing.AsyncIterable)
+        return Response(
+            status=response.status,
+            headers=response.headers,
+            content=PoolByteStream(
+                stream=response.stream, pool_request=pool_request, pool=self
+            ),
+            extensions=response.extensions,
+        )
+
+    def _assign_requests_to_connections(self) -> list[AsyncConnectionInterface]:
+        """
+        Manage the state of the connection pool, assigning incoming
+        requests to connections as available.
+
+        Called whenever a new request is added or removed from the pool.
+
+        Any closing connections are returned, allowing the I/O for closing
+        those connections to be handled separately.
+        """
+        closing_connections = []
+
+        # First we handle cleaning up any connections that are closed,
+        # have expired their keep-alive, or are surplus idle connections.
+        for connection in list(self._connections):
+            if connection.is_closed():
+                # log: "removing closed connection"
+                self._connections.remove(connection)
+            elif connection.has_expired():
+                # log: "closing expired connection"
+                self._connections.remove(connection)
+                closing_connections.append(connection)
+            elif (
+                connection.is_idle()
+                and len([c for c in self._connections if c.is_idle()])
+                > self._max_keepalive_connections
+            ):
+                # log: "closing idle connection"
+                self._connections.remove(connection)
+                closing_connections.append(connection)
+
+        # Assign queued requests to connections.
+        queued_requests = [request for request in self._requests if request.is_queued()]
+        for pool_request in queued_requests:
+            origin = pool_request.request.url.origin
+            available_connections = [
+                connection
+                for connection in self._connections
+                if connection.can_handle_request(origin) and connection.is_available()
+            ]
+            idle_connections = [
+                connection for connection in self._connections if connection.is_idle()
+            ]
+
+            # There are three cases for how we may be able to handle the request:
+            #
+            # 1. There is an existing connection that can handle the request.
+            # 2. We can create a new connection to handle the request.
+            # 3. We can close an idle connection and then create a new connection
+            #    to handle the request.
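A toy restatement (editorial, not httpcore code) of the three-way decision enumerated above, where `available`, `total`, `limit`, and `idle` describe the pool at the moment a queued request is considered:

```python
def plan(available: int, total: int, limit: int, idle: int) -> str:
    if available:      # case 1: reuse an existing connection
        return "reuse"
    if total < limit:  # case 2: open a brand-new connection
        return "create"
    if idle:           # case 3: evict an idle connection, then create
        return "evict-then-create"
    return "wait"      # otherwise the request stays queued


assert plan(available=0, total=10, limit=10, idle=3) == "evict-then-create"
```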
+ if available_connections: + # log: "reusing existing connection" + connection = available_connections[0] + pool_request.assign_to_connection(connection) + elif len(self._connections) < self._max_connections: + # log: "creating new connection" + connection = self.create_connection(origin) + self._connections.append(connection) + pool_request.assign_to_connection(connection) + elif idle_connections: + # log: "closing idle connection" + connection = idle_connections[0] + self._connections.remove(connection) + closing_connections.append(connection) + # log: "creating new connection" + connection = self.create_connection(origin) + self._connections.append(connection) + pool_request.assign_to_connection(connection) + + return closing_connections + + async def _close_connections(self, closing: list[AsyncConnectionInterface]) -> None: + # Close connections which have been removed from the pool. + with AsyncShieldCancellation(): + for connection in closing: + await connection.aclose() + + async def aclose(self) -> None: + # Explicitly close the connection pool. + # Clears all existing requests and connections. + with self._optional_thread_lock: + closing_connections = list(self._connections) + self._connections = [] + await self._close_connections(closing_connections) + + async def __aenter__(self) -> AsyncConnectionPool: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + await self.aclose() + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + with self._optional_thread_lock: + request_is_queued = [request.is_queued() for request in self._requests] + connection_is_idle = [ + connection.is_idle() for connection in self._connections + ] + + num_active_requests = request_is_queued.count(False) + num_queued_requests = request_is_queued.count(True) + num_active_connections = connection_is_idle.count(False) + num_idle_connections = connection_is_idle.count(True) + + requests_info = ( + f"Requests: {num_active_requests} active, {num_queued_requests} queued" + ) + connection_info = ( + f"Connections: {num_active_connections} active, {num_idle_connections} idle" + ) + + return f"<{class_name} [{requests_info} | {connection_info}]>" + + +class PoolByteStream: + def __init__( + self, + stream: typing.AsyncIterable[bytes], + pool_request: AsyncPoolRequest, + pool: AsyncConnectionPool, + ) -> None: + self._stream = stream + self._pool_request = pool_request + self._pool = pool + self._closed = False + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + try: + async for part in self._stream: + yield part + except BaseException as exc: + await self.aclose() + raise exc from None + + async def aclose(self) -> None: + if not self._closed: + self._closed = True + with AsyncShieldCancellation(): + if hasattr(self._stream, "aclose"): + await self._stream.aclose() + + with self._pool._optional_thread_lock: + self._pool._requests.remove(self._pool_request) + closing = self._pool._assign_requests_to_connections() + + await self._pool._close_connections(closing) diff --git a/venv/Lib/site-packages/httpcore/_async/http11.py b/venv/Lib/site-packages/httpcore/_async/http11.py new file mode 100644 index 00000000..e6d6d709 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_async/http11.py @@ -0,0 +1,379 @@ +from __future__ import annotations + +import enum +import logging +import ssl +import time +import types +import typing + +import h11 + +from 
.._backends.base import AsyncNetworkStream
+from .._exceptions import (
+    ConnectionNotAvailable,
+    LocalProtocolError,
+    RemoteProtocolError,
+    WriteError,
+    map_exceptions,
+)
+from .._models import Origin, Request, Response
+from .._synchronization import AsyncLock, AsyncShieldCancellation
+from .._trace import Trace
+from .interfaces import AsyncConnectionInterface
+
+logger = logging.getLogger("httpcore.http11")
+
+
+# A subset of `h11.Event` types supported by `_send_event`
+H11SendEvent = typing.Union[
+    h11.Request,
+    h11.Data,
+    h11.EndOfMessage,
+]
+
+
+class HTTPConnectionState(enum.IntEnum):
+    NEW = 0
+    ACTIVE = 1
+    IDLE = 2
+    CLOSED = 3
+
+
+class AsyncHTTP11Connection(AsyncConnectionInterface):
+    READ_NUM_BYTES = 64 * 1024
+    MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024
+
+    def __init__(
+        self,
+        origin: Origin,
+        stream: AsyncNetworkStream,
+        keepalive_expiry: float | None = None,
+    ) -> None:
+        self._origin = origin
+        self._network_stream = stream
+        self._keepalive_expiry: float | None = keepalive_expiry
+        self._expire_at: float | None = None
+        self._state = HTTPConnectionState.NEW
+        self._state_lock = AsyncLock()
+        self._request_count = 0
+        self._h11_state = h11.Connection(
+            our_role=h11.CLIENT,
+            max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE,
+        )
+
+    async def handle_async_request(self, request: Request) -> Response:
+        if not self.can_handle_request(request.url.origin):
+            raise RuntimeError(
+                f"Attempted to send request to {request.url.origin} on connection "
+                f"to {self._origin}"
+            )
+
+        async with self._state_lock:
+            if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE):
+                self._request_count += 1
+                self._state = HTTPConnectionState.ACTIVE
+                self._expire_at = None
+            else:
+                raise ConnectionNotAvailable()
+
+        try:
+            kwargs = {"request": request}
+            try:
+                async with Trace(
+                    "send_request_headers", logger, request, kwargs
+                ) as trace:
+                    await self._send_request_headers(**kwargs)
+                async with Trace("send_request_body", logger, request, kwargs) as trace:
+                    await self._send_request_body(**kwargs)
+            except WriteError:
+                # If we get a write error while we're writing the request,
+                # then we suppress this error and move on to attempting to
+                # read the response. Servers can sometimes close the request
+                # pre-emptively and then respond with a well-formed HTTP
+                # error response.
+                pass
+
+            async with Trace(
+                "receive_response_headers", logger, request, kwargs
+            ) as trace:
+                (
+                    http_version,
+                    status,
+                    reason_phrase,
+                    headers,
+                    trailing_data,
+                ) = await self._receive_response_headers(**kwargs)
+                trace.return_value = (
+                    http_version,
+                    status,
+                    reason_phrase,
+                    headers,
+                )
+
+            network_stream = self._network_stream
+
+            # CONNECT or Upgrade request
+            if (status == 101) or (
+                (request.method == b"CONNECT") and (200 <= status < 300)
+            ):
+                network_stream = AsyncHTTP11UpgradeStream(network_stream, trailing_data)
+
+            return Response(
+                status=status,
+                headers=headers,
+                content=HTTP11ConnectionByteStream(self, request),
+                extensions={
+                    "http_version": http_version,
+                    "reason_phrase": reason_phrase,
+                    "network_stream": network_stream,
+                },
+            )
+        except BaseException as exc:
+            with AsyncShieldCancellation():
+                async with Trace("response_closed", logger, request) as trace:
+                    await self._response_closed()
+            raise exc
+
+    # Sending the request...
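The 101/CONNECT branch above wraps the socket in `AsyncHTTP11UpgradeStream` and exposes it through the `"network_stream"` response extension, which is how callers take over the raw stream after an upgrade. A hedged sketch (the URL and upgrade token are illustrative placeholders):

```python
import asyncio

import httpcore


async def upgrade() -> None:
    async with httpcore.AsyncConnectionPool() as pool:
        headers = {"Connection": "Upgrade", "Upgrade": "custom"}
        async with pool.stream(
            "GET", "https://example.com/", headers=headers
        ) as response:
            if response.status == 101:
                # Raw stream, with any bytes the server sent early replayed first.
                stream = response.extensions["network_stream"]
                await stream.write(b"ping\r\n")
                print(await stream.read(max_bytes=1024))


asyncio.run(upgrade())
```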
+ + async def _send_request_headers(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + with map_exceptions({h11.LocalProtocolError: LocalProtocolError}): + event = h11.Request( + method=request.method, + target=request.url.target, + headers=request.headers, + ) + await self._send_event(event, timeout=timeout) + + async def _send_request_body(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + assert isinstance(request.stream, typing.AsyncIterable) + async for chunk in request.stream: + event = h11.Data(data=chunk) + await self._send_event(event, timeout=timeout) + + await self._send_event(h11.EndOfMessage(), timeout=timeout) + + async def _send_event(self, event: h11.Event, timeout: float | None = None) -> None: + bytes_to_send = self._h11_state.send(event) + if bytes_to_send is not None: + await self._network_stream.write(bytes_to_send, timeout=timeout) + + # Receiving the response... + + async def _receive_response_headers( + self, request: Request + ) -> tuple[bytes, int, bytes, list[tuple[bytes, bytes]], bytes]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + while True: + event = await self._receive_event(timeout=timeout) + if isinstance(event, h11.Response): + break + if ( + isinstance(event, h11.InformationalResponse) + and event.status_code == 101 + ): + break + + http_version = b"HTTP/" + event.http_version + + # h11 version 0.11+ supports a `raw_items` interface to get the + # raw header casing, rather than the enforced lowercase headers. + headers = event.headers.raw_items() + + trailing_data, _ = self._h11_state.trailing_data + + return http_version, event.status_code, event.reason, headers, trailing_data + + async def _receive_response_body( + self, request: Request + ) -> typing.AsyncIterator[bytes]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + while True: + event = await self._receive_event(timeout=timeout) + if isinstance(event, h11.Data): + yield bytes(event.data) + elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)): + break + + async def _receive_event( + self, timeout: float | None = None + ) -> h11.Event | type[h11.PAUSED]: + while True: + with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}): + event = self._h11_state.next_event() + + if event is h11.NEED_DATA: + data = await self._network_stream.read( + self.READ_NUM_BYTES, timeout=timeout + ) + + # If we feed this case through h11 we'll raise an exception like: + # + # httpcore.RemoteProtocolError: can't handle event type + # ConnectionClosed when role=SERVER and state=SEND_RESPONSE + # + # Which is accurate, but not very informative from an end-user + # perspective. Instead we handle this case distinctly and treat + # it as a ConnectError. + if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE: + msg = "Server disconnected without sending a response." 
+                    raise RemoteProtocolError(msg)
+
+                self._h11_state.receive_data(data)
+            else:
+                # mypy fails to narrow the type in the if statement above
+                return event  # type: ignore[return-value]
+
+    async def _response_closed(self) -> None:
+        async with self._state_lock:
+            if (
+                self._h11_state.our_state is h11.DONE
+                and self._h11_state.their_state is h11.DONE
+            ):
+                self._state = HTTPConnectionState.IDLE
+                self._h11_state.start_next_cycle()
+                if self._keepalive_expiry is not None:
+                    now = time.monotonic()
+                    self._expire_at = now + self._keepalive_expiry
+            else:
+                await self.aclose()
+
+    # Once the connection is no longer required...
+
+    async def aclose(self) -> None:
+        # Note that this method unilaterally closes the connection, and does
+        # not have any kind of locking in place around it.
+        self._state = HTTPConnectionState.CLOSED
+        await self._network_stream.aclose()
+
+    # The AsyncConnectionInterface methods provide information about the state of
+    # the connection, allowing for a connection pooling implementation to
+    # determine when to reuse and when to close the connection...
+
+    def can_handle_request(self, origin: Origin) -> bool:
+        return origin == self._origin
+
+    def is_available(self) -> bool:
+        # Note that HTTP/1.1 connections in the "NEW" state are not treated as
+        # being "available". The control flow which created the connection will
+        # be able to send an outgoing request, but the connection will not be
+        # acquired from the connection pool for any other request.
+        return self._state == HTTPConnectionState.IDLE
+
+    def has_expired(self) -> bool:
+        now = time.monotonic()
+        keepalive_expired = self._expire_at is not None and now > self._expire_at
+
+        # If the HTTP connection is idle but the socket is readable, then the
+        # only valid state is that the socket is about to return b"", indicating
+        # a server-initiated disconnect.
+        server_disconnected = (
+            self._state == HTTPConnectionState.IDLE
+            and self._network_stream.get_extra_info("is_readable")
+        )
+
+        return keepalive_expired or server_disconnected
+
+    def is_idle(self) -> bool:
+        return self._state == HTTPConnectionState.IDLE
+
+    def is_closed(self) -> bool:
+        return self._state == HTTPConnectionState.CLOSED
+
+    def info(self) -> str:
+        origin = str(self._origin)
+        return (
+            f"{origin!r}, HTTP/1.1, {self._state.name}, "
+            f"Request Count: {self._request_count}"
+        )
+
+    def __repr__(self) -> str:
+        class_name = self.__class__.__name__
+        origin = str(self._origin)
+        return (
+            f"<{class_name} [{origin!r}, {self._state.name}, "
+            f"Request Count: {self._request_count}]>"
+        )
+
+    # These context managers are not used in the standard flow, but are
+    # useful for testing or working with connection instances directly.
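As the comment above says, these context managers are handy when driving a connection directly. A self-contained sketch, assuming httpcore's mock network backend (`httpcore.AsyncMockBackend`, as shipped upstream) so no real sockets are opened; the canned response bytes are illustrative:

```python
import asyncio

import httpcore


async def direct() -> None:
    backend = httpcore.AsyncMockBackend(
        [b"HTTP/1.1 200 OK\r\nContent-Length: 13\r\n\r\nHello, world!"]
    )
    stream = await backend.connect_tcp("example.com", 80)
    origin = httpcore.Origin(b"http", b"example.com", 80)
    async with httpcore.AsyncHTTP11Connection(origin=origin, stream=stream) as conn:
        response = await conn.request("GET", "http://example.com/")
        print(conn.info(), response.status, response.content)


asyncio.run(direct())
```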
+ + async def __aenter__(self) -> AsyncHTTP11Connection: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + await self.aclose() + + +class HTTP11ConnectionByteStream: + def __init__(self, connection: AsyncHTTP11Connection, request: Request) -> None: + self._connection = connection + self._request = request + self._closed = False + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + kwargs = {"request": self._request} + try: + async with Trace("receive_response_body", logger, self._request, kwargs): + async for chunk in self._connection._receive_response_body(**kwargs): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. + with AsyncShieldCancellation(): + await self.aclose() + raise exc + + async def aclose(self) -> None: + if not self._closed: + self._closed = True + async with Trace("response_closed", logger, self._request): + await self._connection._response_closed() + + +class AsyncHTTP11UpgradeStream(AsyncNetworkStream): + def __init__(self, stream: AsyncNetworkStream, leading_data: bytes) -> None: + self._stream = stream + self._leading_data = leading_data + + async def read(self, max_bytes: int, timeout: float | None = None) -> bytes: + if self._leading_data: + buffer = self._leading_data[:max_bytes] + self._leading_data = self._leading_data[max_bytes:] + return buffer + else: + return await self._stream.read(max_bytes, timeout) + + async def write(self, buffer: bytes, timeout: float | None = None) -> None: + await self._stream.write(buffer, timeout) + + async def aclose(self) -> None: + await self._stream.aclose() + + async def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: str | None = None, + timeout: float | None = None, + ) -> AsyncNetworkStream: + return await self._stream.start_tls(ssl_context, server_hostname, timeout) + + def get_extra_info(self, info: str) -> typing.Any: + return self._stream.get_extra_info(info) diff --git a/venv/Lib/site-packages/httpcore/_async/http2.py b/venv/Lib/site-packages/httpcore/_async/http2.py new file mode 100644 index 00000000..dbd0beeb --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_async/http2.py @@ -0,0 +1,592 @@ +from __future__ import annotations + +import enum +import logging +import time +import types +import typing + +import h2.config +import h2.connection +import h2.events +import h2.exceptions +import h2.settings + +from .._backends.base import AsyncNetworkStream +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, +) +from .._models import Origin, Request, Response +from .._synchronization import AsyncLock, AsyncSemaphore, AsyncShieldCancellation +from .._trace import Trace +from .interfaces import AsyncConnectionInterface + +logger = logging.getLogger("httpcore.http2") + + +def has_body_headers(request: Request) -> bool: + return any( + k.lower() == b"content-length" or k.lower() == b"transfer-encoding" + for k, v in request.headers + ) + + +class HTTPConnectionState(enum.IntEnum): + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class AsyncHTTP2Connection(AsyncConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + CONFIG = h2.config.H2Configuration(validate_inbound_headers=False) + + def __init__( + self, + origin: Origin, + stream: AsyncNetworkStream, + 
keepalive_expiry: float | None = None, + ): + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: float | None = keepalive_expiry + self._h2_state = h2.connection.H2Connection(config=self.CONFIG) + self._state = HTTPConnectionState.IDLE + self._expire_at: float | None = None + self._request_count = 0 + self._init_lock = AsyncLock() + self._state_lock = AsyncLock() + self._read_lock = AsyncLock() + self._write_lock = AsyncLock() + self._sent_connection_init = False + self._used_all_stream_ids = False + self._connection_error = False + + # Mapping from stream ID to response stream events. + self._events: dict[ + int, + list[ + h2.events.ResponseReceived + | h2.events.DataReceived + | h2.events.StreamEnded + | h2.events.StreamReset, + ], + ] = {} + + # Connection terminated events are stored as state since + # we need to handle them for all streams. + self._connection_terminated: h2.events.ConnectionTerminated | None = None + + self._read_exception: Exception | None = None + self._write_exception: Exception | None = None + + async def handle_async_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + # This cannot occur in normal operation, since the connection pool + # will only send requests on connections that handle them. + # It's in place simply for resilience as a guard against incorrect + # usage, for anyone working directly with httpcore connections. + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + async with self._state_lock: + if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE): + self._request_count += 1 + self._expire_at = None + self._state = HTTPConnectionState.ACTIVE + else: + raise ConnectionNotAvailable() + + async with self._init_lock: + if not self._sent_connection_init: + try: + sci_kwargs = {"request": request} + async with Trace( + "send_connection_init", logger, request, sci_kwargs + ): + await self._send_connection_init(**sci_kwargs) + except BaseException as exc: + with AsyncShieldCancellation(): + await self.aclose() + raise exc + + self._sent_connection_init = True + + # Initially start with just 1 until the remote server provides + # its max_concurrent_streams value + self._max_streams = 1 + + local_settings_max_streams = ( + self._h2_state.local_settings.max_concurrent_streams + ) + self._max_streams_semaphore = AsyncSemaphore(local_settings_max_streams) + + for _ in range(local_settings_max_streams - self._max_streams): + await self._max_streams_semaphore.acquire() + + await self._max_streams_semaphore.acquire() + + try: + stream_id = self._h2_state.get_next_available_stream_id() + self._events[stream_id] = [] + except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover + self._used_all_stream_ids = True + self._request_count -= 1 + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request, "stream_id": stream_id} + async with Trace("send_request_headers", logger, request, kwargs): + await self._send_request_headers(request=request, stream_id=stream_id) + async with Trace("send_request_body", logger, request, kwargs): + await self._send_request_body(request=request, stream_id=stream_id) + async with Trace( + "receive_response_headers", logger, request, kwargs + ) as trace: + status, headers = await self._receive_response( + request=request, stream_id=stream_id + ) + trace.return_value = (status, headers) + + return Response( + status=status, + headers=headers, + 
content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id), + extensions={ + "http_version": b"HTTP/2", + "network_stream": self._network_stream, + "stream_id": stream_id, + }, + ) + except BaseException as exc: # noqa: PIE786 + with AsyncShieldCancellation(): + kwargs = {"stream_id": stream_id} + async with Trace("response_closed", logger, request, kwargs): + await self._response_closed(stream_id=stream_id) + + if isinstance(exc, h2.exceptions.ProtocolError): + # One case where h2 can raise a protocol error is when a + # closed frame has been seen by the state machine. + # + # This happens when one stream is reading, and encounters + # a GOAWAY event. Other flows of control may then raise + # a protocol error at any point they interact with the 'h2_state'. + # + # In this case we'll have stored the event, and should raise + # it as a RemoteProtocolError. + if self._connection_terminated: # pragma: nocover + raise RemoteProtocolError(self._connection_terminated) + # If h2 raises a protocol error in some other state then we + # must somehow have made a protocol violation. + raise LocalProtocolError(exc) # pragma: nocover + + raise exc + + async def _send_connection_init(self, request: Request) -> None: + """ + The HTTP/2 connection requires some initial setup before we can start + using individual request/response streams on it. + """ + # Need to set these manually here instead of manipulating via + # __setitem__() otherwise the H2Connection will emit SettingsUpdate + # frames in addition to sending the undesired defaults. + self._h2_state.local_settings = h2.settings.Settings( + client=True, + initial_values={ + # Disable PUSH_PROMISE frames from the server since we don't do anything + # with them for now. Maybe when we support caching? + h2.settings.SettingCodes.ENABLE_PUSH: 0, + # These two are taken from h2 for safe defaults + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100, + h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536, + }, + ) + + # Some websites (*cough* Yahoo *cough*) balk at this setting being + # present in the initial handshake since it's not defined in the original + # RFC despite the RFC mandating ignoring settings you don't know about. + del self._h2_state.local_settings[ + h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL + ] + + self._h2_state.initiate_connection() + self._h2_state.increment_flow_control_window(2**24) + await self._write_outgoing_data(request) + + # Sending the request... + + async def _send_request_headers(self, request: Request, stream_id: int) -> None: + """ + Send the request headers to a given stream ID. + """ + end_stream = not has_body_headers(request) + + # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'. + # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require + # HTTP/1.1 style headers, and map them appropriately if we end up on + # an HTTP/2 connection. 
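A toy restatement (editorial, not httpcore code) of the header mapping just described: HTTP/1.1-style headers go in, and the HTTP/2 pseudo-header list built below comes out:

```python
def to_h2_headers(
    method: bytes, scheme: bytes, target: bytes, headers: list[tuple[bytes, bytes]]
) -> list[tuple[bytes, bytes]]:
    # ':authority' replaces 'Host'; framing headers are dropped.
    authority = [v for k, v in headers if k.lower() == b"host"][0]
    pseudo = [
        (b":method", method),
        (b":authority", authority),
        (b":scheme", scheme),
        (b":path", target),
    ]
    rest = [
        (k.lower(), v)
        for k, v in headers
        if k.lower() not in (b"host", b"transfer-encoding")
    ]
    return pseudo + rest


assert to_h2_headers(b"GET", b"https", b"/", [(b"Host", b"example.com")])[1] == (
    b":authority",
    b"example.com",
)
```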
+        authority = [v for k, v in request.headers if k.lower() == b"host"][0]
+
+        headers = [
+            (b":method", request.method),
+            (b":authority", authority),
+            (b":scheme", request.url.scheme),
+            (b":path", request.url.target),
+        ] + [
+            (k.lower(), v)
+            for k, v in request.headers
+            if k.lower()
+            not in (
+                b"host",
+                b"transfer-encoding",
+            )
+        ]
+
+        self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)
+        self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id)
+        await self._write_outgoing_data(request)
+
+    async def _send_request_body(self, request: Request, stream_id: int) -> None:
+        """
+        Iterate over the request body sending it to a given stream ID.
+        """
+        if not has_body_headers(request):
+            return
+
+        assert isinstance(request.stream, typing.AsyncIterable)
+        async for data in request.stream:
+            await self._send_stream_data(request, stream_id, data)
+        await self._send_end_stream(request, stream_id)
+
+    async def _send_stream_data(
+        self, request: Request, stream_id: int, data: bytes
+    ) -> None:
+        """
+        Send a single chunk of data in one or more data frames.
+        """
+        while data:
+            max_flow = await self._wait_for_outgoing_flow(request, stream_id)
+            chunk_size = min(len(data), max_flow)
+            chunk, data = data[:chunk_size], data[chunk_size:]
+            self._h2_state.send_data(stream_id, chunk)
+            await self._write_outgoing_data(request)
+
+    async def _send_end_stream(self, request: Request, stream_id: int) -> None:
+        """
+        Send an empty data frame on a given stream ID with the END_STREAM flag set.
+        """
+        self._h2_state.end_stream(stream_id)
+        await self._write_outgoing_data(request)
+
+    # Receiving the response...
+
+    async def _receive_response(
+        self, request: Request, stream_id: int
+    ) -> tuple[int, list[tuple[bytes, bytes]]]:
+        """
+        Return the response status code and headers for a given stream ID.
+        """
+        while True:
+            event = await self._receive_stream_event(request, stream_id)
+            if isinstance(event, h2.events.ResponseReceived):
+                break
+
+        status_code = 200
+        headers = []
+        assert event.headers is not None
+        for k, v in event.headers:
+            if k == b":status":
+                status_code = int(v.decode("ascii", errors="ignore"))
+            elif not k.startswith(b":"):
+                headers.append((k, v))
+
+        return (status_code, headers)
+
+    async def _receive_response_body(
+        self, request: Request, stream_id: int
+    ) -> typing.AsyncIterator[bytes]:
+        """
+        Iterator that returns the bytes of the response body for a given stream ID.
+        """
+        while True:
+            event = await self._receive_stream_event(request, stream_id)
+            if isinstance(event, h2.events.DataReceived):
+                assert event.flow_controlled_length is not None
+                assert event.data is not None
+                amount = event.flow_controlled_length
+                self._h2_state.acknowledge_received_data(amount, stream_id)
+                await self._write_outgoing_data(request)
+                yield event.data
+            elif isinstance(event, h2.events.StreamEnded):
+                break
+
+    async def _receive_stream_event(
+        self, request: Request, stream_id: int
+    ) -> h2.events.ResponseReceived | h2.events.DataReceived | h2.events.StreamEnded:
+        """
+        Return the next available event for a given stream ID.
+
+        Will read more data from the network if required.
+ """ + while not self._events.get(stream_id): + await self._receive_events(request, stream_id) + event = self._events[stream_id].pop(0) + if isinstance(event, h2.events.StreamReset): + raise RemoteProtocolError(event) + return event + + async def _receive_events( + self, request: Request, stream_id: int | None = None + ) -> None: + """ + Read some data from the network until we see one or more events + for a given stream ID. + """ + async with self._read_lock: + if self._connection_terminated is not None: + last_stream_id = self._connection_terminated.last_stream_id + if stream_id and last_stream_id and stream_id > last_stream_id: + self._request_count -= 1 + raise ConnectionNotAvailable() + raise RemoteProtocolError(self._connection_terminated) + + # This conditional is a bit icky. We don't want to block reading if we've + # actually got an event to return for a given stream. We need to do that + # check *within* the atomic read lock. Though it also need to be optional, + # because when we call it from `_wait_for_outgoing_flow` we *do* want to + # block until we've available flow control, event when we have events + # pending for the stream ID we're attempting to send on. + if stream_id is None or not self._events.get(stream_id): + events = await self._read_incoming_data(request) + for event in events: + if isinstance(event, h2.events.RemoteSettingsChanged): + async with Trace( + "receive_remote_settings", logger, request + ) as trace: + await self._receive_remote_settings_change(event) + trace.return_value = event + + elif isinstance( + event, + ( + h2.events.ResponseReceived, + h2.events.DataReceived, + h2.events.StreamEnded, + h2.events.StreamReset, + ), + ): + if event.stream_id in self._events: + self._events[event.stream_id].append(event) + + elif isinstance(event, h2.events.ConnectionTerminated): + self._connection_terminated = event + + await self._write_outgoing_data(request) + + async def _receive_remote_settings_change( + self, event: h2.events.RemoteSettingsChanged + ) -> None: + max_concurrent_streams = event.changed_settings.get( + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS + ) + if max_concurrent_streams: + new_max_streams = min( + max_concurrent_streams.new_value, + self._h2_state.local_settings.max_concurrent_streams, + ) + if new_max_streams and new_max_streams != self._max_streams: + while new_max_streams > self._max_streams: + await self._max_streams_semaphore.release() + self._max_streams += 1 + while new_max_streams < self._max_streams: + await self._max_streams_semaphore.acquire() + self._max_streams -= 1 + + async def _response_closed(self, stream_id: int) -> None: + await self._max_streams_semaphore.release() + del self._events[stream_id] + async with self._state_lock: + if self._connection_terminated and not self._events: + await self.aclose() + + elif self._state == HTTPConnectionState.ACTIVE and not self._events: + self._state = HTTPConnectionState.IDLE + if self._keepalive_expiry is not None: + now = time.monotonic() + self._expire_at = now + self._keepalive_expiry + if self._used_all_stream_ids: # pragma: nocover + await self.aclose() + + async def aclose(self) -> None: + # Note that this method unilaterally closes the connection, and does + # not have any kind of locking in place around it. + self._h2_state.close_connection() + self._state = HTTPConnectionState.CLOSED + await self._network_stream.aclose() + + # Wrappers around network read/write operations... 
+ + async def _read_incoming_data(self, request: Request) -> list[h2.events.Event]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + if self._read_exception is not None: + raise self._read_exception # pragma: nocover + + try: + data = await self._network_stream.read(self.READ_NUM_BYTES, timeout) + if data == b"": + raise RemoteProtocolError("Server disconnected") + except Exception as exc: + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future reads. + # (For example, this means that a single read timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. + self._read_exception = exc + self._connection_error = True + raise exc + + events: list[h2.events.Event] = self._h2_state.receive_data(data) + + return events + + async def _write_outgoing_data(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + async with self._write_lock: + data_to_send = self._h2_state.data_to_send() + + if self._write_exception is not None: + raise self._write_exception # pragma: nocover + + try: + await self._network_stream.write(data_to_send, timeout) + except Exception as exc: # pragma: nocover + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future write. + # (For example, this means that a single write timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. + self._write_exception = exc + self._connection_error = True + raise exc + + # Flow control... + + async def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int: + """ + Returns the maximum allowable outgoing flow for a given stream. + + If the allowable flow is zero, then waits on the network until + WindowUpdated frames have increased the flow rate. + https://tools.ietf.org/html/rfc7540#section-6.9 + """ + local_flow: int = self._h2_state.local_flow_control_window(stream_id) + max_frame_size: int = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + while flow == 0: + await self._receive_events(request) + local_flow = self._h2_state.local_flow_control_window(stream_id) + max_frame_size = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + return flow + + # Interface for connection pooling... 
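The predicates that follow are exactly what `AsyncConnectionPool._assign_requests_to_connections` consults when routing requests. A hedged sketch observing them through the public API, again assuming the upstream mock backend:

```python
import asyncio

import httpcore


async def inspect() -> None:
    backend = httpcore.AsyncMockBackend(
        [b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok"]
    )
    async with httpcore.AsyncConnectionPool(network_backend=backend) as pool:
        await pool.request("GET", "http://example.com/")
        print(pool)  # e.g. <AsyncConnectionPool [Requests: 0 active, 0 queued | ...]>
        for conn in pool.connections:
            print(conn.info(), conn.is_idle(), conn.is_available())


asyncio.run(inspect())
```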
+ + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + return ( + self._state != HTTPConnectionState.CLOSED + and not self._connection_error + and not self._used_all_stream_ids + and not ( + self._h2_state.state_machine.state + == h2.connection.ConnectionState.CLOSED + ) + ) + + def has_expired(self) -> bool: + now = time.monotonic() + return self._expire_at is not None and now > self._expire_at + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/2, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. + + async def __aenter__(self) -> AsyncHTTP2Connection: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + await self.aclose() + + +class HTTP2ConnectionByteStream: + def __init__( + self, connection: AsyncHTTP2Connection, request: Request, stream_id: int + ) -> None: + self._connection = connection + self._request = request + self._stream_id = stream_id + self._closed = False + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + kwargs = {"request": self._request, "stream_id": self._stream_id} + try: + async with Trace("receive_response_body", logger, self._request, kwargs): + async for chunk in self._connection._receive_response_body( + request=self._request, stream_id=self._stream_id + ): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. 
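The `AsyncShieldCancellation` guard used below corresponds, under the anyio and trio backends, to a shielded cancel scope: cleanup runs to completion even if the surrounding task is being cancelled. The same pattern in plain anyio (a sketch, assuming anyio is the backend in use):

```python
import typing

import anyio


async def close_quietly(closer: typing.Callable[[], typing.Awaitable[None]]) -> None:
    # Run the cleanup to completion even while being cancelled.
    with anyio.CancelScope(shield=True):
        await closer()
```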
+            with AsyncShieldCancellation():
+                await self.aclose()
+            raise exc
+
+    async def aclose(self) -> None:
+        if not self._closed:
+            self._closed = True
+            kwargs = {"stream_id": self._stream_id}
+            async with Trace("response_closed", logger, self._request, kwargs):
+                await self._connection._response_closed(stream_id=self._stream_id)
diff --git a/venv/Lib/site-packages/httpcore/_async/http_proxy.py b/venv/Lib/site-packages/httpcore/_async/http_proxy.py
new file mode 100644
index 00000000..cc9d9206
--- /dev/null
+++ b/venv/Lib/site-packages/httpcore/_async/http_proxy.py
@@ -0,0 +1,367 @@
+from __future__ import annotations
+
+import base64
+import logging
+import ssl
+import typing
+
+from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend
+from .._exceptions import ProxyError
+from .._models import (
+    URL,
+    Origin,
+    Request,
+    Response,
+    enforce_bytes,
+    enforce_headers,
+    enforce_url,
+)
+from .._ssl import default_ssl_context
+from .._synchronization import AsyncLock
+from .._trace import Trace
+from .connection import AsyncHTTPConnection
+from .connection_pool import AsyncConnectionPool
+from .http11 import AsyncHTTP11Connection
+from .interfaces import AsyncConnectionInterface
+
+ByteOrStr = typing.Union[bytes, str]
+HeadersAsSequence = typing.Sequence[typing.Tuple[ByteOrStr, ByteOrStr]]
+HeadersAsMapping = typing.Mapping[ByteOrStr, ByteOrStr]
+
+
+logger = logging.getLogger("httpcore.proxy")
+
+
+def merge_headers(
+    default_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,
+    override_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,
+) -> list[tuple[bytes, bytes]]:
+    """
+    Append default_headers and override_headers, de-duplicating if a key exists
+    in both cases.
+    """
+    default_headers = [] if default_headers is None else list(default_headers)
+    override_headers = [] if override_headers is None else list(override_headers)
+    has_override = set(key.lower() for key, value in override_headers)
+    default_headers = [
+        (key, value)
+        for key, value in default_headers
+        if key.lower() not in has_override
+    ]
+    return default_headers + override_headers
+
+
+class AsyncHTTPProxy(AsyncConnectionPool):  # pragma: nocover
+    """
+    A connection pool that sends requests via an HTTP proxy.
+    """
+
+    def __init__(
+        self,
+        proxy_url: URL | bytes | str,
+        proxy_auth: tuple[bytes | str, bytes | str] | None = None,
+        proxy_headers: HeadersAsMapping | HeadersAsSequence | None = None,
+        ssl_context: ssl.SSLContext | None = None,
+        proxy_ssl_context: ssl.SSLContext | None = None,
+        max_connections: int | None = 10,
+        max_keepalive_connections: int | None = None,
+        keepalive_expiry: float | None = None,
+        http1: bool = True,
+        http2: bool = False,
+        retries: int = 0,
+        local_address: str | None = None,
+        uds: str | None = None,
+        network_backend: AsyncNetworkBackend | None = None,
+        socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
+    ) -> None:
+        """
+        A connection pool for making HTTP requests.
+
+        Parameters:
+            proxy_url: The URL to use when connecting to the proxy server.
+                For example `"http://127.0.0.1:8080/"`.
+            proxy_auth: Any proxy authentication as a two-tuple of
+                (username, password). May be either bytes or ascii-only str.
+            proxy_headers: Any HTTP headers to use for the proxy requests.
+                For example `{"Proxy-Authorization": "Basic <username>:<password>"}`.
+            ssl_context: An SSL context to use for verifying connections.
+                If not specified, the default `httpcore.default_ssl_context()`
+                will be used.
+ proxy_ssl_context: The same as `ssl_context`, but for a proxy server rather than a remote origin. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish + a connection. + local_address: Local address to connect from. Can also be used to + connect using a particular address family. Using + `local_address="0.0.0.0"` will connect using an `AF_INET` address + (IPv4), while using `local_address="::"` will connect using an + `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. + """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + local_address=local_address, + uds=uds, + socket_options=socket_options, + ) + + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + if ( + self._proxy_url.scheme == b"http" and proxy_ssl_context is not None + ): # pragma: no cover + raise RuntimeError( + "The `proxy_ssl_context` argument is not allowed for the http scheme" + ) + + self._ssl_context = ssl_context + self._proxy_ssl_context = proxy_ssl_context + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + if proxy_auth is not None: + username = enforce_bytes(proxy_auth[0], name="proxy_auth") + password = enforce_bytes(proxy_auth[1], name="proxy_auth") + userpass = username + b":" + password + authorization = b"Basic " + base64.b64encode(userpass) + self._proxy_headers = [ + (b"Proxy-Authorization", authorization) + ] + self._proxy_headers + + def create_connection(self, origin: Origin) -> AsyncConnectionInterface: + if origin.scheme == b"http": + return AsyncForwardHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + keepalive_expiry=self._keepalive_expiry, + network_backend=self._network_backend, + proxy_ssl_context=self._proxy_ssl_context, + ) + return AsyncTunnelHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + ssl_context=self._ssl_context, + proxy_ssl_context=self._proxy_ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class AsyncForwardHTTPConnection(AsyncConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_headers: HeadersAsMapping | HeadersAsSequence | None = None, + keepalive_expiry: float | None = None, + network_backend: AsyncNetworkBackend | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + proxy_ssl_context: ssl.SSLContext | None = None, + ) -> None: + 
self._connection = AsyncHTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + socket_options=socket_options, + ssl_context=proxy_ssl_context, + ) + self._proxy_origin = proxy_origin + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + self._remote_origin = remote_origin + + async def handle_async_request(self, request: Request) -> Response: + headers = merge_headers(self._proxy_headers, request.headers) + url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=bytes(request.url), + ) + proxy_request = Request( + method=request.method, + url=url, + headers=headers, + content=request.stream, + extensions=request.extensions, + ) + return await self._connection.handle_async_request(proxy_request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + async def aclose(self) -> None: + await self._connection.aclose() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + +class AsyncTunnelHTTPConnection(AsyncConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + ssl_context: ssl.SSLContext | None = None, + proxy_ssl_context: ssl.SSLContext | None = None, + proxy_headers: typing.Sequence[tuple[bytes, bytes]] | None = None, + keepalive_expiry: float | None = None, + http1: bool = True, + http2: bool = False, + network_backend: AsyncNetworkBackend | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> None: + self._connection: AsyncConnectionInterface = AsyncHTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + socket_options=socket_options, + ssl_context=proxy_ssl_context, + ) + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._ssl_context = ssl_context + self._proxy_ssl_context = proxy_ssl_context + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._connect_lock = AsyncLock() + self._connected = False + + async def handle_async_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + async with self._connect_lock: + if not self._connected: + target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port) + + connect_url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=target, + ) + connect_headers = merge_headers( + [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers + ) + connect_request = Request( + method=b"CONNECT", + url=connect_url, + headers=connect_headers, + extensions=request.extensions, + ) + connect_response = await self._connection.handle_async_request( + connect_request + ) + + if connect_response.status < 200 or connect_response.status > 299: + reason_bytes = connect_response.extensions.get("reason_phrase", b"") + reason_str = reason_bytes.decode("ascii", 
errors="ignore") + msg = "%d %s" % (connect_response.status, reason_str) + await self._connection.aclose() + raise ProxyError(msg) + + stream = connect_response.extensions["network_stream"] + + # Upgrade the stream to SSL + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + async with Trace("start_tls", logger, request, kwargs) as trace: + stream = await stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import AsyncHTTP2Connection + + self._connection = AsyncHTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = AsyncHTTP11Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + + self._connected = True + return await self._connection.handle_async_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + async def aclose(self) -> None: + await self._connection.aclose() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/venv/Lib/site-packages/httpcore/_async/interfaces.py b/venv/Lib/site-packages/httpcore/_async/interfaces.py new file mode 100644 index 00000000..361583be --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_async/interfaces.py @@ -0,0 +1,137 @@ +from __future__ import annotations + +import contextlib +import typing + +from .._models import ( + URL, + Extensions, + HeaderTypes, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, + include_request_headers, +) + + +class AsyncRequestInterface: + async def request( + self, + method: bytes | str, + url: URL | bytes | str, + *, + headers: HeaderTypes = None, + content: bytes | typing.AsyncIterator[bytes] | None = None, + extensions: Extensions | None = None, + ) -> Response: + # Strict type checking on our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. 
+        headers = include_request_headers(headers, url=url, content=content)
+
+        request = Request(
+            method=method,
+            url=url,
+            headers=headers,
+            content=content,
+            extensions=extensions,
+        )
+        response = await self.handle_async_request(request)
+        try:
+            await response.aread()
+        finally:
+            await response.aclose()
+        return response
+
+    @contextlib.asynccontextmanager
+    async def stream(
+        self,
+        method: bytes | str,
+        url: URL | bytes | str,
+        *,
+        headers: HeaderTypes = None,
+        content: bytes | typing.AsyncIterator[bytes] | None = None,
+        extensions: Extensions | None = None,
+    ) -> typing.AsyncIterator[Response]:
+        # Strict type checking on our parameters.
+        method = enforce_bytes(method, name="method")
+        url = enforce_url(url, name="url")
+        headers = enforce_headers(headers, name="headers")
+
+        # Include Host header, and optionally Content-Length or Transfer-Encoding.
+        headers = include_request_headers(headers, url=url, content=content)
+
+        request = Request(
+            method=method,
+            url=url,
+            headers=headers,
+            content=content,
+            extensions=extensions,
+        )
+        response = await self.handle_async_request(request)
+        try:
+            yield response
+        finally:
+            await response.aclose()
+
+    async def handle_async_request(self, request: Request) -> Response:
+        raise NotImplementedError()  # pragma: nocover
+
+
+class AsyncConnectionInterface(AsyncRequestInterface):
+    async def aclose(self) -> None:
+        raise NotImplementedError()  # pragma: nocover
+
+    def info(self) -> str:
+        raise NotImplementedError()  # pragma: nocover
+
+    def can_handle_request(self, origin: Origin) -> bool:
+        raise NotImplementedError()  # pragma: nocover
+
+    def is_available(self) -> bool:
+        """
+        Return `True` if the connection is currently able to accept an
+        outgoing request.
+
+        An HTTP/1.1 connection will only be available if it is currently idle.
+
+        An HTTP/2 connection will be available so long as the stream ID space is
+        not yet exhausted, and the connection is not in an error state.
+
+        While the connection is being established we may not yet know if it is going
+        to result in an HTTP/1.1 or HTTP/2 connection. The connection should be
+        treated as being available, but might ultimately raise `ConnectionNotAvailable`
+        exceptions if multiple requests are attempted over a connection that ends up
+        being established as HTTP/1.1.
+        """
+        raise NotImplementedError()  # pragma: nocover
+
+    def has_expired(self) -> bool:
+        """
+        Return `True` if the connection is in a state where it should be closed.
+
+        This either means that the connection is idle and it has passed the
+        expiry time on its keep-alive, or that the server has sent an EOF.
+        """
+        raise NotImplementedError()  # pragma: nocover
+
+    def is_idle(self) -> bool:
+        """
+        Return `True` if the connection is currently idle.
+        """
+        raise NotImplementedError()  # pragma: nocover
+
+    def is_closed(self) -> bool:
+        """
+        Return `True` if the connection has been closed.
+
+        Used when a response is closed to determine if the connection may be
+        returned to the connection pool or not.
+ """ + raise NotImplementedError() # pragma: nocover diff --git a/venv/Lib/site-packages/httpcore/_async/socks_proxy.py b/venv/Lib/site-packages/httpcore/_async/socks_proxy.py new file mode 100644 index 00000000..b363f55a --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_async/socks_proxy.py @@ -0,0 +1,341 @@ +from __future__ import annotations + +import logging +import ssl + +import socksio + +from .._backends.auto import AutoBackend +from .._backends.base import AsyncNetworkBackend, AsyncNetworkStream +from .._exceptions import ConnectionNotAvailable, ProxyError +from .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url +from .._ssl import default_ssl_context +from .._synchronization import AsyncLock +from .._trace import Trace +from .connection_pool import AsyncConnectionPool +from .http11 import AsyncHTTP11Connection +from .interfaces import AsyncConnectionInterface + +logger = logging.getLogger("httpcore.socks") + + +AUTH_METHODS = { + b"\x00": "NO AUTHENTICATION REQUIRED", + b"\x01": "GSSAPI", + b"\x02": "USERNAME/PASSWORD", + b"\xff": "NO ACCEPTABLE METHODS", +} + +REPLY_CODES = { + b"\x00": "Succeeded", + b"\x01": "General SOCKS server failure", + b"\x02": "Connection not allowed by ruleset", + b"\x03": "Network unreachable", + b"\x04": "Host unreachable", + b"\x05": "Connection refused", + b"\x06": "TTL expired", + b"\x07": "Command not supported", + b"\x08": "Address type not supported", +} + + +async def _init_socks5_connection( + stream: AsyncNetworkStream, + *, + host: bytes, + port: int, + auth: tuple[bytes, bytes] | None = None, +) -> None: + conn = socksio.socks5.SOCKS5Connection() + + # Auth method request + auth_method = ( + socksio.socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED + if auth is None + else socksio.socks5.SOCKS5AuthMethod.USERNAME_PASSWORD + ) + conn.send(socksio.socks5.SOCKS5AuthMethodsRequest([auth_method])) + outgoing_bytes = conn.data_to_send() + await stream.write(outgoing_bytes) + + # Auth method response + incoming_bytes = await stream.read(max_bytes=4096) + response = conn.receive_data(incoming_bytes) + assert isinstance(response, socksio.socks5.SOCKS5AuthReply) + if response.method != auth_method: + requested = AUTH_METHODS.get(auth_method, "UNKNOWN") + responded = AUTH_METHODS.get(response.method, "UNKNOWN") + raise ProxyError( + f"Requested {requested} from proxy server, but got {responded}." 
+        )
+
+    if response.method == socksio.socks5.SOCKS5AuthMethod.USERNAME_PASSWORD:
+        # Username/password request
+        assert auth is not None
+        username, password = auth
+        conn.send(socksio.socks5.SOCKS5UsernamePasswordRequest(username, password))
+        outgoing_bytes = conn.data_to_send()
+        await stream.write(outgoing_bytes)
+
+        # Username/password response
+        incoming_bytes = await stream.read(max_bytes=4096)
+        response = conn.receive_data(incoming_bytes)
+        assert isinstance(response, socksio.socks5.SOCKS5UsernamePasswordReply)
+        if not response.success:
+            raise ProxyError("Invalid username/password")
+
+    # Connect request
+    conn.send(
+        socksio.socks5.SOCKS5CommandRequest.from_address(
+            socksio.socks5.SOCKS5Command.CONNECT, (host, port)
+        )
+    )
+    outgoing_bytes = conn.data_to_send()
+    await stream.write(outgoing_bytes)
+
+    # Connect response
+    incoming_bytes = await stream.read(max_bytes=4096)
+    response = conn.receive_data(incoming_bytes)
+    assert isinstance(response, socksio.socks5.SOCKS5Reply)
+    if response.reply_code != socksio.socks5.SOCKS5ReplyCode.SUCCEEDED:
+        reply_code = REPLY_CODES.get(response.reply_code, "UNKNOWN")
+        raise ProxyError(f"Proxy Server could not connect: {reply_code}.")
+
+
+class AsyncSOCKSProxy(AsyncConnectionPool):  # pragma: nocover
+    """
+    A connection pool that sends requests via a SOCKS5 proxy.
+    """
+
+    def __init__(
+        self,
+        proxy_url: URL | bytes | str,
+        proxy_auth: tuple[bytes | str, bytes | str] | None = None,
+        ssl_context: ssl.SSLContext | None = None,
+        max_connections: int | None = 10,
+        max_keepalive_connections: int | None = None,
+        keepalive_expiry: float | None = None,
+        http1: bool = True,
+        http2: bool = False,
+        retries: int = 0,
+        network_backend: AsyncNetworkBackend | None = None,
+    ) -> None:
+        """
+        A connection pool for making HTTP requests.
+
+        Parameters:
+            proxy_url: The URL to use when connecting to the proxy server.
+                For example `"socks5://127.0.0.1:1080/"`.
+            proxy_auth: Any proxy authentication as a two-tuple of
+                (username, password). May be either bytes or ascii-only str.
+            ssl_context: An SSL context to use for verifying connections.
+                If not specified, the default `httpcore.default_ssl_context()`
+                will be used.
+            max_connections: The maximum number of concurrent HTTP connections that
+                the pool should allow. Any attempt to send a request on a pool that
+                would exceed this amount will block until a connection is available.
+            max_keepalive_connections: The maximum number of idle HTTP connections
+                that will be maintained in the pool.
+            keepalive_expiry: The duration in seconds that an idle HTTP connection
+                may be maintained for before being expired from the pool.
+            http1: A boolean indicating if HTTP/1.1 requests should be supported
+                by the connection pool. Defaults to True.
+            http2: A boolean indicating if HTTP/2 requests should be supported by
+                the connection pool. Defaults to False.
+            retries: The maximum number of retries when trying to establish
+                a connection.
+            network_backend: A backend instance to use for handling network I/O.
+ """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + if proxy_auth is not None: + username, password = proxy_auth + username_bytes = enforce_bytes(username, name="proxy_auth") + password_bytes = enforce_bytes(password, name="proxy_auth") + self._proxy_auth: tuple[bytes, bytes] | None = ( + username_bytes, + password_bytes, + ) + else: + self._proxy_auth = None + + def create_connection(self, origin: Origin) -> AsyncConnectionInterface: + return AsyncSocks5Connection( + proxy_origin=self._proxy_url.origin, + remote_origin=origin, + proxy_auth=self._proxy_auth, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class AsyncSocks5Connection(AsyncConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_auth: tuple[bytes, bytes] | None = None, + ssl_context: ssl.SSLContext | None = None, + keepalive_expiry: float | None = None, + http1: bool = True, + http2: bool = False, + network_backend: AsyncNetworkBackend | None = None, + ) -> None: + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._proxy_auth = proxy_auth + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + + self._network_backend: AsyncNetworkBackend = ( + AutoBackend() if network_backend is None else network_backend + ) + self._connect_lock = AsyncLock() + self._connection: AsyncConnectionInterface | None = None + self._connect_failed = False + + async def handle_async_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + sni_hostname = request.extensions.get("sni_hostname", None) + timeout = timeouts.get("connect", None) + + async with self._connect_lock: + if self._connection is None: + try: + # Connect to the proxy + kwargs = { + "host": self._proxy_origin.host.decode("ascii"), + "port": self._proxy_origin.port, + "timeout": timeout, + } + async with Trace("connect_tcp", logger, request, kwargs) as trace: + stream = await self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + + # Connect to the remote host using socks5 + kwargs = { + "stream": stream, + "host": self._remote_origin.host.decode("ascii"), + "port": self._remote_origin.port, + "auth": self._proxy_auth, + } + async with Trace( + "setup_socks5_connection", logger, request, kwargs + ) as trace: + await _init_socks5_connection(**kwargs) + trace.return_value = stream + + # Upgrade the stream to SSL + if self._remote_origin.scheme == b"https": + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ( + ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ) + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": sni_hostname + or self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + async with Trace("start_tls", logger, request, kwargs) as trace: + stream = await stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = 
stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or ( + self._http2 and not self._http1 + ): # pragma: nocover + from .http2 import AsyncHTTP2Connection + + self._connection = AsyncHTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = AsyncHTTP11Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except Exception as exc: + self._connect_failed = True + raise exc + elif not self._connection.is_available(): # pragma: nocover + raise ConnectionNotAvailable() + + return await self._connection.handle_async_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + async def aclose(self) -> None: + if self._connection is not None: + await self._connection.aclose() + + def is_available(self) -> bool: + if self._connection is None: # pragma: nocover + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. + return ( + self._http2 + and (self._remote_origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: # pragma: nocover + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: # pragma: nocover + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/venv/Lib/site-packages/httpcore/_backends/__init__.py b/venv/Lib/site-packages/httpcore/_backends/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/httpcore/_backends/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_backends/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..980e0c6c Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_backends/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_backends/__pycache__/anyio.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_backends/__pycache__/anyio.cpython-312.pyc new file mode 100644 index 00000000..82e22f73 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_backends/__pycache__/anyio.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_backends/__pycache__/auto.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_backends/__pycache__/auto.cpython-312.pyc new file mode 100644 index 00000000..43de916f Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_backends/__pycache__/auto.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_backends/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_backends/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..a8825290 Binary files 
/dev/null and b/venv/Lib/site-packages/httpcore/_backends/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_backends/__pycache__/mock.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_backends/__pycache__/mock.cpython-312.pyc new file mode 100644 index 00000000..4bb0b5dc Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_backends/__pycache__/mock.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_backends/__pycache__/sync.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_backends/__pycache__/sync.cpython-312.pyc new file mode 100644 index 00000000..079e9700 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_backends/__pycache__/sync.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_backends/__pycache__/trio.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_backends/__pycache__/trio.cpython-312.pyc new file mode 100644 index 00000000..1c77c77d Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_backends/__pycache__/trio.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_backends/anyio.py b/venv/Lib/site-packages/httpcore/_backends/anyio.py new file mode 100644 index 00000000..a140095e --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_backends/anyio.py @@ -0,0 +1,146 @@ +from __future__ import annotations + +import ssl +import typing + +import anyio + +from .._exceptions import ( + ConnectError, + ConnectTimeout, + ReadError, + ReadTimeout, + WriteError, + WriteTimeout, + map_exceptions, +) +from .._utils import is_socket_readable +from .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream + + +class AnyIOStream(AsyncNetworkStream): + def __init__(self, stream: anyio.abc.ByteStream) -> None: + self._stream = stream + + async def read(self, max_bytes: int, timeout: float | None = None) -> bytes: + exc_map = { + TimeoutError: ReadTimeout, + anyio.BrokenResourceError: ReadError, + anyio.ClosedResourceError: ReadError, + anyio.EndOfStream: ReadError, + } + with map_exceptions(exc_map): + with anyio.fail_after(timeout): + try: + return await self._stream.receive(max_bytes=max_bytes) + except anyio.EndOfStream: # pragma: nocover + return b"" + + async def write(self, buffer: bytes, timeout: float | None = None) -> None: + if not buffer: + return + + exc_map = { + TimeoutError: WriteTimeout, + anyio.BrokenResourceError: WriteError, + anyio.ClosedResourceError: WriteError, + } + with map_exceptions(exc_map): + with anyio.fail_after(timeout): + await self._stream.send(item=buffer) + + async def aclose(self) -> None: + await self._stream.aclose() + + async def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: str | None = None, + timeout: float | None = None, + ) -> AsyncNetworkStream: + exc_map = { + TimeoutError: ConnectTimeout, + anyio.BrokenResourceError: ConnectError, + anyio.EndOfStream: ConnectError, + ssl.SSLError: ConnectError, + } + with map_exceptions(exc_map): + try: + with anyio.fail_after(timeout): + ssl_stream = await anyio.streams.tls.TLSStream.wrap( + self._stream, + ssl_context=ssl_context, + hostname=server_hostname, + standard_compatible=False, + server_side=False, + ) + except Exception as exc: # pragma: nocover + await self.aclose() + raise exc + return AnyIOStream(ssl_stream) + + def get_extra_info(self, info: str) -> typing.Any: + if info == "ssl_object": + return self._stream.extra(anyio.streams.tls.TLSAttribute.ssl_object, None) + if info == "client_addr": + return 
self._stream.extra(anyio.abc.SocketAttribute.local_address, None) + if info == "server_addr": + return self._stream.extra(anyio.abc.SocketAttribute.remote_address, None) + if info == "socket": + return self._stream.extra(anyio.abc.SocketAttribute.raw_socket, None) + if info == "is_readable": + sock = self._stream.extra(anyio.abc.SocketAttribute.raw_socket, None) + return is_socket_readable(sock) + return None + + +class AnyIOBackend(AsyncNetworkBackend): + async def connect_tcp( + self, + host: str, + port: int, + timeout: float | None = None, + local_address: str | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> AsyncNetworkStream: # pragma: nocover + if socket_options is None: + socket_options = [] + exc_map = { + TimeoutError: ConnectTimeout, + OSError: ConnectError, + anyio.BrokenResourceError: ConnectError, + } + with map_exceptions(exc_map): + with anyio.fail_after(timeout): + stream: anyio.abc.ByteStream = await anyio.connect_tcp( + remote_host=host, + remote_port=port, + local_host=local_address, + ) + # By default TCP sockets opened in `asyncio` include TCP_NODELAY. + for option in socket_options: + stream._raw_socket.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover + return AnyIOStream(stream) + + async def connect_unix_socket( + self, + path: str, + timeout: float | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> AsyncNetworkStream: # pragma: nocover + if socket_options is None: + socket_options = [] + exc_map = { + TimeoutError: ConnectTimeout, + OSError: ConnectError, + anyio.BrokenResourceError: ConnectError, + } + with map_exceptions(exc_map): + with anyio.fail_after(timeout): + stream: anyio.abc.ByteStream = await anyio.connect_unix(path) + for option in socket_options: + stream._raw_socket.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover + return AnyIOStream(stream) + + async def sleep(self, seconds: float) -> None: + await anyio.sleep(seconds) # pragma: nocover diff --git a/venv/Lib/site-packages/httpcore/_backends/auto.py b/venv/Lib/site-packages/httpcore/_backends/auto.py new file mode 100644 index 00000000..49f0e698 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_backends/auto.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +import typing + +from .._synchronization import current_async_library +from .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream + + +class AutoBackend(AsyncNetworkBackend): + async def _init_backend(self) -> None: + if not (hasattr(self, "_backend")): + backend = current_async_library() + if backend == "trio": + from .trio import TrioBackend + + self._backend: AsyncNetworkBackend = TrioBackend() + else: + from .anyio import AnyIOBackend + + self._backend = AnyIOBackend() + + async def connect_tcp( + self, + host: str, + port: int, + timeout: float | None = None, + local_address: str | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> AsyncNetworkStream: + await self._init_backend() + return await self._backend.connect_tcp( + host, + port, + timeout=timeout, + local_address=local_address, + socket_options=socket_options, + ) + + async def connect_unix_socket( + self, + path: str, + timeout: float | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> AsyncNetworkStream: # pragma: nocover + await self._init_backend() + return await self._backend.connect_unix_socket( + path, timeout=timeout, socket_options=socket_options + ) + + async def 
sleep(self, seconds: float) -> None: # pragma: nocover + await self._init_backend() + return await self._backend.sleep(seconds) diff --git a/venv/Lib/site-packages/httpcore/_backends/base.py b/venv/Lib/site-packages/httpcore/_backends/base.py new file mode 100644 index 00000000..cf55c8b1 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_backends/base.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +import ssl +import time +import typing + +SOCKET_OPTION = typing.Union[ + typing.Tuple[int, int, int], + typing.Tuple[int, int, typing.Union[bytes, bytearray]], + typing.Tuple[int, int, None, int], +] + + +class NetworkStream: + def read(self, max_bytes: int, timeout: float | None = None) -> bytes: + raise NotImplementedError() # pragma: nocover + + def write(self, buffer: bytes, timeout: float | None = None) -> None: + raise NotImplementedError() # pragma: nocover + + def close(self) -> None: + raise NotImplementedError() # pragma: nocover + + def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: str | None = None, + timeout: float | None = None, + ) -> NetworkStream: + raise NotImplementedError() # pragma: nocover + + def get_extra_info(self, info: str) -> typing.Any: + return None # pragma: nocover + + +class NetworkBackend: + def connect_tcp( + self, + host: str, + port: int, + timeout: float | None = None, + local_address: str | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> NetworkStream: + raise NotImplementedError() # pragma: nocover + + def connect_unix_socket( + self, + path: str, + timeout: float | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> NetworkStream: + raise NotImplementedError() # pragma: nocover + + def sleep(self, seconds: float) -> None: + time.sleep(seconds) # pragma: nocover + + +class AsyncNetworkStream: + async def read(self, max_bytes: int, timeout: float | None = None) -> bytes: + raise NotImplementedError() # pragma: nocover + + async def write(self, buffer: bytes, timeout: float | None = None) -> None: + raise NotImplementedError() # pragma: nocover + + async def aclose(self) -> None: + raise NotImplementedError() # pragma: nocover + + async def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: str | None = None, + timeout: float | None = None, + ) -> AsyncNetworkStream: + raise NotImplementedError() # pragma: nocover + + def get_extra_info(self, info: str) -> typing.Any: + return None # pragma: nocover + + +class AsyncNetworkBackend: + async def connect_tcp( + self, + host: str, + port: int, + timeout: float | None = None, + local_address: str | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> AsyncNetworkStream: + raise NotImplementedError() # pragma: nocover + + async def connect_unix_socket( + self, + path: str, + timeout: float | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> AsyncNetworkStream: + raise NotImplementedError() # pragma: nocover + + async def sleep(self, seconds: float) -> None: + raise NotImplementedError() # pragma: nocover diff --git a/venv/Lib/site-packages/httpcore/_backends/mock.py b/venv/Lib/site-packages/httpcore/_backends/mock.py new file mode 100644 index 00000000..9b6edca0 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_backends/mock.py @@ -0,0 +1,143 @@ +from __future__ import annotations + +import ssl +import typing + +from .._exceptions import ReadError +from .base import ( + SOCKET_OPTION, + AsyncNetworkBackend, + 
AsyncNetworkStream,
+    NetworkBackend,
+    NetworkStream,
+)
+
+
+class MockSSLObject:
+    def __init__(self, http2: bool):
+        self._http2 = http2
+
+    def selected_alpn_protocol(self) -> str:
+        return "h2" if self._http2 else "http/1.1"
+
+
+class MockStream(NetworkStream):
+    def __init__(self, buffer: list[bytes], http2: bool = False) -> None:
+        self._buffer = buffer
+        self._http2 = http2
+        self._closed = False
+
+    def read(self, max_bytes: int, timeout: float | None = None) -> bytes:
+        if self._closed:
+            raise ReadError("Connection closed")
+        if not self._buffer:
+            return b""
+        return self._buffer.pop(0)
+
+    def write(self, buffer: bytes, timeout: float | None = None) -> None:
+        pass
+
+    def close(self) -> None:
+        self._closed = True
+
+    def start_tls(
+        self,
+        ssl_context: ssl.SSLContext,
+        server_hostname: str | None = None,
+        timeout: float | None = None,
+    ) -> NetworkStream:
+        return self
+
+    def get_extra_info(self, info: str) -> typing.Any:
+        return MockSSLObject(http2=self._http2) if info == "ssl_object" else None
+
+    def __repr__(self) -> str:
+        return "<httpcore.MockStream>"
+
+
+class MockBackend(NetworkBackend):
+    def __init__(self, buffer: list[bytes], http2: bool = False) -> None:
+        self._buffer = buffer
+        self._http2 = http2
+
+    def connect_tcp(
+        self,
+        host: str,
+        port: int,
+        timeout: float | None = None,
+        local_address: str | None = None,
+        socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
+    ) -> NetworkStream:
+        return MockStream(list(self._buffer), http2=self._http2)
+
+    def connect_unix_socket(
+        self,
+        path: str,
+        timeout: float | None = None,
+        socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
+    ) -> NetworkStream:
+        return MockStream(list(self._buffer), http2=self._http2)
+
+    def sleep(self, seconds: float) -> None:
+        pass
+
+
+class AsyncMockStream(AsyncNetworkStream):
+    def __init__(self, buffer: list[bytes], http2: bool = False) -> None:
+        self._buffer = buffer
+        self._http2 = http2
+        self._closed = False
+
+    async def read(self, max_bytes: int, timeout: float | None = None) -> bytes:
+        if self._closed:
+            raise ReadError("Connection closed")
+        if not self._buffer:
+            return b""
+        return self._buffer.pop(0)
+
+    async def write(self, buffer: bytes, timeout: float | None = None) -> None:
+        pass
+
+    async def aclose(self) -> None:
+        self._closed = True
+
+    async def start_tls(
+        self,
+        ssl_context: ssl.SSLContext,
+        server_hostname: str | None = None,
+        timeout: float | None = None,
+    ) -> AsyncNetworkStream:
+        return self
+
+    def get_extra_info(self, info: str) -> typing.Any:
+        return MockSSLObject(http2=self._http2) if info == "ssl_object" else None
+
+    def __repr__(self) -> str:
+        return "<httpcore.AsyncMockStream>"
+
+
+class AsyncMockBackend(AsyncNetworkBackend):
+    def __init__(self, buffer: list[bytes], http2: bool = False) -> None:
+        self._buffer = buffer
+        self._http2 = http2
+
+    async def connect_tcp(
+        self,
+        host: str,
+        port: int,
+        timeout: float | None = None,
+        local_address: str | None = None,
+        socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
+    ) -> AsyncNetworkStream:
+        return AsyncMockStream(list(self._buffer), http2=self._http2)
+
+    async def connect_unix_socket(
+        self,
+        path: str,
+        timeout: float | None = None,
+        socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
+    ) -> AsyncNetworkStream:
+        return AsyncMockStream(list(self._buffer), http2=self._http2)
+
+    async def sleep(self, seconds: float) -> None:
+        pass
diff --git a/venv/Lib/site-packages/httpcore/_backends/sync.py b/venv/Lib/site-packages/httpcore/_backends/sync.py
new file mode
100644 index 00000000..4018a09c --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_backends/sync.py @@ -0,0 +1,241 @@ +from __future__ import annotations + +import functools +import socket +import ssl +import sys +import typing + +from .._exceptions import ( + ConnectError, + ConnectTimeout, + ExceptionMapping, + ReadError, + ReadTimeout, + WriteError, + WriteTimeout, + map_exceptions, +) +from .._utils import is_socket_readable +from .base import SOCKET_OPTION, NetworkBackend, NetworkStream + + +class TLSinTLSStream(NetworkStream): # pragma: no cover + """ + Because the standard `SSLContext.wrap_socket` method does + not work for `SSLSocket` objects, we need this class + to implement TLS stream using an underlying `SSLObject` + instance in order to support TLS on top of TLS. + """ + + # Defined in RFC 8449 + TLS_RECORD_SIZE = 16384 + + def __init__( + self, + sock: socket.socket, + ssl_context: ssl.SSLContext, + server_hostname: str | None = None, + timeout: float | None = None, + ): + self._sock = sock + self._incoming = ssl.MemoryBIO() + self._outgoing = ssl.MemoryBIO() + + self.ssl_obj = ssl_context.wrap_bio( + incoming=self._incoming, + outgoing=self._outgoing, + server_hostname=server_hostname, + ) + + self._sock.settimeout(timeout) + self._perform_io(self.ssl_obj.do_handshake) + + def _perform_io( + self, + func: typing.Callable[..., typing.Any], + ) -> typing.Any: + ret = None + + while True: + errno = None + try: + ret = func() + except (ssl.SSLWantReadError, ssl.SSLWantWriteError) as e: + errno = e.errno + + self._sock.sendall(self._outgoing.read()) + + if errno == ssl.SSL_ERROR_WANT_READ: + buf = self._sock.recv(self.TLS_RECORD_SIZE) + + if buf: + self._incoming.write(buf) + else: + self._incoming.write_eof() + if errno is None: + return ret + + def read(self, max_bytes: int, timeout: float | None = None) -> bytes: + exc_map: ExceptionMapping = {socket.timeout: ReadTimeout, OSError: ReadError} + with map_exceptions(exc_map): + self._sock.settimeout(timeout) + return typing.cast( + bytes, self._perform_io(functools.partial(self.ssl_obj.read, max_bytes)) + ) + + def write(self, buffer: bytes, timeout: float | None = None) -> None: + exc_map: ExceptionMapping = {socket.timeout: WriteTimeout, OSError: WriteError} + with map_exceptions(exc_map): + self._sock.settimeout(timeout) + while buffer: + nsent = self._perform_io(functools.partial(self.ssl_obj.write, buffer)) + buffer = buffer[nsent:] + + def close(self) -> None: + self._sock.close() + + def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: str | None = None, + timeout: float | None = None, + ) -> NetworkStream: + raise NotImplementedError() + + def get_extra_info(self, info: str) -> typing.Any: + if info == "ssl_object": + return self.ssl_obj + if info == "client_addr": + return self._sock.getsockname() + if info == "server_addr": + return self._sock.getpeername() + if info == "socket": + return self._sock + if info == "is_readable": + return is_socket_readable(self._sock) + return None + + +class SyncStream(NetworkStream): + def __init__(self, sock: socket.socket) -> None: + self._sock = sock + + def read(self, max_bytes: int, timeout: float | None = None) -> bytes: + exc_map: ExceptionMapping = {socket.timeout: ReadTimeout, OSError: ReadError} + with map_exceptions(exc_map): + self._sock.settimeout(timeout) + return self._sock.recv(max_bytes) + + def write(self, buffer: bytes, timeout: float | None = None) -> None: + if not buffer: + return + + exc_map: ExceptionMapping = {socket.timeout: WriteTimeout, 
OSError: WriteError} + with map_exceptions(exc_map): + while buffer: + self._sock.settimeout(timeout) + n = self._sock.send(buffer) + buffer = buffer[n:] + + def close(self) -> None: + self._sock.close() + + def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: str | None = None, + timeout: float | None = None, + ) -> NetworkStream: + exc_map: ExceptionMapping = { + socket.timeout: ConnectTimeout, + OSError: ConnectError, + } + with map_exceptions(exc_map): + try: + if isinstance(self._sock, ssl.SSLSocket): # pragma: no cover + # If the underlying socket has already been upgraded + # to the TLS layer (i.e. is an instance of SSLSocket), + # we need some additional smarts to support TLS-in-TLS. + return TLSinTLSStream( + self._sock, ssl_context, server_hostname, timeout + ) + else: + self._sock.settimeout(timeout) + sock = ssl_context.wrap_socket( + self._sock, server_hostname=server_hostname + ) + except Exception as exc: # pragma: nocover + self.close() + raise exc + return SyncStream(sock) + + def get_extra_info(self, info: str) -> typing.Any: + if info == "ssl_object" and isinstance(self._sock, ssl.SSLSocket): + return self._sock._sslobj # type: ignore + if info == "client_addr": + return self._sock.getsockname() + if info == "server_addr": + return self._sock.getpeername() + if info == "socket": + return self._sock + if info == "is_readable": + return is_socket_readable(self._sock) + return None + + +class SyncBackend(NetworkBackend): + def connect_tcp( + self, + host: str, + port: int, + timeout: float | None = None, + local_address: str | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> NetworkStream: + # Note that we automatically include `TCP_NODELAY` + # in addition to any other custom socket options. + if socket_options is None: + socket_options = [] # pragma: no cover + address = (host, port) + source_address = None if local_address is None else (local_address, 0) + exc_map: ExceptionMapping = { + socket.timeout: ConnectTimeout, + OSError: ConnectError, + } + + with map_exceptions(exc_map): + sock = socket.create_connection( + address, + timeout, + source_address=source_address, + ) + for option in socket_options: + sock.setsockopt(*option) # pragma: no cover + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + return SyncStream(sock) + + def connect_unix_socket( + self, + path: str, + timeout: float | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> NetworkStream: # pragma: nocover + if sys.platform == "win32": + raise RuntimeError( + "Attempted to connect to a UNIX socket on a Windows system." 
+ ) + if socket_options is None: + socket_options = [] + + exc_map: ExceptionMapping = { + socket.timeout: ConnectTimeout, + OSError: ConnectError, + } + with map_exceptions(exc_map): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + for option in socket_options: + sock.setsockopt(*option) + sock.settimeout(timeout) + sock.connect(path) + return SyncStream(sock) diff --git a/venv/Lib/site-packages/httpcore/_backends/trio.py b/venv/Lib/site-packages/httpcore/_backends/trio.py new file mode 100644 index 00000000..6f53f5f2 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_backends/trio.py @@ -0,0 +1,159 @@ +from __future__ import annotations + +import ssl +import typing + +import trio + +from .._exceptions import ( + ConnectError, + ConnectTimeout, + ExceptionMapping, + ReadError, + ReadTimeout, + WriteError, + WriteTimeout, + map_exceptions, +) +from .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream + + +class TrioStream(AsyncNetworkStream): + def __init__(self, stream: trio.abc.Stream) -> None: + self._stream = stream + + async def read(self, max_bytes: int, timeout: float | None = None) -> bytes: + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ReadTimeout, + trio.BrokenResourceError: ReadError, + trio.ClosedResourceError: ReadError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + data: bytes = await self._stream.receive_some(max_bytes=max_bytes) + return data + + async def write(self, buffer: bytes, timeout: float | None = None) -> None: + if not buffer: + return + + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: WriteTimeout, + trio.BrokenResourceError: WriteError, + trio.ClosedResourceError: WriteError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + await self._stream.send_all(data=buffer) + + async def aclose(self) -> None: + await self._stream.aclose() + + async def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: str | None = None, + timeout: float | None = None, + ) -> AsyncNetworkStream: + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + } + ssl_stream = trio.SSLStream( + self._stream, + ssl_context=ssl_context, + server_hostname=server_hostname, + https_compatible=True, + server_side=False, + ) + with map_exceptions(exc_map): + try: + with trio.fail_after(timeout_or_inf): + await ssl_stream.do_handshake() + except Exception as exc: # pragma: nocover + await self.aclose() + raise exc + return TrioStream(ssl_stream) + + def get_extra_info(self, info: str) -> typing.Any: + if info == "ssl_object" and isinstance(self._stream, trio.SSLStream): + # Type checkers cannot see `_ssl_object` attribute because trio._ssl.SSLStream uses __getattr__/__setattr__. 
+ # Tracked at https://github.com/python-trio/trio/issues/542 + return self._stream._ssl_object # type: ignore[attr-defined] + if info == "client_addr": + return self._get_socket_stream().socket.getsockname() + if info == "server_addr": + return self._get_socket_stream().socket.getpeername() + if info == "socket": + stream = self._stream + while isinstance(stream, trio.SSLStream): + stream = stream.transport_stream + assert isinstance(stream, trio.SocketStream) + return stream.socket + if info == "is_readable": + socket = self.get_extra_info("socket") + return socket.is_readable() + return None + + def _get_socket_stream(self) -> trio.SocketStream: + stream = self._stream + while isinstance(stream, trio.SSLStream): + stream = stream.transport_stream + assert isinstance(stream, trio.SocketStream) + return stream + + +class TrioBackend(AsyncNetworkBackend): + async def connect_tcp( + self, + host: str, + port: int, + timeout: float | None = None, + local_address: str | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> AsyncNetworkStream: + # By default for TCP sockets, trio enables TCP_NODELAY. + # https://trio.readthedocs.io/en/stable/reference-io.html#trio.SocketStream + if socket_options is None: + socket_options = [] # pragma: no cover + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + OSError: ConnectError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + stream: trio.abc.Stream = await trio.open_tcp_stream( + host=host, port=port, local_address=local_address + ) + for option in socket_options: + stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover + return TrioStream(stream) + + async def connect_unix_socket( + self, + path: str, + timeout: float | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> AsyncNetworkStream: # pragma: nocover + if socket_options is None: + socket_options = [] + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + OSError: ConnectError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + stream: trio.abc.Stream = await trio.open_unix_socket(path) + for option in socket_options: + stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover + return TrioStream(stream) + + async def sleep(self, seconds: float) -> None: + await trio.sleep(seconds) # pragma: nocover diff --git a/venv/Lib/site-packages/httpcore/_exceptions.py b/venv/Lib/site-packages/httpcore/_exceptions.py new file mode 100644 index 00000000..bc28d44f --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_exceptions.py @@ -0,0 +1,81 @@ +import contextlib +import typing + +ExceptionMapping = typing.Mapping[typing.Type[Exception], typing.Type[Exception]] + + +@contextlib.contextmanager +def map_exceptions(map: ExceptionMapping) -> typing.Iterator[None]: + try: + yield + except Exception as exc: # noqa: PIE786 + for from_exc, to_exc in map.items(): + if isinstance(exc, from_exc): + raise to_exc(exc) from exc + raise # pragma: nocover + + +class ConnectionNotAvailable(Exception): + pass + + +class ProxyError(Exception): + pass + + +class UnsupportedProtocol(Exception): + pass + + +class ProtocolError(Exception): + pass + + +class RemoteProtocolError(ProtocolError): + pass + + +class 
LocalProtocolError(ProtocolError):
+    pass
+
+
+# Timeout errors
+
+
+class TimeoutException(Exception):
+    pass
+
+
+class PoolTimeout(TimeoutException):
+    pass
+
+
+class ConnectTimeout(TimeoutException):
+    pass
+
+
+class ReadTimeout(TimeoutException):
+    pass
+
+
+class WriteTimeout(TimeoutException):
+    pass
+
+
+# Network errors
+
+
+class NetworkError(Exception):
+    pass
+
+
+class ConnectError(NetworkError):
+    pass
+
+
+class ReadError(NetworkError):
+    pass
+
+
+class WriteError(NetworkError):
+    pass
diff --git a/venv/Lib/site-packages/httpcore/_models.py b/venv/Lib/site-packages/httpcore/_models.py
new file mode 100644
index 00000000..8a65f133
--- /dev/null
+++ b/venv/Lib/site-packages/httpcore/_models.py
@@ -0,0 +1,516 @@
+from __future__ import annotations
+
+import base64
+import ssl
+import typing
+import urllib.parse
+
+# Functions for typechecking...
+
+
+ByteOrStr = typing.Union[bytes, str]
+HeadersAsSequence = typing.Sequence[typing.Tuple[ByteOrStr, ByteOrStr]]
+HeadersAsMapping = typing.Mapping[ByteOrStr, ByteOrStr]
+HeaderTypes = typing.Union[HeadersAsSequence, HeadersAsMapping, None]
+
+Extensions = typing.MutableMapping[str, typing.Any]
+
+
+def enforce_bytes(value: bytes | str, *, name: str) -> bytes:
+    """
+    Any arguments that are ultimately represented as bytes can be specified
+    either as bytes or as strings.
+
+    However, we enforce that any string arguments must only contain characters
+    in the plain ASCII range: chr(0)...chr(127). If you need to use characters
+    outside that range then be precise, and use a byte-wise argument.
+    """
+    if isinstance(value, str):
+        try:
+            return value.encode("ascii")
+        except UnicodeEncodeError:
+            raise TypeError(f"{name} strings may not include unicode characters.")
+    elif isinstance(value, bytes):
+        return value
+
+    seen_type = type(value).__name__
+    raise TypeError(f"{name} must be bytes or str, but got {seen_type}.")
+
+
+def enforce_url(value: URL | bytes | str, *, name: str) -> URL:
+    """
+    Type check for URL parameters.
+    """
+    if isinstance(value, (bytes, str)):
+        return URL(value)
+    elif isinstance(value, URL):
+        return value
+
+    seen_type = type(value).__name__
+    raise TypeError(f"{name} must be a URL, bytes, or str, but got {seen_type}.")
+
+
+def enforce_headers(
+    value: HeadersAsMapping | HeadersAsSequence | None = None, *, name: str
+) -> list[tuple[bytes, bytes]]:
+    """
+    Convenience function that ensures all items in request or response headers
+    are either bytes or strings in the plain ASCII range.
+    """
+    if value is None:
+        return []
+    elif isinstance(value, typing.Mapping):
+        return [
+            (
+                enforce_bytes(k, name="header name"),
+                enforce_bytes(v, name="header value"),
+            )
+            for k, v in value.items()
+        ]
+    elif isinstance(value, typing.Sequence):
+        return [
+            (
+                enforce_bytes(k, name="header name"),
+                enforce_bytes(v, name="header value"),
+            )
+            for k, v in value
+        ]
+
+    seen_type = type(value).__name__
+    raise TypeError(
+        f"{name} must be a mapping or sequence of two-tuples, but got {seen_type}."
+    )
+
+
+def enforce_stream(
+    value: bytes | typing.Iterable[bytes] | typing.AsyncIterable[bytes] | None,
+    *,
+    name: str,
+) -> typing.Iterable[bytes] | typing.AsyncIterable[bytes]:
+    if value is None:
+        return ByteStream(b"")
+    elif isinstance(value, bytes):
+        return ByteStream(value)
+    return value
+
+
+# * https://tools.ietf.org/html/rfc3986#section-3.2.3
+# * https://url.spec.whatwg.org/#url-miscellaneous
+# * https://url.spec.whatwg.org/#scheme-state
+DEFAULT_PORTS = {
+    b"ftp": 21,
+    b"http": 80,
+    b"https": 443,
+    b"ws": 80,
+    b"wss": 443,
+}
+
+
+def include_request_headers(
+    headers: list[tuple[bytes, bytes]],
+    *,
+    url: "URL",
+    content: None | bytes | typing.Iterable[bytes] | typing.AsyncIterable[bytes],
+) -> list[tuple[bytes, bytes]]:
+    headers_set = set(k.lower() for k, v in headers)
+
+    if b"host" not in headers_set:
+        default_port = DEFAULT_PORTS.get(url.scheme)
+        if url.port is None or url.port == default_port:
+            header_value = url.host
+        else:
+            header_value = b"%b:%d" % (url.host, url.port)
+        headers = [(b"Host", header_value)] + headers
+
+    if (
+        content is not None
+        and b"content-length" not in headers_set
+        and b"transfer-encoding" not in headers_set
+    ):
+        if isinstance(content, bytes):
+            content_length = str(len(content)).encode("ascii")
+            headers += [(b"Content-Length", content_length)]
+        else:
+            headers += [(b"Transfer-Encoding", b"chunked")]  # pragma: nocover
+
+    return headers
+
+
+# Interfaces for byte streams...
+
+
+class ByteStream:
+    """
+    A container for non-streaming content that supports both sync and async
+    stream iteration.
+    """
+
+    def __init__(self, content: bytes) -> None:
+        self._content = content
+
+    def __iter__(self) -> typing.Iterator[bytes]:
+        yield self._content
+
+    async def __aiter__(self) -> typing.AsyncIterator[bytes]:
+        yield self._content
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__} [{len(self._content)} bytes]>"
+
+
+class Origin:
+    def __init__(self, scheme: bytes, host: bytes, port: int) -> None:
+        self.scheme = scheme
+        self.host = host
+        self.port = port
+
+    def __eq__(self, other: typing.Any) -> bool:
+        return (
+            isinstance(other, Origin)
+            and self.scheme == other.scheme
+            and self.host == other.host
+            and self.port == other.port
+        )
+
+    def __str__(self) -> str:
+        scheme = self.scheme.decode("ascii")
+        host = self.host.decode("ascii")
+        port = str(self.port)
+        return f"{scheme}://{host}:{port}"
+
+
+class URL:
+    """
+    Represents the URL against which an HTTP request may be made.
+
+    The URL may either be specified as a plain string, for convenience:
+
+    ```python
+    url = httpcore.URL("https://www.example.com/")
+    ```
+
+    Or be constructed with explicitly pre-parsed components:
+
+    ```python
+    url = httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/')
+    ```
+
+    Using this second more explicit style allows integrations that are using
+    `httpcore` to pass through URLs that have already been parsed in order to use
+    libraries such as `rfc-3986` rather than relying on the stdlib. It also ensures
+    that URL parsing is treated identically at both the networking level and at any
+    higher layers of abstraction.
+
+    The four components are important here, as they allow the URL to be precisely
+    specified in a pre-parsed format. They also allow certain types of request to
+    be created that could not otherwise be expressed.
+
+    For example, an HTTP request to `https://www.example.com/` forwarded via a proxy
+    at `http://localhost:8080`...
+
+    ```python
+    # Constructs an HTTP request with a complete URL as the target:
+    # GET https://www.example.com/ HTTP/1.1
+    url = httpcore.URL(
+        scheme=b'http',
+        host=b'localhost',
+        port=8080,
+        target=b'https://www.example.com/'
+    )
+    request = httpcore.Request(
+        method="GET",
+        url=url
+    )
+    ```
+
+    Another example is constructing an `OPTIONS *` request...
+
+    ```python
+    # Constructs an 'OPTIONS *' HTTP request:
+    # OPTIONS * HTTP/1.1
+    url = httpcore.URL(scheme=b'https', host=b'www.example.com', target=b'*')
+    request = httpcore.Request(method="OPTIONS", url=url)
+    ```
+
+    This kind of request is not possible to formulate with a URL string,
+    because the `/` delimiter is always used to demarcate the target from the
+    host/port portion of the URL.
+
+    For convenience, string-like arguments may be specified either as strings or
+    as bytes. However, once a request is being issued over the wire, the URL
+    components are always ultimately required to be a bytewise representation.
+
+    In order to avoid any ambiguity over character encodings, when strings are used
+    as arguments, they must be strictly limited to the ASCII range `chr(0)`-`chr(127)`.
+    If you require a bytewise representation that is outside this range you must
+    handle the character encoding directly, and pass a bytes instance.
+    """
+
+    def __init__(
+        self,
+        url: bytes | str = "",
+        *,
+        scheme: bytes | str = b"",
+        host: bytes | str = b"",
+        port: int | None = None,
+        target: bytes | str = b"",
+    ) -> None:
+        """
+        Parameters:
+            url: The complete URL as a string or bytes.
+            scheme: The URL scheme as a string or bytes.
+                Typically either `"http"` or `"https"`.
+            host: The URL host as a string or bytes. Such as `"www.example.com"`.
+            port: The port to connect to. Either an integer or `None`.
+            target: The target of the HTTP request. Such as `"/items?search=red"`.
+        """
+        if url:
+            parsed = urllib.parse.urlparse(enforce_bytes(url, name="url"))
+            self.scheme = parsed.scheme
+            self.host = parsed.hostname or b""
+            self.port = parsed.port
+            self.target = (parsed.path or b"/") + (
+                b"?" + parsed.query if parsed.query else b""
+            )
+        else:
+            self.scheme = enforce_bytes(scheme, name="scheme")
+            self.host = enforce_bytes(host, name="host")
+            self.port = port
+            self.target = enforce_bytes(target, name="target")
+
+    @property
+    def origin(self) -> Origin:
+        default_port = {
+            b"http": 80,
+            b"https": 443,
+            b"ws": 80,
+            b"wss": 443,
+            b"socks5": 1080,
+            b"socks5h": 1080,
+        }[self.scheme]
+        return Origin(
+            scheme=self.scheme, host=self.host, port=self.port or default_port
+        )
+
+    def __eq__(self, other: typing.Any) -> bool:
+        return (
+            isinstance(other, URL)
+            and other.scheme == self.scheme
+            and other.host == self.host
+            and other.port == self.port
+            and other.target == self.target
+        )
+
+    def __bytes__(self) -> bytes:
+        if self.port is None:
+            return b"%b://%b%b" % (self.scheme, self.host, self.target)
+        return b"%b://%b:%d%b" % (self.scheme, self.host, self.port, self.target)
+
+    def __repr__(self) -> str:
+        return (
+            f"{self.__class__.__name__}(scheme={self.scheme!r}, "
+            f"host={self.host!r}, port={self.port!r}, target={self.target!r})"
+        )
+
+
+class Request:
+    """
+    An HTTP request.
+    """
+
+    def __init__(
+        self,
+        method: bytes | str,
+        url: URL | bytes | str,
+        *,
+        headers: HeaderTypes = None,
+        content: bytes
+        | typing.Iterable[bytes]
+        | typing.AsyncIterable[bytes]
+        | None = None,
+        extensions: Extensions | None = None,
+    ) -> None:
+        """
+        Parameters:
+            method: The HTTP request method, either as a string or bytes.
For example: `GET`.
+            url: The request URL, either as a `URL` instance, or as a string or bytes.
+                For example: `"https://www.example.com"`.
+            headers: The HTTP request headers.
+            content: The content of the request body.
+            extensions: A dictionary of optional extra information included on
+                the request. Possible keys include `"timeout"` and `"trace"`.
+        """
+        self.method: bytes = enforce_bytes(method, name="method")
+        self.url: URL = enforce_url(url, name="url")
+        self.headers: list[tuple[bytes, bytes]] = enforce_headers(
+            headers, name="headers"
+        )
+        self.stream: typing.Iterable[bytes] | typing.AsyncIterable[bytes] = (
+            enforce_stream(content, name="content")
+        )
+        self.extensions = {} if extensions is None else extensions
+
+        if "target" in self.extensions:
+            self.url = URL(
+                scheme=self.url.scheme,
+                host=self.url.host,
+                port=self.url.port,
+                target=self.extensions["target"],
+            )
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__} [{self.method!r}]>"
+
+
+class Response:
+    """
+    An HTTP response.
+    """
+
+    def __init__(
+        self,
+        status: int,
+        *,
+        headers: HeaderTypes = None,
+        content: bytes
+        | typing.Iterable[bytes]
+        | typing.AsyncIterable[bytes]
+        | None = None,
+        extensions: Extensions | None = None,
+    ) -> None:
+        """
+        Parameters:
+            status: The HTTP status code of the response. For example `200`.
+            headers: The HTTP response headers.
+            content: The content of the response body.
+            extensions: A dictionary of optional extra information included on
+                the response. Possible keys include `"http_version"`,
+                `"reason_phrase"`, and `"network_stream"`.
+        """
+        self.status: int = status
+        self.headers: list[tuple[bytes, bytes]] = enforce_headers(
+            headers, name="headers"
+        )
+        self.stream: typing.Iterable[bytes] | typing.AsyncIterable[bytes] = (
+            enforce_stream(content, name="content")
+        )
+        self.extensions = {} if extensions is None else extensions
+
+        self._stream_consumed = False
+
+    @property
+    def content(self) -> bytes:
+        if not hasattr(self, "_content"):
+            if isinstance(self.stream, typing.Iterable):
+                raise RuntimeError(
+                    "Attempted to access 'response.content' on a streaming response. "
+                    "Call 'response.read()' first."
+                )
+            else:
+                raise RuntimeError(
+                    "Attempted to access 'response.content' on a streaming response. "
+                    "Call 'await response.aread()' first."
+                )
+        return self._content
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__} [{self.status}]>"
+
+    # Sync interface...
+
+    def read(self) -> bytes:
+        if not isinstance(self.stream, typing.Iterable):  # pragma: nocover
+            raise RuntimeError(
+                "Attempted to read an asynchronous response using 'response.read()'. "
+                "You should use 'await response.aread()' instead."
+            )
+        if not hasattr(self, "_content"):
+            self._content = b"".join([part for part in self.iter_stream()])
+        return self._content
+
+    def iter_stream(self) -> typing.Iterator[bytes]:
+        if not isinstance(self.stream, typing.Iterable):  # pragma: nocover
+            raise RuntimeError(
+                "Attempted to stream an asynchronous response using 'for ... in "
+                "response.iter_stream()'. "
+                "You should use 'async for ... in response.aiter_stream()' instead."
+            )
+        if self._stream_consumed:
+            raise RuntimeError(
+                "Attempted to call 'for ... in response.iter_stream()' more than once."
+            )
+        self._stream_consumed = True
+        for chunk in self.stream:
+            yield chunk
+
+    def close(self) -> None:
+        if not isinstance(self.stream, typing.Iterable):  # pragma: nocover
+            raise RuntimeError(
+                "Attempted to close an asynchronous response using 'response.close()'. "
+                "You should use 'await response.aclose()' instead."
+            )
+        if hasattr(self.stream, "close"):
+            self.stream.close()
+
+    # Async interface...
+
+    async def aread(self) -> bytes:
+        if not isinstance(self.stream, typing.AsyncIterable):  # pragma: nocover
+            raise RuntimeError(
+                "Attempted to read a synchronous response using "
+                "'await response.aread()'. "
+                "You should use 'response.read()' instead."
+            )
+        if not hasattr(self, "_content"):
+            self._content = b"".join([part async for part in self.aiter_stream()])
+        return self._content
+
+    async def aiter_stream(self) -> typing.AsyncIterator[bytes]:
+        if not isinstance(self.stream, typing.AsyncIterable):  # pragma: nocover
+            raise RuntimeError(
+                "Attempted to stream a synchronous response using 'async for ... in "
+                "response.aiter_stream()'. "
+                "You should use 'for ... in response.iter_stream()' instead."
+            )
+        if self._stream_consumed:
+            raise RuntimeError(
+                "Attempted to call 'async for ... in response.aiter_stream()' "
+                "more than once."
+            )
+        self._stream_consumed = True
+        async for chunk in self.stream:
+            yield chunk
+
+    async def aclose(self) -> None:
+        if not isinstance(self.stream, typing.AsyncIterable):  # pragma: nocover
+            raise RuntimeError(
+                "Attempted to close a synchronous response using "
+                "'await response.aclose()'. "
+                "You should use 'response.close()' instead."
+            )
+        if hasattr(self.stream, "aclose"):
+            await self.stream.aclose()
+
+
+class Proxy:
+    def __init__(
+        self,
+        url: URL | bytes | str,
+        auth: tuple[bytes | str, bytes | str] | None = None,
+        headers: HeadersAsMapping | HeadersAsSequence | None = None,
+        ssl_context: ssl.SSLContext | None = None,
+    ):
+        self.url = enforce_url(url, name="url")
+        self.headers = enforce_headers(headers, name="headers")
+        self.ssl_context = ssl_context
+
+        if auth is not None:
+            username = enforce_bytes(auth[0], name="auth")
+            password = enforce_bytes(auth[1], name="auth")
+            userpass = username + b":" + password
+            authorization = b"Basic " + base64.b64encode(userpass)
+            self.auth: tuple[bytes, bytes] | None = (username, password)
+            self.headers = [(b"Proxy-Authorization", authorization)] + self.headers
+        else:
+            self.auth = None
diff --git a/venv/Lib/site-packages/httpcore/_ssl.py b/venv/Lib/site-packages/httpcore/_ssl.py
new file mode 100644
index 00000000..c99c5a67
--- /dev/null
+++ b/venv/Lib/site-packages/httpcore/_ssl.py
@@ -0,0 +1,9 @@
+import ssl
+
+import certifi
+
+
+def default_ssl_context() -> ssl.SSLContext:
+    context = ssl.create_default_context()
+    context.load_verify_locations(certifi.where())
+    return context
diff --git a/venv/Lib/site-packages/httpcore/_sync/__init__.py b/venv/Lib/site-packages/httpcore/_sync/__init__.py
new file mode 100644
index 00000000..b476d76d
--- /dev/null
+++ b/venv/Lib/site-packages/httpcore/_sync/__init__.py
@@ -0,0 +1,39 @@
+from .connection import HTTPConnection
+from .connection_pool import ConnectionPool
+from .http11 import HTTP11Connection
+from .http_proxy import HTTPProxy
+from .interfaces import ConnectionInterface
+
+try:
+    from .http2 import HTTP2Connection
+except ImportError:  # pragma: nocover
+
+    class HTTP2Connection:  # type: ignore
+        def __init__(self, *args, **kwargs) -> None:  # type: ignore
+            raise RuntimeError(
+                "Attempted to use http2 support, but 
the `h2` package is not " + "installed. Use 'pip install httpcore[http2]'." + ) + + +try: + from .socks_proxy import SOCKSProxy +except ImportError: # pragma: nocover + + class SOCKSProxy: # type: ignore + def __init__(self, *args, **kwargs) -> None: # type: ignore + raise RuntimeError( + "Attempted to use SOCKS support, but the `socksio` package is not " + "installed. Use 'pip install httpcore[socks]'." + ) + + +__all__ = [ + "HTTPConnection", + "ConnectionPool", + "HTTPProxy", + "HTTP11Connection", + "HTTP2Connection", + "ConnectionInterface", + "SOCKSProxy", +] diff --git a/venv/Lib/site-packages/httpcore/_sync/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_sync/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..623033fb Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_sync/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_sync/__pycache__/connection.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_sync/__pycache__/connection.cpython-312.pyc new file mode 100644 index 00000000..9bc7a437 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_sync/__pycache__/connection.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_sync/__pycache__/connection_pool.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_sync/__pycache__/connection_pool.cpython-312.pyc new file mode 100644 index 00000000..c67d5f0c Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_sync/__pycache__/connection_pool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_sync/__pycache__/http11.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_sync/__pycache__/http11.cpython-312.pyc new file mode 100644 index 00000000..4d4a9be5 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_sync/__pycache__/http11.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_sync/__pycache__/http2.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_sync/__pycache__/http2.cpython-312.pyc new file mode 100644 index 00000000..95d23828 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_sync/__pycache__/http2.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_sync/__pycache__/http_proxy.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_sync/__pycache__/http_proxy.cpython-312.pyc new file mode 100644 index 00000000..d5eaa7c7 Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_sync/__pycache__/http_proxy.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_sync/__pycache__/interfaces.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_sync/__pycache__/interfaces.cpython-312.pyc new file mode 100644 index 00000000..befd251c Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_sync/__pycache__/interfaces.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-312.pyc b/venv/Lib/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-312.pyc new file mode 100644 index 00000000..fb2fa51a Binary files /dev/null and b/venv/Lib/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpcore/_sync/connection.py b/venv/Lib/site-packages/httpcore/_sync/connection.py new file mode 100644 index 00000000..363f8be8 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_sync/connection.py @@ -0,0 +1,222 @@ +from __future__ import annotations + +import itertools +import logging +import ssl +import types +import typing + 
+from .._backends.sync import SyncBackend +from .._backends.base import SOCKET_OPTION, NetworkBackend, NetworkStream +from .._exceptions import ConnectError, ConnectTimeout +from .._models import Origin, Request, Response +from .._ssl import default_ssl_context +from .._synchronization import Lock +from .._trace import Trace +from .http11 import HTTP11Connection +from .interfaces import ConnectionInterface + +RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc. + + +logger = logging.getLogger("httpcore.connection") + + +def exponential_backoff(factor: float) -> typing.Iterator[float]: + """ + Generate a geometric sequence that has a ratio of 2 and starts with 0. + + For example: + - `factor = 2`: `0, 2, 4, 8, 16, 32, 64, ...` + - `factor = 3`: `0, 3, 6, 12, 24, 48, 96, ...` + """ + yield 0 + for n in itertools.count(): + yield factor * 2**n + + +class HTTPConnection(ConnectionInterface): + def __init__( + self, + origin: Origin, + ssl_context: ssl.SSLContext | None = None, + keepalive_expiry: float | None = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: str | None = None, + uds: str | None = None, + network_backend: NetworkBackend | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> None: + self._origin = origin + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._retries = retries + self._local_address = local_address + self._uds = uds + + self._network_backend: NetworkBackend = ( + SyncBackend() if network_backend is None else network_backend + ) + self._connection: ConnectionInterface | None = None + self._connect_failed: bool = False + self._request_lock = Lock() + self._socket_options = socket_options + + def handle_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection to {self._origin}" + ) + + try: + with self._request_lock: + if self._connection is None: + stream = self._connect(request) + + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import HTTP2Connection + + self._connection = HTTP2Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = HTTP11Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except BaseException as exc: + self._connect_failed = True + raise exc + + return self._connection.handle_request(request) + + def _connect(self, request: Request) -> NetworkStream: + timeouts = request.extensions.get("timeout", {}) + sni_hostname = request.extensions.get("sni_hostname", None) + timeout = timeouts.get("connect", None) + + retries_left = self._retries + delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) + + while True: + try: + if self._uds is None: + kwargs = { + "host": self._origin.host.decode("ascii"), + "port": self._origin.port, + "local_address": self._local_address, + "timeout": timeout, + "socket_options": self._socket_options, + } + with Trace("connect_tcp", logger, request, kwargs) as trace: + stream = self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + else: + kwargs = { + "path": self._uds, + "timeout": timeout, + "socket_options": 
self._socket_options, + } + with Trace( + "connect_unix_socket", logger, request, kwargs + ) as trace: + stream = self._network_backend.connect_unix_socket( + **kwargs + ) + trace.return_value = stream + + if self._origin.scheme in (b"https", b"wss"): + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": sni_hostname + or self._origin.host.decode("ascii"), + "timeout": timeout, + } + with Trace("start_tls", logger, request, kwargs) as trace: + stream = stream.start_tls(**kwargs) + trace.return_value = stream + return stream + except (ConnectError, ConnectTimeout): + if retries_left <= 0: + raise + retries_left -= 1 + delay = next(delays) + with Trace("retry", logger, request, kwargs) as trace: + self._network_backend.sleep(delay) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def close(self) -> None: + if self._connection is not None: + with Trace("close", logger, None, {}): + self._connection.close() + + def is_available(self) -> bool: + if self._connection is None: + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. + return ( + self._http2 + and (self._origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. 
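+    # An illustrative usage sketch (not part of httpcore itself): connections
+    # are normally created and owned by `ConnectionPool`, but a single
+    # connection can also be driven directly. The origin values here are
+    # assumptions chosen only for the example.
+    #
+    #     origin = Origin(scheme=b"https", host=b"example.com", port=443)
+    #     with HTTPConnection(origin=origin) as connection:
+    #         response = connection.request("GET", "https://example.com/")
+    #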
+ + def __enter__(self) -> HTTPConnection: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + self.close() diff --git a/venv/Lib/site-packages/httpcore/_sync/connection_pool.py b/venv/Lib/site-packages/httpcore/_sync/connection_pool.py new file mode 100644 index 00000000..9ccfa53e --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_sync/connection_pool.py @@ -0,0 +1,420 @@ +from __future__ import annotations + +import ssl +import sys +import types +import typing + +from .._backends.sync import SyncBackend +from .._backends.base import SOCKET_OPTION, NetworkBackend +from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol +from .._models import Origin, Proxy, Request, Response +from .._synchronization import Event, ShieldCancellation, ThreadLock +from .connection import HTTPConnection +from .interfaces import ConnectionInterface, RequestInterface + + +class PoolRequest: + def __init__(self, request: Request) -> None: + self.request = request + self.connection: ConnectionInterface | None = None + self._connection_acquired = Event() + + def assign_to_connection(self, connection: ConnectionInterface | None) -> None: + self.connection = connection + self._connection_acquired.set() + + def clear_connection(self) -> None: + self.connection = None + self._connection_acquired = Event() + + def wait_for_connection( + self, timeout: float | None = None + ) -> ConnectionInterface: + if self.connection is None: + self._connection_acquired.wait(timeout=timeout) + assert self.connection is not None + return self.connection + + def is_queued(self) -> bool: + return self.connection is None + + +class ConnectionPool(RequestInterface): + """ + A connection pool for making HTTP requests. + """ + + def __init__( + self, + ssl_context: ssl.SSLContext | None = None, + proxy: Proxy | None = None, + max_connections: int | None = 10, + max_keepalive_connections: int | None = None, + keepalive_expiry: float | None = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: str | None = None, + uds: str | None = None, + network_backend: NetworkBackend | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish a + connection. + local_address: Local address to connect from. Can also be used to connect + using a particular address family. 
Using `local_address="0.0.0.0"`
+                will connect using an `AF_INET` address (IPv4), while using
+                `local_address="::"` will connect using an `AF_INET6` address (IPv6).
+            uds: Path to a Unix Domain Socket to use instead of TCP sockets.
+            network_backend: A backend instance to use for handling network I/O.
+            socket_options: Socket options that have to be included
+                in the TCP socket when the connection is established.
+        """
+        self._ssl_context = ssl_context
+        self._proxy = proxy
+        self._max_connections = (
+            sys.maxsize if max_connections is None else max_connections
+        )
+        self._max_keepalive_connections = (
+            sys.maxsize
+            if max_keepalive_connections is None
+            else max_keepalive_connections
+        )
+        self._max_keepalive_connections = min(
+            self._max_connections, self._max_keepalive_connections
+        )
+
+        self._keepalive_expiry = keepalive_expiry
+        self._http1 = http1
+        self._http2 = http2
+        self._retries = retries
+        self._local_address = local_address
+        self._uds = uds
+
+        self._network_backend = (
+            SyncBackend() if network_backend is None else network_backend
+        )
+        self._socket_options = socket_options
+
+        # The mutable state on a connection pool is the queue of incoming requests,
+        # and the set of connections that are servicing those requests.
+        self._connections: list[ConnectionInterface] = []
+        self._requests: list[PoolRequest] = []
+
+        # We only mutate the state of the connection pool within an 'optional_thread_lock'
+        # context. This holds a threading lock unless we're running in async mode,
+        # in which case it is a no-op.
+        self._optional_thread_lock = ThreadLock()
+
+    def create_connection(self, origin: Origin) -> ConnectionInterface:
+        if self._proxy is not None:
+            if self._proxy.url.scheme in (b"socks5", b"socks5h"):
+                from .socks_proxy import Socks5Connection
+
+                return Socks5Connection(
+                    proxy_origin=self._proxy.url.origin,
+                    proxy_auth=self._proxy.auth,
+                    remote_origin=origin,
+                    ssl_context=self._ssl_context,
+                    keepalive_expiry=self._keepalive_expiry,
+                    http1=self._http1,
+                    http2=self._http2,
+                    network_backend=self._network_backend,
+                )
+            elif origin.scheme == b"http":
+                from .http_proxy import ForwardHTTPConnection
+
+                return ForwardHTTPConnection(
+                    proxy_origin=self._proxy.url.origin,
+                    proxy_headers=self._proxy.headers,
+                    proxy_ssl_context=self._proxy.ssl_context,
+                    remote_origin=origin,
+                    keepalive_expiry=self._keepalive_expiry,
+                    network_backend=self._network_backend,
+                )
+            from .http_proxy import TunnelHTTPConnection
+
+            return TunnelHTTPConnection(
+                proxy_origin=self._proxy.url.origin,
+                proxy_headers=self._proxy.headers,
+                proxy_ssl_context=self._proxy.ssl_context,
+                remote_origin=origin,
+                ssl_context=self._ssl_context,
+                keepalive_expiry=self._keepalive_expiry,
+                http1=self._http1,
+                http2=self._http2,
+                network_backend=self._network_backend,
+            )
+
+        return HTTPConnection(
+            origin=origin,
+            ssl_context=self._ssl_context,
+            keepalive_expiry=self._keepalive_expiry,
+            http1=self._http1,
+            http2=self._http2,
+            retries=self._retries,
+            local_address=self._local_address,
+            uds=self._uds,
+            network_backend=self._network_backend,
+            socket_options=self._socket_options,
+        )
+
+    @property
+    def connections(self) -> list[ConnectionInterface]:
+        """
+        Return a list of the connections currently in the pool.
+
+        For example:
+
+        ```python
+        >>> pool.connections
+        [
+            <HTTPConnection ['https://example.com:443', HTTP/1.1, ACTIVE, Request Count: 6]>,
+            <HTTPConnection ['https://example.com:443', HTTP/1.1, IDLE, Request Count: 9]>,
+            <HTTPConnection ['http://example.com:80', HTTP/1.1, IDLE, Request Count: 1]>,
+        ]
+        ```
+        """
+        return list(self._connections)
+
+    def handle_request(self, request: Request) -> Response:
+        """
+        Send an HTTP request, and return an HTTP response.
+
+        This is the core implementation that is called into by `.request()` or `.stream()`.
+        """
+        scheme = request.url.scheme.decode()
+        if scheme == "":
+            raise UnsupportedProtocol(
+                "Request URL is missing an 'http://' or 'https://' protocol."
+            )
+        if scheme not in ("http", "https", "ws", "wss"):
+            raise UnsupportedProtocol(
+                f"Request URL has an unsupported protocol '{scheme}://'."
+            )
+
+        timeouts = request.extensions.get("timeout", {})
+        timeout = timeouts.get("pool", None)
+
+        with self._optional_thread_lock:
+            # Add the incoming request to our request queue.
+            pool_request = PoolRequest(request)
+            self._requests.append(pool_request)
+
+        try:
+            while True:
+                with self._optional_thread_lock:
+                    # Assign incoming requests to available connections,
+                    # closing or creating new connections as required.
+                    closing = self._assign_requests_to_connections()
+                self._close_connections(closing)
+
+                # Wait until this request has an assigned connection.
+                connection = pool_request.wait_for_connection(timeout=timeout)
+
+                try:
+                    # Send the request on the assigned connection.
+                    response = connection.handle_request(
+                        pool_request.request
+                    )
+                except ConnectionNotAvailable:
+                    # In some cases a connection may initially be available to
+                    # handle a request, but then become unavailable.
+                    #
+                    # In this case we clear the connection and try again.
+                    pool_request.clear_connection()
+                else:
+                    break  # pragma: nocover
+
+        except BaseException as exc:
+            with self._optional_thread_lock:
+                # For any exception or cancellation we remove the request from
+                # the queue, and then re-assign requests to connections.
+                self._requests.remove(pool_request)
+                closing = self._assign_requests_to_connections()
+
+            self._close_connections(closing)
+            raise exc from None
+
+        # Return the response. Note that in this case we still have to manage
+        # the point at which the response is closed.
+        assert isinstance(response.stream, typing.Iterable)
+        return Response(
+            status=response.status,
+            headers=response.headers,
+            content=PoolByteStream(
+                stream=response.stream, pool_request=pool_request, pool=self
+            ),
+            extensions=response.extensions,
+        )
+
+    def _assign_requests_to_connections(self) -> list[ConnectionInterface]:
+        """
+        Manage the state of the connection pool, assigning incoming
+        requests to connections as available.
+
+        Called whenever a new request is added or removed from the pool.
+
+        Any closing connections are returned, allowing the I/O for closing
+        those connections to be handled separately.
+        """
+        closing_connections = []
+
+        # First we handle cleaning up any connections that are closed,
+        # have expired their keep-alive, or are surplus idle connections.
+        for connection in list(self._connections):
+            if connection.is_closed():
+                # log: "removing closed connection"
+                self._connections.remove(connection)
+            elif connection.has_expired():
+                # log: "closing expired connection"
+                self._connections.remove(connection)
+                closing_connections.append(connection)
+            elif (
+                connection.is_idle()
+                and len([connection.is_idle() for connection in self._connections])
+                > self._max_keepalive_connections
+            ):
+                # log: "closing idle connection"
+                self._connections.remove(connection)
+                closing_connections.append(connection)
+
+        # Assign queued requests to connections.
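+        # A request may only be assigned to a connection with a matching
+        # origin (scheme, host and port); this is checked via each
+        # connection's `can_handle_request()`.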
+ queued_requests = [request for request in self._requests if request.is_queued()] + for pool_request in queued_requests: + origin = pool_request.request.url.origin + available_connections = [ + connection + for connection in self._connections + if connection.can_handle_request(origin) and connection.is_available() + ] + idle_connections = [ + connection for connection in self._connections if connection.is_idle() + ] + + # There are three cases for how we may be able to handle the request: + # + # 1. There is an existing connection that can handle the request. + # 2. We can create a new connection to handle the request. + # 3. We can close an idle connection and then create a new connection + # to handle the request. + if available_connections: + # log: "reusing existing connection" + connection = available_connections[0] + pool_request.assign_to_connection(connection) + elif len(self._connections) < self._max_connections: + # log: "creating new connection" + connection = self.create_connection(origin) + self._connections.append(connection) + pool_request.assign_to_connection(connection) + elif idle_connections: + # log: "closing idle connection" + connection = idle_connections[0] + self._connections.remove(connection) + closing_connections.append(connection) + # log: "creating new connection" + connection = self.create_connection(origin) + self._connections.append(connection) + pool_request.assign_to_connection(connection) + + return closing_connections + + def _close_connections(self, closing: list[ConnectionInterface]) -> None: + # Close connections which have been removed from the pool. + with ShieldCancellation(): + for connection in closing: + connection.close() + + def close(self) -> None: + # Explicitly close the connection pool. + # Clears all existing requests and connections. 
+ with self._optional_thread_lock: + closing_connections = list(self._connections) + self._connections = [] + self._close_connections(closing_connections) + + def __enter__(self) -> ConnectionPool: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + self.close() + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + with self._optional_thread_lock: + request_is_queued = [request.is_queued() for request in self._requests] + connection_is_idle = [ + connection.is_idle() for connection in self._connections + ] + + num_active_requests = request_is_queued.count(False) + num_queued_requests = request_is_queued.count(True) + num_active_connections = connection_is_idle.count(False) + num_idle_connections = connection_is_idle.count(True) + + requests_info = ( + f"Requests: {num_active_requests} active, {num_queued_requests} queued" + ) + connection_info = ( + f"Connections: {num_active_connections} active, {num_idle_connections} idle" + ) + + return f"<{class_name} [{requests_info} | {connection_info}]>" + + +class PoolByteStream: + def __init__( + self, + stream: typing.Iterable[bytes], + pool_request: PoolRequest, + pool: ConnectionPool, + ) -> None: + self._stream = stream + self._pool_request = pool_request + self._pool = pool + self._closed = False + + def __iter__(self) -> typing.Iterator[bytes]: + try: + for part in self._stream: + yield part + except BaseException as exc: + self.close() + raise exc from None + + def close(self) -> None: + if not self._closed: + self._closed = True + with ShieldCancellation(): + if hasattr(self._stream, "close"): + self._stream.close() + + with self._pool._optional_thread_lock: + self._pool._requests.remove(self._pool_request) + closing = self._pool._assign_requests_to_connections() + + self._pool._close_connections(closing) diff --git a/venv/Lib/site-packages/httpcore/_sync/http11.py b/venv/Lib/site-packages/httpcore/_sync/http11.py new file mode 100644 index 00000000..ebd3a974 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_sync/http11.py @@ -0,0 +1,379 @@ +from __future__ import annotations + +import enum +import logging +import ssl +import time +import types +import typing + +import h11 + +from .._backends.base import NetworkStream +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, + WriteError, + map_exceptions, +) +from .._models import Origin, Request, Response +from .._synchronization import Lock, ShieldCancellation +from .._trace import Trace +from .interfaces import ConnectionInterface + +logger = logging.getLogger("httpcore.http11") + + +# A subset of `h11.Event` types supported by `_send_event` +H11SendEvent = typing.Union[ + h11.Request, + h11.Data, + h11.EndOfMessage, +] + + +class HTTPConnectionState(enum.IntEnum): + NEW = 0 + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class HTTP11Connection(ConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024 + + def __init__( + self, + origin: Origin, + stream: NetworkStream, + keepalive_expiry: float | None = None, + ) -> None: + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: float | None = keepalive_expiry + self._expire_at: float | None = None + self._state = HTTPConnectionState.NEW + self._state_lock = Lock() + self._request_count = 0 + self._h11_state = h11.Connection( + our_role=h11.CLIENT, + 
max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE,
+        )
+
+    def handle_request(self, request: Request) -> Response:
+        if not self.can_handle_request(request.url.origin):
+            raise RuntimeError(
+                f"Attempted to send request to {request.url.origin} on connection "
+                f"to {self._origin}"
+            )
+
+        with self._state_lock:
+            if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE):
+                self._request_count += 1
+                self._state = HTTPConnectionState.ACTIVE
+                self._expire_at = None
+            else:
+                raise ConnectionNotAvailable()
+
+        try:
+            kwargs = {"request": request}
+            try:
+                with Trace(
+                    "send_request_headers", logger, request, kwargs
+                ) as trace:
+                    self._send_request_headers(**kwargs)
+                with Trace("send_request_body", logger, request, kwargs) as trace:
+                    self._send_request_body(**kwargs)
+            except WriteError:
+                # If we get a write error while we're writing the request,
+                # then we suppress this error and move on to attempting to
+                # read the response. Servers can sometimes close the request
+                # pre-emptively and then respond with a well-formed HTTP
+                # error response.
+                pass
+
+            with Trace(
+                "receive_response_headers", logger, request, kwargs
+            ) as trace:
+                (
+                    http_version,
+                    status,
+                    reason_phrase,
+                    headers,
+                    trailing_data,
+                ) = self._receive_response_headers(**kwargs)
+                trace.return_value = (
+                    http_version,
+                    status,
+                    reason_phrase,
+                    headers,
+                )
+
+            network_stream = self._network_stream
+
+            # CONNECT or Upgrade request
+            if (status == 101) or (
+                (request.method == b"CONNECT") and (200 <= status < 300)
+            ):
+                network_stream = HTTP11UpgradeStream(network_stream, trailing_data)
+
+            return Response(
+                status=status,
+                headers=headers,
+                content=HTTP11ConnectionByteStream(self, request),
+                extensions={
+                    "http_version": http_version,
+                    "reason_phrase": reason_phrase,
+                    "network_stream": network_stream,
+                },
+            )
+        except BaseException as exc:
+            with ShieldCancellation():
+                with Trace("response_closed", logger, request) as trace:
+                    self._response_closed()
+            raise exc
+
+    # Sending the request...
+
+    def _send_request_headers(self, request: Request) -> None:
+        timeouts = request.extensions.get("timeout", {})
+        timeout = timeouts.get("write", None)
+
+        with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
+            event = h11.Request(
+                method=request.method,
+                target=request.url.target,
+                headers=request.headers,
+            )
+        self._send_event(event, timeout=timeout)
+
+    def _send_request_body(self, request: Request) -> None:
+        timeouts = request.extensions.get("timeout", {})
+        timeout = timeouts.get("write", None)
+
+        assert isinstance(request.stream, typing.Iterable)
+        for chunk in request.stream:
+            event = h11.Data(data=chunk)
+            self._send_event(event, timeout=timeout)
+
+        self._send_event(h11.EndOfMessage(), timeout=timeout)
+
+    def _send_event(self, event: h11.Event, timeout: float | None = None) -> None:
+        bytes_to_send = self._h11_state.send(event)
+        if bytes_to_send is not None:
+            self._network_stream.write(bytes_to_send, timeout=timeout)
+
+    # Receiving the response...
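+    # The methods below are driven by the h11 state machine: `_receive_event`
+    # repeatedly calls `next_event()` on the h11 connection, reading more
+    # bytes from the network stream whenever h11 reports NEED_DATA.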
+
+    def _receive_response_headers(
+        self, request: Request
+    ) -> tuple[bytes, int, bytes, list[tuple[bytes, bytes]], bytes]:
+        timeouts = request.extensions.get("timeout", {})
+        timeout = timeouts.get("read", None)
+
+        while True:
+            event = self._receive_event(timeout=timeout)
+            if isinstance(event, h11.Response):
+                break
+            if (
+                isinstance(event, h11.InformationalResponse)
+                and event.status_code == 101
+            ):
+                break
+
+        http_version = b"HTTP/" + event.http_version
+
+        # h11 version 0.11+ supports a `raw_items` interface to get the
+        # raw header casing, rather than the enforced lowercase headers.
+        headers = event.headers.raw_items()
+
+        trailing_data, _ = self._h11_state.trailing_data
+
+        return http_version, event.status_code, event.reason, headers, trailing_data
+
+    def _receive_response_body(
+        self, request: Request
+    ) -> typing.Iterator[bytes]:
+        timeouts = request.extensions.get("timeout", {})
+        timeout = timeouts.get("read", None)
+
+        while True:
+            event = self._receive_event(timeout=timeout)
+            if isinstance(event, h11.Data):
+                yield bytes(event.data)
+            elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):
+                break
+
+    def _receive_event(
+        self, timeout: float | None = None
+    ) -> h11.Event | type[h11.PAUSED]:
+        while True:
+            with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):
+                event = self._h11_state.next_event()
+
+            if event is h11.NEED_DATA:
+                data = self._network_stream.read(
+                    self.READ_NUM_BYTES, timeout=timeout
+                )
+
+                # If we feed this case through h11 we'll raise an exception like:
+                #
+                #     httpcore.RemoteProtocolError: can't handle event type
+                #     ConnectionClosed when role=SERVER and state=SEND_RESPONSE
+                #
+                # Which is accurate, but not very informative from an end-user
+                # perspective. Instead we handle this case distinctly and treat
+                # it as a ConnectError.
+                if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE:
+                    msg = "Server disconnected without sending a response."
+                    raise RemoteProtocolError(msg)
+
+                self._h11_state.receive_data(data)
+            else:
+                # mypy fails to narrow the type in the if statement above
+                return event  # type: ignore[return-value]
+
+    def _response_closed(self) -> None:
+        with self._state_lock:
+            if (
+                self._h11_state.our_state is h11.DONE
+                and self._h11_state.their_state is h11.DONE
+            ):
+                self._state = HTTPConnectionState.IDLE
+                self._h11_state.start_next_cycle()
+                if self._keepalive_expiry is not None:
+                    now = time.monotonic()
+                    self._expire_at = now + self._keepalive_expiry
+            else:
+                self.close()
+
+    # Once the connection is no longer required...
+
+    def close(self) -> None:
+        # Note that this method unilaterally closes the connection, and does
+        # not have any kind of locking in place around it.
+        self._state = HTTPConnectionState.CLOSED
+        self._network_stream.close()
+
+    # The ConnectionInterface methods provide information about the state of
+    # the connection, allowing for a connection pooling implementation to
+    # determine when to reuse and when to close the connection...
+
+    def can_handle_request(self, origin: Origin) -> bool:
+        return origin == self._origin
+
+    def is_available(self) -> bool:
+        # Note that HTTP/1.1 connections in the "NEW" state are not treated as
+        # being "available". The control flow which created the connection will
+        # be able to send an outgoing request, but the connection will not be
+        # acquired from the connection pool for any other request.
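+        # (HTTP/1.1 is strictly sequential, so only the IDLE state signals
+        # here that a new request may be started.)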
+ return self._state == HTTPConnectionState.IDLE + + def has_expired(self) -> bool: + now = time.monotonic() + keepalive_expired = self._expire_at is not None and now > self._expire_at + + # If the HTTP connection is idle but the socket is readable, then the + # only valid state is that the socket is about to return b"", indicating + # a server-initiated disconnect. + server_disconnected = ( + self._state == HTTPConnectionState.IDLE + and self._network_stream.get_extra_info("is_readable") + ) + + return keepalive_expired or server_disconnected + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/1.1, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. + + def __enter__(self) -> HTTP11Connection: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + self.close() + + +class HTTP11ConnectionByteStream: + def __init__(self, connection: HTTP11Connection, request: Request) -> None: + self._connection = connection + self._request = request + self._closed = False + + def __iter__(self) -> typing.Iterator[bytes]: + kwargs = {"request": self._request} + try: + with Trace("receive_response_body", logger, self._request, kwargs): + for chunk in self._connection._receive_response_body(**kwargs): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. 
+ with ShieldCancellation(): + self.close() + raise exc + + def close(self) -> None: + if not self._closed: + self._closed = True + with Trace("response_closed", logger, self._request): + self._connection._response_closed() + + +class HTTP11UpgradeStream(NetworkStream): + def __init__(self, stream: NetworkStream, leading_data: bytes) -> None: + self._stream = stream + self._leading_data = leading_data + + def read(self, max_bytes: int, timeout: float | None = None) -> bytes: + if self._leading_data: + buffer = self._leading_data[:max_bytes] + self._leading_data = self._leading_data[max_bytes:] + return buffer + else: + return self._stream.read(max_bytes, timeout) + + def write(self, buffer: bytes, timeout: float | None = None) -> None: + self._stream.write(buffer, timeout) + + def close(self) -> None: + self._stream.close() + + def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: str | None = None, + timeout: float | None = None, + ) -> NetworkStream: + return self._stream.start_tls(ssl_context, server_hostname, timeout) + + def get_extra_info(self, info: str) -> typing.Any: + return self._stream.get_extra_info(info) diff --git a/venv/Lib/site-packages/httpcore/_sync/http2.py b/venv/Lib/site-packages/httpcore/_sync/http2.py new file mode 100644 index 00000000..ddcc1890 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_sync/http2.py @@ -0,0 +1,592 @@ +from __future__ import annotations + +import enum +import logging +import time +import types +import typing + +import h2.config +import h2.connection +import h2.events +import h2.exceptions +import h2.settings + +from .._backends.base import NetworkStream +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, +) +from .._models import Origin, Request, Response +from .._synchronization import Lock, Semaphore, ShieldCancellation +from .._trace import Trace +from .interfaces import ConnectionInterface + +logger = logging.getLogger("httpcore.http2") + + +def has_body_headers(request: Request) -> bool: + return any( + k.lower() == b"content-length" or k.lower() == b"transfer-encoding" + for k, v in request.headers + ) + + +class HTTPConnectionState(enum.IntEnum): + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class HTTP2Connection(ConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + CONFIG = h2.config.H2Configuration(validate_inbound_headers=False) + + def __init__( + self, + origin: Origin, + stream: NetworkStream, + keepalive_expiry: float | None = None, + ): + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: float | None = keepalive_expiry + self._h2_state = h2.connection.H2Connection(config=self.CONFIG) + self._state = HTTPConnectionState.IDLE + self._expire_at: float | None = None + self._request_count = 0 + self._init_lock = Lock() + self._state_lock = Lock() + self._read_lock = Lock() + self._write_lock = Lock() + self._sent_connection_init = False + self._used_all_stream_ids = False + self._connection_error = False + + # Mapping from stream ID to response stream events. + self._events: dict[ + int, + list[ + h2.events.ResponseReceived + | h2.events.DataReceived + | h2.events.StreamEnded + | h2.events.StreamReset, + ], + ] = {} + + # Connection terminated events are stored as state since + # we need to handle them for all streams. 
+ self._connection_terminated: h2.events.ConnectionTerminated | None = None + + self._read_exception: Exception | None = None + self._write_exception: Exception | None = None + + def handle_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + # This cannot occur in normal operation, since the connection pool + # will only send requests on connections that handle them. + # It's in place simply for resilience as a guard against incorrect + # usage, for anyone working directly with httpcore connections. + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + with self._state_lock: + if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE): + self._request_count += 1 + self._expire_at = None + self._state = HTTPConnectionState.ACTIVE + else: + raise ConnectionNotAvailable() + + with self._init_lock: + if not self._sent_connection_init: + try: + sci_kwargs = {"request": request} + with Trace( + "send_connection_init", logger, request, sci_kwargs + ): + self._send_connection_init(**sci_kwargs) + except BaseException as exc: + with ShieldCancellation(): + self.close() + raise exc + + self._sent_connection_init = True + + # Initially start with just 1 until the remote server provides + # its max_concurrent_streams value + self._max_streams = 1 + + local_settings_max_streams = ( + self._h2_state.local_settings.max_concurrent_streams + ) + self._max_streams_semaphore = Semaphore(local_settings_max_streams) + + for _ in range(local_settings_max_streams - self._max_streams): + self._max_streams_semaphore.acquire() + + self._max_streams_semaphore.acquire() + + try: + stream_id = self._h2_state.get_next_available_stream_id() + self._events[stream_id] = [] + except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover + self._used_all_stream_ids = True + self._request_count -= 1 + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request, "stream_id": stream_id} + with Trace("send_request_headers", logger, request, kwargs): + self._send_request_headers(request=request, stream_id=stream_id) + with Trace("send_request_body", logger, request, kwargs): + self._send_request_body(request=request, stream_id=stream_id) + with Trace( + "receive_response_headers", logger, request, kwargs + ) as trace: + status, headers = self._receive_response( + request=request, stream_id=stream_id + ) + trace.return_value = (status, headers) + + return Response( + status=status, + headers=headers, + content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id), + extensions={ + "http_version": b"HTTP/2", + "network_stream": self._network_stream, + "stream_id": stream_id, + }, + ) + except BaseException as exc: # noqa: PIE786 + with ShieldCancellation(): + kwargs = {"stream_id": stream_id} + with Trace("response_closed", logger, request, kwargs): + self._response_closed(stream_id=stream_id) + + if isinstance(exc, h2.exceptions.ProtocolError): + # One case where h2 can raise a protocol error is when a + # closed frame has been seen by the state machine. + # + # This happens when one stream is reading, and encounters + # a GOAWAY event. Other flows of control may then raise + # a protocol error at any point they interact with the 'h2_state'. + # + # In this case we'll have stored the event, and should raise + # it as a RemoteProtocolError. 
+                if self._connection_terminated:  # pragma: nocover
+                    raise RemoteProtocolError(self._connection_terminated)
+                # If h2 raises a protocol error in some other state then we
+                # must somehow have made a protocol violation.
+                raise LocalProtocolError(exc)  # pragma: nocover
+
+            raise exc
+
+    def _send_connection_init(self, request: Request) -> None:
+        """
+        The HTTP/2 connection requires some initial setup before we can start
+        using individual request/response streams on it.
+        """
+        # Need to set these manually here instead of manipulating via
+        # __setitem__() otherwise the H2Connection will emit SettingsUpdate
+        # frames in addition to sending the undesired defaults.
+        self._h2_state.local_settings = h2.settings.Settings(
+            client=True,
+            initial_values={
+                # Disable PUSH_PROMISE frames from the server since we don't do anything
+                # with them for now. Maybe when we support caching?
+                h2.settings.SettingCodes.ENABLE_PUSH: 0,
+                # These two are taken from h2 for safe defaults
+                h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,
+                h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536,
+            },
+        )
+
+        # Some websites (*cough* Yahoo *cough*) balk at this setting being
+        # present in the initial handshake since it's not defined in the original
+        # RFC despite the RFC mandating ignoring settings you don't know about.
+        del self._h2_state.local_settings[
+            h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL
+        ]
+
+        self._h2_state.initiate_connection()
+        self._h2_state.increment_flow_control_window(2**24)
+        self._write_outgoing_data(request)
+
+    # Sending the request...
+
+    def _send_request_headers(self, request: Request, stream_id: int) -> None:
+        """
+        Send the request headers to a given stream ID.
+        """
+        end_stream = not has_body_headers(request)
+
+        # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'.
+        # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require
+        # HTTP/1.1 style headers, and map them appropriately if we end up on
+        # an HTTP/2 connection.
+        authority = [v for k, v in request.headers if k.lower() == b"host"][0]
+
+        headers = [
+            (b":method", request.method),
+            (b":authority", authority),
+            (b":scheme", request.url.scheme),
+            (b":path", request.url.target),
+        ] + [
+            (k.lower(), v)
+            for k, v in request.headers
+            if k.lower()
+            not in (
+                b"host",
+                b"transfer-encoding",
+            )
+        ]
+
+        self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)
+        self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id)
+        self._write_outgoing_data(request)
+
+    def _send_request_body(self, request: Request, stream_id: int) -> None:
+        """
+        Iterate over the request body sending it to a given stream ID.
+        """
+        if not has_body_headers(request):
+            return
+
+        assert isinstance(request.stream, typing.Iterable)
+        for data in request.stream:
+            self._send_stream_data(request, stream_id, data)
+        self._send_end_stream(request, stream_id)
+
+    def _send_stream_data(
+        self, request: Request, stream_id: int, data: bytes
+    ) -> None:
+        """
+        Send a single chunk of data in one or more data frames.
+        """
+        while data:
+            max_flow = self._wait_for_outgoing_flow(request, stream_id)
+            chunk_size = min(len(data), max_flow)
+            chunk, data = data[:chunk_size], data[chunk_size:]
+            self._h2_state.send_data(stream_id, chunk)
+            self._write_outgoing_data(request)
+
+    def _send_end_stream(self, request: Request, stream_id: int) -> None:
+        """
+        Send an empty data frame on a given stream ID with the END_STREAM flag set.
+ """ + self._h2_state.end_stream(stream_id) + self._write_outgoing_data(request) + + # Receiving the response... + + def _receive_response( + self, request: Request, stream_id: int + ) -> tuple[int, list[tuple[bytes, bytes]]]: + """ + Return the response status code and headers for a given stream ID. + """ + while True: + event = self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.ResponseReceived): + break + + status_code = 200 + headers = [] + assert event.headers is not None + for k, v in event.headers: + if k == b":status": + status_code = int(v.decode("ascii", errors="ignore")) + elif not k.startswith(b":"): + headers.append((k, v)) + + return (status_code, headers) + + def _receive_response_body( + self, request: Request, stream_id: int + ) -> typing.Iterator[bytes]: + """ + Iterator that returns the bytes of the response body for a given stream ID. + """ + while True: + event = self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.DataReceived): + assert event.flow_controlled_length is not None + assert event.data is not None + amount = event.flow_controlled_length + self._h2_state.acknowledge_received_data(amount, stream_id) + self._write_outgoing_data(request) + yield event.data + elif isinstance(event, h2.events.StreamEnded): + break + + def _receive_stream_event( + self, request: Request, stream_id: int + ) -> h2.events.ResponseReceived | h2.events.DataReceived | h2.events.StreamEnded: + """ + Return the next available event for a given stream ID. + + Will read more data from the network if required. + """ + while not self._events.get(stream_id): + self._receive_events(request, stream_id) + event = self._events[stream_id].pop(0) + if isinstance(event, h2.events.StreamReset): + raise RemoteProtocolError(event) + return event + + def _receive_events( + self, request: Request, stream_id: int | None = None + ) -> None: + """ + Read some data from the network until we see one or more events + for a given stream ID. + """ + with self._read_lock: + if self._connection_terminated is not None: + last_stream_id = self._connection_terminated.last_stream_id + if stream_id and last_stream_id and stream_id > last_stream_id: + self._request_count -= 1 + raise ConnectionNotAvailable() + raise RemoteProtocolError(self._connection_terminated) + + # This conditional is a bit icky. We don't want to block reading if we've + # actually got an event to return for a given stream. We need to do that + # check *within* the atomic read lock. Though it also need to be optional, + # because when we call it from `_wait_for_outgoing_flow` we *do* want to + # block until we've available flow control, event when we have events + # pending for the stream ID we're attempting to send on. 
+ if stream_id is None or not self._events.get(stream_id): + events = self._read_incoming_data(request) + for event in events: + if isinstance(event, h2.events.RemoteSettingsChanged): + with Trace( + "receive_remote_settings", logger, request + ) as trace: + self._receive_remote_settings_change(event) + trace.return_value = event + + elif isinstance( + event, + ( + h2.events.ResponseReceived, + h2.events.DataReceived, + h2.events.StreamEnded, + h2.events.StreamReset, + ), + ): + if event.stream_id in self._events: + self._events[event.stream_id].append(event) + + elif isinstance(event, h2.events.ConnectionTerminated): + self._connection_terminated = event + + self._write_outgoing_data(request) + + def _receive_remote_settings_change( + self, event: h2.events.RemoteSettingsChanged + ) -> None: + max_concurrent_streams = event.changed_settings.get( + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS + ) + if max_concurrent_streams: + new_max_streams = min( + max_concurrent_streams.new_value, + self._h2_state.local_settings.max_concurrent_streams, + ) + if new_max_streams and new_max_streams != self._max_streams: + while new_max_streams > self._max_streams: + self._max_streams_semaphore.release() + self._max_streams += 1 + while new_max_streams < self._max_streams: + self._max_streams_semaphore.acquire() + self._max_streams -= 1 + + def _response_closed(self, stream_id: int) -> None: + self._max_streams_semaphore.release() + del self._events[stream_id] + with self._state_lock: + if self._connection_terminated and not self._events: + self.close() + + elif self._state == HTTPConnectionState.ACTIVE and not self._events: + self._state = HTTPConnectionState.IDLE + if self._keepalive_expiry is not None: + now = time.monotonic() + self._expire_at = now + self._keepalive_expiry + if self._used_all_stream_ids: # pragma: nocover + self.close() + + def close(self) -> None: + # Note that this method unilaterally closes the connection, and does + # not have any kind of locking in place around it. + self._h2_state.close_connection() + self._state = HTTPConnectionState.CLOSED + self._network_stream.close() + + # Wrappers around network read/write operations... + + def _read_incoming_data(self, request: Request) -> list[h2.events.Event]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + if self._read_exception is not None: + raise self._read_exception # pragma: nocover + + try: + data = self._network_stream.read(self.READ_NUM_BYTES, timeout) + if data == b"": + raise RemoteProtocolError("Server disconnected") + except Exception as exc: + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future reads. + # (For example, this means that a single read timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. 
+ self._read_exception = exc + self._connection_error = True + raise exc + + events: list[h2.events.Event] = self._h2_state.receive_data(data) + + return events + + def _write_outgoing_data(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + with self._write_lock: + data_to_send = self._h2_state.data_to_send() + + if self._write_exception is not None: + raise self._write_exception # pragma: nocover + + try: + self._network_stream.write(data_to_send, timeout) + except Exception as exc: # pragma: nocover + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future write. + # (For example, this means that a single write timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. + self._write_exception = exc + self._connection_error = True + raise exc + + # Flow control... + + def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int: + """ + Returns the maximum allowable outgoing flow for a given stream. + + If the allowable flow is zero, then waits on the network until + WindowUpdated frames have increased the flow rate. + https://tools.ietf.org/html/rfc7540#section-6.9 + """ + local_flow: int = self._h2_state.local_flow_control_window(stream_id) + max_frame_size: int = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + while flow == 0: + self._receive_events(request) + local_flow = self._h2_state.local_flow_control_window(stream_id) + max_frame_size = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + return flow + + # Interface for connection pooling... + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + return ( + self._state != HTTPConnectionState.CLOSED + and not self._connection_error + and not self._used_all_stream_ids + and not ( + self._h2_state.state_machine.state + == h2.connection.ConnectionState.CLOSED + ) + ) + + def has_expired(self) -> bool: + now = time.monotonic() + return self._expire_at is not None and now > self._expire_at + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/2, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. 
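+    # Note that HTTP2Connection instances are normally created indirectly,
+    # either by `HTTPConnection` once ALPN has negotiated "h2", or when
+    # http2=True is combined with http1=False; the context-manager form is
+    # mostly useful when testing.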
+ + def __enter__(self) -> HTTP2Connection: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + self.close() + + +class HTTP2ConnectionByteStream: + def __init__( + self, connection: HTTP2Connection, request: Request, stream_id: int + ) -> None: + self._connection = connection + self._request = request + self._stream_id = stream_id + self._closed = False + + def __iter__(self) -> typing.Iterator[bytes]: + kwargs = {"request": self._request, "stream_id": self._stream_id} + try: + with Trace("receive_response_body", logger, self._request, kwargs): + for chunk in self._connection._receive_response_body( + request=self._request, stream_id=self._stream_id + ): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. + with ShieldCancellation(): + self.close() + raise exc + + def close(self) -> None: + if not self._closed: + self._closed = True + kwargs = {"stream_id": self._stream_id} + with Trace("response_closed", logger, self._request, kwargs): + self._connection._response_closed(stream_id=self._stream_id) diff --git a/venv/Lib/site-packages/httpcore/_sync/http_proxy.py b/venv/Lib/site-packages/httpcore/_sync/http_proxy.py new file mode 100644 index 00000000..ecca88f7 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_sync/http_proxy.py @@ -0,0 +1,367 @@ +from __future__ import annotations + +import base64 +import logging +import ssl +import typing + +from .._backends.base import SOCKET_OPTION, NetworkBackend +from .._exceptions import ProxyError +from .._models import ( + URL, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, +) +from .._ssl import default_ssl_context +from .._synchronization import Lock +from .._trace import Trace +from .connection import HTTPConnection +from .connection_pool import ConnectionPool +from .http11 import HTTP11Connection +from .interfaces import ConnectionInterface + +ByteOrStr = typing.Union[bytes, str] +HeadersAsSequence = typing.Sequence[typing.Tuple[ByteOrStr, ByteOrStr]] +HeadersAsMapping = typing.Mapping[ByteOrStr, ByteOrStr] + + +logger = logging.getLogger("httpcore.proxy") + + +def merge_headers( + default_headers: typing.Sequence[tuple[bytes, bytes]] | None = None, + override_headers: typing.Sequence[tuple[bytes, bytes]] | None = None, +) -> list[tuple[bytes, bytes]]: + """ + Append default_headers and override_headers, de-duplicating if a key exists + in both cases. + """ + default_headers = [] if default_headers is None else list(default_headers) + override_headers = [] if override_headers is None else list(override_headers) + has_override = set(key.lower() for key, value in override_headers) + default_headers = [ + (key, value) + for key, value in default_headers + if key.lower() not in has_override + ] + return default_headers + override_headers + + +class HTTPProxy(ConnectionPool): # pragma: nocover + """ + A connection pool that sends requests via an HTTP proxy. 
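+
+    For example, a minimal sketch (the proxy address here is an assumption
+    chosen only for illustration):
+
+    ```python
+    proxy = httpcore.HTTPProxy(proxy_url="http://127.0.0.1:8080/")
+    response = proxy.request("GET", "https://www.example.com/")
+    ```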
+ """ + + def __init__( + self, + proxy_url: URL | bytes | str, + proxy_auth: tuple[bytes | str, bytes | str] | None = None, + proxy_headers: HeadersAsMapping | HeadersAsSequence | None = None, + ssl_context: ssl.SSLContext | None = None, + proxy_ssl_context: ssl.SSLContext | None = None, + max_connections: int | None = 10, + max_keepalive_connections: int | None = None, + keepalive_expiry: float | None = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: str | None = None, + uds: str | None = None, + network_backend: NetworkBackend | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + proxy_url: The URL to use when connecting to the proxy server. + For example `"http://127.0.0.1:8080/"`. + proxy_auth: Any proxy authentication as a two-tuple of + (username, password). May be either bytes or ascii-only str. + proxy_headers: Any HTTP headers to use for the proxy requests. + For example `{"Proxy-Authorization": "Basic :"}`. + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + proxy_ssl_context: The same as `ssl_context`, but for a proxy server rather than a remote origin. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish + a connection. + local_address: Local address to connect from. Can also be used to + connect using a particular address family. Using + `local_address="0.0.0.0"` will connect using an `AF_INET` address + (IPv4), while using `local_address="::"` will connect using an + `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. 
+ """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + local_address=local_address, + uds=uds, + socket_options=socket_options, + ) + + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + if ( + self._proxy_url.scheme == b"http" and proxy_ssl_context is not None + ): # pragma: no cover + raise RuntimeError( + "The `proxy_ssl_context` argument is not allowed for the http scheme" + ) + + self._ssl_context = ssl_context + self._proxy_ssl_context = proxy_ssl_context + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + if proxy_auth is not None: + username = enforce_bytes(proxy_auth[0], name="proxy_auth") + password = enforce_bytes(proxy_auth[1], name="proxy_auth") + userpass = username + b":" + password + authorization = b"Basic " + base64.b64encode(userpass) + self._proxy_headers = [ + (b"Proxy-Authorization", authorization) + ] + self._proxy_headers + + def create_connection(self, origin: Origin) -> ConnectionInterface: + if origin.scheme == b"http": + return ForwardHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + keepalive_expiry=self._keepalive_expiry, + network_backend=self._network_backend, + proxy_ssl_context=self._proxy_ssl_context, + ) + return TunnelHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + ssl_context=self._ssl_context, + proxy_ssl_context=self._proxy_ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class ForwardHTTPConnection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_headers: HeadersAsMapping | HeadersAsSequence | None = None, + keepalive_expiry: float | None = None, + network_backend: NetworkBackend | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + proxy_ssl_context: ssl.SSLContext | None = None, + ) -> None: + self._connection = HTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + socket_options=socket_options, + ssl_context=proxy_ssl_context, + ) + self._proxy_origin = proxy_origin + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + self._remote_origin = remote_origin + + def handle_request(self, request: Request) -> Response: + headers = merge_headers(self._proxy_headers, request.headers) + url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=bytes(request.url), + ) + proxy_request = Request( + method=request.method, + url=url, + headers=headers, + content=request.stream, + extensions=request.extensions, + ) + return self._connection.handle_request(proxy_request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + def close(self) -> None: + self._connection.close() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() 
+ + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + +class TunnelHTTPConnection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + ssl_context: ssl.SSLContext | None = None, + proxy_ssl_context: ssl.SSLContext | None = None, + proxy_headers: typing.Sequence[tuple[bytes, bytes]] | None = None, + keepalive_expiry: float | None = None, + http1: bool = True, + http2: bool = False, + network_backend: NetworkBackend | None = None, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> None: + self._connection: ConnectionInterface = HTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + socket_options=socket_options, + ssl_context=proxy_ssl_context, + ) + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._ssl_context = ssl_context + self._proxy_ssl_context = proxy_ssl_context + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._connect_lock = Lock() + self._connected = False + + def handle_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + with self._connect_lock: + if not self._connected: + target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port) + + connect_url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=target, + ) + connect_headers = merge_headers( + [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers + ) + connect_request = Request( + method=b"CONNECT", + url=connect_url, + headers=connect_headers, + extensions=request.extensions, + ) + connect_response = self._connection.handle_request( + connect_request + ) + + if connect_response.status < 200 or connect_response.status > 299: + reason_bytes = connect_response.extensions.get("reason_phrase", b"") + reason_str = reason_bytes.decode("ascii", errors="ignore") + msg = "%d %s" % (connect_response.status, reason_str) + self._connection.close() + raise ProxyError(msg) + + stream = connect_response.extensions["network_stream"] + + # Upgrade the stream to SSL + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + with Trace("start_tls", logger, request, kwargs) as trace: + stream = stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import HTTP2Connection + + self._connection = HTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = HTTP11Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + + self._connected = True + return self._connection.handle_request(request) + + def can_handle_request(self, 
origin: Origin) -> bool: + return origin == self._remote_origin + + def close(self) -> None: + self._connection.close() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/venv/Lib/site-packages/httpcore/_sync/interfaces.py b/venv/Lib/site-packages/httpcore/_sync/interfaces.py new file mode 100644 index 00000000..e673d4cc --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_sync/interfaces.py @@ -0,0 +1,137 @@ +from __future__ import annotations + +import contextlib +import typing + +from .._models import ( + URL, + Extensions, + HeaderTypes, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, + include_request_headers, +) + + +class RequestInterface: + def request( + self, + method: bytes | str, + url: URL | bytes | str, + *, + headers: HeaderTypes = None, + content: bytes | typing.Iterator[bytes] | None = None, + extensions: Extensions | None = None, + ) -> Response: + # Strict type checking on our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. + headers = include_request_headers(headers, url=url, content=content) + + request = Request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + response = self.handle_request(request) + try: + response.read() + finally: + response.close() + return response + + @contextlib.contextmanager + def stream( + self, + method: bytes | str, + url: URL | bytes | str, + *, + headers: HeaderTypes = None, + content: bytes | typing.Iterator[bytes] | None = None, + extensions: Extensions | None = None, + ) -> typing.Iterator[Response]: + # Strict type checking on our parameters. + method = enforce_bytes(method, name="method") + url = enforce_url(url, name="url") + headers = enforce_headers(headers, name="headers") + + # Include Host header, and optionally Content-Length or Transfer-Encoding. + headers = include_request_headers(headers, url=url, content=content) + + request = Request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + response = self.handle_request(request) + try: + yield response + finally: + response.close() + + def handle_request(self, request: Request) -> Response: + raise NotImplementedError() # pragma: nocover + + +class ConnectionInterface(RequestInterface): + def close(self) -> None: + raise NotImplementedError() # pragma: nocover + + def info(self) -> str: + raise NotImplementedError() # pragma: nocover + + def can_handle_request(self, origin: Origin) -> bool: + raise NotImplementedError() # pragma: nocover + + def is_available(self) -> bool: + """ + Return `True` if the connection is currently able to accept an + outgoing request. + + An HTTP/1.1 connection will only be available if it is currently idle. + + An HTTP/2 connection will be available so long as the stream ID space is + not yet exhausted, and the connection is not in an error state. 
+
+        While the connection is being established we may not yet know if it is going
+        to result in an HTTP/1.1 or HTTP/2 connection. The connection should be
+        treated as being available, but might ultimately raise `NewConnectionRequired`
+        exceptions if multiple requests are attempted over a connection that ends up
+        being established as HTTP/1.1.
+        """
+        raise NotImplementedError()  # pragma: nocover
+
+    def has_expired(self) -> bool:
+        """
+        Return `True` if the connection is in a state where it should be closed.
+
+        This either means that the connection is idle and it has passed the
+        expiry time on its keep-alive, or that the server has sent an EOF.
+        """
+        raise NotImplementedError()  # pragma: nocover
+
+    def is_idle(self) -> bool:
+        """
+        Return `True` if the connection is currently idle.
+        """
+        raise NotImplementedError()  # pragma: nocover
+
+    def is_closed(self) -> bool:
+        """
+        Return `True` if the connection has been closed.
+
+        Used when a response is closed to determine if the connection may be
+        returned to the connection pool or not.
+        """
+        raise NotImplementedError()  # pragma: nocover
diff --git a/venv/Lib/site-packages/httpcore/_sync/socks_proxy.py b/venv/Lib/site-packages/httpcore/_sync/socks_proxy.py
new file mode 100644
index 00000000..0ca96ddf
--- /dev/null
+++ b/venv/Lib/site-packages/httpcore/_sync/socks_proxy.py
@@ -0,0 +1,341 @@
+from __future__ import annotations
+
+import logging
+import ssl
+
+import socksio
+
+from .._backends.sync import SyncBackend
+from .._backends.base import NetworkBackend, NetworkStream
+from .._exceptions import ConnectionNotAvailable, ProxyError
+from .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url
+from .._ssl import default_ssl_context
+from .._synchronization import Lock
+from .._trace import Trace
+from .connection_pool import ConnectionPool
+from .http11 import HTTP11Connection
+from .interfaces import ConnectionInterface
+
+logger = logging.getLogger("httpcore.socks")
+
+
+AUTH_METHODS = {
+    b"\x00": "NO AUTHENTICATION REQUIRED",
+    b"\x01": "GSSAPI",
+    b"\x02": "USERNAME/PASSWORD",
+    b"\xff": "NO ACCEPTABLE METHODS",
+}
+
+REPLY_CODES = {
+    b"\x00": "Succeeded",
+    b"\x01": "General SOCKS server failure",
+    b"\x02": "Connection not allowed by ruleset",
+    b"\x03": "Network unreachable",
+    b"\x04": "Host unreachable",
+    b"\x05": "Connection refused",
+    b"\x06": "TTL expired",
+    b"\x07": "Command not supported",
+    b"\x08": "Address type not supported",
+}
+
+
+def _init_socks5_connection(
+    stream: NetworkStream,
+    *,
+    host: bytes,
+    port: int,
+    auth: tuple[bytes, bytes] | None = None,
+) -> None:
+    conn = socksio.socks5.SOCKS5Connection()
+
+    # Auth method request
+    auth_method = (
+        socksio.socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED
+        if auth is None
+        else socksio.socks5.SOCKS5AuthMethod.USERNAME_PASSWORD
+    )
+    conn.send(socksio.socks5.SOCKS5AuthMethodsRequest([auth_method]))
+    outgoing_bytes = conn.data_to_send()
+    stream.write(outgoing_bytes)
+
+    # Auth method response
+    incoming_bytes = stream.read(max_bytes=4096)
+    response = conn.receive_data(incoming_bytes)
+    assert isinstance(response, socksio.socks5.SOCKS5AuthReply)
+    if response.method != auth_method:
+        requested = AUTH_METHODS.get(auth_method, "UNKNOWN")
+        responded = AUTH_METHODS.get(response.method, "UNKNOWN")
+        raise ProxyError(
+            f"Requested {requested} from proxy server, but got {responded}."
+        )
+
+    if response.method == socksio.socks5.SOCKS5AuthMethod.USERNAME_PASSWORD:
+        # Username/password request
+        assert auth is not None
+        username, password = auth
+        conn.send(socksio.socks5.SOCKS5UsernamePasswordRequest(username, password))
+        outgoing_bytes = conn.data_to_send()
+        stream.write(outgoing_bytes)
+
+        # Username/password response
+        incoming_bytes = stream.read(max_bytes=4096)
+        response = conn.receive_data(incoming_bytes)
+        assert isinstance(response, socksio.socks5.SOCKS5UsernamePasswordReply)
+        if not response.success:
+            raise ProxyError("Invalid username/password")
+
+    # Connect request
+    conn.send(
+        socksio.socks5.SOCKS5CommandRequest.from_address(
+            socksio.socks5.SOCKS5Command.CONNECT, (host, port)
+        )
+    )
+    outgoing_bytes = conn.data_to_send()
+    stream.write(outgoing_bytes)
+
+    # Connect response
+    incoming_bytes = stream.read(max_bytes=4096)
+    response = conn.receive_data(incoming_bytes)
+    assert isinstance(response, socksio.socks5.SOCKS5Reply)
+    if response.reply_code != socksio.socks5.SOCKS5ReplyCode.SUCCEEDED:
+        reply_code = REPLY_CODES.get(response.reply_code, "UNKNOWN")
+        raise ProxyError(f"Proxy server could not connect: {reply_code}.")
+
+
+class SOCKSProxy(ConnectionPool):  # pragma: nocover
+    """
+    A connection pool that sends requests via a SOCKS proxy.
+    """
+
+    def __init__(
+        self,
+        proxy_url: URL | bytes | str,
+        proxy_auth: tuple[bytes | str, bytes | str] | None = None,
+        ssl_context: ssl.SSLContext | None = None,
+        max_connections: int | None = 10,
+        max_keepalive_connections: int | None = None,
+        keepalive_expiry: float | None = None,
+        http1: bool = True,
+        http2: bool = False,
+        retries: int = 0,
+        network_backend: NetworkBackend | None = None,
+    ) -> None:
+        """
+        A connection pool for making HTTP requests via a SOCKS proxy.
+
+        Parameters:
+            proxy_url: The URL to use when connecting to the proxy server.
+                For example `"socks5://127.0.0.1:1080/"`.
+            proxy_auth: An optional `(username, password)` tuple to use for
+                authenticating with the proxy server.
+            ssl_context: An SSL context to use for verifying connections.
+                If not specified, the default `httpcore.default_ssl_context()`
+                will be used.
+            max_connections: The maximum number of concurrent HTTP connections that
+                the pool should allow. Any attempt to send a request on a pool that
+                would exceed this amount will block until a connection is available.
+            max_keepalive_connections: The maximum number of idle HTTP connections
+                that will be maintained in the pool.
+            keepalive_expiry: The duration in seconds that an idle HTTP connection
+                may be maintained for before being expired from the pool.
+            http1: A boolean indicating if HTTP/1.1 requests should be supported
+                by the connection pool. Defaults to True.
+            http2: A boolean indicating if HTTP/2 requests should be supported by
+                the connection pool. Defaults to False.
+            retries: The maximum number of retries when trying to establish
+                a connection.
+            network_backend: A backend instance to use for handling network I/O.
+ """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + if proxy_auth is not None: + username, password = proxy_auth + username_bytes = enforce_bytes(username, name="proxy_auth") + password_bytes = enforce_bytes(password, name="proxy_auth") + self._proxy_auth: tuple[bytes, bytes] | None = ( + username_bytes, + password_bytes, + ) + else: + self._proxy_auth = None + + def create_connection(self, origin: Origin) -> ConnectionInterface: + return Socks5Connection( + proxy_origin=self._proxy_url.origin, + remote_origin=origin, + proxy_auth=self._proxy_auth, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class Socks5Connection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_auth: tuple[bytes, bytes] | None = None, + ssl_context: ssl.SSLContext | None = None, + keepalive_expiry: float | None = None, + http1: bool = True, + http2: bool = False, + network_backend: NetworkBackend | None = None, + ) -> None: + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._proxy_auth = proxy_auth + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + + self._network_backend: NetworkBackend = ( + SyncBackend() if network_backend is None else network_backend + ) + self._connect_lock = Lock() + self._connection: ConnectionInterface | None = None + self._connect_failed = False + + def handle_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + sni_hostname = request.extensions.get("sni_hostname", None) + timeout = timeouts.get("connect", None) + + with self._connect_lock: + if self._connection is None: + try: + # Connect to the proxy + kwargs = { + "host": self._proxy_origin.host.decode("ascii"), + "port": self._proxy_origin.port, + "timeout": timeout, + } + with Trace("connect_tcp", logger, request, kwargs) as trace: + stream = self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + + # Connect to the remote host using socks5 + kwargs = { + "stream": stream, + "host": self._remote_origin.host.decode("ascii"), + "port": self._remote_origin.port, + "auth": self._proxy_auth, + } + with Trace( + "setup_socks5_connection", logger, request, kwargs + ) as trace: + _init_socks5_connection(**kwargs) + trace.return_value = stream + + # Upgrade the stream to SSL + if self._remote_origin.scheme == b"https": + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ( + ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ) + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": sni_hostname + or self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + with Trace("start_tls", logger, request, kwargs) as trace: + stream = stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and 
ssl_object.selected_alpn_protocol() == "h2"
+                    )
+
+                    # Create the HTTP/1.1 or HTTP/2 connection
+                    if http2_negotiated or (
+                        self._http2 and not self._http1
+                    ):  # pragma: nocover
+                        from .http2 import HTTP2Connection
+
+                        self._connection = HTTP2Connection(
+                            origin=self._remote_origin,
+                            stream=stream,
+                            keepalive_expiry=self._keepalive_expiry,
+                        )
+                    else:
+                        self._connection = HTTP11Connection(
+                            origin=self._remote_origin,
+                            stream=stream,
+                            keepalive_expiry=self._keepalive_expiry,
+                        )
+                except Exception as exc:
+                    self._connect_failed = True
+                    raise exc
+            elif not self._connection.is_available():  # pragma: nocover
+                raise ConnectionNotAvailable()
+
+        return self._connection.handle_request(request)
+
+    def can_handle_request(self, origin: Origin) -> bool:
+        return origin == self._remote_origin
+
+    def close(self) -> None:
+        if self._connection is not None:
+            self._connection.close()
+
+    def is_available(self) -> bool:
+        if self._connection is None:  # pragma: nocover
+            # If HTTP/2 support is enabled, and the resulting connection could
+            # end up as HTTP/2, then we should indicate the connection as being
+            # available to service multiple requests.
+            return (
+                self._http2
+                and (self._remote_origin.scheme == b"https" or not self._http1)
+                and not self._connect_failed
+            )
+        return self._connection.is_available()
+
+    def has_expired(self) -> bool:
+        if self._connection is None:  # pragma: nocover
+            return self._connect_failed
+        return self._connection.has_expired()
+
+    def is_idle(self) -> bool:
+        if self._connection is None:  # pragma: nocover
+            return self._connect_failed
+        return self._connection.is_idle()
+
+    def is_closed(self) -> bool:
+        if self._connection is None:  # pragma: nocover
+            return self._connect_failed
+        return self._connection.is_closed()
+
+    def info(self) -> str:
+        if self._connection is None:  # pragma: nocover
+            return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"
+        return self._connection.info()
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__} [{self.info()}]>"
diff --git a/venv/Lib/site-packages/httpcore/_synchronization.py b/venv/Lib/site-packages/httpcore/_synchronization.py
new file mode 100644
index 00000000..2ecc9e9c
--- /dev/null
+++ b/venv/Lib/site-packages/httpcore/_synchronization.py
@@ -0,0 +1,318 @@
+from __future__ import annotations
+
+import threading
+import types
+
+from ._exceptions import ExceptionMapping, PoolTimeout, map_exceptions
+
+# Our async synchronization primitives use either 'anyio' or 'trio', depending
+# on whether they're running under asyncio or trio.
+
+try:
+    import trio
+except (ImportError, NotImplementedError):  # pragma: nocover
+    trio = None  # type: ignore
+
+try:
+    import anyio
+except ImportError:  # pragma: nocover
+    anyio = None  # type: ignore
+
+
+def current_async_library() -> str:
+    # Determine if we're running under trio or asyncio.
+    # See https://sniffio.readthedocs.io/en/latest/
+    try:
+        import sniffio
+    except ImportError:  # pragma: nocover
+        environment = "asyncio"
+    else:
+        environment = sniffio.current_async_library()
+
+    if environment not in ("asyncio", "trio"):  # pragma: nocover
+        raise RuntimeError("Running under an unsupported async environment.")
+
+    if environment == "asyncio" and anyio is None:  # pragma: nocover
+        raise RuntimeError(
+            "Running with asyncio requires installation of 'httpcore[asyncio]'."
+        )
+
+    if environment == "trio" and trio is None:  # pragma: nocover
+        raise RuntimeError(
+            "Running with trio requires installation of 'httpcore[trio]'."
+        )
+
+    return environment
+
+
+class AsyncLock:
+    """
+    This is a standard lock.
+
+    In the sync case `Lock` provides thread locking.
+    In the async case `AsyncLock` provides async locking.
+    """
+
+    def __init__(self) -> None:
+        self._backend = ""
+
+    def setup(self) -> None:
+        """
+        Detect if we're running under 'asyncio' or 'trio' and create
+        a lock with the correct implementation.
+        """
+        self._backend = current_async_library()
+        if self._backend == "trio":
+            self._trio_lock = trio.Lock()
+        elif self._backend == "asyncio":
+            self._anyio_lock = anyio.Lock()
+
+    async def __aenter__(self) -> AsyncLock:
+        if not self._backend:
+            self.setup()
+
+        if self._backend == "trio":
+            await self._trio_lock.acquire()
+        elif self._backend == "asyncio":
+            await self._anyio_lock.acquire()
+
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None = None,
+        exc_value: BaseException | None = None,
+        traceback: types.TracebackType | None = None,
+    ) -> None:
+        if self._backend == "trio":
+            self._trio_lock.release()
+        elif self._backend == "asyncio":
+            self._anyio_lock.release()
+
+
+class AsyncThreadLock:
+    """
+    This is a threading-only lock for no-I/O contexts.
+
+    In the sync case `ThreadLock` provides thread locking.
+    In the async case `AsyncThreadLock` is a no-op.
+    """
+
+    def __enter__(self) -> AsyncThreadLock:
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None = None,
+        exc_value: BaseException | None = None,
+        traceback: types.TracebackType | None = None,
+    ) -> None:
+        pass
+
+
+class AsyncEvent:
+    def __init__(self) -> None:
+        self._backend = ""
+
+    def setup(self) -> None:
+        """
+        Detect if we're running under 'asyncio' or 'trio' and create
+        an event with the correct implementation.
+        """
+        self._backend = current_async_library()
+        if self._backend == "trio":
+            self._trio_event = trio.Event()
+        elif self._backend == "asyncio":
+            self._anyio_event = anyio.Event()
+
+    def set(self) -> None:
+        if not self._backend:
+            self.setup()
+
+        if self._backend == "trio":
+            self._trio_event.set()
+        elif self._backend == "asyncio":
+            self._anyio_event.set()
+
+    async def wait(self, timeout: float | None = None) -> None:
+        if not self._backend:
+            self.setup()
+
+        if self._backend == "trio":
+            trio_exc_map: ExceptionMapping = {trio.TooSlowError: PoolTimeout}
+            timeout_or_inf = float("inf") if timeout is None else timeout
+            with map_exceptions(trio_exc_map):
+                with trio.fail_after(timeout_or_inf):
+                    await self._trio_event.wait()
+        elif self._backend == "asyncio":
+            anyio_exc_map: ExceptionMapping = {TimeoutError: PoolTimeout}
+            with map_exceptions(anyio_exc_map):
+                with anyio.fail_after(timeout):
+                    await self._anyio_event.wait()
+
+
+class AsyncSemaphore:
+    def __init__(self, bound: int) -> None:
+        self._bound = bound
+        self._backend = ""
+
+    def setup(self) -> None:
+        """
+        Detect if we're running under 'asyncio' or 'trio' and create
+        a semaphore with the correct implementation.
+ """ + self._backend = current_async_library() + if self._backend == "trio": + self._trio_semaphore = trio.Semaphore( + initial_value=self._bound, max_value=self._bound + ) + elif self._backend == "asyncio": + self._anyio_semaphore = anyio.Semaphore( + initial_value=self._bound, max_value=self._bound + ) + + async def acquire(self) -> None: + if not self._backend: + self.setup() + + if self._backend == "trio": + await self._trio_semaphore.acquire() + elif self._backend == "asyncio": + await self._anyio_semaphore.acquire() + + async def release(self) -> None: + if self._backend == "trio": + self._trio_semaphore.release() + elif self._backend == "asyncio": + self._anyio_semaphore.release() + + +class AsyncShieldCancellation: + # For certain portions of our codebase where we're dealing with + # closing connections during exception handling we want to shield + # the operation from being cancelled. + # + # with AsyncShieldCancellation(): + # ... # clean-up operations, shielded from cancellation. + + def __init__(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a shielded scope with the correct implementation. + """ + self._backend = current_async_library() + + if self._backend == "trio": + self._trio_shield = trio.CancelScope(shield=True) + elif self._backend == "asyncio": + self._anyio_shield = anyio.CancelScope(shield=True) + + def __enter__(self) -> AsyncShieldCancellation: + if self._backend == "trio": + self._trio_shield.__enter__() + elif self._backend == "asyncio": + self._anyio_shield.__enter__() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + if self._backend == "trio": + self._trio_shield.__exit__(exc_type, exc_value, traceback) + elif self._backend == "asyncio": + self._anyio_shield.__exit__(exc_type, exc_value, traceback) + + +# Our thread-based synchronization primitives... + + +class Lock: + """ + This is a standard lock. + + In the sync case `Lock` provides thread locking. + In the async case `AsyncLock` provides async locking. + """ + + def __init__(self) -> None: + self._lock = threading.Lock() + + def __enter__(self) -> Lock: + self._lock.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + self._lock.release() + + +class ThreadLock: + """ + This is a threading-only lock for no-I/O contexts. + + In the sync case `ThreadLock` provides thread locking. + In the async case `AsyncThreadLock` is a no-op. 
+ """ + + def __init__(self) -> None: + self._lock = threading.Lock() + + def __enter__(self) -> ThreadLock: + self._lock.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + self._lock.release() + + +class Event: + def __init__(self) -> None: + self._event = threading.Event() + + def set(self) -> None: + self._event.set() + + def wait(self, timeout: float | None = None) -> None: + if timeout == float("inf"): # pragma: no cover + timeout = None + if not self._event.wait(timeout=timeout): + raise PoolTimeout() # pragma: nocover + + +class Semaphore: + def __init__(self, bound: int) -> None: + self._semaphore = threading.Semaphore(value=bound) + + def acquire(self) -> None: + self._semaphore.acquire() + + def release(self) -> None: + self._semaphore.release() + + +class ShieldCancellation: + # Thread-synchronous codebases don't support cancellation semantics. + # We have this class because we need to mirror the async and sync + # cases within our package, but it's just a no-op. + def __enter__(self) -> ShieldCancellation: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + pass diff --git a/venv/Lib/site-packages/httpcore/_trace.py b/venv/Lib/site-packages/httpcore/_trace.py new file mode 100644 index 00000000..5f1cd7c4 --- /dev/null +++ b/venv/Lib/site-packages/httpcore/_trace.py @@ -0,0 +1,107 @@ +from __future__ import annotations + +import inspect +import logging +import types +import typing + +from ._models import Request + + +class Trace: + def __init__( + self, + name: str, + logger: logging.Logger, + request: Request | None = None, + kwargs: dict[str, typing.Any] | None = None, + ) -> None: + self.name = name + self.logger = logger + self.trace_extension = ( + None if request is None else request.extensions.get("trace") + ) + self.debug = self.logger.isEnabledFor(logging.DEBUG) + self.kwargs = kwargs or {} + self.return_value: typing.Any = None + self.should_trace = self.debug or self.trace_extension is not None + self.prefix = self.logger.name.split(".")[-1] + + def trace(self, name: str, info: dict[str, typing.Any]) -> None: + if self.trace_extension is not None: + prefix_and_name = f"{self.prefix}.{name}" + ret = self.trace_extension(prefix_and_name, info) + if inspect.iscoroutine(ret): # pragma: no cover + raise TypeError( + "If you are using a synchronous interface, " + "the callback of the `trace` extension should " + "be a normal function instead of an asynchronous function." 
+            )
+
+        if self.debug:
+            if not info or "return_value" in info and info["return_value"] is None:
+                message = name
+            else:
+                args = " ".join([f"{key}={value!r}" for key, value in info.items()])
+                message = f"{name} {args}"
+            self.logger.debug(message)
+
+    def __enter__(self) -> Trace:
+        if self.should_trace:
+            info = self.kwargs
+            self.trace(f"{self.name}.started", info)
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None = None,
+        exc_value: BaseException | None = None,
+        traceback: types.TracebackType | None = None,
+    ) -> None:
+        if self.should_trace:
+            if exc_value is None:
+                info = {"return_value": self.return_value}
+                self.trace(f"{self.name}.complete", info)
+            else:
+                info = {"exception": exc_value}
+                self.trace(f"{self.name}.failed", info)
+
+    async def atrace(self, name: str, info: dict[str, typing.Any]) -> None:
+        if self.trace_extension is not None:
+            prefix_and_name = f"{self.prefix}.{name}"
+            coro = self.trace_extension(prefix_and_name, info)
+            if not inspect.iscoroutine(coro):  # pragma: no cover
+                raise TypeError(
+                    "If you're using an asynchronous interface, "
+                    "the callback of the `trace` extension should "
+                    "be an asynchronous function rather than a normal function."
+                )
+            await coro
+
+        if self.debug:
+            if not info or "return_value" in info and info["return_value"] is None:
+                message = name
+            else:
+                args = " ".join([f"{key}={value!r}" for key, value in info.items()])
+                message = f"{name} {args}"
+            self.logger.debug(message)
+
+    async def __aenter__(self) -> Trace:
+        if self.should_trace:
+            info = self.kwargs
+            await self.atrace(f"{self.name}.started", info)
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None = None,
+        exc_value: BaseException | None = None,
+        traceback: types.TracebackType | None = None,
+    ) -> None:
+        if self.should_trace:
+            if exc_value is None:
+                info = {"return_value": self.return_value}
+                await self.atrace(f"{self.name}.complete", info)
+            else:
+                info = {"exception": exc_value}
+                await self.atrace(f"{self.name}.failed", info)
diff --git a/venv/Lib/site-packages/httpcore/_utils.py b/venv/Lib/site-packages/httpcore/_utils.py
new file mode 100644
index 00000000..c44ff93c
--- /dev/null
+++ b/venv/Lib/site-packages/httpcore/_utils.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+import select
+import socket
+import sys
+
+
+def is_socket_readable(sock: socket.socket | None) -> bool:
+    """
+    Return whether a socket, as identified by its file descriptor, is readable.
+    "A socket is readable" means that the read buffer isn't empty, i.e. that calling
+    .recv() on it would immediately return some data.
+    """
+    # NOTE: we want to check for readability without actually attempting to read,
+    # because we don't want to block forever if it's not readable.
+
+    # In the case that the socket no longer exists, or cannot return a file
+    # descriptor, we treat it as being readable, as if the next read operation
+    # on it is ready to return the terminating `b""`.
+    sock_fd = None if sock is None else sock.fileno()
+    if sock_fd is None or sock_fd < 0:  # pragma: nocover
+        return True
+
+    # The implementation below was stolen from:
+    # https://github.com/python-trio/trio/blob/20ee2b1b7376db637435d80e266212a35837ddcc/trio/_socket.py#L471-L478
+    # See also: https://github.com/encode/httpcore/pull/193#issuecomment-703129316
+
+    # Use select.select on Windows or when poll is unavailable, and select.poll
+    # everywhere else. (E.g. when eventlet is in use.
See #327) + if ( + sys.platform == "win32" or getattr(select, "poll", None) is None + ): # pragma: nocover + rready, _, _ = select.select([sock_fd], [], [], 0) + return bool(rready) + p = select.poll() + p.register(sock_fd, select.POLLIN) + return bool(p.poll(0)) diff --git a/venv/Lib/site-packages/httpcore/py.typed b/venv/Lib/site-packages/httpcore/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/httpx-0.28.1.dist-info/INSTALLER b/venv/Lib/site-packages/httpx-0.28.1.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/httpx-0.28.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/httpx-0.28.1.dist-info/METADATA b/venv/Lib/site-packages/httpx-0.28.1.dist-info/METADATA new file mode 100644 index 00000000..b0d2b196 --- /dev/null +++ b/venv/Lib/site-packages/httpx-0.28.1.dist-info/METADATA @@ -0,0 +1,203 @@ +Metadata-Version: 2.3 +Name: httpx +Version: 0.28.1 +Summary: The next generation HTTP client. +Project-URL: Changelog, https://github.com/encode/httpx/blob/master/CHANGELOG.md +Project-URL: Documentation, https://www.python-httpx.org +Project-URL: Homepage, https://github.com/encode/httpx +Project-URL: Source, https://github.com/encode/httpx +Author-email: Tom Christie +License: BSD-3-Clause +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Web Environment +Classifier: Framework :: AsyncIO +Classifier: Framework :: Trio +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Internet :: WWW/HTTP +Requires-Python: >=3.8 +Requires-Dist: anyio +Requires-Dist: certifi +Requires-Dist: httpcore==1.* +Requires-Dist: idna +Provides-Extra: brotli +Requires-Dist: brotli; (platform_python_implementation == 'CPython') and extra == 'brotli' +Requires-Dist: brotlicffi; (platform_python_implementation != 'CPython') and extra == 'brotli' +Provides-Extra: cli +Requires-Dist: click==8.*; extra == 'cli' +Requires-Dist: pygments==2.*; extra == 'cli' +Requires-Dist: rich<14,>=10; extra == 'cli' +Provides-Extra: http2 +Requires-Dist: h2<5,>=3; extra == 'http2' +Provides-Extra: socks +Requires-Dist: socksio==1.*; extra == 'socks' +Provides-Extra: zstd +Requires-Dist: zstandard>=0.18.0; extra == 'zstd' +Description-Content-Type: text/markdown + +
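
Back in `httpcore/_utils.py` above, `is_socket_readable()` only asks the kernel whether a read would return immediately, without consuming any data. A small self-contained sketch of the same poll-based check, assuming a POSIX platform where `select.poll` is available (the helper itself falls back to `select.select` on Windows):

```python
# Sketch: the poll-based readability check from httpcore/_utils.py, shown
# in isolation on a local socketpair (POSIX: select.poll is available).
import select
import socket

a, b = socket.socketpair()
poller = select.poll()
poller.register(a.fileno(), select.POLLIN)

print(bool(poller.poll(0)))  # False: nothing buffered on `a` yet.
b.sendall(b"ping")           # Local delivery, so the data is buffered at once.
print(bool(poller.poll(0)))  # True: a read on `a` would return immediately.

a.close()
b.close()
```
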

+[logo: HTTPX]
+
+HTTPX - A next-generation HTTP client for Python.
+
+[badges: Test Suite | Package version]

+
+HTTPX is a fully featured HTTP client library for Python 3. It includes **an integrated command line client**, has support for both **HTTP/1.1 and HTTP/2**, and provides both **sync and async APIs**.
+
+---
+
+Install HTTPX using pip:
+
+```shell
+$ pip install httpx
+```
+
+Now, let's get started:
+
+```pycon
+>>> import httpx
+>>> r = httpx.get('https://www.example.org/')
+>>> r
+<Response [200 OK]>
+>>> r.status_code
+200
+>>> r.headers['content-type']
+'text/html; charset=UTF-8'
+>>> r.text
+'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>...'
+```
+
+Or, using the command-line client.
+
+```shell
+$ pip install 'httpx[cli]'  # The command line client is an optional dependency.
+```
+
+Which now allows us to use HTTPX directly from the command-line...
+

+[screenshot: `httpx --help` output]
+
+Sending a request...
+
+[screenshot: `httpx http://httpbin.org/json` output]
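
The screenshots don't survive in text form, but the CLI mirrors the Python API shown in the quickstart. Likewise, for repeated requests the usual pattern is a pooled client rather than the module-level helpers; a minimal sketch using the public `httpx.Client` API (see "Keep-Alive & Connection Pooling" in the feature list below):

```python
# Sketch: the quickstart request via a pooled client, so repeated calls
# reuse connections ("Keep-Alive & Connection Pooling" below).
import httpx

with httpx.Client() as client:
    r = client.get("https://www.example.org/")
    print(r.status_code, r.headers["content-type"])
```
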

+
+## Features
+
+HTTPX builds on the well-established usability of `requests`, and gives you:
+
+* A broadly [requests-compatible API](https://www.python-httpx.org/compatibility/).
+* An integrated command-line client.
+* HTTP/1.1 [and HTTP/2 support](https://www.python-httpx.org/http2/).
+* Standard synchronous interface, but with [async support if you need it](https://www.python-httpx.org/async/).
+* Ability to make requests directly to [WSGI applications](https://www.python-httpx.org/advanced/transports/#wsgi-transport) or [ASGI applications](https://www.python-httpx.org/advanced/transports/#asgi-transport).
+* Strict timeouts everywhere.
+* Fully type annotated.
+* 100% test coverage.
+
+Plus all the standard features of `requests`...
+
+* International Domains and URLs
+* Keep-Alive & Connection Pooling
+* Sessions with Cookie Persistence
+* Browser-style SSL Verification
+* Basic/Digest Authentication
+* Elegant Key/Value Cookies
+* Automatic Decompression
+* Automatic Content Decoding
+* Unicode Response Bodies
+* Multipart File Uploads
+* HTTP(S) Proxy Support
+* Connection Timeouts
+* Streaming Downloads
+* .netrc Support
+* Chunked Requests
+
+## Installation
+
+Install with pip:
+
+```shell
+$ pip install httpx
+```
+
+Or, to include the optional HTTP/2 support, use:
+
+```shell
+$ pip install httpx[http2]
+```
+
+HTTPX requires Python 3.8+.
+
+## Documentation
+
+Project documentation is available at [https://www.python-httpx.org/](https://www.python-httpx.org/).
+
+For a run-through of all the basics, head over to the [QuickStart](https://www.python-httpx.org/quickstart/).
+
+For more advanced topics, see the [Advanced Usage](https://www.python-httpx.org/advanced/) section, the [async support](https://www.python-httpx.org/async/) section, or the [HTTP/2](https://www.python-httpx.org/http2/) section.
+
+The [Developer Interface](https://www.python-httpx.org/api/) provides a comprehensive API reference.
+
+To find out about tools that integrate with HTTPX, see [Third Party Packages](https://www.python-httpx.org/third_party_packages/).
+
+## Contribute
+
+If you want to contribute to HTTPX, check out the [Contributing Guide](https://www.python-httpx.org/contributing/) to learn how to get started.
+
+## Dependencies
+
+The HTTPX project relies on these excellent libraries:
+
+* `httpcore` - The underlying transport implementation for `httpx`.
+  * `h11` - HTTP/1.1 support.
+* `certifi` - SSL certificates.
+* `idna` - Internationalized domain name support.
+* `sniffio` - Async library autodetection.
+
+As well as these optional installs:
+
+* `h2` - HTTP/2 support. *(Optional, with `httpx[http2]`)*
+* `socksio` - SOCKS proxy support. *(Optional, with `httpx[socks]`)*
+* `rich` - Rich terminal support. *(Optional, with `httpx[cli]`)*
+* `click` - Command line client support. *(Optional, with `httpx[cli]`)*
+* `brotli` or `brotlicffi` - Decoding for "brotli" compressed responses. *(Optional, with `httpx[brotli]`)*
+* `zstandard` - Decoding for "zstd" compressed responses. *(Optional, with `httpx[zstd]`)*
+
+A huge amount of credit is due to `requests` for the API layout that
+much of this work follows, as well as to `urllib3` for plenty of design
+inspiration around the lower-level networking details.
+
+---
+
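
The feature list above also promises async support with the same interface; a minimal sketch of the async counterpart of the quickstart, using the public `httpx.AsyncClient` API:

```python
# Sketch: the async counterpart of the quickstart request.
import asyncio
import httpx

async def main() -> None:
    async with httpx.AsyncClient() as client:
        r = await client.get("https://www.example.org/")
        print(r.status_code)

asyncio.run(main())
```
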

+HTTPX is BSD licensed code.
+Designed & crafted with care.
+
+— 🦋 —

+ +## Release Information + +### Fixed + +* Reintroduced supposedly-private `URLTypes` shortcut. (#2673) + + +--- + +[Full changelog](https://github.com/encode/httpx/blob/master/CHANGELOG.md) diff --git a/venv/Lib/site-packages/httpx-0.28.1.dist-info/RECORD b/venv/Lib/site-packages/httpx-0.28.1.dist-info/RECORD new file mode 100644 index 00000000..f162442e --- /dev/null +++ b/venv/Lib/site-packages/httpx-0.28.1.dist-info/RECORD @@ -0,0 +1,54 @@ +../../Scripts/httpx.exe,sha256=EMnJmv40xEO6Vt9_LFublGHSg3adF4QY_sILz_HSVw4,108392 +httpx-0.28.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +httpx-0.28.1.dist-info/METADATA,sha256=_rubD48-gNV8gZnDBPNcQzboWB0dGNeYPJJ2a4J5OyU,7052 +httpx-0.28.1.dist-info/RECORD,, +httpx-0.28.1.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87 +httpx-0.28.1.dist-info/entry_points.txt,sha256=2lVkdQmxLA1pNMgSN2eV89o90HCZezhmNwsy6ryKDSA,37 +httpx-0.28.1.dist-info/licenses/LICENSE.md,sha256=TsWdVE8StfU5o6cW_TIaxYzNgDC0ZSIfLIgCAM3yjY0,1508 +httpx/__init__.py,sha256=CsaZe6yZj0rHg6322AWKWHGTMVr9txgEfD5P3_Rrz60,2171 +httpx/__pycache__/__init__.cpython-312.pyc,, +httpx/__pycache__/__version__.cpython-312.pyc,, +httpx/__pycache__/_api.cpython-312.pyc,, +httpx/__pycache__/_auth.cpython-312.pyc,, +httpx/__pycache__/_client.cpython-312.pyc,, +httpx/__pycache__/_config.cpython-312.pyc,, +httpx/__pycache__/_content.cpython-312.pyc,, +httpx/__pycache__/_decoders.cpython-312.pyc,, +httpx/__pycache__/_exceptions.cpython-312.pyc,, +httpx/__pycache__/_main.cpython-312.pyc,, +httpx/__pycache__/_models.cpython-312.pyc,, +httpx/__pycache__/_multipart.cpython-312.pyc,, +httpx/__pycache__/_status_codes.cpython-312.pyc,, +httpx/__pycache__/_types.cpython-312.pyc,, +httpx/__pycache__/_urlparse.cpython-312.pyc,, +httpx/__pycache__/_urls.cpython-312.pyc,, +httpx/__pycache__/_utils.cpython-312.pyc,, +httpx/__version__.py,sha256=LoUyYeOXTieGzuP_64UL0wxdtxjuu_QbOvE7NOg-IqU,108 +httpx/_api.py,sha256=r_Zgs4jIpcPJLqK5dbbSayqo_iVMKFaxZCd-oOHxLEs,11743 +httpx/_auth.py,sha256=Yr3QwaUSK17rGYx-7j-FdicFIzz4Y9FFV-1F4-7RXX4,11891 +httpx/_client.py,sha256=xD-UG67-WMkeltAAOeGGj-cZ2RRTAm19sWRxlFY7_40,65714 +httpx/_config.py,sha256=pPp2U-wicfcKsF-KYRE1LYdt3e6ERGeIoXZ8Gjo3LWc,8547 +httpx/_content.py,sha256=LGGzrJTR3OvN4Mb1GVVNLXkXJH-6oKlwAttO9p5w_yg,8161 +httpx/_decoders.py,sha256=p0dX8I0NEHexs3UGp4SsZutiMhsXrrWl6-GnqVb0iKM,12041 +httpx/_exceptions.py,sha256=bxW7fxzgVMAdNTbwT0Vnq04gJDW1_gI_GFiQPuMyjL0,8527 +httpx/_main.py,sha256=Cg9GMabiTT_swaDfUgIRitSwxLRMSwUDOm7LdSGqlA4,15626 +httpx/_models.py,sha256=4__Guyv1gLxuZChwim8kfQNiIOcJ9acreFOSurvZfms,44700 +httpx/_multipart.py,sha256=KOHEZZl6oohg9mPaKyyu345qq1rJLg35TUG3YAzXB3Y,9843 +httpx/_status_codes.py,sha256=DYn-2ufBgMeXy5s8x3_TB7wjAuAAMewTakPrm5rXEsc,5639 +httpx/_transports/__init__.py,sha256=GbUoBSAOp7z-l-9j5YhMhR3DMIcn6FVLhj072O3Nnno,275 +httpx/_transports/__pycache__/__init__.cpython-312.pyc,, +httpx/_transports/__pycache__/asgi.cpython-312.pyc,, +httpx/_transports/__pycache__/base.cpython-312.pyc,, +httpx/_transports/__pycache__/default.cpython-312.pyc,, +httpx/_transports/__pycache__/mock.cpython-312.pyc,, +httpx/_transports/__pycache__/wsgi.cpython-312.pyc,, +httpx/_transports/asgi.py,sha256=HRfiDYMPt4wQH2gFgHZg4c-i3sblo6bL5GTqcET-xz8,5501 +httpx/_transports/base.py,sha256=kZS_VMbViYfF570pogUCJ1bulz-ybfL51Pqs9yktebU,2523 +httpx/_transports/default.py,sha256=AzeaRUyVwCccTyyNJexDf0n1dFfzzydpdIQgvw7PLnk,13983 +httpx/_transports/mock.py,sha256=PTo0d567RITXxGrki6kN7_67wwAxfwiMDcuXJiZCjEo,1232 
+httpx/_transports/wsgi.py,sha256=NcPX3Xap_EwCFZWO_OaSyQNuInCYx1QMNbO8GAei6jY,4825 +httpx/_types.py,sha256=Jyh41GQq7AOev8IOWKDAg7zCbvHAfufmW5g_PiTtErY,2965 +httpx/_urlparse.py,sha256=ZAmH47ONfkxrrj-PPYhGeiHjb6AjKCS-ANWIN4OL_KY,18546 +httpx/_urls.py,sha256=dX99VR1DSOHpgo9Aq7PzYO4FKdxqKjwyNp8grf8dHN0,21550 +httpx/_utils.py,sha256=_TVeqAKvxJkKHdz7dFeb4s0LZqQXgeFkXSgfiHBK_1o,8285 +httpx/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/Lib/site-packages/httpx-0.28.1.dist-info/WHEEL b/venv/Lib/site-packages/httpx-0.28.1.dist-info/WHEEL new file mode 100644 index 00000000..21aaa729 --- /dev/null +++ b/venv/Lib/site-packages/httpx-0.28.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.26.3 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/Lib/site-packages/httpx-0.28.1.dist-info/entry_points.txt b/venv/Lib/site-packages/httpx-0.28.1.dist-info/entry_points.txt new file mode 100644 index 00000000..8ae96007 --- /dev/null +++ b/venv/Lib/site-packages/httpx-0.28.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +httpx = httpx:main diff --git a/venv/Lib/site-packages/httpx-0.28.1.dist-info/licenses/LICENSE.md b/venv/Lib/site-packages/httpx-0.28.1.dist-info/licenses/LICENSE.md new file mode 100644 index 00000000..ab79d16a --- /dev/null +++ b/venv/Lib/site-packages/httpx-0.28.1.dist-info/licenses/LICENSE.md @@ -0,0 +1,12 @@ +Copyright © 2019, [Encode OSS Ltd](https://www.encode.io/). +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/venv/Lib/site-packages/httpx/__init__.py b/venv/Lib/site-packages/httpx/__init__.py new file mode 100644 index 00000000..e9addde0 --- /dev/null +++ b/venv/Lib/site-packages/httpx/__init__.py @@ -0,0 +1,105 @@ +from .__version__ import __description__, __title__, __version__ +from ._api import * +from ._auth import * +from ._client import * +from ._config import * +from ._content import * +from ._exceptions import * +from ._models import * +from ._status_codes import * +from ._transports import * +from ._types import * +from ._urls import * + +try: + from ._main import main +except ImportError: # pragma: no cover + + def main() -> None: # type: ignore + import sys + + print( + "The httpx command line client could not run because the required " + "dependencies were not installed.\nMake sure you've installed " + "everything with: pip install 'httpx[cli]'" + ) + sys.exit(1) + + +__all__ = [ + "__description__", + "__title__", + "__version__", + "ASGITransport", + "AsyncBaseTransport", + "AsyncByteStream", + "AsyncClient", + "AsyncHTTPTransport", + "Auth", + "BaseTransport", + "BasicAuth", + "ByteStream", + "Client", + "CloseError", + "codes", + "ConnectError", + "ConnectTimeout", + "CookieConflict", + "Cookies", + "create_ssl_context", + "DecodingError", + "delete", + "DigestAuth", + "get", + "head", + "Headers", + "HTTPError", + "HTTPStatusError", + "HTTPTransport", + "InvalidURL", + "Limits", + "LocalProtocolError", + "main", + "MockTransport", + "NetRCAuth", + "NetworkError", + "options", + "patch", + "PoolTimeout", + "post", + "ProtocolError", + "Proxy", + "ProxyError", + "put", + "QueryParams", + "ReadError", + "ReadTimeout", + "RemoteProtocolError", + "request", + "Request", + "RequestError", + "RequestNotRead", + "Response", + "ResponseNotRead", + "stream", + "StreamClosed", + "StreamConsumed", + "StreamError", + "SyncByteStream", + "Timeout", + "TimeoutException", + "TooManyRedirects", + "TransportError", + "UnsupportedProtocol", + "URL", + "USE_CLIENT_DEFAULT", + "WriteError", + "WriteTimeout", + "WSGITransport", +] + + +__locals = locals() +for __name in __all__: + if not __name.startswith("__"): + setattr(__locals[__name], "__module__", "httpx") # noqa diff --git a/venv/Lib/site-packages/httpx/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..58106120 Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/__version__.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/__version__.cpython-312.pyc new file mode 100644 index 00000000..b3ec6d9f Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/__version__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_api.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_api.cpython-312.pyc new file mode 100644 index 00000000..7b76cbaf Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_api.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_auth.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_auth.cpython-312.pyc new file mode 100644 index 00000000..6261fab5 Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_auth.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_client.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_client.cpython-312.pyc new file mode 100644 index 
00000000..3fdaac53 Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_client.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_config.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_config.cpython-312.pyc new file mode 100644 index 00000000..0da42e12 Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_config.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_content.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_content.cpython-312.pyc new file mode 100644 index 00000000..b81555ed Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_content.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_decoders.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_decoders.cpython-312.pyc new file mode 100644 index 00000000..58b3b67f Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_decoders.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_exceptions.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_exceptions.cpython-312.pyc new file mode 100644 index 00000000..8e14e295 Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_exceptions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_main.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_main.cpython-312.pyc new file mode 100644 index 00000000..730c7554 Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_main.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_models.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_models.cpython-312.pyc new file mode 100644 index 00000000..7bae7c38 Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_models.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_multipart.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_multipart.cpython-312.pyc new file mode 100644 index 00000000..b4618ce5 Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_multipart.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_status_codes.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_status_codes.cpython-312.pyc new file mode 100644 index 00000000..bed5cd73 Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_status_codes.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_types.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_types.cpython-312.pyc new file mode 100644 index 00000000..d417cf7b Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_types.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_urlparse.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_urlparse.cpython-312.pyc new file mode 100644 index 00000000..2e137054 Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_urlparse.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_urls.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_urls.cpython-312.pyc new file mode 100644 index 00000000..1dac612c Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_urls.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__pycache__/_utils.cpython-312.pyc b/venv/Lib/site-packages/httpx/__pycache__/_utils.cpython-312.pyc new file mode 100644 index 
00000000..add1760f Binary files /dev/null and b/venv/Lib/site-packages/httpx/__pycache__/_utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/__version__.py b/venv/Lib/site-packages/httpx/__version__.py new file mode 100644 index 00000000..801bfacf --- /dev/null +++ b/venv/Lib/site-packages/httpx/__version__.py @@ -0,0 +1,3 @@ +__title__ = "httpx" +__description__ = "A next generation HTTP client, for Python 3." +__version__ = "0.28.1" diff --git a/venv/Lib/site-packages/httpx/_api.py b/venv/Lib/site-packages/httpx/_api.py new file mode 100644 index 00000000..c3cda1ec --- /dev/null +++ b/venv/Lib/site-packages/httpx/_api.py @@ -0,0 +1,438 @@ +from __future__ import annotations + +import typing +from contextlib import contextmanager + +from ._client import Client +from ._config import DEFAULT_TIMEOUT_CONFIG +from ._models import Response +from ._types import ( + AuthTypes, + CookieTypes, + HeaderTypes, + ProxyTypes, + QueryParamTypes, + RequestContent, + RequestData, + RequestFiles, + TimeoutTypes, +) +from ._urls import URL + +if typing.TYPE_CHECKING: + import ssl # pragma: no cover + + +__all__ = [ + "delete", + "get", + "head", + "options", + "patch", + "post", + "put", + "request", + "stream", +] + + +def request( + method: str, + url: URL | str, + *, + params: QueryParamTypes | None = None, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | None = None, + proxy: ProxyTypes | None = None, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + follow_redirects: bool = False, + verify: ssl.SSLContext | str | bool = True, + trust_env: bool = True, +) -> Response: + """ + Sends an HTTP request. + + **Parameters:** + + * **method** - HTTP method for the new `Request` object: `GET`, `OPTIONS`, + `HEAD`, `POST`, `PUT`, `PATCH`, or `DELETE`. + * **url** - URL for the new `Request` object. + * **params** - *(optional)* Query parameters to include in the URL, as a + string, dictionary, or sequence of two-tuples. + * **content** - *(optional)* Binary content to include in the body of the + request, as bytes or a byte iterator. + * **data** - *(optional)* Form data to include in the body of the request, + as a dictionary. + * **files** - *(optional)* A dictionary of upload files to include in the + body of the request. + * **json** - *(optional)* A JSON serializable object to include in the body + of the request. + * **headers** - *(optional)* Dictionary of HTTP headers to include in the + request. + * **cookies** - *(optional)* Dictionary of Cookie items to include in the + request. + * **auth** - *(optional)* An authentication class to use when sending the + request. + * **proxy** - *(optional)* A proxy URL where all the traffic should be routed. + * **timeout** - *(optional)* The timeout configuration to use when sending + the request. + * **follow_redirects** - *(optional)* Enables or disables HTTP redirects. + * **verify** - *(optional)* Either `True` to use an SSL context with the + default CA bundle, `False` to disable verification, or an instance of + `ssl.SSLContext` to use a custom context. + * **trust_env** - *(optional)* Enables or disables usage of environment + variables for configuration. 
+
+    **Returns:** `Response`
+
+    Usage:
+
+    ```
+    >>> import httpx
+    >>> response = httpx.request('GET', 'https://httpbin.org/get')
+    >>> response
+    <Response [200 OK]>
+    ```
+    """
+    with Client(
+        cookies=cookies,
+        proxy=proxy,
+        verify=verify,
+        timeout=timeout,
+        trust_env=trust_env,
+    ) as client:
+        return client.request(
+            method=method,
+            url=url,
+            content=content,
+            data=data,
+            files=files,
+            json=json,
+            params=params,
+            headers=headers,
+            auth=auth,
+            follow_redirects=follow_redirects,
+        )
+
+
+@contextmanager
+def stream(
+    method: str,
+    url: URL | str,
+    *,
+    params: QueryParamTypes | None = None,
+    content: RequestContent | None = None,
+    data: RequestData | None = None,
+    files: RequestFiles | None = None,
+    json: typing.Any | None = None,
+    headers: HeaderTypes | None = None,
+    cookies: CookieTypes | None = None,
+    auth: AuthTypes | None = None,
+    proxy: ProxyTypes | None = None,
+    timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+    follow_redirects: bool = False,
+    verify: ssl.SSLContext | str | bool = True,
+    trust_env: bool = True,
+) -> typing.Iterator[Response]:
+    """
+    Alternative to `httpx.request()` that streams the response body
+    instead of loading it into memory at once.
+
+    **Parameters**: See `httpx.request`.
+
+    See also: [Streaming Responses][0]
+
+    [0]: /quickstart#streaming-responses
+    """
+    with Client(
+        cookies=cookies,
+        proxy=proxy,
+        verify=verify,
+        timeout=timeout,
+        trust_env=trust_env,
+    ) as client:
+        with client.stream(
+            method=method,
+            url=url,
+            content=content,
+            data=data,
+            files=files,
+            json=json,
+            params=params,
+            headers=headers,
+            auth=auth,
+            follow_redirects=follow_redirects,
+        ) as response:
+            yield response
+
+
+def get(
+    url: URL | str,
+    *,
+    params: QueryParamTypes | None = None,
+    headers: HeaderTypes | None = None,
+    cookies: CookieTypes | None = None,
+    auth: AuthTypes | None = None,
+    proxy: ProxyTypes | None = None,
+    follow_redirects: bool = False,
+    verify: ssl.SSLContext | str | bool = True,
+    timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+    trust_env: bool = True,
+) -> Response:
+    """
+    Sends a `GET` request.
+
+    **Parameters**: See `httpx.request`.
+
+    Note that the `data`, `files`, `json` and `content` parameters are not available
+    on this function, as `GET` requests should not include a request body.
+    """
+    return request(
+        "GET",
+        url,
+        params=params,
+        headers=headers,
+        cookies=cookies,
+        auth=auth,
+        proxy=proxy,
+        follow_redirects=follow_redirects,
+        verify=verify,
+        timeout=timeout,
+        trust_env=trust_env,
+    )
+
+
+def options(
+    url: URL | str,
+    *,
+    params: QueryParamTypes | None = None,
+    headers: HeaderTypes | None = None,
+    cookies: CookieTypes | None = None,
+    auth: AuthTypes | None = None,
+    proxy: ProxyTypes | None = None,
+    follow_redirects: bool = False,
+    verify: ssl.SSLContext | str | bool = True,
+    timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
+    trust_env: bool = True,
+) -> Response:
+    """
+    Sends an `OPTIONS` request.
+
+    **Parameters**: See `httpx.request`.
+
+    Note that the `data`, `files`, `json` and `content` parameters are not available
+    on this function, as `OPTIONS` requests should not include a request body.
+ """ + return request( + "OPTIONS", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxy=proxy, + follow_redirects=follow_redirects, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def head( + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | None = None, + proxy: ProxyTypes | None = None, + follow_redirects: bool = False, + verify: ssl.SSLContext | str | bool = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends a `HEAD` request. + + **Parameters**: See `httpx.request`. + + Note that the `data`, `files`, `json` and `content` parameters are not available + on this function, as `HEAD` requests should not include a request body. + """ + return request( + "HEAD", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxy=proxy, + follow_redirects=follow_redirects, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def post( + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | None = None, + proxy: ProxyTypes | None = None, + follow_redirects: bool = False, + verify: ssl.SSLContext | str | bool = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends a `POST` request. + + **Parameters**: See `httpx.request`. + """ + return request( + "POST", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxy=proxy, + follow_redirects=follow_redirects, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def put( + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | None = None, + proxy: ProxyTypes | None = None, + follow_redirects: bool = False, + verify: ssl.SSLContext | str | bool = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends a `PUT` request. + + **Parameters**: See `httpx.request`. + """ + return request( + "PUT", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxy=proxy, + follow_redirects=follow_redirects, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def patch( + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | None = None, + proxy: ProxyTypes | None = None, + follow_redirects: bool = False, + verify: ssl.SSLContext | str | bool = True, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + trust_env: bool = True, +) -> Response: + """ + Sends a `PATCH` request. + + **Parameters**: See `httpx.request`. 
+ """ + return request( + "PATCH", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxy=proxy, + follow_redirects=follow_redirects, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) + + +def delete( + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | None = None, + proxy: ProxyTypes | None = None, + follow_redirects: bool = False, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + verify: ssl.SSLContext | str | bool = True, + trust_env: bool = True, +) -> Response: + """ + Sends a `DELETE` request. + + **Parameters**: See `httpx.request`. + + Note that the `data`, `files`, `json` and `content` parameters are not available + on this function, as `DELETE` requests should not include a request body. + """ + return request( + "DELETE", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + proxy=proxy, + follow_redirects=follow_redirects, + verify=verify, + timeout=timeout, + trust_env=trust_env, + ) diff --git a/venv/Lib/site-packages/httpx/_auth.py b/venv/Lib/site-packages/httpx/_auth.py new file mode 100644 index 00000000..b03971ab --- /dev/null +++ b/venv/Lib/site-packages/httpx/_auth.py @@ -0,0 +1,348 @@ +from __future__ import annotations + +import hashlib +import os +import re +import time +import typing +from base64 import b64encode +from urllib.request import parse_http_list + +from ._exceptions import ProtocolError +from ._models import Cookies, Request, Response +from ._utils import to_bytes, to_str, unquote + +if typing.TYPE_CHECKING: # pragma: no cover + from hashlib import _Hash + + +__all__ = ["Auth", "BasicAuth", "DigestAuth", "NetRCAuth"] + + +class Auth: + """ + Base class for all authentication schemes. + + To implement a custom authentication scheme, subclass `Auth` and override + the `.auth_flow()` method. + + If the authentication scheme does I/O such as disk access or network calls, or uses + synchronization primitives such as locks, you should override `.sync_auth_flow()` + and/or `.async_auth_flow()` instead of `.auth_flow()` to provide specialized + implementations that will be used by `Client` and `AsyncClient` respectively. + """ + + requires_request_body = False + requires_response_body = False + + def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: + """ + Execute the authentication flow. + + To dispatch a request, `yield` it: + + ``` + yield request + ``` + + The client will `.send()` the response back into the flow generator. You can + access it like so: + + ``` + response = yield request + ``` + + A `return` (or reaching the end of the generator) will result in the + client returning the last response obtained from the server. + + You can dispatch as many requests as is necessary. + """ + yield request + + def sync_auth_flow( + self, request: Request + ) -> typing.Generator[Request, Response, None]: + """ + Execute the authentication flow synchronously. + + By default, this defers to `.auth_flow()`. You should override this method + when the authentication scheme does I/O and/or uses concurrency primitives. 
+ """ + if self.requires_request_body: + request.read() + + flow = self.auth_flow(request) + request = next(flow) + + while True: + response = yield request + if self.requires_response_body: + response.read() + + try: + request = flow.send(response) + except StopIteration: + break + + async def async_auth_flow( + self, request: Request + ) -> typing.AsyncGenerator[Request, Response]: + """ + Execute the authentication flow asynchronously. + + By default, this defers to `.auth_flow()`. You should override this method + when the authentication scheme does I/O and/or uses concurrency primitives. + """ + if self.requires_request_body: + await request.aread() + + flow = self.auth_flow(request) + request = next(flow) + + while True: + response = yield request + if self.requires_response_body: + await response.aread() + + try: + request = flow.send(response) + except StopIteration: + break + + +class FunctionAuth(Auth): + """ + Allows the 'auth' argument to be passed as a simple callable function, + that takes the request, and returns a new, modified request. + """ + + def __init__(self, func: typing.Callable[[Request], Request]) -> None: + self._func = func + + def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: + yield self._func(request) + + +class BasicAuth(Auth): + """ + Allows the 'auth' argument to be passed as a (username, password) pair, + and uses HTTP Basic authentication. + """ + + def __init__(self, username: str | bytes, password: str | bytes) -> None: + self._auth_header = self._build_auth_header(username, password) + + def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: + request.headers["Authorization"] = self._auth_header + yield request + + def _build_auth_header(self, username: str | bytes, password: str | bytes) -> str: + userpass = b":".join((to_bytes(username), to_bytes(password))) + token = b64encode(userpass).decode() + return f"Basic {token}" + + +class NetRCAuth(Auth): + """ + Use a 'netrc' file to lookup basic auth credentials based on the url host. + """ + + def __init__(self, file: str | None = None) -> None: + # Lazily import 'netrc'. + # There's no need for us to load this module unless 'NetRCAuth' is being used. + import netrc + + self._netrc_info = netrc.netrc(file) + + def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: + auth_info = self._netrc_info.authenticators(request.url.host) + if auth_info is None or not auth_info[2]: + # The netrc file did not have authentication credentials for this host. + yield request + else: + # Build a basic auth header with credentials from the netrc file. 
+ request.headers["Authorization"] = self._build_auth_header( + username=auth_info[0], password=auth_info[2] + ) + yield request + + def _build_auth_header(self, username: str | bytes, password: str | bytes) -> str: + userpass = b":".join((to_bytes(username), to_bytes(password))) + token = b64encode(userpass).decode() + return f"Basic {token}" + + +class DigestAuth(Auth): + _ALGORITHM_TO_HASH_FUNCTION: dict[str, typing.Callable[[bytes], _Hash]] = { + "MD5": hashlib.md5, + "MD5-SESS": hashlib.md5, + "SHA": hashlib.sha1, + "SHA-SESS": hashlib.sha1, + "SHA-256": hashlib.sha256, + "SHA-256-SESS": hashlib.sha256, + "SHA-512": hashlib.sha512, + "SHA-512-SESS": hashlib.sha512, + } + + def __init__(self, username: str | bytes, password: str | bytes) -> None: + self._username = to_bytes(username) + self._password = to_bytes(password) + self._last_challenge: _DigestAuthChallenge | None = None + self._nonce_count = 1 + + def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: + if self._last_challenge: + request.headers["Authorization"] = self._build_auth_header( + request, self._last_challenge + ) + + response = yield request + + if response.status_code != 401 or "www-authenticate" not in response.headers: + # If the response is not a 401 then we don't + # need to build an authenticated request. + return + + for auth_header in response.headers.get_list("www-authenticate"): + if auth_header.lower().startswith("digest "): + break + else: + # If the response does not include a 'WWW-Authenticate: Digest ...' + # header, then we don't need to build an authenticated request. + return + + self._last_challenge = self._parse_challenge(request, response, auth_header) + self._nonce_count = 1 + + request.headers["Authorization"] = self._build_auth_header( + request, self._last_challenge + ) + if response.cookies: + Cookies(response.cookies).set_cookie_header(request=request) + yield request + + def _parse_challenge( + self, request: Request, response: Response, auth_header: str + ) -> _DigestAuthChallenge: + """ + Returns a challenge from a Digest WWW-Authenticate header. + These take the form of: + `Digest realm="realm@host.com",qop="auth,auth-int",nonce="abc",opaque="xyz"` + """ + scheme, _, fields = auth_header.partition(" ") + + # This method should only ever have been called with a Digest auth header. 
+ assert scheme.lower() == "digest" + + header_dict: dict[str, str] = {} + for field in parse_http_list(fields): + key, value = field.strip().split("=", 1) + header_dict[key] = unquote(value) + + try: + realm = header_dict["realm"].encode() + nonce = header_dict["nonce"].encode() + algorithm = header_dict.get("algorithm", "MD5") + opaque = header_dict["opaque"].encode() if "opaque" in header_dict else None + qop = header_dict["qop"].encode() if "qop" in header_dict else None + return _DigestAuthChallenge( + realm=realm, nonce=nonce, algorithm=algorithm, opaque=opaque, qop=qop + ) + except KeyError as exc: + message = "Malformed Digest WWW-Authenticate header" + raise ProtocolError(message, request=request) from exc + + def _build_auth_header( + self, request: Request, challenge: _DigestAuthChallenge + ) -> str: + hash_func = self._ALGORITHM_TO_HASH_FUNCTION[challenge.algorithm.upper()] + + def digest(data: bytes) -> bytes: + return hash_func(data).hexdigest().encode() + + A1 = b":".join((self._username, challenge.realm, self._password)) + + path = request.url.raw_path + A2 = b":".join((request.method.encode(), path)) + # TODO: implement auth-int + HA2 = digest(A2) + + nc_value = b"%08x" % self._nonce_count + cnonce = self._get_client_nonce(self._nonce_count, challenge.nonce) + self._nonce_count += 1 + + HA1 = digest(A1) + if challenge.algorithm.lower().endswith("-sess"): + HA1 = digest(b":".join((HA1, challenge.nonce, cnonce))) + + qop = self._resolve_qop(challenge.qop, request=request) + if qop is None: + # Following RFC 2069 + digest_data = [HA1, challenge.nonce, HA2] + else: + # Following RFC 2617/7616 + digest_data = [HA1, challenge.nonce, nc_value, cnonce, qop, HA2] + + format_args = { + "username": self._username, + "realm": challenge.realm, + "nonce": challenge.nonce, + "uri": path, + "response": digest(b":".join(digest_data)), + "algorithm": challenge.algorithm.encode(), + } + if challenge.opaque: + format_args["opaque"] = challenge.opaque + if qop: + format_args["qop"] = b"auth" + format_args["nc"] = nc_value + format_args["cnonce"] = cnonce + + return "Digest " + self._get_header_value(format_args) + + def _get_client_nonce(self, nonce_count: int, nonce: bytes) -> bytes: + s = str(nonce_count).encode() + s += nonce + s += time.ctime().encode() + s += os.urandom(8) + + return hashlib.sha1(s).hexdigest()[:16].encode() + + def _get_header_value(self, header_fields: dict[str, bytes]) -> str: + NON_QUOTED_FIELDS = ("algorithm", "qop", "nc") + QUOTED_TEMPLATE = '{}="{}"' + NON_QUOTED_TEMPLATE = "{}={}" + + header_value = "" + for i, (field, value) in enumerate(header_fields.items()): + if i > 0: + header_value += ", " + template = ( + QUOTED_TEMPLATE + if field not in NON_QUOTED_FIELDS + else NON_QUOTED_TEMPLATE + ) + header_value += template.format(field, to_str(value)) + + return header_value + + def _resolve_qop(self, qop: bytes | None, request: Request) -> bytes | None: + if qop is None: + return None + qops = re.split(b", ?", qop) + if b"auth" in qops: + return b"auth" + + if qops == [b"auth-int"]: + raise NotImplementedError("Digest auth-int support is not yet implemented") + + message = f'Unexpected qop value "{qop!r}" in digest auth' + raise ProtocolError(message, request=request) + + +class _DigestAuthChallenge(typing.NamedTuple): + realm: bytes + nonce: bytes + algorithm: str + opaque: bytes | None + qop: bytes | None diff --git a/venv/Lib/site-packages/httpx/_client.py b/venv/Lib/site-packages/httpx/_client.py new file mode 100644 index 00000000..2249231f --- /dev/null +++ 
b/venv/Lib/site-packages/httpx/_client.py @@ -0,0 +1,2019 @@ +from __future__ import annotations + +import datetime +import enum +import logging +import time +import typing +import warnings +from contextlib import asynccontextmanager, contextmanager +from types import TracebackType + +from .__version__ import __version__ +from ._auth import Auth, BasicAuth, FunctionAuth +from ._config import ( + DEFAULT_LIMITS, + DEFAULT_MAX_REDIRECTS, + DEFAULT_TIMEOUT_CONFIG, + Limits, + Proxy, + Timeout, +) +from ._decoders import SUPPORTED_DECODERS +from ._exceptions import ( + InvalidURL, + RemoteProtocolError, + TooManyRedirects, + request_context, +) +from ._models import Cookies, Headers, Request, Response +from ._status_codes import codes +from ._transports.base import AsyncBaseTransport, BaseTransport +from ._transports.default import AsyncHTTPTransport, HTTPTransport +from ._types import ( + AsyncByteStream, + AuthTypes, + CertTypes, + CookieTypes, + HeaderTypes, + ProxyTypes, + QueryParamTypes, + RequestContent, + RequestData, + RequestExtensions, + RequestFiles, + SyncByteStream, + TimeoutTypes, +) +from ._urls import URL, QueryParams +from ._utils import URLPattern, get_environment_proxies + +if typing.TYPE_CHECKING: + import ssl # pragma: no cover + +__all__ = ["USE_CLIENT_DEFAULT", "AsyncClient", "Client"] + +# The type annotation for @classmethod and context managers here follows PEP 484 +# https://www.python.org/dev/peps/pep-0484/#annotating-instance-and-class-methods +T = typing.TypeVar("T", bound="Client") +U = typing.TypeVar("U", bound="AsyncClient") + + +def _is_https_redirect(url: URL, location: URL) -> bool: + """ + Return 'True' if 'location' is a HTTPS upgrade of 'url' + """ + if url.host != location.host: + return False + + return ( + url.scheme == "http" + and _port_or_default(url) == 80 + and location.scheme == "https" + and _port_or_default(location) == 443 + ) + + +def _port_or_default(url: URL) -> int | None: + if url.port is not None: + return url.port + return {"http": 80, "https": 443}.get(url.scheme) + + +def _same_origin(url: URL, other: URL) -> bool: + """ + Return 'True' if the given URLs share the same origin. + """ + return ( + url.scheme == other.scheme + and url.host == other.host + and _port_or_default(url) == _port_or_default(other) + ) + + +class UseClientDefault: + """ + For some parameters such as `auth=...` and `timeout=...` we need to be able + to indicate the default "unset" state, in a way that is distinctly different + to using `None`. + + The default "unset" state indicates that whatever default is set on the + client should be used. This is different to setting `None`, which + explicitly disables the parameter, possibly overriding a client default. + + For example we use `timeout=USE_CLIENT_DEFAULT` in the `request()` signature. + Omitting the `timeout` parameter will send a request using whatever default + timeout has been configured on the client. Including `timeout=None` will + ensure no timeout is used. + + Note that user code shouldn't need to use the `USE_CLIENT_DEFAULT` constant, + but it is used internally when a parameter is not included. 
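+
+    For example (a sketch of the distinction):
+
+    ```python
+    client = httpx.Client(timeout=10.0)
+    client.get(url)                # uses the client default of 10 seconds
+    client.get(url, timeout=None)  # explicitly disables any timeout
+    client.get(url, timeout=5.0)   # overrides the client default
+    ```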
+ """ + + +USE_CLIENT_DEFAULT = UseClientDefault() + + +logger = logging.getLogger("httpx") + +USER_AGENT = f"python-httpx/{__version__}" +ACCEPT_ENCODING = ", ".join( + [key for key in SUPPORTED_DECODERS.keys() if key != "identity"] +) + + +class ClientState(enum.Enum): + # UNOPENED: + # The client has been instantiated, but has not been used to send a request, + # or been opened by entering the context of a `with` block. + UNOPENED = 1 + # OPENED: + # The client has either sent a request, or is within a `with` block. + OPENED = 2 + # CLOSED: + # The client has either exited the `with` block, or `close()` has + # been called explicitly. + CLOSED = 3 + + +class BoundSyncStream(SyncByteStream): + """ + A byte stream that is bound to a given response instance, and that + ensures the `response.elapsed` is set once the response is closed. + """ + + def __init__( + self, stream: SyncByteStream, response: Response, start: float + ) -> None: + self._stream = stream + self._response = response + self._start = start + + def __iter__(self) -> typing.Iterator[bytes]: + for chunk in self._stream: + yield chunk + + def close(self) -> None: + elapsed = time.perf_counter() - self._start + self._response.elapsed = datetime.timedelta(seconds=elapsed) + self._stream.close() + + +class BoundAsyncStream(AsyncByteStream): + """ + An async byte stream that is bound to a given response instance, and that + ensures the `response.elapsed` is set once the response is closed. + """ + + def __init__( + self, stream: AsyncByteStream, response: Response, start: float + ) -> None: + self._stream = stream + self._response = response + self._start = start + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + async for chunk in self._stream: + yield chunk + + async def aclose(self) -> None: + elapsed = time.perf_counter() - self._start + self._response.elapsed = datetime.timedelta(seconds=elapsed) + await self._stream.aclose() + + +EventHook = typing.Callable[..., typing.Any] + + +class BaseClient: + def __init__( + self, + *, + auth: AuthTypes | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + follow_redirects: bool = False, + max_redirects: int = DEFAULT_MAX_REDIRECTS, + event_hooks: None | (typing.Mapping[str, list[EventHook]]) = None, + base_url: URL | str = "", + trust_env: bool = True, + default_encoding: str | typing.Callable[[bytes], str] = "utf-8", + ) -> None: + event_hooks = {} if event_hooks is None else event_hooks + + self._base_url = self._enforce_trailing_slash(URL(base_url)) + + self._auth = self._build_auth(auth) + self._params = QueryParams(params) + self.headers = Headers(headers) + self._cookies = Cookies(cookies) + self._timeout = Timeout(timeout) + self.follow_redirects = follow_redirects + self.max_redirects = max_redirects + self._event_hooks = { + "request": list(event_hooks.get("request", [])), + "response": list(event_hooks.get("response", [])), + } + self._trust_env = trust_env + self._default_encoding = default_encoding + self._state = ClientState.UNOPENED + + @property + def is_closed(self) -> bool: + """ + Check if the client being closed + """ + return self._state == ClientState.CLOSED + + @property + def trust_env(self) -> bool: + return self._trust_env + + def _enforce_trailing_slash(self, url: URL) -> URL: + if url.raw_path.endswith(b"/"): + return url + return url.copy_with(raw_path=url.raw_path + b"/") + + def _get_proxy_map( + self, proxy: 
ProxyTypes | None, allow_env_proxies: bool + ) -> dict[str, Proxy | None]: + if proxy is None: + if allow_env_proxies: + return { + key: None if url is None else Proxy(url=url) + for key, url in get_environment_proxies().items() + } + return {} + else: + proxy = Proxy(url=proxy) if isinstance(proxy, (str, URL)) else proxy + return {"all://": proxy} + + @property + def timeout(self) -> Timeout: + return self._timeout + + @timeout.setter + def timeout(self, timeout: TimeoutTypes) -> None: + self._timeout = Timeout(timeout) + + @property + def event_hooks(self) -> dict[str, list[EventHook]]: + return self._event_hooks + + @event_hooks.setter + def event_hooks(self, event_hooks: dict[str, list[EventHook]]) -> None: + self._event_hooks = { + "request": list(event_hooks.get("request", [])), + "response": list(event_hooks.get("response", [])), + } + + @property + def auth(self) -> Auth | None: + """ + Authentication class used when none is passed at the request-level. + + See also [Authentication][0]. + + [0]: /quickstart/#authentication + """ + return self._auth + + @auth.setter + def auth(self, auth: AuthTypes) -> None: + self._auth = self._build_auth(auth) + + @property + def base_url(self) -> URL: + """ + Base URL to use when sending requests with relative URLs. + """ + return self._base_url + + @base_url.setter + def base_url(self, url: URL | str) -> None: + self._base_url = self._enforce_trailing_slash(URL(url)) + + @property + def headers(self) -> Headers: + """ + HTTP headers to include when sending requests. + """ + return self._headers + + @headers.setter + def headers(self, headers: HeaderTypes) -> None: + client_headers = Headers( + { + b"Accept": b"*/*", + b"Accept-Encoding": ACCEPT_ENCODING.encode("ascii"), + b"Connection": b"keep-alive", + b"User-Agent": USER_AGENT.encode("ascii"), + } + ) + client_headers.update(headers) + self._headers = client_headers + + @property + def cookies(self) -> Cookies: + """ + Cookie values to include when sending requests. + """ + return self._cookies + + @cookies.setter + def cookies(self, cookies: CookieTypes) -> None: + self._cookies = Cookies(cookies) + + @property + def params(self) -> QueryParams: + """ + Query parameters to include in the URL when sending requests. + """ + return self._params + + @params.setter + def params(self, params: QueryParamTypes) -> None: + self._params = QueryParams(params) + + def build_request( + self, + method: str, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Request: + """ + Build and return a request instance. + + * The `params`, `headers` and `cookies` arguments + are merged with any values set on the client. + * The `url` argument is merged with any `base_url` set on the client. 
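+
+        A sketch of the resulting build/send pattern (the URL and header are
+        illustrative only):
+
+        ```python
+        request = client.build_request("GET", "https://example.org/items")
+        request.headers["X-Request-Id"] = "abc123"  # adjust before sending
+        response = client.send(request)
+        ```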
+ + See also: [Request instances][0] + + [0]: /advanced/clients/#request-instances + """ + url = self._merge_url(url) + headers = self._merge_headers(headers) + cookies = self._merge_cookies(cookies) + params = self._merge_queryparams(params) + extensions = {} if extensions is None else extensions + if "timeout" not in extensions: + timeout = ( + self.timeout + if isinstance(timeout, UseClientDefault) + else Timeout(timeout) + ) + extensions = dict(**extensions, timeout=timeout.as_dict()) + return Request( + method, + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + extensions=extensions, + ) + + def _merge_url(self, url: URL | str) -> URL: + """ + Merge a URL argument together with any 'base_url' on the client, + to create the URL used for the outgoing request. + """ + merge_url = URL(url) + if merge_url.is_relative_url: + # To merge URLs we always append to the base URL. To get this + # behaviour correct we always ensure the base URL ends in a '/' + # separator, and strip any leading '/' from the merge URL. + # + # So, eg... + # + # >>> client = Client(base_url="https://www.example.com/subpath") + # >>> client.base_url + # URL('https://www.example.com/subpath/') + # >>> client.build_request("GET", "/path").url + # URL('https://www.example.com/subpath/path') + merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/") + return self.base_url.copy_with(raw_path=merge_raw_path) + return merge_url + + def _merge_cookies(self, cookies: CookieTypes | None = None) -> CookieTypes | None: + """ + Merge a cookies argument together with any cookies on the client, + to create the cookies used for the outgoing request. + """ + if cookies or self.cookies: + merged_cookies = Cookies(self.cookies) + merged_cookies.update(cookies) + return merged_cookies + return cookies + + def _merge_headers(self, headers: HeaderTypes | None = None) -> HeaderTypes | None: + """ + Merge a headers argument together with any headers on the client, + to create the headers used for the outgoing request. + """ + merged_headers = Headers(self.headers) + merged_headers.update(headers) + return merged_headers + + def _merge_queryparams( + self, params: QueryParamTypes | None = None + ) -> QueryParamTypes | None: + """ + Merge a queryparams argument together with any queryparams on the client, + to create the queryparams used for the outgoing request. 
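+
+        For example (a sketch): with `Client(params={"page": "1"})`, a request
+        made with `params={"size": "50"}` is sent with both `page=1` and
+        `size=50`; on a key collision the request-level value is expected to
+        take precedence via `QueryParams.merge()`.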
+ """ + if params or self.params: + merged_queryparams = QueryParams(self.params) + return merged_queryparams.merge(params) + return params + + def _build_auth(self, auth: AuthTypes | None) -> Auth | None: + if auth is None: + return None + elif isinstance(auth, tuple): + return BasicAuth(username=auth[0], password=auth[1]) + elif isinstance(auth, Auth): + return auth + elif callable(auth): + return FunctionAuth(func=auth) + else: + raise TypeError(f'Invalid "auth" argument: {auth!r}') + + def _build_request_auth( + self, + request: Request, + auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT, + ) -> Auth: + auth = ( + self._auth if isinstance(auth, UseClientDefault) else self._build_auth(auth) + ) + + if auth is not None: + return auth + + username, password = request.url.username, request.url.password + if username or password: + return BasicAuth(username=username, password=password) + + return Auth() + + def _build_redirect_request(self, request: Request, response: Response) -> Request: + """ + Given a request and a redirect response, return a new request that + should be used to effect the redirect. + """ + method = self._redirect_method(request, response) + url = self._redirect_url(request, response) + headers = self._redirect_headers(request, url, method) + stream = self._redirect_stream(request, method) + cookies = Cookies(self.cookies) + return Request( + method=method, + url=url, + headers=headers, + cookies=cookies, + stream=stream, + extensions=request.extensions, + ) + + def _redirect_method(self, request: Request, response: Response) -> str: + """ + When being redirected we may want to change the method of the request + based on certain specs or browser behavior. + """ + method = request.method + + # https://tools.ietf.org/html/rfc7231#section-6.4.4 + if response.status_code == codes.SEE_OTHER and method != "HEAD": + method = "GET" + + # Do what the browsers do, despite standards... + # Turn 302s into GETs. + if response.status_code == codes.FOUND and method != "HEAD": + method = "GET" + + # If a POST is responded to with a 301, turn it into a GET. + # This bizarre behaviour is explained in 'requests' issue 1704. + if response.status_code == codes.MOVED_PERMANENTLY and method == "POST": + method = "GET" + + return method + + def _redirect_url(self, request: Request, response: Response) -> URL: + """ + Return the URL for the redirect to follow. + """ + location = response.headers["Location"] + + try: + url = URL(location) + except InvalidURL as exc: + raise RemoteProtocolError( + f"Invalid URL in location header: {exc}.", request=request + ) from None + + # Handle malformed 'Location' headers that are "absolute" form, have no host. + # See: https://github.com/encode/httpx/issues/771 + if url.scheme and not url.host: + url = url.copy_with(host=request.url.host) + + # Facilitate relative 'Location' headers, as allowed by RFC 7231. + # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') + if url.is_relative_url: + url = request.url.join(url) + + # Attach previous fragment if needed (RFC 7231 7.1.2) + if request.url.fragment and not url.fragment: + url = url.copy_with(fragment=request.url.fragment) + + return url + + def _redirect_headers(self, request: Request, url: URL, method: str) -> Headers: + """ + Return the headers that should be used for the redirect request. 
+ """ + headers = Headers(request.headers) + + if not _same_origin(url, request.url): + if not _is_https_redirect(request.url, url): + # Strip Authorization headers when responses are redirected + # away from the origin. (Except for direct HTTP to HTTPS redirects.) + headers.pop("Authorization", None) + + # Update the Host header. + headers["Host"] = url.netloc.decode("ascii") + + if method != request.method and method == "GET": + # If we've switch to a 'GET' request, then strip any headers which + # are only relevant to the request body. + headers.pop("Content-Length", None) + headers.pop("Transfer-Encoding", None) + + # We should use the client cookie store to determine any cookie header, + # rather than whatever was on the original outgoing request. + headers.pop("Cookie", None) + + return headers + + def _redirect_stream( + self, request: Request, method: str + ) -> SyncByteStream | AsyncByteStream | None: + """ + Return the body that should be used for the redirect request. + """ + if method != request.method and method == "GET": + return None + + return request.stream + + def _set_timeout(self, request: Request) -> None: + if "timeout" not in request.extensions: + timeout = ( + self.timeout + if isinstance(self.timeout, UseClientDefault) + else Timeout(self.timeout) + ) + request.extensions = dict(**request.extensions, timeout=timeout.as_dict()) + + +class Client(BaseClient): + """ + An HTTP client, with connection pooling, HTTP/2, redirects, cookie persistence, etc. + + It can be shared between threads. + + Usage: + + ```python + >>> client = httpx.Client() + >>> response = client.get('https://example.org') + ``` + + **Parameters:** + + * **auth** - *(optional)* An authentication class to use when sending + requests. + * **params** - *(optional)* Query parameters to include in request URLs, as + a string, dictionary, or sequence of two-tuples. + * **headers** - *(optional)* Dictionary of HTTP headers to include when + sending requests. + * **cookies** - *(optional)* Dictionary of Cookie items to include when + sending requests. + * **verify** - *(optional)* Either `True` to use an SSL context with the + default CA bundle, `False` to disable verification, or an instance of + `ssl.SSLContext` to use a custom context. + * **http2** - *(optional)* A boolean indicating if HTTP/2 support should be + enabled. Defaults to `False`. + * **proxy** - *(optional)* A proxy URL where all the traffic should be routed. + * **timeout** - *(optional)* The timeout configuration to use when sending + requests. + * **limits** - *(optional)* The limits configuration to use. + * **max_redirects** - *(optional)* The maximum number of redirect responses + that should be followed. + * **base_url** - *(optional)* A URL to use as the base when building + request URLs. + * **transport** - *(optional)* A transport class to use for sending requests + over the network. + * **trust_env** - *(optional)* Enables or disables usage of environment + variables for configuration. + * **default_encoding** - *(optional)* The default encoding to use for decoding + response text, if no charset information is included in a response Content-Type + header. Set to a callable for automatic character set detection. Default: "utf-8". 
+ """ + + def __init__( + self, + *, + auth: AuthTypes | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + verify: ssl.SSLContext | str | bool = True, + cert: CertTypes | None = None, + trust_env: bool = True, + http1: bool = True, + http2: bool = False, + proxy: ProxyTypes | None = None, + mounts: None | (typing.Mapping[str, BaseTransport | None]) = None, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + follow_redirects: bool = False, + limits: Limits = DEFAULT_LIMITS, + max_redirects: int = DEFAULT_MAX_REDIRECTS, + event_hooks: None | (typing.Mapping[str, list[EventHook]]) = None, + base_url: URL | str = "", + transport: BaseTransport | None = None, + default_encoding: str | typing.Callable[[bytes], str] = "utf-8", + ) -> None: + super().__init__( + auth=auth, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + follow_redirects=follow_redirects, + max_redirects=max_redirects, + event_hooks=event_hooks, + base_url=base_url, + trust_env=trust_env, + default_encoding=default_encoding, + ) + + if http2: + try: + import h2 # noqa + except ImportError: # pragma: no cover + raise ImportError( + "Using http2=True, but the 'h2' package is not installed. " + "Make sure to install httpx using `pip install httpx[http2]`." + ) from None + + allow_env_proxies = trust_env and transport is None + proxy_map = self._get_proxy_map(proxy, allow_env_proxies) + + self._transport = self._init_transport( + verify=verify, + cert=cert, + trust_env=trust_env, + http1=http1, + http2=http2, + limits=limits, + transport=transport, + ) + self._mounts: dict[URLPattern, BaseTransport | None] = { + URLPattern(key): None + if proxy is None + else self._init_proxy_transport( + proxy, + verify=verify, + cert=cert, + trust_env=trust_env, + http1=http1, + http2=http2, + limits=limits, + ) + for key, proxy in proxy_map.items() + } + if mounts is not None: + self._mounts.update( + {URLPattern(key): transport for key, transport in mounts.items()} + ) + + self._mounts = dict(sorted(self._mounts.items())) + + def _init_transport( + self, + verify: ssl.SSLContext | str | bool = True, + cert: CertTypes | None = None, + trust_env: bool = True, + http1: bool = True, + http2: bool = False, + limits: Limits = DEFAULT_LIMITS, + transport: BaseTransport | None = None, + ) -> BaseTransport: + if transport is not None: + return transport + + return HTTPTransport( + verify=verify, + cert=cert, + trust_env=trust_env, + http1=http1, + http2=http2, + limits=limits, + ) + + def _init_proxy_transport( + self, + proxy: Proxy, + verify: ssl.SSLContext | str | bool = True, + cert: CertTypes | None = None, + trust_env: bool = True, + http1: bool = True, + http2: bool = False, + limits: Limits = DEFAULT_LIMITS, + ) -> BaseTransport: + return HTTPTransport( + verify=verify, + cert=cert, + trust_env=trust_env, + http1=http1, + http2=http2, + limits=limits, + proxy=proxy, + ) + + def _transport_for_url(self, url: URL) -> BaseTransport: + """ + Returns the transport instance that should be used for a given URL. + This will either be the standard connection pool, or a proxy. 
+ """ + for pattern, transport in self._mounts.items(): + if pattern.matches(url): + return self._transport if transport is None else transport + + return self._transport + + def request( + self, + method: str, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Build and send a request. + + Equivalent to: + + ```python + request = client.build_request(...) + response = client.send(request, ...) + ``` + + See `Client.build_request()`, `Client.send()` and + [Merging of configuration][0] for how the various parameters + are merged with client-level configuration. + + [0]: /advanced/clients/#merging-of-configuration + """ + if cookies is not None: + message = ( + "Setting per-request cookies=<...> is being deprecated, because " + "the expected behaviour on cookie persistence is ambiguous. Set " + "cookies directly on the client instance instead." + ) + warnings.warn(message, DeprecationWarning, stacklevel=2) + + request = self.build_request( + method=method, + url=url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + return self.send(request, auth=auth, follow_redirects=follow_redirects) + + @contextmanager + def stream( + self, + method: str, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> typing.Iterator[Response]: + """ + Alternative to `httpx.request()` that streams the response body + instead of loading it into memory at once. + + **Parameters**: See `httpx.request`. + + See also: [Streaming Responses][0] + + [0]: /quickstart#streaming-responses + """ + request = self.build_request( + method=method, + url=url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + response = self.send( + request=request, + auth=auth, + follow_redirects=follow_redirects, + stream=True, + ) + try: + yield response + finally: + response.close() + + def send( + self, + request: Request, + *, + stream: bool = False, + auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + ) -> Response: + """ + Send a request. + + The request is sent as-is, unmodified. + + Typically you'll want to build one with `Client.build_request()` + so that any client-level configuration is merged into the request, + but passing an explicit `httpx.Request()` is supported as well. 
+ + See also: [Request instances][0] + + [0]: /advanced/clients/#request-instances + """ + if self._state == ClientState.CLOSED: + raise RuntimeError("Cannot send a request, as the client has been closed.") + + self._state = ClientState.OPENED + follow_redirects = ( + self.follow_redirects + if isinstance(follow_redirects, UseClientDefault) + else follow_redirects + ) + + self._set_timeout(request) + + auth = self._build_request_auth(request, auth) + + response = self._send_handling_auth( + request, + auth=auth, + follow_redirects=follow_redirects, + history=[], + ) + try: + if not stream: + response.read() + + return response + + except BaseException as exc: + response.close() + raise exc + + def _send_handling_auth( + self, + request: Request, + auth: Auth, + follow_redirects: bool, + history: list[Response], + ) -> Response: + auth_flow = auth.sync_auth_flow(request) + try: + request = next(auth_flow) + + while True: + response = self._send_handling_redirects( + request, + follow_redirects=follow_redirects, + history=history, + ) + try: + try: + next_request = auth_flow.send(response) + except StopIteration: + return response + + response.history = list(history) + response.read() + request = next_request + history.append(response) + + except BaseException as exc: + response.close() + raise exc + finally: + auth_flow.close() + + def _send_handling_redirects( + self, + request: Request, + follow_redirects: bool, + history: list[Response], + ) -> Response: + while True: + if len(history) > self.max_redirects: + raise TooManyRedirects( + "Exceeded maximum allowed redirects.", request=request + ) + + for hook in self._event_hooks["request"]: + hook(request) + + response = self._send_single_request(request) + try: + for hook in self._event_hooks["response"]: + hook(response) + response.history = list(history) + + if not response.has_redirect_location: + return response + + request = self._build_redirect_request(request, response) + history = history + [response] + + if follow_redirects: + response.read() + else: + response.next_request = request + return response + + except BaseException as exc: + response.close() + raise exc + + def _send_single_request(self, request: Request) -> Response: + """ + Sends a single request, without handling any redirections. + """ + transport = self._transport_for_url(request.url) + start = time.perf_counter() + + if not isinstance(request.stream, SyncByteStream): + raise RuntimeError( + "Attempted to send an async request with a sync Client instance." + ) + + with request_context(request=request): + response = transport.handle_request(request) + + assert isinstance(response.stream, SyncByteStream) + + response.request = request + response.stream = BoundSyncStream( + response.stream, response=response, start=start + ) + self.cookies.extract_cookies(response) + response.default_encoding = self._default_encoding + + logger.info( + 'HTTP Request: %s %s "%s %d %s"', + request.method, + request.url, + response.http_version, + response.status_code, + response.reason_phrase, + ) + + return response + + def get( + self, + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `GET` request. 
+ + **Parameters**: See `httpx.request`. + """ + return self.request( + "GET", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def options( + self, + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send an `OPTIONS` request. + + **Parameters**: See `httpx.request`. + """ + return self.request( + "OPTIONS", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def head( + self, + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `HEAD` request. + + **Parameters**: See `httpx.request`. + """ + return self.request( + "HEAD", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def post( + self, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `POST` request. + + **Parameters**: See `httpx.request`. + """ + return self.request( + "POST", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def put( + self, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `PUT` request. + + **Parameters**: See `httpx.request`. 
+ """ + return self.request( + "PUT", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def patch( + self, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `PATCH` request. + + **Parameters**: See `httpx.request`. + """ + return self.request( + "PATCH", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def delete( + self, + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `DELETE` request. + + **Parameters**: See `httpx.request`. + """ + return self.request( + "DELETE", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def close(self) -> None: + """ + Close transport and proxies. + """ + if self._state != ClientState.CLOSED: + self._state = ClientState.CLOSED + + self._transport.close() + for transport in self._mounts.values(): + if transport is not None: + transport.close() + + def __enter__(self: T) -> T: + if self._state != ClientState.UNOPENED: + msg = { + ClientState.OPENED: "Cannot open a client instance more than once.", + ClientState.CLOSED: ( + "Cannot reopen a client instance, once it has been closed." + ), + }[self._state] + raise RuntimeError(msg) + + self._state = ClientState.OPENED + + self._transport.__enter__() + for transport in self._mounts.values(): + if transport is not None: + transport.__enter__() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: TracebackType | None = None, + ) -> None: + self._state = ClientState.CLOSED + + self._transport.__exit__(exc_type, exc_value, traceback) + for transport in self._mounts.values(): + if transport is not None: + transport.__exit__(exc_type, exc_value, traceback) + + +class AsyncClient(BaseClient): + """ + An asynchronous HTTP client, with connection pooling, HTTP/2, redirects, + cookie persistence, etc. + + It can be shared between tasks. + + Usage: + + ```python + >>> async with httpx.AsyncClient() as client: + >>> response = await client.get('https://example.org') + ``` + + **Parameters:** + + * **auth** - *(optional)* An authentication class to use when sending + requests. + * **params** - *(optional)* Query parameters to include in request URLs, as + a string, dictionary, or sequence of two-tuples. 
+ * **headers** - *(optional)* Dictionary of HTTP headers to include when + sending requests. + * **cookies** - *(optional)* Dictionary of Cookie items to include when + sending requests. + * **verify** - *(optional)* Either `True` to use an SSL context with the + default CA bundle, `False` to disable verification, or an instance of + `ssl.SSLContext` to use a custom context. + * **http2** - *(optional)* A boolean indicating if HTTP/2 support should be + enabled. Defaults to `False`. + * **proxy** - *(optional)* A proxy URL where all the traffic should be routed. + * **timeout** - *(optional)* The timeout configuration to use when sending + requests. + * **limits** - *(optional)* The limits configuration to use. + * **max_redirects** - *(optional)* The maximum number of redirect responses + that should be followed. + * **base_url** - *(optional)* A URL to use as the base when building + request URLs. + * **transport** - *(optional)* A transport class to use for sending requests + over the network. + * **trust_env** - *(optional)* Enables or disables usage of environment + variables for configuration. + * **default_encoding** - *(optional)* The default encoding to use for decoding + response text, if no charset information is included in a response Content-Type + header. Set to a callable for automatic character set detection. Default: "utf-8". + """ + + def __init__( + self, + *, + auth: AuthTypes | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + verify: ssl.SSLContext | str | bool = True, + cert: CertTypes | None = None, + http1: bool = True, + http2: bool = False, + proxy: ProxyTypes | None = None, + mounts: None | (typing.Mapping[str, AsyncBaseTransport | None]) = None, + timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG, + follow_redirects: bool = False, + limits: Limits = DEFAULT_LIMITS, + max_redirects: int = DEFAULT_MAX_REDIRECTS, + event_hooks: None | (typing.Mapping[str, list[EventHook]]) = None, + base_url: URL | str = "", + transport: AsyncBaseTransport | None = None, + trust_env: bool = True, + default_encoding: str | typing.Callable[[bytes], str] = "utf-8", + ) -> None: + super().__init__( + auth=auth, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + follow_redirects=follow_redirects, + max_redirects=max_redirects, + event_hooks=event_hooks, + base_url=base_url, + trust_env=trust_env, + default_encoding=default_encoding, + ) + + if http2: + try: + import h2 # noqa + except ImportError: # pragma: no cover + raise ImportError( + "Using http2=True, but the 'h2' package is not installed. " + "Make sure to install httpx using `pip install httpx[http2]`." 
+ ) from None + + allow_env_proxies = trust_env and transport is None + proxy_map = self._get_proxy_map(proxy, allow_env_proxies) + + self._transport = self._init_transport( + verify=verify, + cert=cert, + trust_env=trust_env, + http1=http1, + http2=http2, + limits=limits, + transport=transport, + ) + + self._mounts: dict[URLPattern, AsyncBaseTransport | None] = { + URLPattern(key): None + if proxy is None + else self._init_proxy_transport( + proxy, + verify=verify, + cert=cert, + trust_env=trust_env, + http1=http1, + http2=http2, + limits=limits, + ) + for key, proxy in proxy_map.items() + } + if mounts is not None: + self._mounts.update( + {URLPattern(key): transport for key, transport in mounts.items()} + ) + self._mounts = dict(sorted(self._mounts.items())) + + def _init_transport( + self, + verify: ssl.SSLContext | str | bool = True, + cert: CertTypes | None = None, + trust_env: bool = True, + http1: bool = True, + http2: bool = False, + limits: Limits = DEFAULT_LIMITS, + transport: AsyncBaseTransport | None = None, + ) -> AsyncBaseTransport: + if transport is not None: + return transport + + return AsyncHTTPTransport( + verify=verify, + cert=cert, + trust_env=trust_env, + http1=http1, + http2=http2, + limits=limits, + ) + + def _init_proxy_transport( + self, + proxy: Proxy, + verify: ssl.SSLContext | str | bool = True, + cert: CertTypes | None = None, + trust_env: bool = True, + http1: bool = True, + http2: bool = False, + limits: Limits = DEFAULT_LIMITS, + ) -> AsyncBaseTransport: + return AsyncHTTPTransport( + verify=verify, + cert=cert, + trust_env=trust_env, + http1=http1, + http2=http2, + limits=limits, + proxy=proxy, + ) + + def _transport_for_url(self, url: URL) -> AsyncBaseTransport: + """ + Returns the transport instance that should be used for a given URL. + This will either be the standard connection pool, or a proxy. + """ + for pattern, transport in self._mounts.items(): + if pattern.matches(url): + return self._transport if transport is None else transport + + return self._transport + + async def request( + self, + method: str, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Build and send a request. + + Equivalent to: + + ```python + request = client.build_request(...) + response = await client.send(request, ...) + ``` + + See `AsyncClient.build_request()`, `AsyncClient.send()` + and [Merging of configuration][0] for how the various parameters + are merged with client-level configuration. + + [0]: /advanced/clients/#merging-of-configuration + """ + + if cookies is not None: # pragma: no cover + message = ( + "Setting per-request cookies=<...> is being deprecated, because " + "the expected behaviour on cookie persistence is ambiguous. Set " + "cookies directly on the client instance instead." 
+ ) + warnings.warn(message, DeprecationWarning, stacklevel=2) + + request = self.build_request( + method=method, + url=url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + return await self.send(request, auth=auth, follow_redirects=follow_redirects) + + @asynccontextmanager + async def stream( + self, + method: str, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> typing.AsyncIterator[Response]: + """ + Alternative to `httpx.request()` that streams the response body + instead of loading it into memory at once. + + **Parameters**: See `httpx.request`. + + See also: [Streaming Responses][0] + + [0]: /quickstart#streaming-responses + """ + request = self.build_request( + method=method, + url=url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + response = await self.send( + request=request, + auth=auth, + follow_redirects=follow_redirects, + stream=True, + ) + try: + yield response + finally: + await response.aclose() + + async def send( + self, + request: Request, + *, + stream: bool = False, + auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + ) -> Response: + """ + Send a request. + + The request is sent as-is, unmodified. + + Typically you'll want to build one with `AsyncClient.build_request()` + so that any client-level configuration is merged into the request, + but passing an explicit `httpx.Request()` is supported as well. 
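+
+        For example (a sketch; the URL is illustrative):
+
+        ```python
+        async with httpx.AsyncClient() as client:
+            request = client.build_request("GET", "https://example.org")
+            response = await client.send(request)
+        ```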
+ + See also: [Request instances][0] + + [0]: /advanced/clients/#request-instances + """ + if self._state == ClientState.CLOSED: + raise RuntimeError("Cannot send a request, as the client has been closed.") + + self._state = ClientState.OPENED + follow_redirects = ( + self.follow_redirects + if isinstance(follow_redirects, UseClientDefault) + else follow_redirects + ) + + self._set_timeout(request) + + auth = self._build_request_auth(request, auth) + + response = await self._send_handling_auth( + request, + auth=auth, + follow_redirects=follow_redirects, + history=[], + ) + try: + if not stream: + await response.aread() + + return response + + except BaseException as exc: + await response.aclose() + raise exc + + async def _send_handling_auth( + self, + request: Request, + auth: Auth, + follow_redirects: bool, + history: list[Response], + ) -> Response: + auth_flow = auth.async_auth_flow(request) + try: + request = await auth_flow.__anext__() + + while True: + response = await self._send_handling_redirects( + request, + follow_redirects=follow_redirects, + history=history, + ) + try: + try: + next_request = await auth_flow.asend(response) + except StopAsyncIteration: + return response + + response.history = list(history) + await response.aread() + request = next_request + history.append(response) + + except BaseException as exc: + await response.aclose() + raise exc + finally: + await auth_flow.aclose() + + async def _send_handling_redirects( + self, + request: Request, + follow_redirects: bool, + history: list[Response], + ) -> Response: + while True: + if len(history) > self.max_redirects: + raise TooManyRedirects( + "Exceeded maximum allowed redirects.", request=request + ) + + for hook in self._event_hooks["request"]: + await hook(request) + + response = await self._send_single_request(request) + try: + for hook in self._event_hooks["response"]: + await hook(response) + + response.history = list(history) + + if not response.has_redirect_location: + return response + + request = self._build_redirect_request(request, response) + history = history + [response] + + if follow_redirects: + await response.aread() + else: + response.next_request = request + return response + + except BaseException as exc: + await response.aclose() + raise exc + + async def _send_single_request(self, request: Request) -> Response: + """ + Sends a single request, without handling any redirections. + """ + transport = self._transport_for_url(request.url) + start = time.perf_counter() + + if not isinstance(request.stream, AsyncByteStream): + raise RuntimeError( + "Attempted to send an sync request with an AsyncClient instance." 
+ ) + + with request_context(request=request): + response = await transport.handle_async_request(request) + + assert isinstance(response.stream, AsyncByteStream) + response.request = request + response.stream = BoundAsyncStream( + response.stream, response=response, start=start + ) + self.cookies.extract_cookies(response) + response.default_encoding = self._default_encoding + + logger.info( + 'HTTP Request: %s %s "%s %d %s"', + request.method, + request.url, + response.http_version, + response.status_code, + response.reason_phrase, + ) + + return response + + async def get( + self, + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `GET` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "GET", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def options( + self, + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send an `OPTIONS` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "OPTIONS", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def head( + self, + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `HEAD` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "HEAD", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def post( + self, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `POST` request. + + **Parameters**: See `httpx.request`. 
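+
+ For example (a sketch; the URL and payload are illustrative):
+
+ response = await client.post("https://www.example.com/submit", json={"key": "value"})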
+ """ + return await self.request( + "POST", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def put( + self, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `PUT` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "PUT", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def patch( + self, + url: URL | str, + *, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `PATCH` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "PATCH", + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def delete( + self, + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT, + follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT, + timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT, + extensions: RequestExtensions | None = None, + ) -> Response: + """ + Send a `DELETE` request. + + **Parameters**: See `httpx.request`. + """ + return await self.request( + "DELETE", + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + async def aclose(self) -> None: + """ + Close transport and proxies. + """ + if self._state != ClientState.CLOSED: + self._state = ClientState.CLOSED + + await self._transport.aclose() + for proxy in self._mounts.values(): + if proxy is not None: + await proxy.aclose() + + async def __aenter__(self: U) -> U: + if self._state != ClientState.UNOPENED: + msg = { + ClientState.OPENED: "Cannot open a client instance more than once.", + ClientState.CLOSED: ( + "Cannot reopen a client instance, once it has been closed." 
+ ),
+ }[self._state]
+ raise RuntimeError(msg)
+
+ self._state = ClientState.OPENED
+
+ await self._transport.__aenter__()
+ for proxy in self._mounts.values():
+ if proxy is not None:
+ await proxy.__aenter__()
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None = None,
+ exc_value: BaseException | None = None,
+ traceback: TracebackType | None = None,
+ ) -> None:
+ self._state = ClientState.CLOSED
+
+ await self._transport.__aexit__(exc_type, exc_value, traceback)
+ for proxy in self._mounts.values():
+ if proxy is not None:
+ await proxy.__aexit__(exc_type, exc_value, traceback)
diff --git a/venv/Lib/site-packages/httpx/_config.py b/venv/Lib/site-packages/httpx/_config.py
new file mode 100644
index 00000000..467a6c90
--- /dev/null
+++ b/venv/Lib/site-packages/httpx/_config.py
@@ -0,0 +1,248 @@
+from __future__ import annotations
+
+import os
+import typing
+
+from ._models import Headers
+from ._types import CertTypes, HeaderTypes, TimeoutTypes
+from ._urls import URL
+
+if typing.TYPE_CHECKING:
+ import ssl # pragma: no cover
+
+__all__ = ["Limits", "Proxy", "Timeout", "create_ssl_context"]
+
+
+class UnsetType:
+ pass # pragma: no cover
+
+
+UNSET = UnsetType()
+
+
+def create_ssl_context(
+ verify: ssl.SSLContext | str | bool = True,
+ cert: CertTypes | None = None,
+ trust_env: bool = True,
+) -> ssl.SSLContext:
+ import ssl
+ import warnings
+
+ import certifi
+
+ if verify is True:
+ if trust_env and os.environ.get("SSL_CERT_FILE"): # pragma: nocover
+ ctx = ssl.create_default_context(cafile=os.environ["SSL_CERT_FILE"])
+ elif trust_env and os.environ.get("SSL_CERT_DIR"): # pragma: nocover
+ ctx = ssl.create_default_context(capath=os.environ["SSL_CERT_DIR"])
+ else:
+ # Default case...
+ ctx = ssl.create_default_context(cafile=certifi.where())
+ elif verify is False:
+ ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+ ctx.check_hostname = False
+ ctx.verify_mode = ssl.CERT_NONE
+ elif isinstance(verify, str): # pragma: nocover
+ message = (
+ "`verify=<str>` is deprecated. "
+ "Use `verify=ssl.create_default_context(cafile=...)` "
+ "or `verify=ssl.create_default_context(capath=...)` instead."
+ )
+ warnings.warn(message, DeprecationWarning)
+ if os.path.isdir(verify):
+ return ssl.create_default_context(capath=verify)
+ return ssl.create_default_context(cafile=verify)
+ else:
+ ctx = verify
+
+ if cert: # pragma: nocover
+ message = (
+ "`cert=...` is deprecated. Use `verify=<ssl_context>` instead,"
+ "with `.load_cert_chain()` to configure the certificate chain."
+ )
+ warnings.warn(message, DeprecationWarning)
+ if isinstance(cert, str):
+ ctx.load_cert_chain(cert)
+ else:
+ ctx.load_cert_chain(*cert)
+
+ return ctx
+
+
+class Timeout:
+ """
+ Timeout configuration.
+
+ **Usage**:
+
+ Timeout(None) # No timeouts.
+ Timeout(5.0) # 5s timeout on all operations.
+ Timeout(None, connect=5.0) # 5s timeout on connect, no other timeouts.
+ Timeout(5.0, connect=10.0) # 10s timeout on connect. 5s timeout elsewhere.
+ Timeout(5.0, pool=None) # No timeout on acquiring connection from pool.
+ # 5s timeout elsewhere.
+ """
+
+ def __init__(
+ self,
+ timeout: TimeoutTypes | UnsetType = UNSET,
+ *,
+ connect: None | float | UnsetType = UNSET,
+ read: None | float | UnsetType = UNSET,
+ write: None | float | UnsetType = UNSET,
+ pool: None | float | UnsetType = UNSET,
+ ) -> None:
+ if isinstance(timeout, Timeout):
+ # Passed as a single explicit Timeout.
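+ # e.g. Timeout(Timeout(5.0)) copies all four values across; passing extra
+ # connect/read/write/pool keywords alongside it is disallowed (asserted below).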
+ assert connect is UNSET + assert read is UNSET + assert write is UNSET + assert pool is UNSET + self.connect = timeout.connect # type: typing.Optional[float] + self.read = timeout.read # type: typing.Optional[float] + self.write = timeout.write # type: typing.Optional[float] + self.pool = timeout.pool # type: typing.Optional[float] + elif isinstance(timeout, tuple): + # Passed as a tuple. + self.connect = timeout[0] + self.read = timeout[1] + self.write = None if len(timeout) < 3 else timeout[2] + self.pool = None if len(timeout) < 4 else timeout[3] + elif not ( + isinstance(connect, UnsetType) + or isinstance(read, UnsetType) + or isinstance(write, UnsetType) + or isinstance(pool, UnsetType) + ): + self.connect = connect + self.read = read + self.write = write + self.pool = pool + else: + if isinstance(timeout, UnsetType): + raise ValueError( + "httpx.Timeout must either include a default, or set all " + "four parameters explicitly." + ) + self.connect = timeout if isinstance(connect, UnsetType) else connect + self.read = timeout if isinstance(read, UnsetType) else read + self.write = timeout if isinstance(write, UnsetType) else write + self.pool = timeout if isinstance(pool, UnsetType) else pool + + def as_dict(self) -> dict[str, float | None]: + return { + "connect": self.connect, + "read": self.read, + "write": self.write, + "pool": self.pool, + } + + def __eq__(self, other: typing.Any) -> bool: + return ( + isinstance(other, self.__class__) + and self.connect == other.connect + and self.read == other.read + and self.write == other.write + and self.pool == other.pool + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + if len({self.connect, self.read, self.write, self.pool}) == 1: + return f"{class_name}(timeout={self.connect})" + return ( + f"{class_name}(connect={self.connect}, " + f"read={self.read}, write={self.write}, pool={self.pool})" + ) + + +class Limits: + """ + Configuration for limits to various client behaviors. + + **Parameters:** + + * **max_connections** - The maximum number of concurrent connections that may be + established. + * **max_keepalive_connections** - Allow the connection pool to maintain + keep-alive connections below this point. Should be less than or equal + to `max_connections`. + * **keepalive_expiry** - Time limit on idle keep-alive connections in seconds. 
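+
+ **Usage**: (an illustrative sketch; the values are arbitrary)
+
+ limits = httpx.Limits(max_connections=10, max_keepalive_connections=5)
+ client = httpx.Client(limits=limits)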
+ """ + + def __init__( + self, + *, + max_connections: int | None = None, + max_keepalive_connections: int | None = None, + keepalive_expiry: float | None = 5.0, + ) -> None: + self.max_connections = max_connections + self.max_keepalive_connections = max_keepalive_connections + self.keepalive_expiry = keepalive_expiry + + def __eq__(self, other: typing.Any) -> bool: + return ( + isinstance(other, self.__class__) + and self.max_connections == other.max_connections + and self.max_keepalive_connections == other.max_keepalive_connections + and self.keepalive_expiry == other.keepalive_expiry + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + return ( + f"{class_name}(max_connections={self.max_connections}, " + f"max_keepalive_connections={self.max_keepalive_connections}, " + f"keepalive_expiry={self.keepalive_expiry})" + ) + + +class Proxy: + def __init__( + self, + url: URL | str, + *, + ssl_context: ssl.SSLContext | None = None, + auth: tuple[str, str] | None = None, + headers: HeaderTypes | None = None, + ) -> None: + url = URL(url) + headers = Headers(headers) + + if url.scheme not in ("http", "https", "socks5", "socks5h"): + raise ValueError(f"Unknown scheme for proxy URL {url!r}") + + if url.username or url.password: + # Remove any auth credentials from the URL. + auth = (url.username, url.password) + url = url.copy_with(username=None, password=None) + + self.url = url + self.auth = auth + self.headers = headers + self.ssl_context = ssl_context + + @property + def raw_auth(self) -> tuple[bytes, bytes] | None: + # The proxy authentication as raw bytes. + return ( + None + if self.auth is None + else (self.auth[0].encode("utf-8"), self.auth[1].encode("utf-8")) + ) + + def __repr__(self) -> str: + # The authentication is represented with the password component masked. + auth = (self.auth[0], "********") if self.auth else None + + # Build a nice concise representation. 
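+ # e.g. Proxy("http://user:secret@127.0.0.1:8080") renders as
+ # Proxy('http://127.0.0.1:8080', auth=('user', '********')).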
+ url_str = f"{str(self.url)!r}" + auth_str = f", auth={auth!r}" if auth else "" + headers_str = f", headers={dict(self.headers)!r}" if self.headers else "" + return f"Proxy({url_str}{auth_str}{headers_str})" + + +DEFAULT_TIMEOUT_CONFIG = Timeout(timeout=5.0) +DEFAULT_LIMITS = Limits(max_connections=100, max_keepalive_connections=20) +DEFAULT_MAX_REDIRECTS = 20 diff --git a/venv/Lib/site-packages/httpx/_content.py b/venv/Lib/site-packages/httpx/_content.py new file mode 100644 index 00000000..6f479a08 --- /dev/null +++ b/venv/Lib/site-packages/httpx/_content.py @@ -0,0 +1,240 @@ +from __future__ import annotations + +import inspect +import warnings +from json import dumps as json_dumps +from typing import ( + Any, + AsyncIterable, + AsyncIterator, + Iterable, + Iterator, + Mapping, +) +from urllib.parse import urlencode + +from ._exceptions import StreamClosed, StreamConsumed +from ._multipart import MultipartStream +from ._types import ( + AsyncByteStream, + RequestContent, + RequestData, + RequestFiles, + ResponseContent, + SyncByteStream, +) +from ._utils import peek_filelike_length, primitive_value_to_str + +__all__ = ["ByteStream"] + + +class ByteStream(AsyncByteStream, SyncByteStream): + def __init__(self, stream: bytes) -> None: + self._stream = stream + + def __iter__(self) -> Iterator[bytes]: + yield self._stream + + async def __aiter__(self) -> AsyncIterator[bytes]: + yield self._stream + + +class IteratorByteStream(SyncByteStream): + CHUNK_SIZE = 65_536 + + def __init__(self, stream: Iterable[bytes]) -> None: + self._stream = stream + self._is_stream_consumed = False + self._is_generator = inspect.isgenerator(stream) + + def __iter__(self) -> Iterator[bytes]: + if self._is_stream_consumed and self._is_generator: + raise StreamConsumed() + + self._is_stream_consumed = True + if hasattr(self._stream, "read"): + # File-like interfaces should use 'read' directly. + chunk = self._stream.read(self.CHUNK_SIZE) + while chunk: + yield chunk + chunk = self._stream.read(self.CHUNK_SIZE) + else: + # Otherwise iterate. + for part in self._stream: + yield part + + +class AsyncIteratorByteStream(AsyncByteStream): + CHUNK_SIZE = 65_536 + + def __init__(self, stream: AsyncIterable[bytes]) -> None: + self._stream = stream + self._is_stream_consumed = False + self._is_generator = inspect.isasyncgen(stream) + + async def __aiter__(self) -> AsyncIterator[bytes]: + if self._is_stream_consumed and self._is_generator: + raise StreamConsumed() + + self._is_stream_consumed = True + if hasattr(self._stream, "aread"): + # File-like interfaces should use 'aread' directly. + chunk = await self._stream.aread(self.CHUNK_SIZE) + while chunk: + yield chunk + chunk = await self._stream.aread(self.CHUNK_SIZE) + else: + # Otherwise iterate. + async for part in self._stream: + yield part + + +class UnattachedStream(AsyncByteStream, SyncByteStream): + """ + If a request or response is serialized using pickle, then it is no longer + attached to a stream for I/O purposes. Any stream operations should result + in `httpx.StreamClosed`. 
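+
+ For example, iterating a response that has been round-tripped through
+ pickle raises `StreamClosed`, unless its body was read before pickling.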
+ """ + + def __iter__(self) -> Iterator[bytes]: + raise StreamClosed() + + async def __aiter__(self) -> AsyncIterator[bytes]: + raise StreamClosed() + yield b"" # pragma: no cover + + +def encode_content( + content: str | bytes | Iterable[bytes] | AsyncIterable[bytes], +) -> tuple[dict[str, str], SyncByteStream | AsyncByteStream]: + if isinstance(content, (bytes, str)): + body = content.encode("utf-8") if isinstance(content, str) else content + content_length = len(body) + headers = {"Content-Length": str(content_length)} if body else {} + return headers, ByteStream(body) + + elif isinstance(content, Iterable) and not isinstance(content, dict): + # `not isinstance(content, dict)` is a bit oddly specific, but it + # catches a case that's easy for users to make in error, and would + # otherwise pass through here, like any other bytes-iterable, + # because `dict` happens to be iterable. See issue #2491. + content_length_or_none = peek_filelike_length(content) + + if content_length_or_none is None: + headers = {"Transfer-Encoding": "chunked"} + else: + headers = {"Content-Length": str(content_length_or_none)} + return headers, IteratorByteStream(content) # type: ignore + + elif isinstance(content, AsyncIterable): + headers = {"Transfer-Encoding": "chunked"} + return headers, AsyncIteratorByteStream(content) + + raise TypeError(f"Unexpected type for 'content', {type(content)!r}") + + +def encode_urlencoded_data( + data: RequestData, +) -> tuple[dict[str, str], ByteStream]: + plain_data = [] + for key, value in data.items(): + if isinstance(value, (list, tuple)): + plain_data.extend([(key, primitive_value_to_str(item)) for item in value]) + else: + plain_data.append((key, primitive_value_to_str(value))) + body = urlencode(plain_data, doseq=True).encode("utf-8") + content_length = str(len(body)) + content_type = "application/x-www-form-urlencoded" + headers = {"Content-Length": content_length, "Content-Type": content_type} + return headers, ByteStream(body) + + +def encode_multipart_data( + data: RequestData, files: RequestFiles, boundary: bytes | None +) -> tuple[dict[str, str], MultipartStream]: + multipart = MultipartStream(data=data, files=files, boundary=boundary) + headers = multipart.get_headers() + return headers, multipart + + +def encode_text(text: str) -> tuple[dict[str, str], ByteStream]: + body = text.encode("utf-8") + content_length = str(len(body)) + content_type = "text/plain; charset=utf-8" + headers = {"Content-Length": content_length, "Content-Type": content_type} + return headers, ByteStream(body) + + +def encode_html(html: str) -> tuple[dict[str, str], ByteStream]: + body = html.encode("utf-8") + content_length = str(len(body)) + content_type = "text/html; charset=utf-8" + headers = {"Content-Length": content_length, "Content-Type": content_type} + return headers, ByteStream(body) + + +def encode_json(json: Any) -> tuple[dict[str, str], ByteStream]: + body = json_dumps( + json, ensure_ascii=False, separators=(",", ":"), allow_nan=False + ).encode("utf-8") + content_length = str(len(body)) + content_type = "application/json" + headers = {"Content-Length": content_length, "Content-Type": content_type} + return headers, ByteStream(body) + + +def encode_request( + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: Any | None = None, + boundary: bytes | None = None, +) -> tuple[dict[str, str], SyncByteStream | AsyncByteStream]: + """ + Handles encoding the given `content`, `data`, `files`, and `json`, + returning 
a two-tuple of (<headers>, <stream>).
+ """
+ if data is not None and not isinstance(data, Mapping):
+ # We prefer to separate `content=<...>`
+ # for raw request content, and `data=
` for url encoded or + # multipart form content. + # + # However for compat with requests, we *do* still support + # `data=` usages. We deal with that case here, treating it + # as if `content=<...>` had been supplied instead. + message = "Use 'content=<...>' to upload raw bytes/text content." + warnings.warn(message, DeprecationWarning, stacklevel=2) + return encode_content(data) + + if content is not None: + return encode_content(content) + elif files: + return encode_multipart_data(data or {}, files, boundary) + elif data: + return encode_urlencoded_data(data) + elif json is not None: + return encode_json(json) + + return {}, ByteStream(b"") + + +def encode_response( + content: ResponseContent | None = None, + text: str | None = None, + html: str | None = None, + json: Any | None = None, +) -> tuple[dict[str, str], SyncByteStream | AsyncByteStream]: + """ + Handles encoding the given `content`, returning a two-tuple of + (, ). + """ + if content is not None: + return encode_content(content) + elif text is not None: + return encode_text(text) + elif html is not None: + return encode_html(html) + elif json is not None: + return encode_json(json) + + return {}, ByteStream(b"") diff --git a/venv/Lib/site-packages/httpx/_decoders.py b/venv/Lib/site-packages/httpx/_decoders.py new file mode 100644 index 00000000..899dfada --- /dev/null +++ b/venv/Lib/site-packages/httpx/_decoders.py @@ -0,0 +1,393 @@ +""" +Handlers for Content-Encoding. + +See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding +""" + +from __future__ import annotations + +import codecs +import io +import typing +import zlib + +from ._exceptions import DecodingError + +# Brotli support is optional +try: + # The C bindings in `brotli` are recommended for CPython. + import brotli +except ImportError: # pragma: no cover + try: + # The CFFI bindings in `brotlicffi` are recommended for PyPy + # and other environments. + import brotlicffi as brotli + except ImportError: + brotli = None + + +# Zstandard support is optional +try: + import zstandard +except ImportError: # pragma: no cover + zstandard = None # type: ignore + + +class ContentDecoder: + def decode(self, data: bytes) -> bytes: + raise NotImplementedError() # pragma: no cover + + def flush(self) -> bytes: + raise NotImplementedError() # pragma: no cover + + +class IdentityDecoder(ContentDecoder): + """ + Handle unencoded data. + """ + + def decode(self, data: bytes) -> bytes: + return data + + def flush(self) -> bytes: + return b"" + + +class DeflateDecoder(ContentDecoder): + """ + Handle 'deflate' decoding. + + See: https://stackoverflow.com/questions/1838699 + """ + + def __init__(self) -> None: + self.first_attempt = True + self.decompressor = zlib.decompressobj() + + def decode(self, data: bytes) -> bytes: + was_first_attempt = self.first_attempt + self.first_attempt = False + try: + return self.decompressor.decompress(data) + except zlib.error as exc: + if was_first_attempt: + self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS) + return self.decode(data) + raise DecodingError(str(exc)) from exc + + def flush(self) -> bytes: + try: + return self.decompressor.flush() + except zlib.error as exc: # pragma: no cover + raise DecodingError(str(exc)) from exc + + +class GZipDecoder(ContentDecoder): + """ + Handle 'gzip' decoding. 
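+
+ gzip is zlib framing with an offset window-bits value, hence the
+ `zlib.decompressobj(zlib.MAX_WBITS | 16)` construction below.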
+ + See: https://stackoverflow.com/questions/1838699 + """ + + def __init__(self) -> None: + self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16) + + def decode(self, data: bytes) -> bytes: + try: + return self.decompressor.decompress(data) + except zlib.error as exc: + raise DecodingError(str(exc)) from exc + + def flush(self) -> bytes: + try: + return self.decompressor.flush() + except zlib.error as exc: # pragma: no cover + raise DecodingError(str(exc)) from exc + + +class BrotliDecoder(ContentDecoder): + """ + Handle 'brotli' decoding. + + Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/ + or `pip install brotli`. See https://github.com/google/brotli + Supports both 'brotlipy' and 'Brotli' packages since they share an import + name. The top branches are for 'brotlipy' and bottom branches for 'Brotli' + """ + + def __init__(self) -> None: + if brotli is None: # pragma: no cover + raise ImportError( + "Using 'BrotliDecoder', but neither of the 'brotlicffi' or 'brotli' " + "packages have been installed. " + "Make sure to install httpx using `pip install httpx[brotli]`." + ) from None + + self.decompressor = brotli.Decompressor() + self.seen_data = False + self._decompress: typing.Callable[[bytes], bytes] + if hasattr(self.decompressor, "decompress"): + # The 'brotlicffi' package. + self._decompress = self.decompressor.decompress # pragma: no cover + else: + # The 'brotli' package. + self._decompress = self.decompressor.process # pragma: no cover + + def decode(self, data: bytes) -> bytes: + if not data: + return b"" + self.seen_data = True + try: + return self._decompress(data) + except brotli.error as exc: + raise DecodingError(str(exc)) from exc + + def flush(self) -> bytes: + if not self.seen_data: + return b"" + try: + if hasattr(self.decompressor, "finish"): + # Only available in the 'brotlicffi' package. + + # As the decompressor decompresses eagerly, this + # will never actually emit any data. However, it will potentially throw + # errors if a truncated or damaged data stream has been used. + self.decompressor.finish() # pragma: no cover + return b"" + except brotli.error as exc: # pragma: no cover + raise DecodingError(str(exc)) from exc + + +class ZStandardDecoder(ContentDecoder): + """ + Handle 'zstd' RFC 8878 decoding. + + Requires `pip install zstandard`. + Can be installed as a dependency of httpx using `pip install httpx[zstd]`. + """ + + # inspired by the ZstdDecoder implementation in urllib3 + def __init__(self) -> None: + if zstandard is None: # pragma: no cover + raise ImportError( + "Using 'ZStandardDecoder', ..." + "Make sure to install httpx using `pip install httpx[zstd]`." 
+ ) from None + + self.decompressor = zstandard.ZstdDecompressor().decompressobj() + self.seen_data = False + + def decode(self, data: bytes) -> bytes: + assert zstandard is not None + self.seen_data = True + output = io.BytesIO() + try: + output.write(self.decompressor.decompress(data)) + while self.decompressor.eof and self.decompressor.unused_data: + unused_data = self.decompressor.unused_data + self.decompressor = zstandard.ZstdDecompressor().decompressobj() + output.write(self.decompressor.decompress(unused_data)) + except zstandard.ZstdError as exc: + raise DecodingError(str(exc)) from exc + return output.getvalue() + + def flush(self) -> bytes: + if not self.seen_data: + return b"" + ret = self.decompressor.flush() # note: this is a no-op + if not self.decompressor.eof: + raise DecodingError("Zstandard data is incomplete") # pragma: no cover + return bytes(ret) + + +class MultiDecoder(ContentDecoder): + """ + Handle the case where multiple encodings have been applied. + """ + + def __init__(self, children: typing.Sequence[ContentDecoder]) -> None: + """ + 'children' should be a sequence of decoders in the order in which + each was applied. + """ + # Note that we reverse the order for decoding. + self.children = list(reversed(children)) + + def decode(self, data: bytes) -> bytes: + for child in self.children: + data = child.decode(data) + return data + + def flush(self) -> bytes: + data = b"" + for child in self.children: + data = child.decode(data) + child.flush() + return data + + +class ByteChunker: + """ + Handles returning byte content in fixed-size chunks. + """ + + def __init__(self, chunk_size: int | None = None) -> None: + self._buffer = io.BytesIO() + self._chunk_size = chunk_size + + def decode(self, content: bytes) -> list[bytes]: + if self._chunk_size is None: + return [content] if content else [] + + self._buffer.write(content) + if self._buffer.tell() >= self._chunk_size: + value = self._buffer.getvalue() + chunks = [ + value[i : i + self._chunk_size] + for i in range(0, len(value), self._chunk_size) + ] + if len(chunks[-1]) == self._chunk_size: + self._buffer.seek(0) + self._buffer.truncate() + return chunks + else: + self._buffer.seek(0) + self._buffer.write(chunks[-1]) + self._buffer.truncate() + return chunks[:-1] + else: + return [] + + def flush(self) -> list[bytes]: + value = self._buffer.getvalue() + self._buffer.seek(0) + self._buffer.truncate() + return [value] if value else [] + + +class TextChunker: + """ + Handles returning text content in fixed-size chunks. 
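+
+ For example, with chunk_size=3, decode("abcdefg") returns ["abc", "def"]
+ and buffers "g" until flush() is called.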
+ """
+
+ def __init__(self, chunk_size: int | None = None) -> None:
+ self._buffer = io.StringIO()
+ self._chunk_size = chunk_size
+
+ def decode(self, content: str) -> list[str]:
+ if self._chunk_size is None:
+ return [content] if content else []
+
+ self._buffer.write(content)
+ if self._buffer.tell() >= self._chunk_size:
+ value = self._buffer.getvalue()
+ chunks = [
+ value[i : i + self._chunk_size]
+ for i in range(0, len(value), self._chunk_size)
+ ]
+ if len(chunks[-1]) == self._chunk_size:
+ self._buffer.seek(0)
+ self._buffer.truncate()
+ return chunks
+ else:
+ self._buffer.seek(0)
+ self._buffer.write(chunks[-1])
+ self._buffer.truncate()
+ return chunks[:-1]
+ else:
+ return []
+
+ def flush(self) -> list[str]:
+ value = self._buffer.getvalue()
+ self._buffer.seek(0)
+ self._buffer.truncate()
+ return [value] if value else []
+
+
+class TextDecoder:
+ """
+ Handles incrementally decoding bytes into text.
+ """
+
+ def __init__(self, encoding: str = "utf-8") -> None:
+ self.decoder = codecs.getincrementaldecoder(encoding)(errors="replace")
+
+ def decode(self, data: bytes) -> str:
+ return self.decoder.decode(data)
+
+ def flush(self) -> str:
+ return self.decoder.decode(b"", True)
+
+
+class LineDecoder:
+ """
+ Handles incrementally reading lines from text.
+
+ Has the same behaviour as the stdlib splitlines,
+ but handling the input iteratively.
+ """
+
+ def __init__(self) -> None:
+ self.buffer: list[str] = []
+ self.trailing_cr: bool = False
+
+ def decode(self, text: str) -> list[str]:
+ # See https://docs.python.org/3/library/stdtypes.html#str.splitlines
+ NEWLINE_CHARS = "\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029"
+
+ # We always push a trailing `\r` into the next decode iteration.
+ if self.trailing_cr:
+ text = "\r" + text
+ self.trailing_cr = False
+ if text.endswith("\r"):
+ self.trailing_cr = True
+ text = text[:-1]
+
+ if not text:
+ # NOTE: the edge case input of empty text doesn't occur in practice,
+ # because other httpx internals filter out this value
+ return [] # pragma: no cover
+
+ trailing_newline = text[-1] in NEWLINE_CHARS
+ lines = text.splitlines()
+
+ if len(lines) == 1 and not trailing_newline:
+ # No new lines, buffer the input and continue.
+ self.buffer.append(lines[0])
+ return []
+
+ if self.buffer:
+ # Include any existing buffer in the first portion of the
+ # splitlines result.
+ lines = ["".join(self.buffer) + lines[0]] + lines[1:]
+ self.buffer = []
+
+ if not trailing_newline:
+ # If the last segment of splitlines is not newline terminated,
+ # then drop it from our output and start a new buffer.
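+ # e.g. decode("a\nb") returns ["a"] and holds "b" until more text
+ # (or a flush()) arrives.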
+ self.buffer = [lines.pop()] + + return lines + + def flush(self) -> list[str]: + if not self.buffer and not self.trailing_cr: + return [] + + lines = ["".join(self.buffer)] + self.buffer = [] + self.trailing_cr = False + return lines + + +SUPPORTED_DECODERS = { + "identity": IdentityDecoder, + "gzip": GZipDecoder, + "deflate": DeflateDecoder, + "br": BrotliDecoder, + "zstd": ZStandardDecoder, +} + + +if brotli is None: + SUPPORTED_DECODERS.pop("br") # pragma: no cover +if zstandard is None: + SUPPORTED_DECODERS.pop("zstd") # pragma: no cover diff --git a/venv/Lib/site-packages/httpx/_exceptions.py b/venv/Lib/site-packages/httpx/_exceptions.py new file mode 100644 index 00000000..77f45a6d --- /dev/null +++ b/venv/Lib/site-packages/httpx/_exceptions.py @@ -0,0 +1,379 @@ +""" +Our exception hierarchy: + +* HTTPError + x RequestError + + TransportError + - TimeoutException + · ConnectTimeout + · ReadTimeout + · WriteTimeout + · PoolTimeout + - NetworkError + · ConnectError + · ReadError + · WriteError + · CloseError + - ProtocolError + · LocalProtocolError + · RemoteProtocolError + - ProxyError + - UnsupportedProtocol + + DecodingError + + TooManyRedirects + x HTTPStatusError +* InvalidURL +* CookieConflict +* StreamError + x StreamConsumed + x StreamClosed + x ResponseNotRead + x RequestNotRead +""" + +from __future__ import annotations + +import contextlib +import typing + +if typing.TYPE_CHECKING: + from ._models import Request, Response # pragma: no cover + +__all__ = [ + "CloseError", + "ConnectError", + "ConnectTimeout", + "CookieConflict", + "DecodingError", + "HTTPError", + "HTTPStatusError", + "InvalidURL", + "LocalProtocolError", + "NetworkError", + "PoolTimeout", + "ProtocolError", + "ProxyError", + "ReadError", + "ReadTimeout", + "RemoteProtocolError", + "RequestError", + "RequestNotRead", + "ResponseNotRead", + "StreamClosed", + "StreamConsumed", + "StreamError", + "TimeoutException", + "TooManyRedirects", + "TransportError", + "UnsupportedProtocol", + "WriteError", + "WriteTimeout", +] + + +class HTTPError(Exception): + """ + Base class for `RequestError` and `HTTPStatusError`. + + Useful for `try...except` blocks when issuing a request, + and then calling `.raise_for_status()`. + + For example: + + ``` + try: + response = httpx.get("https://www.example.com") + response.raise_for_status() + except httpx.HTTPError as exc: + print(f"HTTP Exception for {exc.request.url} - {exc}") + ``` + """ + + def __init__(self, message: str) -> None: + super().__init__(message) + self._request: Request | None = None + + @property + def request(self) -> Request: + if self._request is None: + raise RuntimeError("The .request property has not been set.") + return self._request + + @request.setter + def request(self, request: Request) -> None: + self._request = request + + +class RequestError(HTTPError): + """ + Base class for all exceptions that may occur when issuing a `.request()`. + """ + + def __init__(self, message: str, *, request: Request | None = None) -> None: + super().__init__(message) + # At the point an exception is raised we won't typically have a request + # instance to associate it with. + # + # The 'request_context' context manager is used within the Client and + # Response methods in order to ensure that any raised exceptions + # have a `.request` property set on them. + self._request = request + + +class TransportError(RequestError): + """ + Base class for all exceptions that occur at the level of the Transport API. + """ + + +# Timeout exceptions... 
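+# A typical guard (a sketch; the URL and timeout value are illustrative):
+#
+# try:
+#     httpx.get("https://www.example.com", timeout=1.0)
+# except httpx.TimeoutException:
+#     ...  # covers ConnectTimeout, ReadTimeout, WriteTimeout and PoolTimeout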
+ + +class TimeoutException(TransportError): + """ + The base class for timeout errors. + + An operation has timed out. + """ + + +class ConnectTimeout(TimeoutException): + """ + Timed out while connecting to the host. + """ + + +class ReadTimeout(TimeoutException): + """ + Timed out while receiving data from the host. + """ + + +class WriteTimeout(TimeoutException): + """ + Timed out while sending data to the host. + """ + + +class PoolTimeout(TimeoutException): + """ + Timed out waiting to acquire a connection from the pool. + """ + + +# Core networking exceptions... + + +class NetworkError(TransportError): + """ + The base class for network-related errors. + + An error occurred while interacting with the network. + """ + + +class ReadError(NetworkError): + """ + Failed to receive data from the network. + """ + + +class WriteError(NetworkError): + """ + Failed to send data through the network. + """ + + +class ConnectError(NetworkError): + """ + Failed to establish a connection. + """ + + +class CloseError(NetworkError): + """ + Failed to close a connection. + """ + + +# Other transport exceptions... + + +class ProxyError(TransportError): + """ + An error occurred while establishing a proxy connection. + """ + + +class UnsupportedProtocol(TransportError): + """ + Attempted to make a request to an unsupported protocol. + + For example issuing a request to `ftp://www.example.com`. + """ + + +class ProtocolError(TransportError): + """ + The protocol was violated. + """ + + +class LocalProtocolError(ProtocolError): + """ + A protocol was violated by the client. + + For example if the user instantiated a `Request` instance explicitly, + failed to include the mandatory `Host:` header, and then issued it directly + using `client.send()`. + """ + + +class RemoteProtocolError(ProtocolError): + """ + The protocol was violated by the server. + + For example, returning malformed HTTP. + """ + + +# Other request exceptions... + + +class DecodingError(RequestError): + """ + Decoding of the response failed, due to a malformed encoding. + """ + + +class TooManyRedirects(RequestError): + """ + Too many redirects. + """ + + +# Client errors + + +class HTTPStatusError(HTTPError): + """ + The response had an error HTTP status of 4xx or 5xx. + + May be raised when calling `response.raise_for_status()` + """ + + def __init__(self, message: str, *, request: Request, response: Response) -> None: + super().__init__(message) + self.request = request + self.response = response + + +class InvalidURL(Exception): + """ + URL is improperly formed or cannot be parsed. + """ + + def __init__(self, message: str) -> None: + super().__init__(message) + + +class CookieConflict(Exception): + """ + Attempted to lookup a cookie by name, but multiple cookies existed. + + Can occur when calling `response.cookies.get(...)`. + """ + + def __init__(self, message: str) -> None: + super().__init__(message) + + +# Stream exceptions... + +# These may occur as the result of a programming error, by accessing +# the request/response stream in an invalid manner. + + +class StreamError(RuntimeError): + """ + The base class for stream exceptions. + + The developer made an error in accessing the request stream in + an invalid way. + """ + + def __init__(self, message: str) -> None: + super().__init__(message) + + +class StreamConsumed(StreamError): + """ + Attempted to read or stream content, but the content has already + been streamed. 
+ """
+
+ def __init__(self) -> None:
+ message = (
+ "Attempted to read or stream some content, but the content has "
+ "already been streamed. For requests, this could be due to passing "
+ "a generator as request content, and then receiving a redirect "
+ "response or a secondary request as part of an authentication flow. "
+ "For responses, this could be due to attempting to stream the response "
+ "content more than once."
+ )
+ super().__init__(message)
+
+
+class StreamClosed(StreamError):
+ """
+ Attempted to read or stream response content, but the request has been
+ closed.
+ """
+
+ def __init__(self) -> None:
+ message = (
+ "Attempted to read or stream content, but the stream has " "been closed."
+ )
+ super().__init__(message)
+
+
+class ResponseNotRead(StreamError):
+ """
+ Attempted to access streaming response content, without having called `read()`.
+ """
+
+ def __init__(self) -> None:
+ message = (
+ "Attempted to access streaming response content,"
+ " without having called `read()`."
+ )
+ super().__init__(message)
+
+
+class RequestNotRead(StreamError):
+ """
+ Attempted to access streaming request content, without having called `read()`.
+ """
+
+ def __init__(self) -> None:
+ message = (
+ "Attempted to access streaming request content,"
+ " without having called `read()`."
+ )
+ super().__init__(message)
+
+
+@contextlib.contextmanager
+def request_context(
+ request: Request | None = None,
+) -> typing.Iterator[None]:
+ """
+ A context manager that can be used to attach the given request context
+ to any `RequestError` exceptions that are raised within the block.
+ """
+ try:
+ yield
+ except RequestError as exc:
+ if request is not None:
+ exc.request = request
+ raise exc
diff --git a/venv/Lib/site-packages/httpx/_main.py b/venv/Lib/site-packages/httpx/_main.py
new file mode 100644
index 00000000..cffa4bb7
--- /dev/null
+++ b/venv/Lib/site-packages/httpx/_main.py
@@ -0,0 +1,506 @@
+from __future__ import annotations
+
+import functools
+import json
+import sys
+import typing
+
+import click
+import pygments.lexers
+import pygments.util
+import rich.console
+import rich.markup
+import rich.progress
+import rich.syntax
+import rich.table
+
+from ._client import Client
+from ._exceptions import RequestError
+from ._models import Response
+from ._status_codes import codes
+
+if typing.TYPE_CHECKING:
+ import httpcore # pragma: no cover
+
+
+def print_help() -> None:
+ console = rich.console.Console()
+
+ console.print("[bold]HTTPX :butterfly:", justify="center")
+ console.print()
+ console.print("A next generation HTTP client.", justify="center")
+ console.print()
+ console.print(
+ "Usage: [bold]httpx[/bold] [cyan]<URL> [OPTIONS][/cyan] ", justify="left"
+ )
+ console.print()
+
+ table = rich.table.Table.grid(padding=1, pad_edge=True)
+ table.add_column("Parameter", no_wrap=True, justify="left", style="bold")
+ table.add_column("Description")
+ table.add_row(
+ "-m, --method [cyan]METHOD",
+ "Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD.\n"
+ "[Default: GET, or POST if a request body is included]",
+ )
+ table.add_row(
+ "-p, --params [cyan]<NAME VALUE> ...",
+ "Query parameters to include in the request URL.",
+ )
+ table.add_row(
+ "-c, --content [cyan]TEXT", "Byte content to include in the request body."
+ )
+ table.add_row(
+ "-d, --data [cyan]<NAME VALUE> ...", "Form data to include in the request body."
+ )
+ table.add_row(
+ "-f, --files [cyan]<NAME FILENAME> ...",
+ "Form files to include in the request body.",
+ )
+ table.add_row("-j, --json [cyan]TEXT", "JSON data to include in the request body.")
+ table.add_row(
+ "-h, --headers [cyan]<NAME VALUE> ...",
+ "Include additional HTTP headers in the request.",
+ )
+ table.add_row(
+ "--cookies [cyan]<NAME VALUE> ...", "Cookies to include in the request."
+ )
+ table.add_row(
+ "--auth [cyan]<USER PASS>",
+ "Username and password to include in the request. Specify '-' for the password"
+ " to use a password prompt. Note that using --verbose/-v will expose"
+ " the Authorization header, including the password encoding"
+ " in a trivially reversible format.",
+ )
+
+ table.add_row(
+ "--proxy [cyan]URL",
+ "Send the request via a proxy. Should be the URL giving the proxy address.",
+ )
+
+ table.add_row(
+ "--timeout [cyan]FLOAT",
+ "Timeout value to use for network operations, such as establishing the"
+ " connection, reading some data, etc... [Default: 5.0]",
+ )
+
+ table.add_row("--follow-redirects", "Automatically follow redirects.")
+ table.add_row("--no-verify", "Disable SSL verification.")
+ table.add_row(
+ "--http2", "Send the request using HTTP/2, if the remote server supports it."
+ )
+
+ table.add_row(
+ "--download [cyan]FILE",
+ "Save the response content as a file, rather than displaying it.",
+ )
+
+ table.add_row("-v, --verbose", "Verbose output. Show request as well as response.")
+ table.add_row("--help", "Show this message and exit.")
+ console.print(table)
+
+
+def get_lexer_for_response(response: Response) -> str:
+ content_type = response.headers.get("Content-Type")
+ if content_type is not None:
+ mime_type, _, _ = content_type.partition(";")
+ try:
+ return typing.cast(
+ str, pygments.lexers.get_lexer_for_mimetype(mime_type.strip()).name
+ )
+ except pygments.util.ClassNotFound: # pragma: no cover
+ pass
+ return "" # pragma: no cover
+
+
+def format_request_headers(request: httpcore.Request, http2: bool = False) -> str:
+ version = "HTTP/2" if http2 else "HTTP/1.1"
+ headers = [
+ (name.lower() if http2 else name, value) for name, value in request.headers
+ ]
+ method = request.method.decode("ascii")
+ target = request.url.target.decode("ascii")
+ lines = [f"{method} {target} {version}"] + [
+ f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers
+ ]
+ return "\n".join(lines)
+
+
+def format_response_headers(
+ http_version: bytes,
+ status: int,
+ reason_phrase: bytes | None,
+ headers: list[tuple[bytes, bytes]],
+) -> str:
+ version = http_version.decode("ascii")
+ reason = (
+ codes.get_reason_phrase(status)
+ if reason_phrase is None
+ else reason_phrase.decode("ascii")
+ )
+ lines = [f"{version} {status} {reason}"] + [
+ f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers
+ ]
+ return "\n".join(lines)
+
+
+def print_request_headers(request: httpcore.Request, http2: bool = False) -> None:
+ console = rich.console.Console()
+ http_text = format_request_headers(request, http2=http2)
+ syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True)
+ console.print(syntax)
+ syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True)
+ console.print(syntax)
+
+
+def print_response_headers(
+ http_version: bytes,
+ status: int,
+ reason_phrase: bytes | None,
+ headers: list[tuple[bytes, bytes]],
+) -> None:
+ console = rich.console.Console()
+ http_text = format_response_headers(http_version, status, reason_phrase, headers)
+ syntax = rich.syntax.Syntax(http_text, "http",
theme="ansi_dark", word_wrap=True) + console.print(syntax) + syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True) + console.print(syntax) + + +def print_response(response: Response) -> None: + console = rich.console.Console() + lexer_name = get_lexer_for_response(response) + if lexer_name: + if lexer_name.lower() == "json": + try: + data = response.json() + text = json.dumps(data, indent=4) + except ValueError: # pragma: no cover + text = response.text + else: + text = response.text + + syntax = rich.syntax.Syntax(text, lexer_name, theme="ansi_dark", word_wrap=True) + console.print(syntax) + else: + console.print(f"<{len(response.content)} bytes of binary data>") + + +_PCTRTT = typing.Tuple[typing.Tuple[str, str], ...] +_PCTRTTT = typing.Tuple[_PCTRTT, ...] +_PeerCertRetDictType = typing.Dict[str, typing.Union[str, _PCTRTTT, _PCTRTT]] + + +def format_certificate(cert: _PeerCertRetDictType) -> str: # pragma: no cover + lines = [] + for key, value in cert.items(): + if isinstance(value, (list, tuple)): + lines.append(f"* {key}:") + for item in value: + if key in ("subject", "issuer"): + for sub_item in item: + lines.append(f"* {sub_item[0]}: {sub_item[1]!r}") + elif isinstance(item, tuple) and len(item) == 2: + lines.append(f"* {item[0]}: {item[1]!r}") + else: + lines.append(f"* {item!r}") + else: + lines.append(f"* {key}: {value!r}") + return "\n".join(lines) + + +def trace( + name: str, info: typing.Mapping[str, typing.Any], verbose: bool = False +) -> None: + console = rich.console.Console() + if name == "connection.connect_tcp.started" and verbose: + host = info["host"] + console.print(f"* Connecting to {host!r}") + elif name == "connection.connect_tcp.complete" and verbose: + stream = info["return_value"] + server_addr = stream.get_extra_info("server_addr") + console.print(f"* Connected to {server_addr[0]!r} on port {server_addr[1]}") + elif name == "connection.start_tls.complete" and verbose: # pragma: no cover + stream = info["return_value"] + ssl_object = stream.get_extra_info("ssl_object") + version = ssl_object.version() + cipher = ssl_object.cipher() + server_cert = ssl_object.getpeercert() + alpn = ssl_object.selected_alpn_protocol() + console.print(f"* SSL established using {version!r} / {cipher[0]!r}") + console.print(f"* Selected ALPN protocol: {alpn!r}") + if server_cert: + console.print("* Server certificate:") + console.print(format_certificate(server_cert)) + elif name == "http11.send_request_headers.started" and verbose: + request = info["request"] + print_request_headers(request, http2=False) + elif name == "http2.send_request_headers.started" and verbose: # pragma: no cover + request = info["request"] + print_request_headers(request, http2=True) + elif name == "http11.receive_response_headers.complete": + http_version, status, reason_phrase, headers = info["return_value"] + print_response_headers(http_version, status, reason_phrase, headers) + elif name == "http2.receive_response_headers.complete": # pragma: no cover + status, headers = info["return_value"] + http_version = b"HTTP/2" + reason_phrase = None + print_response_headers(http_version, status, reason_phrase, headers) + + +def download_response(response: Response, download: typing.BinaryIO) -> None: + console = rich.console.Console() + console.print() + content_length = response.headers.get("Content-Length") + with rich.progress.Progress( + "[progress.description]{task.description}", + "[progress.percentage]{task.percentage:>3.0f}%", + rich.progress.BarColumn(bar_width=None), + 
rich.progress.DownloadColumn(), + rich.progress.TransferSpeedColumn(), + ) as progress: + description = f"Downloading [bold]{rich.markup.escape(download.name)}" + download_task = progress.add_task( + description, + total=int(content_length or 0), + start=content_length is not None, + ) + for chunk in response.iter_bytes(): + download.write(chunk) + progress.update(download_task, completed=response.num_bytes_downloaded) + + +def validate_json( + ctx: click.Context, + param: click.Option | click.Parameter, + value: typing.Any, +) -> typing.Any: + if value is None: + return None + + try: + return json.loads(value) + except json.JSONDecodeError: # pragma: no cover + raise click.BadParameter("Not valid JSON") + + +def validate_auth( + ctx: click.Context, + param: click.Option | click.Parameter, + value: typing.Any, +) -> typing.Any: + if value == (None, None): + return None + + username, password = value + if password == "-": # pragma: no cover + password = click.prompt("Password", hide_input=True) + return (username, password) + + +def handle_help( + ctx: click.Context, + param: click.Option | click.Parameter, + value: typing.Any, +) -> None: + if not value or ctx.resilient_parsing: + return + + print_help() + ctx.exit() + + +@click.command(add_help_option=False) +@click.argument("url", type=str) +@click.option( + "--method", + "-m", + "method", + type=str, + help=( + "Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD. " + "[Default: GET, or POST if a request body is included]" + ), +) +@click.option( + "--params", + "-p", + "params", + type=(str, str), + multiple=True, + help="Query parameters to include in the request URL.", +) +@click.option( + "--content", + "-c", + "content", + type=str, + help="Byte content to include in the request body.", +) +@click.option( + "--data", + "-d", + "data", + type=(str, str), + multiple=True, + help="Form data to include in the request body.", +) +@click.option( + "--files", + "-f", + "files", + type=(str, click.File(mode="rb")), + multiple=True, + help="Form files to include in the request body.", +) +@click.option( + "--json", + "-j", + "json", + type=str, + callback=validate_json, + help="JSON data to include in the request body.", +) +@click.option( + "--headers", + "-h", + "headers", + type=(str, str), + multiple=True, + help="Include additional HTTP headers in the request.", +) +@click.option( + "--cookies", + "cookies", + type=(str, str), + multiple=True, + help="Cookies to include in the request.", +) +@click.option( + "--auth", + "auth", + type=(str, str), + default=(None, None), + callback=validate_auth, + help=( + "Username and password to include in the request. " + "Specify '-' for the password to use a password prompt. " + "Note that using --verbose/-v will expose the Authorization header, " + "including the password encoding in a trivially reversible format." + ), +) +@click.option( + "--proxy", + "proxy", + type=str, + default=None, + help="Send the request via a proxy. Should be the URL giving the proxy address.", +) +@click.option( + "--timeout", + "timeout", + type=float, + default=5.0, + help=( + "Timeout value to use for network operations, such as establishing the " + "connection, reading some data, etc... 
[Default: 5.0]" + ), +) +@click.option( + "--follow-redirects", + "follow_redirects", + is_flag=True, + default=False, + help="Automatically follow redirects.", +) +@click.option( + "--no-verify", + "verify", + is_flag=True, + default=True, + help="Disable SSL verification.", +) +@click.option( + "--http2", + "http2", + type=bool, + is_flag=True, + default=False, + help="Send the request using HTTP/2, if the remote server supports it.", +) +@click.option( + "--download", + type=click.File("wb"), + help="Save the response content as a file, rather than displaying it.", +) +@click.option( + "--verbose", + "-v", + type=bool, + is_flag=True, + default=False, + help="Verbose. Show request as well as response.", +) +@click.option( + "--help", + is_flag=True, + is_eager=True, + expose_value=False, + callback=handle_help, + help="Show this message and exit.", +) +def main( + url: str, + method: str, + params: list[tuple[str, str]], + content: str, + data: list[tuple[str, str]], + files: list[tuple[str, click.File]], + json: str, + headers: list[tuple[str, str]], + cookies: list[tuple[str, str]], + auth: tuple[str, str] | None, + proxy: str, + timeout: float, + follow_redirects: bool, + verify: bool, + http2: bool, + download: typing.BinaryIO | None, + verbose: bool, +) -> None: + """ + An HTTP command line client. + Sends a request and displays the response. + """ + if not method: + method = "POST" if content or data or files or json else "GET" + + try: + with Client(proxy=proxy, timeout=timeout, http2=http2, verify=verify) as client: + with client.stream( + method, + url, + params=list(params), + content=content, + data=dict(data), + files=files, # type: ignore + json=json, + headers=headers, + cookies=dict(cookies), + auth=auth, + follow_redirects=follow_redirects, + extensions={"trace": functools.partial(trace, verbose=verbose)}, + ) as response: + if download is not None: + download_response(response, download) + else: + response.read() + if response.content: + print_response(response) + + except RequestError as exc: + console = rich.console.Console() + console.print(f"[red]{type(exc).__name__}[/red]: {exc}") + sys.exit(1) + + sys.exit(0 if response.is_success else 1) diff --git a/venv/Lib/site-packages/httpx/_models.py b/venv/Lib/site-packages/httpx/_models.py new file mode 100644 index 00000000..67d74bf8 --- /dev/null +++ b/venv/Lib/site-packages/httpx/_models.py @@ -0,0 +1,1277 @@ +from __future__ import annotations + +import codecs +import datetime +import email.message +import json as jsonlib +import re +import typing +import urllib.request +from collections.abc import Mapping +from http.cookiejar import Cookie, CookieJar + +from ._content import ByteStream, UnattachedStream, encode_request, encode_response +from ._decoders import ( + SUPPORTED_DECODERS, + ByteChunker, + ContentDecoder, + IdentityDecoder, + LineDecoder, + MultiDecoder, + TextChunker, + TextDecoder, +) +from ._exceptions import ( + CookieConflict, + HTTPStatusError, + RequestNotRead, + ResponseNotRead, + StreamClosed, + StreamConsumed, + request_context, +) +from ._multipart import get_multipart_boundary_from_content_type +from ._status_codes import codes +from ._types import ( + AsyncByteStream, + CookieTypes, + HeaderTypes, + QueryParamTypes, + RequestContent, + RequestData, + RequestExtensions, + RequestFiles, + ResponseContent, + ResponseExtensions, + SyncByteStream, +) +from ._urls import URL +from ._utils import to_bytes_or_str, to_str + +__all__ = ["Cookies", "Headers", "Request", "Response"] + +SENSITIVE_HEADERS = 
{"authorization", "proxy-authorization"} + + +def _is_known_encoding(encoding: str) -> bool: + """ + Return `True` if `encoding` is a known codec. + """ + try: + codecs.lookup(encoding) + except LookupError: + return False + return True + + +def _normalize_header_key(key: str | bytes, encoding: str | None = None) -> bytes: + """ + Coerce str/bytes into a strictly byte-wise HTTP header key. + """ + return key if isinstance(key, bytes) else key.encode(encoding or "ascii") + + +def _normalize_header_value(value: str | bytes, encoding: str | None = None) -> bytes: + """ + Coerce str/bytes into a strictly byte-wise HTTP header value. + """ + if isinstance(value, bytes): + return value + if not isinstance(value, str): + raise TypeError(f"Header value must be str or bytes, not {type(value)}") + return value.encode(encoding or "ascii") + + +def _parse_content_type_charset(content_type: str) -> str | None: + # We used to use `cgi.parse_header()` here, but `cgi` became a dead battery. + # See: https://peps.python.org/pep-0594/#cgi + msg = email.message.Message() + msg["content-type"] = content_type + return msg.get_content_charset(failobj=None) + + +def _parse_header_links(value: str) -> list[dict[str, str]]: + """ + Returns a list of parsed link headers, for more info see: + https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link + The generic syntax of those is: + Link: < uri-reference >; param1=value1; param2="value2" + So for instance: + Link; '; type="image/jpeg",;' + would return + [ + {"url": "http:/.../front.jpeg", "type": "image/jpeg"}, + {"url": "http://.../back.jpeg"}, + ] + :param value: HTTP Link entity-header field + :return: list of parsed link headers + """ + links: list[dict[str, str]] = [] + replace_chars = " '\"" + value = value.strip(replace_chars) + if not value: + return links + for val in re.split(", *<", value): + try: + url, params = val.split(";", 1) + except ValueError: + url, params = val, "" + link = {"url": url.strip("<> '\"")} + for param in params.split(";"): + try: + key, value = param.split("=") + except ValueError: + break + link[key.strip(replace_chars)] = value.strip(replace_chars) + links.append(link) + return links + + +def _obfuscate_sensitive_headers( + items: typing.Iterable[tuple[typing.AnyStr, typing.AnyStr]], +) -> typing.Iterator[tuple[typing.AnyStr, typing.AnyStr]]: + for k, v in items: + if to_str(k.lower()) in SENSITIVE_HEADERS: + v = to_bytes_or_str("[secure]", match_type_of=v) + yield k, v + + +class Headers(typing.MutableMapping[str, str]): + """ + HTTP headers, as a case-insensitive multi-dict. + """ + + def __init__( + self, + headers: HeaderTypes | None = None, + encoding: str | None = None, + ) -> None: + self._list = [] # type: typing.List[typing.Tuple[bytes, bytes, bytes]] + + if isinstance(headers, Headers): + self._list = list(headers._list) + elif isinstance(headers, Mapping): + for k, v in headers.items(): + bytes_key = _normalize_header_key(k, encoding) + bytes_value = _normalize_header_value(v, encoding) + self._list.append((bytes_key, bytes_key.lower(), bytes_value)) + elif headers is not None: + for k, v in headers: + bytes_key = _normalize_header_key(k, encoding) + bytes_value = _normalize_header_value(v, encoding) + self._list.append((bytes_key, bytes_key.lower(), bytes_value)) + + self._encoding = encoding + + @property + def encoding(self) -> str: + """ + Header encoding is mandated as ascii, but we allow fallbacks to utf-8 + or iso-8859-1. 
+ """ + if self._encoding is None: + for encoding in ["ascii", "utf-8"]: + for key, value in self.raw: + try: + key.decode(encoding) + value.decode(encoding) + except UnicodeDecodeError: + break + else: + # The else block runs if 'break' did not occur, meaning + # all values fitted the encoding. + self._encoding = encoding + break + else: + # The ISO-8859-1 encoding covers all 256 code points in a byte, + # so will never raise decode errors. + self._encoding = "iso-8859-1" + return self._encoding + + @encoding.setter + def encoding(self, value: str) -> None: + self._encoding = value + + @property + def raw(self) -> list[tuple[bytes, bytes]]: + """ + Returns a list of the raw header items, as byte pairs. + """ + return [(raw_key, value) for raw_key, _, value in self._list] + + def keys(self) -> typing.KeysView[str]: + return {key.decode(self.encoding): None for _, key, value in self._list}.keys() + + def values(self) -> typing.ValuesView[str]: + values_dict: dict[str, str] = {} + for _, key, value in self._list: + str_key = key.decode(self.encoding) + str_value = value.decode(self.encoding) + if str_key in values_dict: + values_dict[str_key] += f", {str_value}" + else: + values_dict[str_key] = str_value + return values_dict.values() + + def items(self) -> typing.ItemsView[str, str]: + """ + Return `(key, value)` items of headers. Concatenate headers + into a single comma separated value when a key occurs multiple times. + """ + values_dict: dict[str, str] = {} + for _, key, value in self._list: + str_key = key.decode(self.encoding) + str_value = value.decode(self.encoding) + if str_key in values_dict: + values_dict[str_key] += f", {str_value}" + else: + values_dict[str_key] = str_value + return values_dict.items() + + def multi_items(self) -> list[tuple[str, str]]: + """ + Return a list of `(key, value)` pairs of headers. Allow multiple + occurrences of the same key without concatenating into a single + comma separated value. + """ + return [ + (key.decode(self.encoding), value.decode(self.encoding)) + for _, key, value in self._list + ] + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + """ + Return a header value. If multiple occurrences of the header occur + then concatenate them together with commas. + """ + try: + return self[key] + except KeyError: + return default + + def get_list(self, key: str, split_commas: bool = False) -> list[str]: + """ + Return a list of all header values for a given key. + If `split_commas=True` is passed, then any comma separated header + values are split into multiple return strings. + """ + get_header_key = key.lower().encode(self.encoding) + + values = [ + item_value.decode(self.encoding) + for _, item_key, item_value in self._list + if item_key.lower() == get_header_key + ] + + if not split_commas: + return values + + split_values = [] + for value in values: + split_values.extend([item.strip() for item in value.split(",")]) + return split_values + + def update(self, headers: HeaderTypes | None = None) -> None: # type: ignore + headers = Headers(headers) + for key in headers.keys(): + if key in self: + self.pop(key) + self._list.extend(headers._list) + + def copy(self) -> Headers: + return Headers(self, encoding=self.encoding) + + def __getitem__(self, key: str) -> str: + """ + Return a single header value. + + If there are multiple headers with the same key, then we concatenate + them with commas. 
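+        For example (illustrative values), a response carrying both
+        `Vary: Accept` and `Vary: Cookie` yields
+        `headers["vary"] == "Accept, Cookie"`.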
See: https://tools.ietf.org/html/rfc7230#section-3.2.2 + """ + normalized_key = key.lower().encode(self.encoding) + + items = [ + header_value.decode(self.encoding) + for _, header_key, header_value in self._list + if header_key == normalized_key + ] + + if items: + return ", ".join(items) + + raise KeyError(key) + + def __setitem__(self, key: str, value: str) -> None: + """ + Set the header `key` to `value`, removing any duplicate entries. + Retains insertion order. + """ + set_key = key.encode(self._encoding or "utf-8") + set_value = value.encode(self._encoding or "utf-8") + lookup_key = set_key.lower() + + found_indexes = [ + idx + for idx, (_, item_key, _) in enumerate(self._list) + if item_key == lookup_key + ] + + for idx in reversed(found_indexes[1:]): + del self._list[idx] + + if found_indexes: + idx = found_indexes[0] + self._list[idx] = (set_key, lookup_key, set_value) + else: + self._list.append((set_key, lookup_key, set_value)) + + def __delitem__(self, key: str) -> None: + """ + Remove the header `key`. + """ + del_key = key.lower().encode(self.encoding) + + pop_indexes = [ + idx + for idx, (_, item_key, _) in enumerate(self._list) + if item_key.lower() == del_key + ] + + if not pop_indexes: + raise KeyError(key) + + for idx in reversed(pop_indexes): + del self._list[idx] + + def __contains__(self, key: typing.Any) -> bool: + header_key = key.lower().encode(self.encoding) + return header_key in [key for _, key, _ in self._list] + + def __iter__(self) -> typing.Iterator[typing.Any]: + return iter(self.keys()) + + def __len__(self) -> int: + return len(self._list) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_headers = Headers(other) + except ValueError: + return False + + self_list = [(key, value) for _, key, value in self._list] + other_list = [(key, value) for _, key, value in other_headers._list] + return sorted(self_list) == sorted(other_list) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + + encoding_str = "" + if self.encoding != "ascii": + encoding_str = f", encoding={self.encoding!r}" + + as_list = list(_obfuscate_sensitive_headers(self.multi_items())) + as_dict = dict(as_list) + + no_duplicate_keys = len(as_dict) == len(as_list) + if no_duplicate_keys: + return f"{class_name}({as_dict!r}{encoding_str})" + return f"{class_name}({as_list!r}{encoding_str})" + + +class Request: + def __init__( + self, + method: str, + url: URL | str, + *, + params: QueryParamTypes | None = None, + headers: HeaderTypes | None = None, + cookies: CookieTypes | None = None, + content: RequestContent | None = None, + data: RequestData | None = None, + files: RequestFiles | None = None, + json: typing.Any | None = None, + stream: SyncByteStream | AsyncByteStream | None = None, + extensions: RequestExtensions | None = None, + ) -> None: + self.method = method.upper() + self.url = URL(url) if params is None else URL(url, params=params) + self.headers = Headers(headers) + self.extensions = {} if extensions is None else dict(extensions) + + if cookies: + Cookies(cookies).set_cookie_header(self) + + if stream is None: + content_type: str | None = self.headers.get("content-type") + headers, stream = encode_request( + content=content, + data=data, + files=files, + json=json, + boundary=get_multipart_boundary_from_content_type( + content_type=content_type.encode(self.headers.encoding) + if content_type + else None + ), + ) + self._prepare(headers) + self.stream = stream + # Load the request body, except for streaming content. 
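+            # `encode_request` (from `._content`) returns an in-memory
+            # `ByteStream` for `content=`/`data=`/`json=` bodies; multipart
+            # `files=` bodies arrive as a lazy stream and are not read here.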
+ if isinstance(stream, ByteStream): + self.read() + else: + # There's an important distinction between `Request(content=...)`, + # and `Request(stream=...)`. + # + # Using `content=...` implies automatically populated `Host` and content + # headers, of either `Content-Length: ...` or `Transfer-Encoding: chunked`. + # + # Using `stream=...` will not automatically include *any* + # auto-populated headers. + # + # As an end-user you don't really need `stream=...`. It's only + # useful when: + # + # * Preserving the request stream when copying requests, eg for redirects. + # * Creating request instances on the *server-side* of the transport API. + self.stream = stream + + def _prepare(self, default_headers: dict[str, str]) -> None: + for key, value in default_headers.items(): + # Ignore Transfer-Encoding if the Content-Length has been set explicitly. + if key.lower() == "transfer-encoding" and "Content-Length" in self.headers: + continue + self.headers.setdefault(key, value) + + auto_headers: list[tuple[bytes, bytes]] = [] + + has_host = "Host" in self.headers + has_content_length = ( + "Content-Length" in self.headers or "Transfer-Encoding" in self.headers + ) + + if not has_host and self.url.host: + auto_headers.append((b"Host", self.url.netloc)) + if not has_content_length and self.method in ("POST", "PUT", "PATCH"): + auto_headers.append((b"Content-Length", b"0")) + + self.headers = Headers(auto_headers + self.headers.raw) + + @property + def content(self) -> bytes: + if not hasattr(self, "_content"): + raise RequestNotRead() + return self._content + + def read(self) -> bytes: + """ + Read and return the request content. + """ + if not hasattr(self, "_content"): + assert isinstance(self.stream, typing.Iterable) + self._content = b"".join(self.stream) + if not isinstance(self.stream, ByteStream): + # If a streaming request has been read entirely into memory, then + # we can replace the stream with a raw bytes implementation, + # to ensure that any non-replayable streams can still be used. + self.stream = ByteStream(self._content) + return self._content + + async def aread(self) -> bytes: + """ + Read and return the request content. + """ + if not hasattr(self, "_content"): + assert isinstance(self.stream, typing.AsyncIterable) + self._content = b"".join([part async for part in self.stream]) + if not isinstance(self.stream, ByteStream): + # If a streaming request has been read entirely into memory, then + # we can replace the stream with a raw bytes implementation, + # to ensure that any non-replayable streams can still be used. 
+ self.stream = ByteStream(self._content) + return self._content + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + url = str(self.url) + return f"<{class_name}({self.method!r}, {url!r})>" + + def __getstate__(self) -> dict[str, typing.Any]: + return { + name: value + for name, value in self.__dict__.items() + if name not in ["extensions", "stream"] + } + + def __setstate__(self, state: dict[str, typing.Any]) -> None: + for name, value in state.items(): + setattr(self, name, value) + self.extensions = {} + self.stream = UnattachedStream() + + +class Response: + def __init__( + self, + status_code: int, + *, + headers: HeaderTypes | None = None, + content: ResponseContent | None = None, + text: str | None = None, + html: str | None = None, + json: typing.Any = None, + stream: SyncByteStream | AsyncByteStream | None = None, + request: Request | None = None, + extensions: ResponseExtensions | None = None, + history: list[Response] | None = None, + default_encoding: str | typing.Callable[[bytes], str] = "utf-8", + ) -> None: + self.status_code = status_code + self.headers = Headers(headers) + + self._request: Request | None = request + + # When follow_redirects=False and a redirect is received, + # the client will set `response.next_request`. + self.next_request: Request | None = None + + self.extensions = {} if extensions is None else dict(extensions) + self.history = [] if history is None else list(history) + + self.is_closed = False + self.is_stream_consumed = False + + self.default_encoding = default_encoding + + if stream is None: + headers, stream = encode_response(content, text, html, json) + self._prepare(headers) + self.stream = stream + if isinstance(stream, ByteStream): + # Load the response body, except for streaming content. + self.read() + else: + # There's an important distinction between `Response(content=...)`, + # and `Response(stream=...)`. + # + # Using `content=...` implies automatically populated content headers, + # of either `Content-Length: ...` or `Transfer-Encoding: chunked`. + # + # Using `stream=...` will not automatically include any content headers. + # + # As an end-user you don't really need `stream=...`. It's only + # useful when creating response instances having received a stream + # from the transport API. + self.stream = stream + + self._num_bytes_downloaded = 0 + + def _prepare(self, default_headers: dict[str, str]) -> None: + for key, value in default_headers.items(): + # Ignore Transfer-Encoding if the Content-Length has been set explicitly. + if key.lower() == "transfer-encoding" and "content-length" in self.headers: + continue + self.headers.setdefault(key, value) + + @property + def elapsed(self) -> datetime.timedelta: + """ + Returns the time taken for the complete request/response + cycle to complete. + """ + if not hasattr(self, "_elapsed"): + raise RuntimeError( + "'.elapsed' may only be accessed after the response " + "has been read or closed." + ) + return self._elapsed + + @elapsed.setter + def elapsed(self, elapsed: datetime.timedelta) -> None: + self._elapsed = elapsed + + @property + def request(self) -> Request: + """ + Returns the request instance associated to the current response. + """ + if self._request is None: + raise RuntimeError( + "The request instance has not been set on this response." 
+ ) + return self._request + + @request.setter + def request(self, value: Request) -> None: + self._request = value + + @property + def http_version(self) -> str: + try: + http_version: bytes = self.extensions["http_version"] + except KeyError: + return "HTTP/1.1" + else: + return http_version.decode("ascii", errors="ignore") + + @property + def reason_phrase(self) -> str: + try: + reason_phrase: bytes = self.extensions["reason_phrase"] + except KeyError: + return codes.get_reason_phrase(self.status_code) + else: + return reason_phrase.decode("ascii", errors="ignore") + + @property + def url(self) -> URL: + """ + Returns the URL for which the request was made. + """ + return self.request.url + + @property + def content(self) -> bytes: + if not hasattr(self, "_content"): + raise ResponseNotRead() + return self._content + + @property + def text(self) -> str: + if not hasattr(self, "_text"): + content = self.content + if not content: + self._text = "" + else: + decoder = TextDecoder(encoding=self.encoding or "utf-8") + self._text = "".join([decoder.decode(self.content), decoder.flush()]) + return self._text + + @property + def encoding(self) -> str | None: + """ + Return an encoding to use for decoding the byte content into text. + The priority for determining this is given by... + + * `.encoding = <>` has been set explicitly. + * The encoding as specified by the charset parameter in the Content-Type header. + * The encoding as determined by `default_encoding`, which may either be + a string like "utf-8" indicating the encoding to use, or may be a callable + which enables charset autodetection. + """ + if not hasattr(self, "_encoding"): + encoding = self.charset_encoding + if encoding is None or not _is_known_encoding(encoding): + if isinstance(self.default_encoding, str): + encoding = self.default_encoding + elif hasattr(self, "_content"): + encoding = self.default_encoding(self._content) + self._encoding = encoding or "utf-8" + return self._encoding + + @encoding.setter + def encoding(self, value: str) -> None: + """ + Set the encoding to use for decoding the byte content into text. + + If the `text` attribute has been accessed, attempting to set the + encoding will throw a ValueError. + """ + if hasattr(self, "_text"): + raise ValueError( + "Setting encoding after `text` has been accessed is not allowed." + ) + self._encoding = value + + @property + def charset_encoding(self) -> str | None: + """ + Return the encoding, as specified by the Content-Type header. + """ + content_type = self.headers.get("Content-Type") + if content_type is None: + return None + + return _parse_content_type_charset(content_type) + + def _get_content_decoder(self) -> ContentDecoder: + """ + Returns a decoder instance which can be used to decode the raw byte + content, depending on the Content-Encoding used in the response. + """ + if not hasattr(self, "_decoder"): + decoders: list[ContentDecoder] = [] + values = self.headers.get_list("content-encoding", split_commas=True) + for value in values: + value = value.strip().lower() + try: + decoder_cls = SUPPORTED_DECODERS[value] + decoders.append(decoder_cls()) + except KeyError: + continue + + if len(decoders) == 1: + self._decoder = decoders[0] + elif len(decoders) > 1: + self._decoder = MultiDecoder(children=decoders) + else: + self._decoder = IdentityDecoder() + + return self._decoder + + @property + def is_informational(self) -> bool: + """ + A property which is `True` for 1xx status codes, `False` otherwise. 
+ """ + return codes.is_informational(self.status_code) + + @property + def is_success(self) -> bool: + """ + A property which is `True` for 2xx status codes, `False` otherwise. + """ + return codes.is_success(self.status_code) + + @property + def is_redirect(self) -> bool: + """ + A property which is `True` for 3xx status codes, `False` otherwise. + + Note that not all responses with a 3xx status code indicate a URL redirect. + + Use `response.has_redirect_location` to determine responses with a properly + formed URL redirection. + """ + return codes.is_redirect(self.status_code) + + @property + def is_client_error(self) -> bool: + """ + A property which is `True` for 4xx status codes, `False` otherwise. + """ + return codes.is_client_error(self.status_code) + + @property + def is_server_error(self) -> bool: + """ + A property which is `True` for 5xx status codes, `False` otherwise. + """ + return codes.is_server_error(self.status_code) + + @property + def is_error(self) -> bool: + """ + A property which is `True` for 4xx and 5xx status codes, `False` otherwise. + """ + return codes.is_error(self.status_code) + + @property + def has_redirect_location(self) -> bool: + """ + Returns True for 3xx responses with a properly formed URL redirection, + `False` otherwise. + """ + return ( + self.status_code + in ( + # 301 (Cacheable redirect. Method may change to GET.) + codes.MOVED_PERMANENTLY, + # 302 (Uncacheable redirect. Method may change to GET.) + codes.FOUND, + # 303 (Client should make a GET or HEAD request.) + codes.SEE_OTHER, + # 307 (Equiv. 302, but retain method) + codes.TEMPORARY_REDIRECT, + # 308 (Equiv. 301, but retain method) + codes.PERMANENT_REDIRECT, + ) + and "Location" in self.headers + ) + + def raise_for_status(self) -> Response: + """ + Raise the `HTTPStatusError` if one occurred. + """ + request = self._request + if request is None: + raise RuntimeError( + "Cannot call `raise_for_status` as the request " + "instance has not been set on this response." 
+            )
+
+        if self.is_success:
+            return self
+
+        if self.has_redirect_location:
+            message = (
+                "{error_type} '{0.status_code} {0.reason_phrase}' for url '{0.url}'\n"
+                "Redirect location: '{0.headers[location]}'\n"
+                "For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/{0.status_code}"
+            )
+        else:
+            message = (
+                "{error_type} '{0.status_code} {0.reason_phrase}' for url '{0.url}'\n"
+                "For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/{0.status_code}"
+            )
+
+        status_class = self.status_code // 100
+        error_types = {
+            1: "Informational response",
+            3: "Redirect response",
+            4: "Client error",
+            5: "Server error",
+        }
+        error_type = error_types.get(status_class, "Invalid status code")
+        message = message.format(self, error_type=error_type)
+        raise HTTPStatusError(message, request=request, response=self)
+
+    def json(self, **kwargs: typing.Any) -> typing.Any:
+        return jsonlib.loads(self.content, **kwargs)
+
+    @property
+    def cookies(self) -> Cookies:
+        if not hasattr(self, "_cookies"):
+            self._cookies = Cookies()
+            self._cookies.extract_cookies(self)
+        return self._cookies
+
+    @property
+    def links(self) -> dict[str | None, dict[str, str]]:
+        """
+        Returns the parsed header links of the response, if any
+        """
+        header = self.headers.get("link")
+        if header is None:
+            return {}
+
+        return {
+            (link.get("rel") or link.get("url")): link
+            for link in _parse_header_links(header)
+        }
+
+    @property
+    def num_bytes_downloaded(self) -> int:
+        return self._num_bytes_downloaded
+
+    def __repr__(self) -> str:
+        return f"<Response [{self.status_code} {self.reason_phrase}]>"
+
+    def __getstate__(self) -> dict[str, typing.Any]:
+        return {
+            name: value
+            for name, value in self.__dict__.items()
+            if name not in ["extensions", "stream", "is_closed", "_decoder"]
+        }
+
+    def __setstate__(self, state: dict[str, typing.Any]) -> None:
+        for name, value in state.items():
+            setattr(self, name, value)
+        self.is_closed = True
+        self.extensions = {}
+        self.stream = UnattachedStream()
+
+    def read(self) -> bytes:
+        """
+        Read and return the response content.
+        """
+        if not hasattr(self, "_content"):
+            self._content = b"".join(self.iter_bytes())
+        return self._content
+
+    def iter_bytes(self, chunk_size: int | None = None) -> typing.Iterator[bytes]:
+        """
+        A byte-iterator over the decoded response content.
+        This allows us to handle gzip, deflate, brotli, and zstd encoded responses.
+        """
+        if hasattr(self, "_content"):
+            chunk_size = len(self._content) if chunk_size is None else chunk_size
+            for i in range(0, len(self._content), max(chunk_size, 1)):
+                yield self._content[i : i + chunk_size]
+        else:
+            decoder = self._get_content_decoder()
+            chunker = ByteChunker(chunk_size=chunk_size)
+            with request_context(request=self._request):
+                for raw_bytes in self.iter_raw():
+                    decoded = decoder.decode(raw_bytes)
+                    for chunk in chunker.decode(decoded):
+                        yield chunk
+                decoded = decoder.flush()
+                for chunk in chunker.decode(decoded):
+                    yield chunk  # pragma: no cover
+                for chunk in chunker.flush():
+                    yield chunk
+
+    def iter_text(self, chunk_size: int | None = None) -> typing.Iterator[str]:
+        """
+        A str-iterator over the decoded response content
+        that handles both gzip, deflate, etc but also detects the content's
+        string encoding.
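+        Once the response has been read, joining the chunks reproduces the
+        decoded text, e.g. `"".join(response.iter_text()) == response.text`.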
+        """
+        decoder = TextDecoder(encoding=self.encoding or "utf-8")
+        chunker = TextChunker(chunk_size=chunk_size)
+        with request_context(request=self._request):
+            for byte_content in self.iter_bytes():
+                text_content = decoder.decode(byte_content)
+                for chunk in chunker.decode(text_content):
+                    yield chunk
+            text_content = decoder.flush()
+            for chunk in chunker.decode(text_content):
+                yield chunk  # pragma: no cover
+            for chunk in chunker.flush():
+                yield chunk
+
+    def iter_lines(self) -> typing.Iterator[str]:
+        decoder = LineDecoder()
+        with request_context(request=self._request):
+            for text in self.iter_text():
+                for line in decoder.decode(text):
+                    yield line
+            for line in decoder.flush():
+                yield line
+
+    def iter_raw(self, chunk_size: int | None = None) -> typing.Iterator[bytes]:
+        """
+        A byte-iterator over the raw response content.
+        """
+        if self.is_stream_consumed:
+            raise StreamConsumed()
+        if self.is_closed:
+            raise StreamClosed()
+        if not isinstance(self.stream, SyncByteStream):
+            raise RuntimeError("Attempted to call a sync iterator on an async stream.")
+
+        self.is_stream_consumed = True
+        self._num_bytes_downloaded = 0
+        chunker = ByteChunker(chunk_size=chunk_size)
+
+        with request_context(request=self._request):
+            for raw_stream_bytes in self.stream:
+                self._num_bytes_downloaded += len(raw_stream_bytes)
+                for chunk in chunker.decode(raw_stream_bytes):
+                    yield chunk
+
+        for chunk in chunker.flush():
+            yield chunk
+
+        self.close()
+
+    def close(self) -> None:
+        """
+        Close the response and release the connection.
+        Automatically called if the response body is read to completion.
+        """
+        if not isinstance(self.stream, SyncByteStream):
+            raise RuntimeError("Attempted to call a sync close on an async stream.")
+
+        if not self.is_closed:
+            self.is_closed = True
+            with request_context(request=self._request):
+                self.stream.close()
+
+    async def aread(self) -> bytes:
+        """
+        Read and return the response content.
+        """
+        if not hasattr(self, "_content"):
+            self._content = b"".join([part async for part in self.aiter_bytes()])
+        return self._content
+
+    async def aiter_bytes(
+        self, chunk_size: int | None = None
+    ) -> typing.AsyncIterator[bytes]:
+        """
+        A byte-iterator over the decoded response content.
+        This allows us to handle gzip, deflate, brotli, and zstd encoded responses.
+        """
+        if hasattr(self, "_content"):
+            chunk_size = len(self._content) if chunk_size is None else chunk_size
+            for i in range(0, len(self._content), max(chunk_size, 1)):
+                yield self._content[i : i + chunk_size]
+        else:
+            decoder = self._get_content_decoder()
+            chunker = ByteChunker(chunk_size=chunk_size)
+            with request_context(request=self._request):
+                async for raw_bytes in self.aiter_raw():
+                    decoded = decoder.decode(raw_bytes)
+                    for chunk in chunker.decode(decoded):
+                        yield chunk
+                decoded = decoder.flush()
+                for chunk in chunker.decode(decoded):
+                    yield chunk  # pragma: no cover
+                for chunk in chunker.flush():
+                    yield chunk
+
+    async def aiter_text(
+        self, chunk_size: int | None = None
+    ) -> typing.AsyncIterator[str]:
+        """
+        A str-iterator over the decoded response content
+        that handles both gzip, deflate, etc but also detects the content's
+        string encoding.
+        """
+        decoder = TextDecoder(encoding=self.encoding or "utf-8")
+        chunker = TextChunker(chunk_size=chunk_size)
+        with request_context(request=self._request):
+            async for byte_content in self.aiter_bytes():
+                text_content = decoder.decode(byte_content)
+                for chunk in chunker.decode(text_content):
+                    yield chunk
+            text_content = decoder.flush()
+            for chunk in chunker.decode(text_content):
+                yield chunk  # pragma: no cover
+            for chunk in chunker.flush():
+                yield chunk
+
+    async def aiter_lines(self) -> typing.AsyncIterator[str]:
+        decoder = LineDecoder()
+        with request_context(request=self._request):
+            async for text in self.aiter_text():
+                for line in decoder.decode(text):
+                    yield line
+            for line in decoder.flush():
+                yield line
+
+    async def aiter_raw(
+        self, chunk_size: int | None = None
+    ) -> typing.AsyncIterator[bytes]:
+        """
+        A byte-iterator over the raw response content.
+        """
+        if self.is_stream_consumed:
+            raise StreamConsumed()
+        if self.is_closed:
+            raise StreamClosed()
+        if not isinstance(self.stream, AsyncByteStream):
+            raise RuntimeError("Attempted to call an async iterator on a sync stream.")
+
+        self.is_stream_consumed = True
+        self._num_bytes_downloaded = 0
+        chunker = ByteChunker(chunk_size=chunk_size)
+
+        with request_context(request=self._request):
+            async for raw_stream_bytes in self.stream:
+                self._num_bytes_downloaded += len(raw_stream_bytes)
+                for chunk in chunker.decode(raw_stream_bytes):
+                    yield chunk
+
+        for chunk in chunker.flush():
+            yield chunk
+
+        await self.aclose()
+
+    async def aclose(self) -> None:
+        """
+        Close the response and release the connection.
+        Automatically called if the response body is read to completion.
+        """
+        if not isinstance(self.stream, AsyncByteStream):
+            raise RuntimeError("Attempted to call an async close on a sync stream.")
+
+        if not self.is_closed:
+            self.is_closed = True
+            with request_context(request=self._request):
+                await self.stream.aclose()
+
+
+class Cookies(typing.MutableMapping[str, str]):
+    """
+    HTTP Cookies, as a mutable mapping.
+    """
+
+    def __init__(self, cookies: CookieTypes | None = None) -> None:
+        if cookies is None or isinstance(cookies, dict):
+            self.jar = CookieJar()
+            if isinstance(cookies, dict):
+                for key, value in cookies.items():
+                    self.set(key, value)
+        elif isinstance(cookies, list):
+            self.jar = CookieJar()
+            for key, value in cookies:
+                self.set(key, value)
+        elif isinstance(cookies, Cookies):
+            self.jar = CookieJar()
+            for cookie in cookies.jar:
+                self.jar.set_cookie(cookie)
+        else:
+            self.jar = cookies
+
+    def extract_cookies(self, response: Response) -> None:
+        """
+        Loads any cookies based on the response `Set-Cookie` headers.
+        """
+        urllib_response = self._CookieCompatResponse(response)
+        urllib_request = self._CookieCompatRequest(response.request)
+
+        self.jar.extract_cookies(urllib_response, urllib_request)  # type: ignore
+
+    def set_cookie_header(self, request: Request) -> None:
+        """
+        Sets an appropriate 'Cookie:' HTTP header on the `Request`.
+        """
+        urllib_request = self._CookieCompatRequest(request)
+        self.jar.add_cookie_header(urllib_request)
+
+    def set(self, name: str, value: str, domain: str = "", path: str = "/") -> None:
+        """
+        Set a cookie value by name. May optionally include domain and path.
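+        For example (illustrative values):
+        `cookies.set("session_id", "abc123", domain="example.org")`.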
+        """
+        kwargs = {
+            "version": 0,
+            "name": name,
+            "value": value,
+            "port": None,
+            "port_specified": False,
+            "domain": domain,
+            "domain_specified": bool(domain),
+            "domain_initial_dot": domain.startswith("."),
+            "path": path,
+            "path_specified": bool(path),
+            "secure": False,
+            "expires": None,
+            "discard": True,
+            "comment": None,
+            "comment_url": None,
+            "rest": {"HttpOnly": None},
+            "rfc2109": False,
+        }
+        cookie = Cookie(**kwargs)  # type: ignore
+        self.jar.set_cookie(cookie)
+
+    def get(  # type: ignore
+        self,
+        name: str,
+        default: str | None = None,
+        domain: str | None = None,
+        path: str | None = None,
+    ) -> str | None:
+        """
+        Get a cookie by name. May optionally include domain and path
+        in order to specify exactly which cookie to retrieve.
+        """
+        value = None
+        for cookie in self.jar:
+            if cookie.name == name:
+                if domain is None or cookie.domain == domain:
+                    if path is None or cookie.path == path:
+                        if value is not None:
+                            message = f"Multiple cookies exist with name={name}"
+                            raise CookieConflict(message)
+                        value = cookie.value
+
+        if value is None:
+            return default
+        return value
+
+    def delete(
+        self,
+        name: str,
+        domain: str | None = None,
+        path: str | None = None,
+    ) -> None:
+        """
+        Delete a cookie by name. May optionally include domain and path
+        in order to specify exactly which cookie to delete.
+        """
+        if domain is not None and path is not None:
+            return self.jar.clear(domain, path, name)
+
+        remove = [
+            cookie
+            for cookie in self.jar
+            if cookie.name == name
+            and (domain is None or cookie.domain == domain)
+            and (path is None or cookie.path == path)
+        ]
+
+        for cookie in remove:
+            self.jar.clear(cookie.domain, cookie.path, cookie.name)
+
+    def clear(self, domain: str | None = None, path: str | None = None) -> None:
+        """
+        Delete all cookies. Optionally include a domain and path in
+        order to only delete a subset of all the cookies.
+        """
+        args = []
+        if domain is not None:
+            args.append(domain)
+        if path is not None:
+            assert domain is not None
+            args.append(path)
+        self.jar.clear(*args)
+
+    def update(self, cookies: CookieTypes | None = None) -> None:  # type: ignore
+        cookies = Cookies(cookies)
+        for cookie in cookies.jar:
+            self.jar.set_cookie(cookie)
+
+    def __setitem__(self, name: str, value: str) -> None:
+        return self.set(name, value)
+
+    def __getitem__(self, name: str) -> str:
+        value = self.get(name)
+        if value is None:
+            raise KeyError(name)
+        return value
+
+    def __delitem__(self, name: str) -> None:
+        return self.delete(name)
+
+    def __len__(self) -> int:
+        return len(self.jar)
+
+    def __iter__(self) -> typing.Iterator[str]:
+        return (cookie.name for cookie in self.jar)
+
+    def __bool__(self) -> bool:
+        for _ in self.jar:
+            return True
+        return False
+
+    def __repr__(self) -> str:
+        cookies_repr = ", ".join(
+            [
+                f"<Cookie {cookie.name}={cookie.value} for {cookie.domain} />"
+                for cookie in self.jar
+            ]
+        )
+
+        return f"<Cookies[{cookies_repr}]>"
+
+    class _CookieCompatRequest(urllib.request.Request):
+        """
+        Wraps a `Request` instance up in a compatibility interface suitable
+        for use with `CookieJar` operations.
+        """
+
+        def __init__(self, request: Request) -> None:
+            super().__init__(
+                url=str(request.url),
+                headers=dict(request.headers),
+                method=request.method,
+            )
+            self.request = request
+
+        def add_unredirected_header(self, key: str, value: str) -> None:
+            super().add_unredirected_header(key, value)
+            self.request.headers[key] = value
+
+    class _CookieCompatResponse:
+        """
+        Wraps a `Response` instance up in a compatibility interface suitable
+        for use with `CookieJar` operations.
+ """ + + def __init__(self, response: Response) -> None: + self.response = response + + def info(self) -> email.message.Message: + info = email.message.Message() + for key, value in self.response.headers.multi_items(): + # Note that setting `info[key]` here is an "append" operation, + # not a "replace" operation. + # https://docs.python.org/3/library/email.compat32-message.html#email.message.Message.__setitem__ + info[key] = value + return info diff --git a/venv/Lib/site-packages/httpx/_multipart.py b/venv/Lib/site-packages/httpx/_multipart.py new file mode 100644 index 00000000..b4761af9 --- /dev/null +++ b/venv/Lib/site-packages/httpx/_multipart.py @@ -0,0 +1,300 @@ +from __future__ import annotations + +import io +import mimetypes +import os +import re +import typing +from pathlib import Path + +from ._types import ( + AsyncByteStream, + FileContent, + FileTypes, + RequestData, + RequestFiles, + SyncByteStream, +) +from ._utils import ( + peek_filelike_length, + primitive_value_to_str, + to_bytes, +) + +_HTML5_FORM_ENCODING_REPLACEMENTS = {'"': "%22", "\\": "\\\\"} +_HTML5_FORM_ENCODING_REPLACEMENTS.update( + {chr(c): "%{:02X}".format(c) for c in range(0x1F + 1) if c != 0x1B} +) +_HTML5_FORM_ENCODING_RE = re.compile( + r"|".join([re.escape(c) for c in _HTML5_FORM_ENCODING_REPLACEMENTS.keys()]) +) + + +def _format_form_param(name: str, value: str) -> bytes: + """ + Encode a name/value pair within a multipart form. + """ + + def replacer(match: typing.Match[str]) -> str: + return _HTML5_FORM_ENCODING_REPLACEMENTS[match.group(0)] + + value = _HTML5_FORM_ENCODING_RE.sub(replacer, value) + return f'{name}="{value}"'.encode() + + +def _guess_content_type(filename: str | None) -> str | None: + """ + Guesses the mimetype based on a filename. Defaults to `application/octet-stream`. + + Returns `None` if `filename` is `None` or empty. + """ + if filename: + return mimetypes.guess_type(filename)[0] or "application/octet-stream" + return None + + +def get_multipart_boundary_from_content_type( + content_type: bytes | None, +) -> bytes | None: + if not content_type or not content_type.startswith(b"multipart/form-data"): + return None + # parse boundary according to + # https://www.rfc-editor.org/rfc/rfc2046#section-5.1.1 + if b";" in content_type: + for section in content_type.split(b";"): + if section.strip().lower().startswith(b"boundary="): + return section.strip()[len(b"boundary=") :].strip(b'"') + return None + + +class DataField: + """ + A single form field item, within a multipart form field. + """ + + def __init__(self, name: str, value: str | bytes | int | float | None) -> None: + if not isinstance(name, str): + raise TypeError( + f"Invalid type for name. Expected str, got {type(name)}: {name!r}" + ) + if value is not None and not isinstance(value, (str, bytes, int, float)): + raise TypeError( + "Invalid type for value. 
Expected primitive type,"
+                f" got {type(value)}: {value!r}"
+            )
+        self.name = name
+        self.value: str | bytes = (
+            value if isinstance(value, bytes) else primitive_value_to_str(value)
+        )
+
+    def render_headers(self) -> bytes:
+        if not hasattr(self, "_headers"):
+            name = _format_form_param("name", self.name)
+            self._headers = b"".join(
+                [b"Content-Disposition: form-data; ", name, b"\r\n\r\n"]
+            )
+
+        return self._headers
+
+    def render_data(self) -> bytes:
+        if not hasattr(self, "_data"):
+            self._data = to_bytes(self.value)
+
+        return self._data
+
+    def get_length(self) -> int:
+        headers = self.render_headers()
+        data = self.render_data()
+        return len(headers) + len(data)
+
+    def render(self) -> typing.Iterator[bytes]:
+        yield self.render_headers()
+        yield self.render_data()
+
+
+class FileField:
+    """
+    A single file field item, within a multipart form field.
+    """
+
+    CHUNK_SIZE = 64 * 1024
+
+    def __init__(self, name: str, value: FileTypes) -> None:
+        self.name = name
+
+        fileobj: FileContent
+
+        headers: dict[str, str] = {}
+        content_type: str | None = None
+
+        # This large tuple based API largely mirrors requests' API.
+        # It would be good to think of better APIs for this that we could
+        # include in httpx 2.0, since variable length tuples (especially of
+        # 4 elements) are quite unwieldy.
+        if isinstance(value, tuple):
+            if len(value) == 2:
+                # neither the 3rd parameter (content_type) nor the 4th (headers)
+                # was included
+                filename, fileobj = value
+            elif len(value) == 3:
+                filename, fileobj, content_type = value
+            else:
+                # all 4 parameters included
+                filename, fileobj, content_type, headers = value  # type: ignore
+        else:
+            filename = Path(str(getattr(value, "name", "upload"))).name
+            fileobj = value
+
+        if content_type is None:
+            content_type = _guess_content_type(filename)
+
+        has_content_type_header = any("content-type" in key.lower() for key in headers)
+        if content_type is not None and not has_content_type_header:
+            # Note that unlike requests, we ignore the content_type provided in the
+            # 3rd tuple element if it is also included in the headers; requests does
+            # the opposite (it overwrites the header with the 3rd tuple element).
+            headers["Content-Type"] = content_type
+
+        if isinstance(fileobj, io.StringIO):
+            raise TypeError(
+                "Multipart file uploads require 'io.BytesIO', not 'io.StringIO'."
+            )
+        if isinstance(fileobj, io.TextIOBase):
+            raise TypeError(
+                "Multipart file uploads must be opened in binary mode, not text mode."
+            )
+
+        self.filename = filename
+        self.file = fileobj
+        self.headers = headers
+
+    def get_length(self) -> int | None:
+        headers = self.render_headers()
+
+        if isinstance(self.file, (str, bytes)):
+            return len(headers) + len(to_bytes(self.file))
+
+        file_length = peek_filelike_length(self.file)
+
+        # If we can't determine the filesize without reading it into memory,
+        # then return `None` here, to indicate an unknown file length.
+ if file_length is None: + return None + + return len(headers) + file_length + + def render_headers(self) -> bytes: + if not hasattr(self, "_headers"): + parts = [ + b"Content-Disposition: form-data; ", + _format_form_param("name", self.name), + ] + if self.filename: + filename = _format_form_param("filename", self.filename) + parts.extend([b"; ", filename]) + for header_name, header_value in self.headers.items(): + key, val = f"\r\n{header_name}: ".encode(), header_value.encode() + parts.extend([key, val]) + parts.append(b"\r\n\r\n") + self._headers = b"".join(parts) + + return self._headers + + def render_data(self) -> typing.Iterator[bytes]: + if isinstance(self.file, (str, bytes)): + yield to_bytes(self.file) + return + + if hasattr(self.file, "seek"): + try: + self.file.seek(0) + except io.UnsupportedOperation: + pass + + chunk = self.file.read(self.CHUNK_SIZE) + while chunk: + yield to_bytes(chunk) + chunk = self.file.read(self.CHUNK_SIZE) + + def render(self) -> typing.Iterator[bytes]: + yield self.render_headers() + yield from self.render_data() + + +class MultipartStream(SyncByteStream, AsyncByteStream): + """ + Request content as streaming multipart encoded form data. + """ + + def __init__( + self, + data: RequestData, + files: RequestFiles, + boundary: bytes | None = None, + ) -> None: + if boundary is None: + boundary = os.urandom(16).hex().encode("ascii") + + self.boundary = boundary + self.content_type = "multipart/form-data; boundary=%s" % boundary.decode( + "ascii" + ) + self.fields = list(self._iter_fields(data, files)) + + def _iter_fields( + self, data: RequestData, files: RequestFiles + ) -> typing.Iterator[FileField | DataField]: + for name, value in data.items(): + if isinstance(value, (tuple, list)): + for item in value: + yield DataField(name=name, value=item) + else: + yield DataField(name=name, value=value) + + file_items = files.items() if isinstance(files, typing.Mapping) else files + for name, value in file_items: + yield FileField(name=name, value=value) + + def iter_chunks(self) -> typing.Iterator[bytes]: + for field in self.fields: + yield b"--%s\r\n" % self.boundary + yield from field.render() + yield b"\r\n" + yield b"--%s--\r\n" % self.boundary + + def get_content_length(self) -> int | None: + """ + Return the length of the multipart encoded content, or `None` if + any of the files have a length that cannot be determined upfront. + """ + boundary_length = len(self.boundary) + length = 0 + + for field in self.fields: + field_length = field.get_length() + if field_length is None: + return None + + length += 2 + boundary_length + 2 # b"--{boundary}\r\n" + length += field_length + length += 2 # b"\r\n" + + length += 2 + boundary_length + 4 # b"--{boundary}--\r\n" + return length + + # Content stream interface. 
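+    # The `__iter__`/`__aiter__` methods below allow an instance to be used
+    # directly as request content by both sync and async clients.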
+ + def get_headers(self) -> dict[str, str]: + content_length = self.get_content_length() + content_type = self.content_type + if content_length is None: + return {"Transfer-Encoding": "chunked", "Content-Type": content_type} + return {"Content-Length": str(content_length), "Content-Type": content_type} + + def __iter__(self) -> typing.Iterator[bytes]: + for chunk in self.iter_chunks(): + yield chunk + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + for chunk in self.iter_chunks(): + yield chunk diff --git a/venv/Lib/site-packages/httpx/_status_codes.py b/venv/Lib/site-packages/httpx/_status_codes.py new file mode 100644 index 00000000..133a6231 --- /dev/null +++ b/venv/Lib/site-packages/httpx/_status_codes.py @@ -0,0 +1,162 @@ +from __future__ import annotations + +from enum import IntEnum + +__all__ = ["codes"] + + +class codes(IntEnum): + """HTTP status codes and reason phrases + + Status codes from the following RFCs are all observed: + + * RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616 + * RFC 6585: Additional HTTP Status Codes + * RFC 3229: Delta encoding in HTTP + * RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518 + * RFC 5842: Binding Extensions to WebDAV + * RFC 7238: Permanent Redirect + * RFC 2295: Transparent Content Negotiation in HTTP + * RFC 2774: An HTTP Extension Framework + * RFC 7540: Hypertext Transfer Protocol Version 2 (HTTP/2) + * RFC 2324: Hyper Text Coffee Pot Control Protocol (HTCPCP/1.0) + * RFC 7725: An HTTP Status Code to Report Legal Obstacles + * RFC 8297: An HTTP Status Code for Indicating Hints + * RFC 8470: Using Early Data in HTTP + """ + + def __new__(cls, value: int, phrase: str = "") -> codes: + obj = int.__new__(cls, value) + obj._value_ = value + + obj.phrase = phrase # type: ignore[attr-defined] + return obj + + def __str__(self) -> str: + return str(self.value) + + @classmethod + def get_reason_phrase(cls, value: int) -> str: + try: + return codes(value).phrase # type: ignore + except ValueError: + return "" + + @classmethod + def is_informational(cls, value: int) -> bool: + """ + Returns `True` for 1xx status codes, `False` otherwise. + """ + return 100 <= value <= 199 + + @classmethod + def is_success(cls, value: int) -> bool: + """ + Returns `True` for 2xx status codes, `False` otherwise. + """ + return 200 <= value <= 299 + + @classmethod + def is_redirect(cls, value: int) -> bool: + """ + Returns `True` for 3xx status codes, `False` otherwise. + """ + return 300 <= value <= 399 + + @classmethod + def is_client_error(cls, value: int) -> bool: + """ + Returns `True` for 4xx status codes, `False` otherwise. + """ + return 400 <= value <= 499 + + @classmethod + def is_server_error(cls, value: int) -> bool: + """ + Returns `True` for 5xx status codes, `False` otherwise. + """ + return 500 <= value <= 599 + + @classmethod + def is_error(cls, value: int) -> bool: + """ + Returns `True` for 4xx or 5xx status codes, `False` otherwise. 
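+        (Equivalent to `is_client_error(value) or is_server_error(value)`.)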
+ """ + return 400 <= value <= 599 + + # informational + CONTINUE = 100, "Continue" + SWITCHING_PROTOCOLS = 101, "Switching Protocols" + PROCESSING = 102, "Processing" + EARLY_HINTS = 103, "Early Hints" + + # success + OK = 200, "OK" + CREATED = 201, "Created" + ACCEPTED = 202, "Accepted" + NON_AUTHORITATIVE_INFORMATION = 203, "Non-Authoritative Information" + NO_CONTENT = 204, "No Content" + RESET_CONTENT = 205, "Reset Content" + PARTIAL_CONTENT = 206, "Partial Content" + MULTI_STATUS = 207, "Multi-Status" + ALREADY_REPORTED = 208, "Already Reported" + IM_USED = 226, "IM Used" + + # redirection + MULTIPLE_CHOICES = 300, "Multiple Choices" + MOVED_PERMANENTLY = 301, "Moved Permanently" + FOUND = 302, "Found" + SEE_OTHER = 303, "See Other" + NOT_MODIFIED = 304, "Not Modified" + USE_PROXY = 305, "Use Proxy" + TEMPORARY_REDIRECT = 307, "Temporary Redirect" + PERMANENT_REDIRECT = 308, "Permanent Redirect" + + # client error + BAD_REQUEST = 400, "Bad Request" + UNAUTHORIZED = 401, "Unauthorized" + PAYMENT_REQUIRED = 402, "Payment Required" + FORBIDDEN = 403, "Forbidden" + NOT_FOUND = 404, "Not Found" + METHOD_NOT_ALLOWED = 405, "Method Not Allowed" + NOT_ACCEPTABLE = 406, "Not Acceptable" + PROXY_AUTHENTICATION_REQUIRED = 407, "Proxy Authentication Required" + REQUEST_TIMEOUT = 408, "Request Timeout" + CONFLICT = 409, "Conflict" + GONE = 410, "Gone" + LENGTH_REQUIRED = 411, "Length Required" + PRECONDITION_FAILED = 412, "Precondition Failed" + REQUEST_ENTITY_TOO_LARGE = 413, "Request Entity Too Large" + REQUEST_URI_TOO_LONG = 414, "Request-URI Too Long" + UNSUPPORTED_MEDIA_TYPE = 415, "Unsupported Media Type" + REQUESTED_RANGE_NOT_SATISFIABLE = 416, "Requested Range Not Satisfiable" + EXPECTATION_FAILED = 417, "Expectation Failed" + IM_A_TEAPOT = 418, "I'm a teapot" + MISDIRECTED_REQUEST = 421, "Misdirected Request" + UNPROCESSABLE_ENTITY = 422, "Unprocessable Entity" + LOCKED = 423, "Locked" + FAILED_DEPENDENCY = 424, "Failed Dependency" + TOO_EARLY = 425, "Too Early" + UPGRADE_REQUIRED = 426, "Upgrade Required" + PRECONDITION_REQUIRED = 428, "Precondition Required" + TOO_MANY_REQUESTS = 429, "Too Many Requests" + REQUEST_HEADER_FIELDS_TOO_LARGE = 431, "Request Header Fields Too Large" + UNAVAILABLE_FOR_LEGAL_REASONS = 451, "Unavailable For Legal Reasons" + + # server errors + INTERNAL_SERVER_ERROR = 500, "Internal Server Error" + NOT_IMPLEMENTED = 501, "Not Implemented" + BAD_GATEWAY = 502, "Bad Gateway" + SERVICE_UNAVAILABLE = 503, "Service Unavailable" + GATEWAY_TIMEOUT = 504, "Gateway Timeout" + HTTP_VERSION_NOT_SUPPORTED = 505, "HTTP Version Not Supported" + VARIANT_ALSO_NEGOTIATES = 506, "Variant Also Negotiates" + INSUFFICIENT_STORAGE = 507, "Insufficient Storage" + LOOP_DETECTED = 508, "Loop Detected" + NOT_EXTENDED = 510, "Not Extended" + NETWORK_AUTHENTICATION_REQUIRED = 511, "Network Authentication Required" + + +# Include lower-case styles for `requests` compatibility. 
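+# For example, `codes.not_found == codes.NOT_FOUND == 404`.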
+for code in codes: + setattr(codes, code._name_.lower(), int(code)) diff --git a/venv/Lib/site-packages/httpx/_transports/__init__.py b/venv/Lib/site-packages/httpx/_transports/__init__.py new file mode 100644 index 00000000..7a321053 --- /dev/null +++ b/venv/Lib/site-packages/httpx/_transports/__init__.py @@ -0,0 +1,15 @@ +from .asgi import * +from .base import * +from .default import * +from .mock import * +from .wsgi import * + +__all__ = [ + "ASGITransport", + "AsyncBaseTransport", + "BaseTransport", + "AsyncHTTPTransport", + "HTTPTransport", + "MockTransport", + "WSGITransport", +] diff --git a/venv/Lib/site-packages/httpx/_transports/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/httpx/_transports/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..97e30afb Binary files /dev/null and b/venv/Lib/site-packages/httpx/_transports/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/_transports/__pycache__/asgi.cpython-312.pyc b/venv/Lib/site-packages/httpx/_transports/__pycache__/asgi.cpython-312.pyc new file mode 100644 index 00000000..40c402a9 Binary files /dev/null and b/venv/Lib/site-packages/httpx/_transports/__pycache__/asgi.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/_transports/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/httpx/_transports/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..9df8f062 Binary files /dev/null and b/venv/Lib/site-packages/httpx/_transports/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/_transports/__pycache__/default.cpython-312.pyc b/venv/Lib/site-packages/httpx/_transports/__pycache__/default.cpython-312.pyc new file mode 100644 index 00000000..02bf4d5b Binary files /dev/null and b/venv/Lib/site-packages/httpx/_transports/__pycache__/default.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/_transports/__pycache__/mock.cpython-312.pyc b/venv/Lib/site-packages/httpx/_transports/__pycache__/mock.cpython-312.pyc new file mode 100644 index 00000000..f94bbbf9 Binary files /dev/null and b/venv/Lib/site-packages/httpx/_transports/__pycache__/mock.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/_transports/__pycache__/wsgi.cpython-312.pyc b/venv/Lib/site-packages/httpx/_transports/__pycache__/wsgi.cpython-312.pyc new file mode 100644 index 00000000..8617556d Binary files /dev/null and b/venv/Lib/site-packages/httpx/_transports/__pycache__/wsgi.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/httpx/_transports/asgi.py b/venv/Lib/site-packages/httpx/_transports/asgi.py new file mode 100644 index 00000000..2bc4efae --- /dev/null +++ b/venv/Lib/site-packages/httpx/_transports/asgi.py @@ -0,0 +1,187 @@ +from __future__ import annotations + +import typing + +from .._models import Request, Response +from .._types import AsyncByteStream +from .base import AsyncBaseTransport + +if typing.TYPE_CHECKING: # pragma: no cover + import asyncio + + import trio + + Event = typing.Union[asyncio.Event, trio.Event] + + +_Message = typing.MutableMapping[str, typing.Any] +_Receive = typing.Callable[[], typing.Awaitable[_Message]] +_Send = typing.Callable[ + [typing.MutableMapping[str, typing.Any]], typing.Awaitable[None] +] +_ASGIApp = typing.Callable[ + [typing.MutableMapping[str, typing.Any], _Receive, _Send], typing.Awaitable[None] +] + +__all__ = ["ASGITransport"] + + +def is_running_trio() -> bool: + try: + # sniffio is a dependency of trio. 
+
+        # See https://github.com/python-trio/trio/issues/2802
+        import sniffio
+
+        if sniffio.current_async_library() == "trio":
+            return True
+    except ImportError:  # pragma: nocover
+        pass
+
+    return False
+
+
+def create_event() -> Event:
+    if is_running_trio():
+        import trio
+
+        return trio.Event()
+
+    import asyncio
+
+    return asyncio.Event()
+
+
+class ASGIResponseStream(AsyncByteStream):
+    def __init__(self, body: list[bytes]) -> None:
+        self._body = body
+
+    async def __aiter__(self) -> typing.AsyncIterator[bytes]:
+        yield b"".join(self._body)
+
+
+class ASGITransport(AsyncBaseTransport):
+    """
+    A custom AsyncTransport that handles sending requests directly to an ASGI app.
+
+    ```python
+    transport = httpx.ASGITransport(
+        app=app,
+        root_path="/submount",
+        client=("1.2.3.4", 123)
+    )
+    client = httpx.AsyncClient(transport=transport)
+    ```
+
+    Arguments:
+
+    * `app` - The ASGI application.
+    * `raise_app_exceptions` - Boolean indicating if exceptions in the application
+      should be raised. Defaults to `True`. Can be set to `False` for use cases
+      such as testing the content of a client 500 response.
+    * `root_path` - The root path on which the ASGI application should be mounted.
+    * `client` - A two-tuple indicating the client IP and port of incoming requests.
+    """
+
+    def __init__(
+        self,
+        app: _ASGIApp,
+        raise_app_exceptions: bool = True,
+        root_path: str = "",
+        client: tuple[str, int] = ("127.0.0.1", 123),
+    ) -> None:
+        self.app = app
+        self.raise_app_exceptions = raise_app_exceptions
+        self.root_path = root_path
+        self.client = client
+
+    async def handle_async_request(
+        self,
+        request: Request,
+    ) -> Response:
+        assert isinstance(request.stream, AsyncByteStream)
+
+        # ASGI scope.
+        scope = {
+            "type": "http",
+            "asgi": {"version": "3.0"},
+            "http_version": "1.1",
+            "method": request.method,
+            "headers": [(k.lower(), v) for (k, v) in request.headers.raw],
+            "scheme": request.url.scheme,
+            "path": request.url.path,
+            "raw_path": request.url.raw_path.split(b"?")[0],
+            "query_string": request.url.query,
+            "server": (request.url.host, request.url.port),
+            "client": self.client,
+            "root_path": self.root_path,
+        }
+
+        # Request.
+        request_body_chunks = request.stream.__aiter__()
+        request_complete = False
+
+        # Response.
+        status_code = None
+        response_headers = None
+        body_parts = []
+        response_started = False
+        response_complete = create_event()
+
+        # ASGI callables.
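+        # `receive` replays the request body to the app as `http.request`
+        # messages; `send` collects the app's `http.response.start` and
+        # `http.response.body` messages into the locals above.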
+ + async def receive() -> dict[str, typing.Any]: + nonlocal request_complete + + if request_complete: + await response_complete.wait() + return {"type": "http.disconnect"} + + try: + body = await request_body_chunks.__anext__() + except StopAsyncIteration: + request_complete = True + return {"type": "http.request", "body": b"", "more_body": False} + return {"type": "http.request", "body": body, "more_body": True} + + async def send(message: typing.MutableMapping[str, typing.Any]) -> None: + nonlocal status_code, response_headers, response_started + + if message["type"] == "http.response.start": + assert not response_started + + status_code = message["status"] + response_headers = message.get("headers", []) + response_started = True + + elif message["type"] == "http.response.body": + assert not response_complete.is_set() + body = message.get("body", b"") + more_body = message.get("more_body", False) + + if body and request.method != "HEAD": + body_parts.append(body) + + if not more_body: + response_complete.set() + + try: + await self.app(scope, receive, send) + except Exception: # noqa: PIE-786 + if self.raise_app_exceptions: + raise + + response_complete.set() + if status_code is None: + status_code = 500 + if response_headers is None: + response_headers = {} + + assert response_complete.is_set() + assert status_code is not None + assert response_headers is not None + + stream = ASGIResponseStream(body_parts) + + return Response(status_code, headers=response_headers, stream=stream) diff --git a/venv/Lib/site-packages/httpx/_transports/base.py b/venv/Lib/site-packages/httpx/_transports/base.py new file mode 100644 index 00000000..66fd99d7 --- /dev/null +++ b/venv/Lib/site-packages/httpx/_transports/base.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +import typing +from types import TracebackType + +from .._models import Request, Response + +T = typing.TypeVar("T", bound="BaseTransport") +A = typing.TypeVar("A", bound="AsyncBaseTransport") + +__all__ = ["AsyncBaseTransport", "BaseTransport"] + + +class BaseTransport: + def __enter__(self: T) -> T: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: TracebackType | None = None, + ) -> None: + self.close() + + def handle_request(self, request: Request) -> Response: + """ + Send a single HTTP request and return a response. + + Developers shouldn't typically ever need to call into this API directly, + since the Client class provides all the higher level user-facing API + niceties. + + In order to properly release any network resources, the response + stream should *either* be consumed immediately, with a call to + `response.stream.read()`, or else the `handle_request` call should + be followed with a try/finally block to ensuring the stream is + always closed. + + Example usage: + + with httpx.HTTPTransport() as transport: + req = httpx.Request( + method=b"GET", + url=(b"https", b"www.example.com", 443, b"/"), + headers=[(b"Host", b"www.example.com")], + ) + resp = transport.handle_request(req) + body = resp.stream.read() + print(resp.status_code, resp.headers, body) + + + Takes a `Request` instance as the only argument. + + Returns a `Response` instance. + """ + raise NotImplementedError( + "The 'handle_request' method must be implemented." 
+ ) # pragma: no cover + + def close(self) -> None: + pass + + +class AsyncBaseTransport: + async def __aenter__(self: A) -> A: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: TracebackType | None = None, + ) -> None: + await self.aclose() + + async def handle_async_request( + self, + request: Request, + ) -> Response: + raise NotImplementedError( + "The 'handle_async_request' method must be implemented." + ) # pragma: no cover + + async def aclose(self) -> None: + pass diff --git a/venv/Lib/site-packages/httpx/_transports/default.py b/venv/Lib/site-packages/httpx/_transports/default.py new file mode 100644 index 00000000..d5aa05ff --- /dev/null +++ b/venv/Lib/site-packages/httpx/_transports/default.py @@ -0,0 +1,406 @@ +""" +Custom transports, with nicely configured defaults. + +The following additional keyword arguments are currently supported by httpcore... + +* uds: str +* local_address: str +* retries: int + +Example usages... + +# Disable HTTP/2 on a single specific domain. +mounts = { + "all://": httpx.HTTPTransport(http2=True), + "all://*example.org": httpx.HTTPTransport() +} + +# Using advanced httpcore configuration, with connection retries. +transport = httpx.HTTPTransport(retries=1) +client = httpx.Client(transport=transport) + +# Using advanced httpcore configuration, with unix domain sockets. +transport = httpx.HTTPTransport(uds="socket.uds") +client = httpx.Client(transport=transport) +""" + +from __future__ import annotations + +import contextlib +import typing +from types import TracebackType + +if typing.TYPE_CHECKING: + import ssl # pragma: no cover + + import httpx # pragma: no cover + +from .._config import DEFAULT_LIMITS, Limits, Proxy, create_ssl_context +from .._exceptions import ( + ConnectError, + ConnectTimeout, + LocalProtocolError, + NetworkError, + PoolTimeout, + ProtocolError, + ProxyError, + ReadError, + ReadTimeout, + RemoteProtocolError, + TimeoutException, + UnsupportedProtocol, + WriteError, + WriteTimeout, +) +from .._models import Request, Response +from .._types import AsyncByteStream, CertTypes, ProxyTypes, SyncByteStream +from .._urls import URL +from .base import AsyncBaseTransport, BaseTransport + +T = typing.TypeVar("T", bound="HTTPTransport") +A = typing.TypeVar("A", bound="AsyncHTTPTransport") + +SOCKET_OPTION = typing.Union[ + typing.Tuple[int, int, int], + typing.Tuple[int, int, typing.Union[bytes, bytearray]], + typing.Tuple[int, int, None, int], +] + +__all__ = ["AsyncHTTPTransport", "HTTPTransport"] + +HTTPCORE_EXC_MAP: dict[type[Exception], type[httpx.HTTPError]] = {} + + +def _load_httpcore_exceptions() -> dict[type[Exception], type[httpx.HTTPError]]: + import httpcore + + return { + httpcore.TimeoutException: TimeoutException, + httpcore.ConnectTimeout: ConnectTimeout, + httpcore.ReadTimeout: ReadTimeout, + httpcore.WriteTimeout: WriteTimeout, + httpcore.PoolTimeout: PoolTimeout, + httpcore.NetworkError: NetworkError, + httpcore.ConnectError: ConnectError, + httpcore.ReadError: ReadError, + httpcore.WriteError: WriteError, + httpcore.ProxyError: ProxyError, + httpcore.UnsupportedProtocol: UnsupportedProtocol, + httpcore.ProtocolError: ProtocolError, + httpcore.LocalProtocolError: LocalProtocolError, + httpcore.RemoteProtocolError: RemoteProtocolError, + } + + +@contextlib.contextmanager +def map_httpcore_exceptions() -> typing.Iterator[None]: + global HTTPCORE_EXC_MAP + if len(HTTPCORE_EXC_MAP) == 0: + HTTPCORE_EXC_MAP = 
_load_httpcore_exceptions() + try: + yield + except Exception as exc: + mapped_exc = None + + for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): + if not isinstance(exc, from_exc): + continue + # We want to map to the most specific exception we can find. + # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to + # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. + if mapped_exc is None or issubclass(to_exc, mapped_exc): + mapped_exc = to_exc + + if mapped_exc is None: # pragma: no cover + raise + + message = str(exc) + raise mapped_exc(message) from exc + + +class ResponseStream(SyncByteStream): + def __init__(self, httpcore_stream: typing.Iterable[bytes]) -> None: + self._httpcore_stream = httpcore_stream + + def __iter__(self) -> typing.Iterator[bytes]: + with map_httpcore_exceptions(): + for part in self._httpcore_stream: + yield part + + def close(self) -> None: + if hasattr(self._httpcore_stream, "close"): + self._httpcore_stream.close() + + +class HTTPTransport(BaseTransport): + def __init__( + self, + verify: ssl.SSLContext | str | bool = True, + cert: CertTypes | None = None, + trust_env: bool = True, + http1: bool = True, + http2: bool = False, + limits: Limits = DEFAULT_LIMITS, + proxy: ProxyTypes | None = None, + uds: str | None = None, + local_address: str | None = None, + retries: int = 0, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> None: + import httpcore + + proxy = Proxy(url=proxy) if isinstance(proxy, (str, URL)) else proxy + ssl_context = create_ssl_context(verify=verify, cert=cert, trust_env=trust_env) + + if proxy is None: + self._pool = httpcore.ConnectionPool( + ssl_context=ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + uds=uds, + local_address=local_address, + retries=retries, + socket_options=socket_options, + ) + elif proxy.url.scheme in ("http", "https"): + self._pool = httpcore.HTTPProxy( + proxy_url=httpcore.URL( + scheme=proxy.url.raw_scheme, + host=proxy.url.raw_host, + port=proxy.url.port, + target=proxy.url.raw_path, + ), + proxy_auth=proxy.raw_auth, + proxy_headers=proxy.headers.raw, + ssl_context=ssl_context, + proxy_ssl_context=proxy.ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + socket_options=socket_options, + ) + elif proxy.url.scheme in ("socks5", "socks5h"): + try: + import socksio # noqa + except ImportError: # pragma: no cover + raise ImportError( + "Using SOCKS proxy, but the 'socksio' package is not installed. " + "Make sure to install httpx using `pip install httpx[socks]`." + ) from None + + self._pool = httpcore.SOCKSProxy( + proxy_url=httpcore.URL( + scheme=proxy.url.raw_scheme, + host=proxy.url.raw_host, + port=proxy.url.port, + target=proxy.url.raw_path, + ), + proxy_auth=proxy.raw_auth, + ssl_context=ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + ) + else: # pragma: no cover + raise ValueError( + "Proxy protocol must be either 'http', 'https', 'socks5', or 'socks5h'," + f" but got {proxy.url.scheme!r}." + ) + + def __enter__(self: T) -> T: # Use generics for subclass support. 
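+        # Entering the transport context delegates to the underlying httpcore
+        # connection pool, which owns the actual network resources.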
+ self._pool.__enter__() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: TracebackType | None = None, + ) -> None: + with map_httpcore_exceptions(): + self._pool.__exit__(exc_type, exc_value, traceback) + + def handle_request( + self, + request: Request, + ) -> Response: + assert isinstance(request.stream, SyncByteStream) + import httpcore + + req = httpcore.Request( + method=request.method, + url=httpcore.URL( + scheme=request.url.raw_scheme, + host=request.url.raw_host, + port=request.url.port, + target=request.url.raw_path, + ), + headers=request.headers.raw, + content=request.stream, + extensions=request.extensions, + ) + with map_httpcore_exceptions(): + resp = self._pool.handle_request(req) + + assert isinstance(resp.stream, typing.Iterable) + + return Response( + status_code=resp.status, + headers=resp.headers, + stream=ResponseStream(resp.stream), + extensions=resp.extensions, + ) + + def close(self) -> None: + self._pool.close() + + +class AsyncResponseStream(AsyncByteStream): + def __init__(self, httpcore_stream: typing.AsyncIterable[bytes]) -> None: + self._httpcore_stream = httpcore_stream + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + with map_httpcore_exceptions(): + async for part in self._httpcore_stream: + yield part + + async def aclose(self) -> None: + if hasattr(self._httpcore_stream, "aclose"): + await self._httpcore_stream.aclose() + + +class AsyncHTTPTransport(AsyncBaseTransport): + def __init__( + self, + verify: ssl.SSLContext | str | bool = True, + cert: CertTypes | None = None, + trust_env: bool = True, + http1: bool = True, + http2: bool = False, + limits: Limits = DEFAULT_LIMITS, + proxy: ProxyTypes | None = None, + uds: str | None = None, + local_address: str | None = None, + retries: int = 0, + socket_options: typing.Iterable[SOCKET_OPTION] | None = None, + ) -> None: + import httpcore + + proxy = Proxy(url=proxy) if isinstance(proxy, (str, URL)) else proxy + ssl_context = create_ssl_context(verify=verify, cert=cert, trust_env=trust_env) + + if proxy is None: + self._pool = httpcore.AsyncConnectionPool( + ssl_context=ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + uds=uds, + local_address=local_address, + retries=retries, + socket_options=socket_options, + ) + elif proxy.url.scheme in ("http", "https"): + self._pool = httpcore.AsyncHTTPProxy( + proxy_url=httpcore.URL( + scheme=proxy.url.raw_scheme, + host=proxy.url.raw_host, + port=proxy.url.port, + target=proxy.url.raw_path, + ), + proxy_auth=proxy.raw_auth, + proxy_headers=proxy.headers.raw, + proxy_ssl_context=proxy.ssl_context, + ssl_context=ssl_context, + max_connections=limits.max_connections, + max_keepalive_connections=limits.max_keepalive_connections, + keepalive_expiry=limits.keepalive_expiry, + http1=http1, + http2=http2, + socket_options=socket_options, + ) + elif proxy.url.scheme in ("socks5", "socks5h"): + try: + import socksio # noqa + except ImportError: # pragma: no cover + raise ImportError( + "Using SOCKS proxy, but the 'socksio' package is not installed. " + "Make sure to install httpx using `pip install httpx[socks]`." 
+                ) from None
+
+            self._pool = httpcore.AsyncSOCKSProxy(
+                proxy_url=httpcore.URL(
+                    scheme=proxy.url.raw_scheme,
+                    host=proxy.url.raw_host,
+                    port=proxy.url.port,
+                    target=proxy.url.raw_path,
+                ),
+                proxy_auth=proxy.raw_auth,
+                ssl_context=ssl_context,
+                max_connections=limits.max_connections,
+                max_keepalive_connections=limits.max_keepalive_connections,
+                keepalive_expiry=limits.keepalive_expiry,
+                http1=http1,
+                http2=http2,
+            )
+        else:  # pragma: no cover
+            raise ValueError(
+                "Proxy protocol must be either 'http', 'https', 'socks5', or 'socks5h',"
+                f" but got {proxy.url.scheme!r}."
+            )
+
+    async def __aenter__(self: A) -> A:  # Use generics for subclass support.
+        await self._pool.__aenter__()
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None = None,
+        exc_value: BaseException | None = None,
+        traceback: TracebackType | None = None,
+    ) -> None:
+        with map_httpcore_exceptions():
+            await self._pool.__aexit__(exc_type, exc_value, traceback)
+
+    async def handle_async_request(
+        self,
+        request: Request,
+    ) -> Response:
+        assert isinstance(request.stream, AsyncByteStream)
+        import httpcore
+
+        req = httpcore.Request(
+            method=request.method,
+            url=httpcore.URL(
+                scheme=request.url.raw_scheme,
+                host=request.url.raw_host,
+                port=request.url.port,
+                target=request.url.raw_path,
+            ),
+            headers=request.headers.raw,
+            content=request.stream,
+            extensions=request.extensions,
+        )
+        with map_httpcore_exceptions():
+            resp = await self._pool.handle_async_request(req)
+
+        assert isinstance(resp.stream, typing.AsyncIterable)
+
+        return Response(
+            status_code=resp.status,
+            headers=resp.headers,
+            stream=AsyncResponseStream(resp.stream),
+            extensions=resp.extensions,
+        )
+
+    async def aclose(self) -> None:
+        await self._pool.aclose()
diff --git a/venv/Lib/site-packages/httpx/_transports/mock.py b/venv/Lib/site-packages/httpx/_transports/mock.py
new file mode 100644
index 00000000..8c418f59
--- /dev/null
+++ b/venv/Lib/site-packages/httpx/_transports/mock.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+import typing
+
+from .._models import Request, Response
+from .base import AsyncBaseTransport, BaseTransport
+
+SyncHandler = typing.Callable[[Request], Response]
+AsyncHandler = typing.Callable[[Request], typing.Coroutine[None, None, Response]]
+
+
+__all__ = ["MockTransport"]
+
+
+class MockTransport(AsyncBaseTransport, BaseTransport):
+    def __init__(self, handler: SyncHandler | AsyncHandler) -> None:
+        self.handler = handler
+
+    def handle_request(
+        self,
+        request: Request,
+    ) -> Response:
+        request.read()
+        response = self.handler(request)
+        if not isinstance(response, Response):  # pragma: no cover
+            raise TypeError("Cannot use an async handler in a sync Client")
+        return response
+
+    async def handle_async_request(
+        self,
+        request: Request,
+    ) -> Response:
+        await request.aread()
+        response = self.handler(request)
+
+        # Allow handler to *optionally* be an `async` function.
+        # If it is, then the `response` variable needs to be awaited to actually
+        # return the result.
+
+        if not isinstance(response, Response):
+            response = await response
+
+        return response
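For reference, a minimal usage sketch of the mock transport above; the handler, URL, and payload are illustrative only, not part of the vendored sources:

```python
import httpx

# A synchronous handler: fabricate a response without any network I/O.
def handler(request: httpx.Request) -> httpx.Response:
    return httpx.Response(200, json={"echo": str(request.url)})

# Route all client traffic through the mock transport.
client = httpx.Client(transport=httpx.MockTransport(handler))
response = client.get("https://testserver/ping")
assert response.status_code == 200
assert response.json() == {"echo": "https://testserver/ping"}
```

An `async def` handler works the same way with `httpx.AsyncClient`; that is exactly the optional-awaiting branch implemented in `handle_async_request` above.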
diff --git a/venv/Lib/site-packages/httpx/_transports/wsgi.py b/venv/Lib/site-packages/httpx/_transports/wsgi.py
new file mode 100644
index 00000000..8592ffe0
--- /dev/null
+++ b/venv/Lib/site-packages/httpx/_transports/wsgi.py
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+import io
+import itertools
+import sys
+import typing
+
+from .._models import Request, Response
+from .._types import SyncByteStream
+from .base import BaseTransport
+
+if typing.TYPE_CHECKING:
+    from _typeshed import OptExcInfo  # pragma: no cover
+    from _typeshed.wsgi import WSGIApplication  # pragma: no cover
+
+_T = typing.TypeVar("_T")
+
+
+__all__ = ["WSGITransport"]
+
+
+def _skip_leading_empty_chunks(body: typing.Iterable[_T]) -> typing.Iterable[_T]:
+    body = iter(body)
+    for chunk in body:
+        if chunk:
+            return itertools.chain([chunk], body)
+    return []
+
+
+class WSGIByteStream(SyncByteStream):
+    def __init__(self, result: typing.Iterable[bytes]) -> None:
+        self._close = getattr(result, "close", None)
+        self._result = _skip_leading_empty_chunks(result)
+
+    def __iter__(self) -> typing.Iterator[bytes]:
+        for part in self._result:
+            yield part
+
+    def close(self) -> None:
+        if self._close is not None:
+            self._close()
+
+
+class WSGITransport(BaseTransport):
+    """
+    A custom transport that handles sending requests directly to a WSGI app.
+    The simplest way to use this functionality is to use the `app` argument.
+
+    ```
+    client = httpx.Client(app=app)
+    ```
+
+    Alternatively, you can set up the transport instance explicitly.
+    This allows you to include any additional configuration arguments specific
+    to the WSGITransport class:
+
+    ```
+    transport = httpx.WSGITransport(
+        app=app,
+        script_name="/submount",
+        remote_addr="1.2.3.4"
+    )
+    client = httpx.Client(transport=transport)
+    ```
+
+    Arguments:
+
+    * `app` - The WSGI application.
+    * `raise_app_exceptions` - Boolean indicating if exceptions in the application
+       should be raised. Defaults to `True`. Can be set to `False` for use cases
+       such as testing the content of a client 500 response.
+    * `script_name` - The root path on which the WSGI application should be mounted.
+    * `remote_addr` - A string indicating the client IP of incoming requests.
+    """
+
+    def __init__(
+        self,
+        app: WSGIApplication,
+        raise_app_exceptions: bool = True,
+        script_name: str = "",
+        remote_addr: str = "127.0.0.1",
+        wsgi_errors: typing.TextIO | None = None,
+    ) -> None:
+        self.app = app
+        self.raise_app_exceptions = raise_app_exceptions
+        self.script_name = script_name
+        self.remote_addr = remote_addr
+        self.wsgi_errors = wsgi_errors
+
+    def handle_request(self, request: Request) -> Response:
+        request.read()
+        wsgi_input = io.BytesIO(request.content)
+
+        port = request.url.port or {"http": 80, "https": 443}[request.url.scheme]
+        environ = {
+            "wsgi.version": (1, 0),
+            "wsgi.url_scheme": request.url.scheme,
+            "wsgi.input": wsgi_input,
+            "wsgi.errors": self.wsgi_errors or sys.stderr,
+            "wsgi.multithread": True,
+            "wsgi.multiprocess": False,
+            "wsgi.run_once": False,
+            "REQUEST_METHOD": request.method,
+            "SCRIPT_NAME": self.script_name,
+            "PATH_INFO": request.url.path,
+            "QUERY_STRING": request.url.query.decode("ascii"),
+            "SERVER_NAME": request.url.host,
+            "SERVER_PORT": str(port),
+            "SERVER_PROTOCOL": "HTTP/1.1",
+            "REMOTE_ADDR": self.remote_addr,
+        }
+        for header_key, header_value in request.headers.raw:
+            key = header_key.decode("ascii").upper().replace("-", "_")
+            if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):
+                key = "HTTP_" + key
+            environ[key] = header_value.decode("ascii")
+
+        seen_status = None
+        seen_response_headers = None
+        seen_exc_info = None
+
+        def start_response(
+            status: str,
+            response_headers: list[tuple[str, str]],
+            exc_info: OptExcInfo | None = None,
+        ) -> typing.Callable[[bytes], typing.Any]:
+            nonlocal seen_status, seen_response_headers, seen_exc_info
+            seen_status = status
+            seen_response_headers = response_headers
+            seen_exc_info = exc_info
+            return lambda _: None
+
+        result = self.app(environ, start_response)
+
+        stream = WSGIByteStream(result)
+
+        assert seen_status is not None
+        assert seen_response_headers is not None
+        if seen_exc_info and seen_exc_info[0] and self.raise_app_exceptions:
+            raise seen_exc_info[1]
+
+        status_code = int(seen_status.split()[0])
+        headers = [
+            (key.encode("ascii"), value.encode("ascii"))
+            for key, value in seen_response_headers
+        ]
+
+        return Response(status_code, headers=headers, stream=stream)
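A quick usage sketch for this transport, assuming a trivial WSGI callable (the app and the `testserver` host below are illustrative):

```python
import httpx

def app(environ, start_response):
    # Minimal WSGI app: ignore the request and return a plain-text body.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"Hello, WSGI!"]

client = httpx.Client(transport=httpx.WSGITransport(app=app))
response = client.get("http://testserver/")
assert response.status_code == 200
assert response.text == "Hello, WSGI!"
```

No socket is opened: `handle_request` above builds the `environ` dict, invokes the app in-process, and wraps the iterable result in a `WSGIByteStream`.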
diff --git a/venv/Lib/site-packages/httpx/_types.py b/venv/Lib/site-packages/httpx/_types.py
new file mode 100644
index 00000000..704dfdff
--- /dev/null
+++ b/venv/Lib/site-packages/httpx/_types.py
@@ -0,0 +1,114 @@
+"""
+Type definitions for type checking purposes.
+""" + +from http.cookiejar import CookieJar +from typing import ( + IO, + TYPE_CHECKING, + Any, + AsyncIterable, + AsyncIterator, + Callable, + Dict, + Iterable, + Iterator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) + +if TYPE_CHECKING: # pragma: no cover + from ._auth import Auth # noqa: F401 + from ._config import Proxy, Timeout # noqa: F401 + from ._models import Cookies, Headers, Request # noqa: F401 + from ._urls import URL, QueryParams # noqa: F401 + + +PrimitiveData = Optional[Union[str, int, float, bool]] + +URLTypes = Union["URL", str] + +QueryParamTypes = Union[ + "QueryParams", + Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]], + List[Tuple[str, PrimitiveData]], + Tuple[Tuple[str, PrimitiveData], ...], + str, + bytes, +] + +HeaderTypes = Union[ + "Headers", + Mapping[str, str], + Mapping[bytes, bytes], + Sequence[Tuple[str, str]], + Sequence[Tuple[bytes, bytes]], +] + +CookieTypes = Union["Cookies", CookieJar, Dict[str, str], List[Tuple[str, str]]] + +TimeoutTypes = Union[ + Optional[float], + Tuple[Optional[float], Optional[float], Optional[float], Optional[float]], + "Timeout", +] +ProxyTypes = Union["URL", str, "Proxy"] +CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]] + +AuthTypes = Union[ + Tuple[Union[str, bytes], Union[str, bytes]], + Callable[["Request"], "Request"], + "Auth", +] + +RequestContent = Union[str, bytes, Iterable[bytes], AsyncIterable[bytes]] +ResponseContent = Union[str, bytes, Iterable[bytes], AsyncIterable[bytes]] +ResponseExtensions = Mapping[str, Any] + +RequestData = Mapping[str, Any] + +FileContent = Union[IO[bytes], bytes, str] +FileTypes = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], + # (filename, file (or bytes), content_type, headers) + Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], +] +RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]] + +RequestExtensions = Mapping[str, Any] + +__all__ = ["AsyncByteStream", "SyncByteStream"] + + +class SyncByteStream: + def __iter__(self) -> Iterator[bytes]: + raise NotImplementedError( + "The '__iter__' method must be implemented." + ) # pragma: no cover + yield b"" # pragma: no cover + + def close(self) -> None: + """ + Subclasses can override this method to release any network resources + after a request/response cycle is complete. + """ + + +class AsyncByteStream: + async def __aiter__(self) -> AsyncIterator[bytes]: + raise NotImplementedError( + "The '__aiter__' method must be implemented." + ) # pragma: no cover + yield b"" # pragma: no cover + + async def aclose(self) -> None: + pass diff --git a/venv/Lib/site-packages/httpx/_urlparse.py b/venv/Lib/site-packages/httpx/_urlparse.py new file mode 100644 index 00000000..bf190fd5 --- /dev/null +++ b/venv/Lib/site-packages/httpx/_urlparse.py @@ -0,0 +1,527 @@ +""" +An implementation of `urlparse` that provides URL validation and normalization +as described by RFC3986. + +We rely on this implementation rather than the one in Python's stdlib, because: + +* It provides more complete URL validation. +* It properly differentiates between an empty querystring and an absent querystring, + to distinguish URLs with a trailing '?'. +* It handles scheme, hostname, port, and path normalization. +* It supports IDNA hostnames, normalizing them to their encoded form. 
+* The API supports passing individual components, as well as the complete URL string.
+
+Previously we relied on the excellent `rfc3986` package to handle URL parsing and
+validation, but this module provides a simpler alternative, with less indirection
+required.
+"""
+
+from __future__ import annotations
+
+import ipaddress
+import re
+import typing
+
+import idna
+
+from ._exceptions import InvalidURL
+
+MAX_URL_LENGTH = 65536
+
+# https://datatracker.ietf.org/doc/html/rfc3986.html#section-2.3
+UNRESERVED_CHARACTERS = (
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~"
+)
+SUB_DELIMS = "!$&'()*+,;="
+
+PERCENT_ENCODED_REGEX = re.compile("%[A-Fa-f0-9]{2}")
+
+# https://url.spec.whatwg.org/#percent-encoded-bytes
+
+# The fragment percent-encode set is the C0 control percent-encode set
+# and U+0020 SPACE, U+0022 ("), U+003C (<), U+003E (>), and U+0060 (`).
+FRAG_SAFE = "".join(
+    [chr(i) for i in range(0x20, 0x7F) if i not in (0x20, 0x22, 0x3C, 0x3E, 0x60)]
+)
+
+# The query percent-encode set is the C0 control percent-encode set
+# and U+0020 SPACE, U+0022 ("), U+0023 (#), U+003C (<), and U+003E (>).
+QUERY_SAFE = "".join(
+    [chr(i) for i in range(0x20, 0x7F) if i not in (0x20, 0x22, 0x23, 0x3C, 0x3E)]
+)
+
+# The path percent-encode set is the query percent-encode set
+# and U+003F (?), U+0060 (`), U+007B ({), and U+007D (}).
+PATH_SAFE = "".join(
+    [
+        chr(i)
+        for i in range(0x20, 0x7F)
+        if i not in (0x20, 0x22, 0x23, 0x3C, 0x3E) + (0x3F, 0x60, 0x7B, 0x7D)
+    ]
+)
+
+# The userinfo percent-encode set is the path percent-encode set
+# and U+002F (/), U+003A (:), U+003B (;), U+003D (=), U+0040 (@),
+# U+005B ([) to U+005E (^), inclusive, and U+007C (|).
+USERNAME_SAFE = "".join(
+    [
+        chr(i)
+        for i in range(0x20, 0x7F)
+        if i
+        not in (0x20, 0x22, 0x23, 0x3C, 0x3E)
+        + (0x3F, 0x60, 0x7B, 0x7D)
+        + (0x2F, 0x3A, 0x3B, 0x3D, 0x40, 0x5B, 0x5C, 0x5D, 0x5E, 0x7C)
+    ]
+)
+PASSWORD_SAFE = "".join(
+    [
+        chr(i)
+        for i in range(0x20, 0x7F)
+        if i
+        not in (0x20, 0x22, 0x23, 0x3C, 0x3E)
+        + (0x3F, 0x60, 0x7B, 0x7D)
+        + (0x2F, 0x3A, 0x3B, 0x3D, 0x40, 0x5B, 0x5C, 0x5D, 0x5E, 0x7C)
+    ]
+)
+# Note... The terminology 'userinfo' percent-encode set in the WHATWG document
+# is used for the username and password quoting. For the joint userinfo component
+# we remove U+003A (:) from the safe set.
+USERINFO_SAFE = "".join(
+    [
+        chr(i)
+        for i in range(0x20, 0x7F)
+        if i
+        not in (0x20, 0x22, 0x23, 0x3C, 0x3E)
+        + (0x3F, 0x60, 0x7B, 0x7D)
+        + (0x2F, 0x3B, 0x3D, 0x40, 0x5B, 0x5C, 0x5D, 0x5E, 0x7C)
+    ]
+)
+
+
+# {scheme}: (optional)
+# //{authority} (optional)
+# {path}
+# ?{query} (optional)
+# #{fragment} (optional)
+URL_REGEX = re.compile(
+    (
+        r"(?:(?P<scheme>{scheme}):)?"
+        r"(?://(?P<authority>{authority}))?"
+        r"(?P<path>{path})"
+        r"(?:\?(?P<query>{query}))?"
+        r"(?:#(?P<fragment>{fragment}))?"
+    ).format(
+        scheme="([a-zA-Z][a-zA-Z0-9+.-]*)?",
+        authority="[^/?#]*",
+        path="[^?#]*",
+        query="[^#]*",
+        fragment=".*",
+    )
+)
+
+# {userinfo}@ (optional)
+# {host}
+# :{port} (optional)
+AUTHORITY_REGEX = re.compile(
+    (
+        r"(?:(?P<userinfo>{userinfo})@)?" r"(?P<host>{host})" r":?(?P<port>{port})?"
+    ).format(
+        userinfo=".*",  # Any character sequence.
+        host="(\\[.*\\]|[^:@]*)",  # Either any character sequence excluding ':' or '@',
+        # or an IPv6 address enclosed within square brackets.
+        port=".*",  # Any character sequence.
+    )
+)
+
+
+# If we call urlparse with an individual component, then we need to regex
+# validate that component individually.
+# Note that we're duplicating the same strings as above. Shock! Horror!!
+COMPONENT_REGEX = {
+    "scheme": re.compile("([a-zA-Z][a-zA-Z0-9+.-]*)?"),
+    "authority": re.compile("[^/?#]*"),
+    "path": re.compile("[^?#]*"),
+    "query": re.compile("[^#]*"),
+    "fragment": re.compile(".*"),
+    "userinfo": re.compile("[^@]*"),
+    "host": re.compile("(\\[.*\\]|[^:]*)"),
+    "port": re.compile(".*"),
+}
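The named groups in `URL_REGEX` and `AUTHORITY_REGEX` are load-bearing: `urlparse()` below reads the matches via `groupdict()`. A small demonstration of the two-stage split (note that `httpx._urlparse` is a private module, so this is for illustration only):

```python
# Illustrative only: exercising the two-stage regex split defined above.
from httpx._urlparse import AUTHORITY_REGEX, URL_REGEX

match = URL_REGEX.match("https://user@example.com:8080/a/b?q=1#frag")
assert match is not None
print(match.groupdict())
# {'scheme': 'https', 'authority': 'user@example.com:8080',
#  'path': '/a/b', 'query': 'q=1', 'fragment': 'frag'}

# The authority component is then split again into its own parts.
authority = AUTHORITY_REGEX.match("user@example.com:8080")
assert authority is not None
print(authority.groupdict())
# {'userinfo': 'user', 'host': 'example.com', 'port': '8080'}
```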
+
+
+# We use these simple regexes as a first pass before handing off to
+# the stdlib 'ipaddress' module for IP address validation.
+IPv4_STYLE_HOSTNAME = re.compile(r"^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$")
+IPv6_STYLE_HOSTNAME = re.compile(r"^\[.*\]$")
+
+
+class ParseResult(typing.NamedTuple):
+    scheme: str
+    userinfo: str
+    host: str
+    port: int | None
+    path: str
+    query: str | None
+    fragment: str | None
+
+    @property
+    def authority(self) -> str:
+        return "".join(
+            [
+                f"{self.userinfo}@" if self.userinfo else "",
+                f"[{self.host}]" if ":" in self.host else self.host,
+                f":{self.port}" if self.port is not None else "",
+            ]
+        )
+
+    @property
+    def netloc(self) -> str:
+        return "".join(
+            [
+                f"[{self.host}]" if ":" in self.host else self.host,
+                f":{self.port}" if self.port is not None else "",
+            ]
+        )
+
+    def copy_with(self, **kwargs: str | None) -> ParseResult:
+        if not kwargs:
+            return self
+
+        defaults = {
+            "scheme": self.scheme,
+            "authority": self.authority,
+            "path": self.path,
+            "query": self.query,
+            "fragment": self.fragment,
+        }
+        defaults.update(kwargs)
+        return urlparse("", **defaults)
+
+    def __str__(self) -> str:
+        authority = self.authority
+        return "".join(
+            [
+                f"{self.scheme}:" if self.scheme else "",
+                f"//{authority}" if authority else "",
+                self.path,
+                f"?{self.query}" if self.query is not None else "",
+                f"#{self.fragment}" if self.fragment is not None else "",
+            ]
+        )
+
+
+def urlparse(url: str = "", **kwargs: str | None) -> ParseResult:
+    # Initial basic checks on allowable URLs.
+    # ---------------------------------------
+
+    # Hard limit the maximum allowable URL length.
+    if len(url) > MAX_URL_LENGTH:
+        raise InvalidURL("URL too long")
+
+    # If a URL includes any ASCII control characters including \t, \r, \n,
+    # then treat it as invalid.
+    if any(char.isascii() and not char.isprintable() for char in url):
+        char = next(char for char in url if char.isascii() and not char.isprintable())
+        idx = url.find(char)
+        error = (
+            f"Invalid non-printable ASCII character in URL, {char!r} at position {idx}."
+        )
+        raise InvalidURL(error)
+
+    # Some keyword arguments require special handling.
+    # ------------------------------------------------
+
+    # Coerce "port" to a string, if it is provided as an integer.
+    if "port" in kwargs:
+        port = kwargs["port"]
+        kwargs["port"] = str(port) if isinstance(port, int) else port
+
+    # Replace "netloc" with "host" and "port".
+    if "netloc" in kwargs:
+        netloc = kwargs.pop("netloc") or ""
+        kwargs["host"], _, kwargs["port"] = netloc.partition(":")
+
+    # Replace "username" and/or "password" with "userinfo".
+    if "username" in kwargs or "password" in kwargs:
+        username = quote(kwargs.pop("username", "") or "", safe=USERNAME_SAFE)
+        password = quote(kwargs.pop("password", "") or "", safe=PASSWORD_SAFE)
+        kwargs["userinfo"] = f"{username}:{password}" if password else username
+
+    # Replace "raw_path" with "path" and "query".
+    if "raw_path" in kwargs:
+        raw_path = kwargs.pop("raw_path") or ""
+        kwargs["path"], separator, kwargs["query"] = raw_path.partition("?")
+        if not separator:
+            kwargs["query"] = None
+
+    # Ensure that IPv6 "host" addresses are always escaped with "[...]".
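+    # e.g. a bare "host" of "::1" is stored as "[::1]", so that its colons are
+    # not mistaken for a port separator and the "host" component regex accepts it.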
+ if "host" in kwargs: + host = kwargs.get("host") or "" + if ":" in host and not (host.startswith("[") and host.endswith("]")): + kwargs["host"] = f"[{host}]" + + # If any keyword arguments are provided, ensure they are valid. + # ------------------------------------------------------------- + + for key, value in kwargs.items(): + if value is not None: + if len(value) > MAX_URL_LENGTH: + raise InvalidURL(f"URL component '{key}' too long") + + # If a component includes any ASCII control characters including \t, \r, \n, + # then treat it as invalid. + if any(char.isascii() and not char.isprintable() for char in value): + char = next( + char for char in value if char.isascii() and not char.isprintable() + ) + idx = value.find(char) + error = ( + f"Invalid non-printable ASCII character in URL {key} component, " + f"{char!r} at position {idx}." + ) + raise InvalidURL(error) + + # Ensure that keyword arguments match as a valid regex. + if not COMPONENT_REGEX[key].fullmatch(value): + raise InvalidURL(f"Invalid URL component '{key}'") + + # The URL_REGEX will always match, but may have empty components. + url_match = URL_REGEX.match(url) + assert url_match is not None + url_dict = url_match.groupdict() + + # * 'scheme', 'authority', and 'path' may be empty strings. + # * 'query' may be 'None', indicating no trailing "?" portion. + # Any string including the empty string, indicates a trailing "?". + # * 'fragment' may be 'None', indicating no trailing "#" portion. + # Any string including the empty string, indicates a trailing "#". + scheme = kwargs.get("scheme", url_dict["scheme"]) or "" + authority = kwargs.get("authority", url_dict["authority"]) or "" + path = kwargs.get("path", url_dict["path"]) or "" + query = kwargs.get("query", url_dict["query"]) + frag = kwargs.get("fragment", url_dict["fragment"]) + + # The AUTHORITY_REGEX will always match, but may have empty components. + authority_match = AUTHORITY_REGEX.match(authority) + assert authority_match is not None + authority_dict = authority_match.groupdict() + + # * 'userinfo' and 'host' may be empty strings. + # * 'port' may be 'None'. + userinfo = kwargs.get("userinfo", authority_dict["userinfo"]) or "" + host = kwargs.get("host", authority_dict["host"]) or "" + port = kwargs.get("port", authority_dict["port"]) + + # Normalize and validate each component. + # We end up with a parsed representation of the URL, + # with components that are plain ASCII bytestrings. + parsed_scheme: str = scheme.lower() + parsed_userinfo: str = quote(userinfo, safe=USERINFO_SAFE) + parsed_host: str = encode_host(host) + parsed_port: int | None = normalize_port(port, scheme) + + has_scheme = parsed_scheme != "" + has_authority = ( + parsed_userinfo != "" or parsed_host != "" or parsed_port is not None + ) + validate_path(path, has_scheme=has_scheme, has_authority=has_authority) + if has_scheme or has_authority: + path = normalize_path(path) + + parsed_path: str = quote(path, safe=PATH_SAFE) + parsed_query: str | None = None if query is None else quote(query, safe=QUERY_SAFE) + parsed_frag: str | None = None if frag is None else quote(frag, safe=FRAG_SAFE) + + # The parsed ASCII bytestrings are our canonical form. + # All properties of the URL are derived from these. 
+ return ParseResult( + parsed_scheme, + parsed_userinfo, + parsed_host, + parsed_port, + parsed_path, + parsed_query, + parsed_frag, + ) + + +def encode_host(host: str) -> str: + if not host: + return "" + + elif IPv4_STYLE_HOSTNAME.match(host): + # Validate IPv4 hostnames like #.#.#.# + # + # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2 + # + # IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet + try: + ipaddress.IPv4Address(host) + except ipaddress.AddressValueError: + raise InvalidURL(f"Invalid IPv4 address: {host!r}") + return host + + elif IPv6_STYLE_HOSTNAME.match(host): + # Validate IPv6 hostnames like [...] + # + # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2 + # + # "A host identified by an Internet Protocol literal address, version 6 + # [RFC3513] or later, is distinguished by enclosing the IP literal + # within square brackets ("[" and "]"). This is the only place where + # square bracket characters are allowed in the URI syntax." + try: + ipaddress.IPv6Address(host[1:-1]) + except ipaddress.AddressValueError: + raise InvalidURL(f"Invalid IPv6 address: {host!r}") + return host[1:-1] + + elif host.isascii(): + # Regular ASCII hostnames + # + # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2 + # + # reg-name = *( unreserved / pct-encoded / sub-delims ) + WHATWG_SAFE = '"`{}%|\\' + return quote(host.lower(), safe=SUB_DELIMS + WHATWG_SAFE) + + # IDNA hostnames + try: + return idna.encode(host.lower()).decode("ascii") + except idna.IDNAError: + raise InvalidURL(f"Invalid IDNA hostname: {host!r}") + + +def normalize_port(port: str | int | None, scheme: str) -> int | None: + # From https://tools.ietf.org/html/rfc3986#section-3.2.3 + # + # "A scheme may define a default port. For example, the "http" scheme + # defines a default port of "80", corresponding to its reserved TCP + # port number. The type of port designated by the port number (e.g., + # TCP, UDP, SCTP) is defined by the URI scheme. URI producers and + # normalizers should omit the port component and its ":" delimiter if + # port is empty or if its value would be the same as that of the + # scheme's default." + if port is None or port == "": + return None + + try: + port_as_int = int(port) + except ValueError: + raise InvalidURL(f"Invalid port: {port!r}") + + # See https://url.spec.whatwg.org/#url-miscellaneous + default_port = {"ftp": 21, "http": 80, "https": 443, "ws": 80, "wss": 443}.get( + scheme + ) + if port_as_int == default_port: + return None + return port_as_int + + +def validate_path(path: str, has_scheme: bool, has_authority: bool) -> None: + """ + Path validation rules that depend on if the URL contains + a scheme or authority component. + + See https://datatracker.ietf.org/doc/html/rfc3986.html#section-3.3 + """ + if has_authority: + # If a URI contains an authority component, then the path component + # must either be empty or begin with a slash ("/") character." + if path and not path.startswith("/"): + raise InvalidURL("For absolute URLs, path must be empty or begin with '/'") + + if not has_scheme and not has_authority: + # If a URI does not contain an authority component, then the path cannot begin + # with two slash characters ("//"). + if path.startswith("//"): + raise InvalidURL("Relative URLs cannot have a path starting with '//'") + + # In addition, a URI reference (Section 4.1) may be a relative-path reference, + # in which case the first path segment cannot contain a colon (":") character. 
+ if path.startswith(":"): + raise InvalidURL("Relative URLs cannot have a path starting with ':'") + + +def normalize_path(path: str) -> str: + """ + Drop "." and ".." segments from a URL path. + + For example: + + normalize_path("/path/./to/somewhere/..") == "/path/to" + """ + # Fast return when no '.' characters in the path. + if "." not in path: + return path + + components = path.split("/") + + # Fast return when no '.' or '..' components in the path. + if "." not in components and ".." not in components: + return path + + # https://datatracker.ietf.org/doc/html/rfc3986#section-5.2.4 + output: list[str] = [] + for component in components: + if component == ".": + pass + elif component == "..": + if output and output != [""]: + output.pop() + else: + output.append(component) + return "/".join(output) + + +def PERCENT(string: str) -> str: + return "".join([f"%{byte:02X}" for byte in string.encode("utf-8")]) + + +def percent_encoded(string: str, safe: str) -> str: + """ + Use percent-encoding to quote a string. + """ + NON_ESCAPED_CHARS = UNRESERVED_CHARACTERS + safe + + # Fast path for strings that don't need escaping. + if not string.rstrip(NON_ESCAPED_CHARS): + return string + + return "".join( + [char if char in NON_ESCAPED_CHARS else PERCENT(char) for char in string] + ) + + +def quote(string: str, safe: str) -> str: + """ + Use percent-encoding to quote a string, omitting existing '%xx' escape sequences. + + See: https://www.rfc-editor.org/rfc/rfc3986#section-2.1 + + * `string`: The string to be percent-escaped. + * `safe`: A string containing characters that may be treated as safe, and do not + need to be escaped. Unreserved characters are always treated as safe. + See: https://www.rfc-editor.org/rfc/rfc3986#section-2.3 + """ + parts = [] + current_position = 0 + for match in re.finditer(PERCENT_ENCODED_REGEX, string): + start_position, end_position = match.start(), match.end() + matched_text = match.group(0) + # Add any text up to the '%xx' escape sequence. + if start_position != current_position: + leading_text = string[current_position:start_position] + parts.append(percent_encoded(leading_text, safe=safe)) + + # Add the '%xx' escape sequence. + parts.append(matched_text) + current_position = end_position + + # Add any text after the final '%xx' escape sequence. 
+ if current_position != len(string): + trailing_text = string[current_position:] + parts.append(percent_encoded(trailing_text, safe=safe)) + + return "".join(parts) diff --git a/venv/Lib/site-packages/httpx/_urls.py b/venv/Lib/site-packages/httpx/_urls.py new file mode 100644 index 00000000..147a8fa3 --- /dev/null +++ b/venv/Lib/site-packages/httpx/_urls.py @@ -0,0 +1,641 @@ +from __future__ import annotations + +import typing +from urllib.parse import parse_qs, unquote, urlencode + +import idna + +from ._types import QueryParamTypes +from ._urlparse import urlparse +from ._utils import primitive_value_to_str + +__all__ = ["URL", "QueryParams"] + + +class URL: + """ + url = httpx.URL("HTTPS://jo%40email.com:a%20secret@müller.de:1234/pa%20th?search=ab#anchorlink") + + assert url.scheme == "https" + assert url.username == "jo@email.com" + assert url.password == "a secret" + assert url.userinfo == b"jo%40email.com:a%20secret" + assert url.host == "müller.de" + assert url.raw_host == b"xn--mller-kva.de" + assert url.port == 1234 + assert url.netloc == b"xn--mller-kva.de:1234" + assert url.path == "/pa th" + assert url.query == b"?search=ab" + assert url.raw_path == b"/pa%20th?search=ab" + assert url.fragment == "anchorlink" + + The components of a URL are broken down like this: + + https://jo%40email.com:a%20secret@müller.de:1234/pa%20th?search=ab#anchorlink + [scheme] [ username ] [password] [ host ][port][ path ] [ query ] [fragment] + [ userinfo ] [ netloc ][ raw_path ] + + Note that: + + * `url.scheme` is normalized to always be lowercased. + + * `url.host` is normalized to always be lowercased. Internationalized domain + names are represented in unicode, without IDNA encoding applied. For instance: + + url = httpx.URL("http://中国.icom.museum") + assert url.host == "中国.icom.museum" + url = httpx.URL("http://xn--fiqs8s.icom.museum") + assert url.host == "中国.icom.museum" + + * `url.raw_host` is normalized to always be lowercased, and is IDNA encoded. + + url = httpx.URL("http://中国.icom.museum") + assert url.raw_host == b"xn--fiqs8s.icom.museum" + url = httpx.URL("http://xn--fiqs8s.icom.museum") + assert url.raw_host == b"xn--fiqs8s.icom.museum" + + * `url.port` is either None or an integer. URLs that include the default port for + "http", "https", "ws", "wss", and "ftp" schemes have their port + normalized to `None`. + + assert httpx.URL("http://example.com") == httpx.URL("http://example.com:80") + assert httpx.URL("http://example.com").port is None + assert httpx.URL("http://example.com:80").port is None + + * `url.userinfo` is raw bytes, without URL escaping. Usually you'll want to work + with `url.username` and `url.password` instead, which handle the URL escaping. + + * `url.raw_path` is raw bytes of both the path and query, without URL escaping. + This portion is used as the target when constructing HTTP requests. Usually you'll + want to work with `url.path` instead. + + * `url.query` is raw bytes, without URL escaping. A URL query string portion can + only be properly URL escaped when decoding the parameter names and values + themselves. + """ + + def __init__(self, url: URL | str = "", **kwargs: typing.Any) -> None: + if kwargs: + allowed = { + "scheme": str, + "username": str, + "password": str, + "userinfo": bytes, + "host": str, + "port": int, + "netloc": bytes, + "path": str, + "query": bytes, + "raw_path": bytes, + "fragment": str, + "params": object, + } + + # Perform type checking for all supported keyword arguments. 
+ for key, value in kwargs.items(): + if key not in allowed: + message = f"{key!r} is an invalid keyword argument for URL()" + raise TypeError(message) + if value is not None and not isinstance(value, allowed[key]): + expected = allowed[key].__name__ + seen = type(value).__name__ + message = f"Argument {key!r} must be {expected} but got {seen}" + raise TypeError(message) + if isinstance(value, bytes): + kwargs[key] = value.decode("ascii") + + if "params" in kwargs: + # Replace any "params" keyword with the raw "query" instead. + # + # Ensure that empty params use `kwargs["query"] = None` rather + # than `kwargs["query"] = ""`, so that generated URLs do not + # include an empty trailing "?". + params = kwargs.pop("params") + kwargs["query"] = None if not params else str(QueryParams(params)) + + if isinstance(url, str): + self._uri_reference = urlparse(url, **kwargs) + elif isinstance(url, URL): + self._uri_reference = url._uri_reference.copy_with(**kwargs) + else: + raise TypeError( + "Invalid type for url. Expected str or httpx.URL," + f" got {type(url)}: {url!r}" + ) + + @property + def scheme(self) -> str: + """ + The URL scheme, such as "http", "https". + Always normalised to lowercase. + """ + return self._uri_reference.scheme + + @property + def raw_scheme(self) -> bytes: + """ + The raw bytes representation of the URL scheme, such as b"http", b"https". + Always normalised to lowercase. + """ + return self._uri_reference.scheme.encode("ascii") + + @property + def userinfo(self) -> bytes: + """ + The URL userinfo as a raw bytestring. + For example: b"jo%40email.com:a%20secret". + """ + return self._uri_reference.userinfo.encode("ascii") + + @property + def username(self) -> str: + """ + The URL username as a string, with URL decoding applied. + For example: "jo@email.com" + """ + userinfo = self._uri_reference.userinfo + return unquote(userinfo.partition(":")[0]) + + @property + def password(self) -> str: + """ + The URL password as a string, with URL decoding applied. + For example: "a secret" + """ + userinfo = self._uri_reference.userinfo + return unquote(userinfo.partition(":")[2]) + + @property + def host(self) -> str: + """ + The URL host as a string. + Always normalized to lowercase, with IDNA hosts decoded into unicode. + + Examples: + + url = httpx.URL("http://www.EXAMPLE.org") + assert url.host == "www.example.org" + + url = httpx.URL("http://中国.icom.museum") + assert url.host == "中国.icom.museum" + + url = httpx.URL("http://xn--fiqs8s.icom.museum") + assert url.host == "中国.icom.museum" + + url = httpx.URL("https://[::ffff:192.168.0.1]") + assert url.host == "::ffff:192.168.0.1" + """ + host: str = self._uri_reference.host + + if host.startswith("xn--"): + host = idna.decode(host) + + return host + + @property + def raw_host(self) -> bytes: + """ + The raw bytes representation of the URL host. + Always normalized to lowercase, and IDNA encoded. + + Examples: + + url = httpx.URL("http://www.EXAMPLE.org") + assert url.raw_host == b"www.example.org" + + url = httpx.URL("http://中国.icom.museum") + assert url.raw_host == b"xn--fiqs8s.icom.museum" + + url = httpx.URL("http://xn--fiqs8s.icom.museum") + assert url.raw_host == b"xn--fiqs8s.icom.museum" + + url = httpx.URL("https://[::ffff:192.168.0.1]") + assert url.raw_host == b"::ffff:192.168.0.1" + """ + return self._uri_reference.host.encode("ascii") + + @property + def port(self) -> int | None: + """ + The URL port as an integer. + + Note that the URL class performs port normalization as per the WHATWG spec. 
+ Default ports for "http", "https", "ws", "wss", and "ftp" schemes are always + treated as `None`. + + For example: + + assert httpx.URL("http://www.example.com") == httpx.URL("http://www.example.com:80") + assert httpx.URL("http://www.example.com:80").port is None + """ + return self._uri_reference.port + + @property + def netloc(self) -> bytes: + """ + Either `` or `:` as bytes. + Always normalized to lowercase, and IDNA encoded. + + This property may be used for generating the value of a request + "Host" header. + """ + return self._uri_reference.netloc.encode("ascii") + + @property + def path(self) -> str: + """ + The URL path as a string. Excluding the query string, and URL decoded. + + For example: + + url = httpx.URL("https://example.com/pa%20th") + assert url.path == "/pa th" + """ + path = self._uri_reference.path or "/" + return unquote(path) + + @property + def query(self) -> bytes: + """ + The URL query string, as raw bytes, excluding the leading b"?". + + This is necessarily a bytewise interface, because we cannot + perform URL decoding of this representation until we've parsed + the keys and values into a QueryParams instance. + + For example: + + url = httpx.URL("https://example.com/?filter=some%20search%20terms") + assert url.query == b"filter=some%20search%20terms" + """ + query = self._uri_reference.query or "" + return query.encode("ascii") + + @property + def params(self) -> QueryParams: + """ + The URL query parameters, neatly parsed and packaged into an immutable + multidict representation. + """ + return QueryParams(self._uri_reference.query) + + @property + def raw_path(self) -> bytes: + """ + The complete URL path and query string as raw bytes. + Used as the target when constructing HTTP requests. + + For example: + + GET /users?search=some%20text HTTP/1.1 + Host: www.example.org + Connection: close + """ + path = self._uri_reference.path or "/" + if self._uri_reference.query is not None: + path += "?" + self._uri_reference.query + return path.encode("ascii") + + @property + def fragment(self) -> str: + """ + The URL fragments, as used in HTML anchors. + As a string, without the leading '#'. + """ + return unquote(self._uri_reference.fragment or "") + + @property + def is_absolute_url(self) -> bool: + """ + Return `True` for absolute URLs such as 'http://example.com/path', + and `False` for relative URLs such as '/path'. + """ + # We don't use `.is_absolute` from `rfc3986` because it treats + # URLs with a fragment portion as not absolute. + # What we actually care about is if the URL provides + # a scheme and hostname to which connections should be made. + return bool(self._uri_reference.scheme and self._uri_reference.host) + + @property + def is_relative_url(self) -> bool: + """ + Return `False` for absolute URLs such as 'http://example.com/path', + and `True` for relative URLs such as '/path'. + """ + return not self.is_absolute_url + + def copy_with(self, **kwargs: typing.Any) -> URL: + """ + Copy this URL, returning a new URL with some components altered. + Accepts the same set of parameters as the components that are made + available via properties on the `URL` class. 
+ + For example: + + url = httpx.URL("https://www.example.com").copy_with( + username="jo@gmail.com", password="a secret" + ) + assert url == "https://jo%40email.com:a%20secret@www.example.com" + """ + return URL(self, **kwargs) + + def copy_set_param(self, key: str, value: typing.Any = None) -> URL: + return self.copy_with(params=self.params.set(key, value)) + + def copy_add_param(self, key: str, value: typing.Any = None) -> URL: + return self.copy_with(params=self.params.add(key, value)) + + def copy_remove_param(self, key: str) -> URL: + return self.copy_with(params=self.params.remove(key)) + + def copy_merge_params(self, params: QueryParamTypes) -> URL: + return self.copy_with(params=self.params.merge(params)) + + def join(self, url: URL | str) -> URL: + """ + Return an absolute URL, using this URL as the base. + + Eg. + + url = httpx.URL("https://www.example.com/test") + url = url.join("/new/path") + assert url == "https://www.example.com/new/path" + """ + from urllib.parse import urljoin + + return URL(urljoin(str(self), str(URL(url)))) + + def __hash__(self) -> int: + return hash(str(self)) + + def __eq__(self, other: typing.Any) -> bool: + return isinstance(other, (URL, str)) and str(self) == str(URL(other)) + + def __str__(self) -> str: + return str(self._uri_reference) + + def __repr__(self) -> str: + scheme, userinfo, host, port, path, query, fragment = self._uri_reference + + if ":" in userinfo: + # Mask any password component. + userinfo = f'{userinfo.split(":")[0]}:[secure]' + + authority = "".join( + [ + f"{userinfo}@" if userinfo else "", + f"[{host}]" if ":" in host else host, + f":{port}" if port is not None else "", + ] + ) + url = "".join( + [ + f"{self.scheme}:" if scheme else "", + f"//{authority}" if authority else "", + path, + f"?{query}" if query is not None else "", + f"#{fragment}" if fragment is not None else "", + ] + ) + + return f"{self.__class__.__name__}({url!r})" + + @property + def raw(self) -> tuple[bytes, bytes, int, bytes]: # pragma: nocover + import collections + import warnings + + warnings.warn("URL.raw is deprecated.") + RawURL = collections.namedtuple( + "RawURL", ["raw_scheme", "raw_host", "port", "raw_path"] + ) + return RawURL( + raw_scheme=self.raw_scheme, + raw_host=self.raw_host, + port=self.port, + raw_path=self.raw_path, + ) + + +class QueryParams(typing.Mapping[str, str]): + """ + URL query parameters, as a multi-dict. + """ + + def __init__(self, *args: QueryParamTypes | None, **kwargs: typing.Any) -> None: + assert len(args) < 2, "Too many arguments." + assert not (args and kwargs), "Cannot mix named and unnamed arguments." 
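+        # The single positional argument may be a str or bytes query string,
+        # an existing QueryParams instance, a mapping, or a sequence of
+        # two-item (key, value) tuples.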
+ + value = args[0] if args else kwargs + + if value is None or isinstance(value, (str, bytes)): + value = value.decode("ascii") if isinstance(value, bytes) else value + self._dict = parse_qs(value, keep_blank_values=True) + elif isinstance(value, QueryParams): + self._dict = {k: list(v) for k, v in value._dict.items()} + else: + dict_value: dict[typing.Any, list[typing.Any]] = {} + if isinstance(value, (list, tuple)): + # Convert list inputs like: + # [("a", "123"), ("a", "456"), ("b", "789")] + # To a dict representation, like: + # {"a": ["123", "456"], "b": ["789"]} + for item in value: + dict_value.setdefault(item[0], []).append(item[1]) + else: + # Convert dict inputs like: + # {"a": "123", "b": ["456", "789"]} + # To dict inputs where values are always lists, like: + # {"a": ["123"], "b": ["456", "789"]} + dict_value = { + k: list(v) if isinstance(v, (list, tuple)) else [v] + for k, v in value.items() + } + + # Ensure that keys and values are neatly coerced to strings. + # We coerce values `True` and `False` to JSON-like "true" and "false" + # representations, and coerce `None` values to the empty string. + self._dict = { + str(k): [primitive_value_to_str(item) for item in v] + for k, v in dict_value.items() + } + + def keys(self) -> typing.KeysView[str]: + """ + Return all the keys in the query params. + + Usage: + + q = httpx.QueryParams("a=123&a=456&b=789") + assert list(q.keys()) == ["a", "b"] + """ + return self._dict.keys() + + def values(self) -> typing.ValuesView[str]: + """ + Return all the values in the query params. If a key occurs more than once + only the first item for that key is returned. + + Usage: + + q = httpx.QueryParams("a=123&a=456&b=789") + assert list(q.values()) == ["123", "789"] + """ + return {k: v[0] for k, v in self._dict.items()}.values() + + def items(self) -> typing.ItemsView[str, str]: + """ + Return all items in the query params. If a key occurs more than once + only the first item for that key is returned. + + Usage: + + q = httpx.QueryParams("a=123&a=456&b=789") + assert list(q.items()) == [("a", "123"), ("b", "789")] + """ + return {k: v[0] for k, v in self._dict.items()}.items() + + def multi_items(self) -> list[tuple[str, str]]: + """ + Return all items in the query params. Allow duplicate keys to occur. + + Usage: + + q = httpx.QueryParams("a=123&a=456&b=789") + assert list(q.multi_items()) == [("a", "123"), ("a", "456"), ("b", "789")] + """ + multi_items: list[tuple[str, str]] = [] + for k, v in self._dict.items(): + multi_items.extend([(k, i) for i in v]) + return multi_items + + def get(self, key: typing.Any, default: typing.Any = None) -> typing.Any: + """ + Get a value from the query param for a given key. If the key occurs + more than once, then only the first value is returned. + + Usage: + + q = httpx.QueryParams("a=123&a=456&b=789") + assert q.get("a") == "123" + """ + if key in self._dict: + return self._dict[str(key)][0] + return default + + def get_list(self, key: str) -> list[str]: + """ + Get all values from the query param for a given key. + + Usage: + + q = httpx.QueryParams("a=123&a=456&b=789") + assert q.get_list("a") == ["123", "456"] + """ + return list(self._dict.get(str(key), [])) + + def set(self, key: str, value: typing.Any = None) -> QueryParams: + """ + Return a new QueryParams instance, setting the value of a key. 
+ + Usage: + + q = httpx.QueryParams("a=123") + q = q.set("a", "456") + assert q == httpx.QueryParams("a=456") + """ + q = QueryParams() + q._dict = dict(self._dict) + q._dict[str(key)] = [primitive_value_to_str(value)] + return q + + def add(self, key: str, value: typing.Any = None) -> QueryParams: + """ + Return a new QueryParams instance, setting or appending the value of a key. + + Usage: + + q = httpx.QueryParams("a=123") + q = q.add("a", "456") + assert q == httpx.QueryParams("a=123&a=456") + """ + q = QueryParams() + q._dict = dict(self._dict) + q._dict[str(key)] = q.get_list(key) + [primitive_value_to_str(value)] + return q + + def remove(self, key: str) -> QueryParams: + """ + Return a new QueryParams instance, removing the value of a key. + + Usage: + + q = httpx.QueryParams("a=123") + q = q.remove("a") + assert q == httpx.QueryParams("") + """ + q = QueryParams() + q._dict = dict(self._dict) + q._dict.pop(str(key), None) + return q + + def merge(self, params: QueryParamTypes | None = None) -> QueryParams: + """ + Return a new QueryParams instance, updated with. + + Usage: + + q = httpx.QueryParams("a=123") + q = q.merge({"b": "456"}) + assert q == httpx.QueryParams("a=123&b=456") + + q = httpx.QueryParams("a=123") + q = q.merge({"a": "456", "b": "789"}) + assert q == httpx.QueryParams("a=456&b=789") + """ + q = QueryParams(params) + q._dict = {**self._dict, **q._dict} + return q + + def __getitem__(self, key: typing.Any) -> str: + return self._dict[key][0] + + def __contains__(self, key: typing.Any) -> bool: + return key in self._dict + + def __iter__(self) -> typing.Iterator[typing.Any]: + return iter(self.keys()) + + def __len__(self) -> int: + return len(self._dict) + + def __bool__(self) -> bool: + return bool(self._dict) + + def __hash__(self) -> int: + return hash(str(self)) + + def __eq__(self, other: typing.Any) -> bool: + if not isinstance(other, self.__class__): + return False + return sorted(self.multi_items()) == sorted(other.multi_items()) + + def __str__(self) -> str: + return urlencode(self.multi_items()) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + query_string = str(self) + return f"{class_name}({query_string!r})" + + def update(self, params: QueryParamTypes | None = None) -> None: + raise RuntimeError( + "QueryParams are immutable since 0.18.0. " + "Use `q = q.merge(...)` to create an updated copy." + ) + + def __setitem__(self, key: str, value: str) -> None: + raise RuntimeError( + "QueryParams are immutable since 0.18.0. " + "Use `q = q.set(key, value)` to create an updated copy." + ) diff --git a/venv/Lib/site-packages/httpx/_utils.py b/venv/Lib/site-packages/httpx/_utils.py new file mode 100644 index 00000000..7fe827da --- /dev/null +++ b/venv/Lib/site-packages/httpx/_utils.py @@ -0,0 +1,242 @@ +from __future__ import annotations + +import ipaddress +import os +import re +import typing +from urllib.request import getproxies + +from ._types import PrimitiveData + +if typing.TYPE_CHECKING: # pragma: no cover + from ._urls import URL + + +def primitive_value_to_str(value: PrimitiveData) -> str: + """ + Coerce a primitive data type into a string value. + + Note that we prefer JSON-style 'true'/'false' for boolean values here. 
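+    For example: True -> "true", False -> "false", None -> "".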
+    """
+    if value is True:
+        return "true"
+    elif value is False:
+        return "false"
+    elif value is None:
+        return ""
+    return str(value)
+
+
+def get_environment_proxies() -> dict[str, str | None]:
+    """Gets proxy information from the environment"""
+
+    # urllib.request.getproxies() falls back on System
+    # Registry and Config for proxies on Windows and macOS.
+    # We don't want to propagate non-HTTP proxies into
+    # our configuration such as 'TRAVIS_APT_PROXY'.
+    proxy_info = getproxies()
+    mounts: dict[str, str | None] = {}
+
+    for scheme in ("http", "https", "all"):
+        if proxy_info.get(scheme):
+            hostname = proxy_info[scheme]
+            mounts[f"{scheme}://"] = (
+                hostname if "://" in hostname else f"http://{hostname}"
+            )
+
+    no_proxy_hosts = [host.strip() for host in proxy_info.get("no", "").split(",")]
+    for hostname in no_proxy_hosts:
+        # See https://curl.haxx.se/libcurl/c/CURLOPT_NOPROXY.html for details
+        # on how names in `NO_PROXY` are handled.
+        if hostname == "*":
+            # If NO_PROXY=* is used or if "*" occurs as any one of the comma
+            # separated hostnames, then we should just bypass any information
+            # from HTTP_PROXY, HTTPS_PROXY, ALL_PROXY, and always ignore
+            # proxies.
+            return {}
+        elif hostname:
+            # NO_PROXY=.google.com is marked as "all://*.google.com",
+            # which disables "www.google.com" but not "google.com"
+            # NO_PROXY=google.com is marked as "all://*google.com",
+            # which disables "www.google.com" and "google.com".
+            # (But not "wwwgoogle.com")
+            # NO_PROXY can include domains, IPv6, IPv4 addresses and "localhost"
+            # NO_PROXY=example.com,::1,localhost,192.168.0.0/16
+            if "://" in hostname:
+                mounts[hostname] = None
+            elif is_ipv4_hostname(hostname):
+                mounts[f"all://{hostname}"] = None
+            elif is_ipv6_hostname(hostname):
+                mounts[f"all://[{hostname}]"] = None
+            elif hostname.lower() == "localhost":
+                mounts[f"all://{hostname}"] = None
+            else:
+                mounts[f"all://*{hostname}"] = None
+
+    return mounts
+
+
+def to_bytes(value: str | bytes, encoding: str = "utf-8") -> bytes:
+    return value.encode(encoding) if isinstance(value, str) else value
+
+
+def to_str(value: str | bytes, encoding: str = "utf-8") -> str:
+    return value if isinstance(value, str) else value.decode(encoding)
+
+
+def to_bytes_or_str(value: str, match_type_of: typing.AnyStr) -> typing.AnyStr:
+    return value if isinstance(match_type_of, str) else value.encode()
+
+
+def unquote(value: str) -> str:
+    return value[1:-1] if value[0] == value[-1] == '"' else value
+
+
+def peek_filelike_length(stream: typing.Any) -> int | None:
+    """
+    Given a file-like stream object, return its length in number of bytes
+    without reading it into memory.
+    """
+    try:
+        # Is it an actual file?
+        fd = stream.fileno()
+        # Yup, seems to be an actual file.
+        length = os.fstat(fd).st_size
+    except (AttributeError, OSError):
+        # No... Maybe it's something that supports random access, like `io.BytesIO`?
+        try:
+            # Assuming so, go to end of stream to figure out its length,
+            # then put it back in place.
+            offset = stream.tell()
+            length = stream.seek(0, os.SEEK_END)
+            stream.seek(offset)
+        except (AttributeError, OSError):
+            # Not even that? Sorry, we're doomed...
+            return None
+
+    return length
+
+
+class URLPattern:
+    """
+    A utility class currently used for making lookups against proxy keys...
+
+    # Wildcard matching...
+    >>> pattern = URLPattern("all://")
+    >>> pattern.matches(httpx.URL("http://example.com"))
+    True
+
+    # With scheme matching...
+ >>> pattern = URLPattern("https://") + >>> pattern.matches(httpx.URL("https://example.com")) + True + >>> pattern.matches(httpx.URL("http://example.com")) + False + + # With domain matching... + >>> pattern = URLPattern("https://example.com") + >>> pattern.matches(httpx.URL("https://example.com")) + True + >>> pattern.matches(httpx.URL("http://example.com")) + False + >>> pattern.matches(httpx.URL("https://other.com")) + False + + # Wildcard scheme, with domain matching... + >>> pattern = URLPattern("all://example.com") + >>> pattern.matches(httpx.URL("https://example.com")) + True + >>> pattern.matches(httpx.URL("http://example.com")) + True + >>> pattern.matches(httpx.URL("https://other.com")) + False + + # With port matching... + >>> pattern = URLPattern("https://example.com:1234") + >>> pattern.matches(httpx.URL("https://example.com:1234")) + True + >>> pattern.matches(httpx.URL("https://example.com")) + False + """ + + def __init__(self, pattern: str) -> None: + from ._urls import URL + + if pattern and ":" not in pattern: + raise ValueError( + f"Proxy keys should use proper URL forms rather " + f"than plain scheme strings. " + f'Instead of "{pattern}", use "{pattern}://"' + ) + + url = URL(pattern) + self.pattern = pattern + self.scheme = "" if url.scheme == "all" else url.scheme + self.host = "" if url.host == "*" else url.host + self.port = url.port + if not url.host or url.host == "*": + self.host_regex: typing.Pattern[str] | None = None + elif url.host.startswith("*."): + # *.example.com should match "www.example.com", but not "example.com" + domain = re.escape(url.host[2:]) + self.host_regex = re.compile(f"^.+\\.{domain}$") + elif url.host.startswith("*"): + # *example.com should match "www.example.com" and "example.com" + domain = re.escape(url.host[1:]) + self.host_regex = re.compile(f"^(.+\\.)?{domain}$") + else: + # example.com should match "example.com" but not "www.example.com" + domain = re.escape(url.host) + self.host_regex = re.compile(f"^{domain}$") + + def matches(self, other: URL) -> bool: + if self.scheme and self.scheme != other.scheme: + return False + if ( + self.host + and self.host_regex is not None + and not self.host_regex.match(other.host) + ): + return False + if self.port is not None and self.port != other.port: + return False + return True + + @property + def priority(self) -> tuple[int, int, int]: + """ + The priority allows URLPattern instances to be sortable, so that + we can match from most specific to least specific. + """ + # URLs with a port should take priority over URLs without a port. + port_priority = 0 if self.port is not None else 1 + # Longer hostnames should match first. + host_priority = -len(self.host) + # Longer schemes should match first. 
+ scheme_priority = -len(self.scheme) + return (port_priority, host_priority, scheme_priority) + + def __hash__(self) -> int: + return hash(self.pattern) + + def __lt__(self, other: URLPattern) -> bool: + return self.priority < other.priority + + def __eq__(self, other: typing.Any) -> bool: + return isinstance(other, URLPattern) and self.pattern == other.pattern + + +def is_ipv4_hostname(hostname: str) -> bool: + try: + ipaddress.IPv4Address(hostname.split("/")[0]) + except Exception: + return False + return True + + +def is_ipv6_hostname(hostname: str) -> bool: + try: + ipaddress.IPv6Address(hostname.split("/")[0]) + except Exception: + return False + return True diff --git a/venv/Lib/site-packages/httpx/py.typed b/venv/Lib/site-packages/httpx/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/idna-3.10.dist-info/INSTALLER b/venv/Lib/site-packages/idna-3.10.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/idna-3.10.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/idna-3.10.dist-info/LICENSE.md b/venv/Lib/site-packages/idna-3.10.dist-info/LICENSE.md new file mode 100644 index 00000000..19b6b452 --- /dev/null +++ b/venv/Lib/site-packages/idna-3.10.dist-info/LICENSE.md @@ -0,0 +1,31 @@ +BSD 3-Clause License + +Copyright (c) 2013-2024, Kim Davies and contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/venv/Lib/site-packages/idna-3.10.dist-info/METADATA b/venv/Lib/site-packages/idna-3.10.dist-info/METADATA new file mode 100644 index 00000000..c42623e9 --- /dev/null +++ b/venv/Lib/site-packages/idna-3.10.dist-info/METADATA @@ -0,0 +1,250 @@ +Metadata-Version: 2.1 +Name: idna +Version: 3.10 +Summary: Internationalized Domain Names in Applications (IDNA) +Author-email: Kim Davies +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Internet :: Name Service (DNS) +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Utilities +Requires-Dist: ruff >= 0.6.2 ; extra == "all" +Requires-Dist: mypy >= 1.11.2 ; extra == "all" +Requires-Dist: pytest >= 8.3.2 ; extra == "all" +Requires-Dist: flake8 >= 7.1.1 ; extra == "all" +Project-URL: Changelog, https://github.com/kjd/idna/blob/master/HISTORY.rst +Project-URL: Issue tracker, https://github.com/kjd/idna/issues +Project-URL: Source, https://github.com/kjd/idna +Provides-Extra: all + +Internationalized Domain Names in Applications (IDNA) +===================================================== + +Support for the Internationalized Domain Names in +Applications (IDNA) protocol as specified in `RFC 5891 +`_. This is the latest version of +the protocol and is sometimes referred to as “IDNA 2008”. + +This library also provides support for Unicode Technical +Standard 46, `Unicode IDNA Compatibility Processing +`_. + +This acts as a suitable replacement for the “encodings.idna” +module that comes with the Python standard library, but which +only supports the older superseded IDNA specification (`RFC 3490 +`_). + +Basic functions are simply executed: + +.. code-block:: pycon + + >>> import idna + >>> idna.encode('ドメイン.テスト') + b'xn--eckwd4c7c.xn--zckzah' + >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah')) + ドメイン.テスト + + +Installation +------------ + +This package is available for installation from PyPI: + +.. code-block:: bash + + $ python3 -m pip install idna + + +Usage +----- + +For typical usage, the ``encode`` and ``decode`` functions will take a +domain name argument and perform a conversion to A-labels or U-labels +respectively. + +.. code-block:: pycon + + >>> import idna + >>> idna.encode('ドメイン.テスト') + b'xn--eckwd4c7c.xn--zckzah' + >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah')) + ドメイン.テスト + +You may use the codec encoding and decoding methods using the +``idna.codec`` module: + +.. 
code-block:: pycon + + >>> import idna.codec + >>> print('домен.испытание'.encode('idna2008')) + b'xn--d1acufc.xn--80akhbyknj4f' + >>> print(b'xn--d1acufc.xn--80akhbyknj4f'.decode('idna2008')) + домен.испытание + +Conversions can be applied at a per-label basis using the ``ulabel`` or +``alabel`` functions if necessary: + +.. code-block:: pycon + + >>> idna.alabel('测试') + b'xn--0zwm56d' + +Compatibility Mapping (UTS #46) ++++++++++++++++++++++++++++++++ + +As described in `RFC 5895 `_, the +IDNA specification does not normalize input from different potential +ways a user may input a domain name. This functionality, known as +a “mapping”, is considered by the specification to be a local +user-interface issue distinct from IDNA conversion functionality. + +This library provides one such mapping that was developed by the +Unicode Consortium. Known as `Unicode IDNA Compatibility Processing +`_, it provides for both a regular +mapping for typical applications, as well as a transitional mapping to +help migrate from older IDNA 2003 applications. Strings are +preprocessed according to Section 4.4 “Preprocessing for IDNA2008” +prior to the IDNA operations. + +For example, “Königsgäßchen” is not a permissible label as *LATIN +CAPITAL LETTER K* is not allowed (nor are capital letters in general). +UTS 46 will convert this into lower case prior to applying the IDNA +conversion. + +.. code-block:: pycon + + >>> import idna + >>> idna.encode('Königsgäßchen') + ... + idna.core.InvalidCodepoint: Codepoint U+004B at position 1 of 'Königsgäßchen' not allowed + >>> idna.encode('Königsgäßchen', uts46=True) + b'xn--knigsgchen-b4a3dun' + >>> print(idna.decode('xn--knigsgchen-b4a3dun')) + königsgäßchen + +Transitional processing provides conversions to help transition from +the older 2003 standard to the current standard. For example, in the +original IDNA specification, the *LATIN SMALL LETTER SHARP S* (ß) was +converted into two *LATIN SMALL LETTER S* (ss), whereas in the current +IDNA specification this conversion is not performed. + +.. code-block:: pycon + + >>> idna.encode('Königsgäßchen', uts46=True, transitional=True) + 'xn--knigsgsschen-lcb0w' + +Implementers should use transitional processing with caution, only in +rare cases where conversion from legacy labels to current labels must be +performed (i.e. IDNA implementations that pre-date 2008). For typical +applications that just need to convert labels, transitional processing +is unlikely to be beneficial and could produce unexpected incompatible +results. + +``encodings.idna`` Compatibility +++++++++++++++++++++++++++++++++ + +Function calls from the Python built-in ``encodings.idna`` module are +mapped to their IDNA 2008 equivalents using the ``idna.compat`` module. +Simply substitute the ``import`` clause in your code to refer to the new +module name. + +Exceptions +---------- + +All errors raised during the conversion following the specification +should raise an exception derived from the ``idna.IDNAError`` base +class. + +More specific exceptions that may be generated as ``idna.IDNABidiError`` +when the error reflects an illegal combination of left-to-right and +right-to-left characters in a label; ``idna.InvalidCodepoint`` when +a specific codepoint is an illegal character in an IDN label (i.e. +INVALID); and ``idna.InvalidCodepointContext`` when the codepoint is +illegal based on its positional context (i.e. it is CONTEXTO or CONTEXTJ +but the contextual requirements are not satisfied.) 
+ +Building and Diagnostics +------------------------ + +The IDNA and UTS 46 functionality relies upon pre-calculated lookup +tables for performance. These tables are derived from computing against +eligibility criteria in the respective standards. These tables are +computed using the command-line script ``tools/idna-data``. + +This tool will fetch relevant codepoint data from the Unicode repository +and perform the required calculations to identify eligibility. There are +three main modes: + +* ``idna-data make-libdata``. Generates ``idnadata.py`` and + ``uts46data.py``, the pre-calculated lookup tables used for IDNA and + UTS 46 conversions. Implementers who wish to track this library against + a different Unicode version may use this tool to manually generate a + different version of the ``idnadata.py`` and ``uts46data.py`` files. + +* ``idna-data make-table``. Generate a table of the IDNA disposition + (e.g. PVALID, CONTEXTJ, CONTEXTO) in the format found in Appendix + B.1 of RFC 5892 and the pre-computed tables published by `IANA + `_. + +* ``idna-data U+0061``. Prints debugging output on the various + properties associated with an individual Unicode codepoint (in this + case, U+0061), that are used to assess the IDNA and UTS 46 status of a + codepoint. This is helpful in debugging or analysis. + +The tool accepts a number of arguments, described using ``idna-data +-h``. Most notably, the ``--version`` argument allows the specification +of the version of Unicode to be used in computing the table data. For +example, ``idna-data --version 9.0.0 make-libdata`` will generate +library data against Unicode 9.0.0. + + +Additional Notes +---------------- + +* **Packages**. The latest tagged release version is published in the + `Python Package Index `_. + +* **Version support**. This library supports Python 3.6 and higher. + As this library serves as a low-level toolkit for a variety of + applications, many of which strive for broad compatibility with older + Python versions, there is no rush to remove older interpreter support. + Removing support for older versions should be well justified in that the + maintenance burden has become too high. + +* **Python 2**. Python 2 is supported by version 2.x of this library. + Use "idna<3" in your requirements file if you need this library for + a Python 2 application. Be advised that these versions are no longer + actively developed. + +* **Testing**. The library has a test suite based on each rule of the + IDNA specification, as well as tests that are provided as part of the + Unicode Technical Standard 46, `Unicode IDNA Compatibility Processing + `_. + +* **Emoji**. It is an occasional request to support emoji domains in + this library. Encoding of symbols like emoji is expressly prohibited by + the technical standard IDNA 2008 and emoji domains are broadly phased + out across the domain industry due to associated security risks. For + now, applications that need to support these non-compliant labels + may wish to consider trying the encode/decode operation in this library + first, and then falling back to using `encodings.idna`. See `the Github + project `_ for more discussion. 
+ diff --git a/venv/Lib/site-packages/idna-3.10.dist-info/RECORD b/venv/Lib/site-packages/idna-3.10.dist-info/RECORD new file mode 100644 index 00000000..54f66098 --- /dev/null +++ b/venv/Lib/site-packages/idna-3.10.dist-info/RECORD @@ -0,0 +1,22 @@ +idna-3.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +idna-3.10.dist-info/LICENSE.md,sha256=pZ8LDvNjWHQQmkRhykT_enDVBpboFHZ7-vch1Mmw2w8,1541 +idna-3.10.dist-info/METADATA,sha256=URR5ZyDfQ1PCEGhkYoojqfi2Ra0tau2--lhwG4XSfjI,10158 +idna-3.10.dist-info/RECORD,, +idna-3.10.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 +idna/__init__.py,sha256=MPqNDLZbXqGaNdXxAFhiqFPKEQXju2jNQhCey6-5eJM,868 +idna/__pycache__/__init__.cpython-312.pyc,, +idna/__pycache__/codec.cpython-312.pyc,, +idna/__pycache__/compat.cpython-312.pyc,, +idna/__pycache__/core.cpython-312.pyc,, +idna/__pycache__/idnadata.cpython-312.pyc,, +idna/__pycache__/intranges.cpython-312.pyc,, +idna/__pycache__/package_data.cpython-312.pyc,, +idna/__pycache__/uts46data.cpython-312.pyc,, +idna/codec.py,sha256=PEew3ItwzjW4hymbasnty2N2OXvNcgHB-JjrBuxHPYY,3422 +idna/compat.py,sha256=RzLy6QQCdl9784aFhb2EX9EKGCJjg0P3PilGdeXXcx8,316 +idna/core.py,sha256=YJYyAMnwiQEPjVC4-Fqu_p4CJ6yKKuDGmppBNQNQpFs,13239 +idna/idnadata.py,sha256=W30GcIGvtOWYwAjZj4ZjuouUutC6ffgNuyjJy7fZ-lo,78306 +idna/intranges.py,sha256=amUtkdhYcQG8Zr-CoMM_kVRacxkivC1WgxN1b63KKdU,1898 +idna/package_data.py,sha256=q59S3OXsc5VI8j6vSD0sGBMyk6zZ4vWFREE88yCJYKs,21 +idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +idna/uts46data.py,sha256=rt90K9J40gUSwppDPCrhjgi5AA6pWM65dEGRSf6rIhM,239289 diff --git a/venv/Lib/site-packages/idna-3.10.dist-info/WHEEL b/venv/Lib/site-packages/idna-3.10.dist-info/WHEEL new file mode 100644 index 00000000..3b5e64b5 --- /dev/null +++ b/venv/Lib/site-packages/idna-3.10.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/Lib/site-packages/idna/__init__.py b/venv/Lib/site-packages/idna/__init__.py new file mode 100644 index 00000000..cfdc030a --- /dev/null +++ b/venv/Lib/site-packages/idna/__init__.py @@ -0,0 +1,45 @@ +from .core import ( + IDNABidiError, + IDNAError, + InvalidCodepoint, + InvalidCodepointContext, + alabel, + check_bidi, + check_hyphen_ok, + check_initial_combiner, + check_label, + check_nfc, + decode, + encode, + ulabel, + uts46_remap, + valid_contextj, + valid_contexto, + valid_label_length, + valid_string_length, +) +from .intranges import intranges_contain +from .package_data import __version__ + +__all__ = [ + "__version__", + "IDNABidiError", + "IDNAError", + "InvalidCodepoint", + "InvalidCodepointContext", + "alabel", + "check_bidi", + "check_hyphen_ok", + "check_initial_combiner", + "check_label", + "check_nfc", + "decode", + "encode", + "intranges_contain", + "ulabel", + "uts46_remap", + "valid_contextj", + "valid_contexto", + "valid_label_length", + "valid_string_length", +] diff --git a/venv/Lib/site-packages/idna/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/idna/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..02bc232a Binary files /dev/null and b/venv/Lib/site-packages/idna/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/idna/__pycache__/codec.cpython-312.pyc b/venv/Lib/site-packages/idna/__pycache__/codec.cpython-312.pyc new file mode 100644 index 00000000..afc23384 Binary files /dev/null and b/venv/Lib/site-packages/idna/__pycache__/codec.cpython-312.pyc differ 
diff --git a/venv/Lib/site-packages/idna/__pycache__/compat.cpython-312.pyc b/venv/Lib/site-packages/idna/__pycache__/compat.cpython-312.pyc new file mode 100644 index 00000000..39c21784 Binary files /dev/null and b/venv/Lib/site-packages/idna/__pycache__/compat.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/idna/__pycache__/core.cpython-312.pyc b/venv/Lib/site-packages/idna/__pycache__/core.cpython-312.pyc new file mode 100644 index 00000000..02e5a3ab Binary files /dev/null and b/venv/Lib/site-packages/idna/__pycache__/core.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/idna/__pycache__/idnadata.cpython-312.pyc b/venv/Lib/site-packages/idna/__pycache__/idnadata.cpython-312.pyc new file mode 100644 index 00000000..746a47a2 Binary files /dev/null and b/venv/Lib/site-packages/idna/__pycache__/idnadata.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/idna/__pycache__/intranges.cpython-312.pyc b/venv/Lib/site-packages/idna/__pycache__/intranges.cpython-312.pyc new file mode 100644 index 00000000..e528c6e9 Binary files /dev/null and b/venv/Lib/site-packages/idna/__pycache__/intranges.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/idna/__pycache__/package_data.cpython-312.pyc b/venv/Lib/site-packages/idna/__pycache__/package_data.cpython-312.pyc new file mode 100644 index 00000000..a209252a Binary files /dev/null and b/venv/Lib/site-packages/idna/__pycache__/package_data.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/idna/__pycache__/uts46data.cpython-312.pyc b/venv/Lib/site-packages/idna/__pycache__/uts46data.cpython-312.pyc new file mode 100644 index 00000000..954e8ee1 Binary files /dev/null and b/venv/Lib/site-packages/idna/__pycache__/uts46data.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/idna/codec.py b/venv/Lib/site-packages/idna/codec.py new file mode 100644 index 00000000..913abfd6 --- /dev/null +++ b/venv/Lib/site-packages/idna/codec.py @@ -0,0 +1,122 @@ +import codecs +import re +from typing import Any, Optional, Tuple + +from .core import IDNAError, alabel, decode, encode, ulabel + +_unicode_dots_re = re.compile("[\u002e\u3002\uff0e\uff61]") + + +class Codec(codecs.Codec): + def encode(self, data: str, errors: str = "strict") -> Tuple[bytes, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return b"", 0 + + return encode(data), len(data) + + def decode(self, data: bytes, errors: str = "strict") -> Tuple[str, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return "", 0 + + return decode(data), len(data) + + +class IncrementalEncoder(codecs.BufferedIncrementalEncoder): + def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[bytes, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return b"", 0 + + labels = _unicode_dots_re.split(data) + trailing_dot = b"" + if labels: + if not labels[-1]: + trailing_dot = b"." + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = b"." 
+ + result = [] + size = 0 + for label in labels: + result.append(alabel(label)) + if size: + size += 1 + size += len(label) + + # Join with U+002E + result_bytes = b".".join(result) + trailing_dot + size += len(trailing_dot) + return result_bytes, size + + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def _buffer_decode(self, data: Any, errors: str, final: bool) -> Tuple[str, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return ("", 0) + + if not isinstance(data, str): + data = str(data, "ascii") + + labels = _unicode_dots_re.split(data) + trailing_dot = "" + if labels: + if not labels[-1]: + trailing_dot = "." + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = "." + + result = [] + size = 0 + for label in labels: + result.append(ulabel(label)) + if size: + size += 1 + size += len(label) + + result_str = ".".join(result) + trailing_dot + size += len(trailing_dot) + return (result_str, size) + + +class StreamWriter(Codec, codecs.StreamWriter): + pass + + +class StreamReader(Codec, codecs.StreamReader): + pass + + +def search_function(name: str) -> Optional[codecs.CodecInfo]: + if name != "idna2008": + return None + return codecs.CodecInfo( + name=name, + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) + + +codecs.register(search_function) diff --git a/venv/Lib/site-packages/idna/compat.py b/venv/Lib/site-packages/idna/compat.py new file mode 100644 index 00000000..1df9f2a7 --- /dev/null +++ b/venv/Lib/site-packages/idna/compat.py @@ -0,0 +1,15 @@ +from typing import Any, Union + +from .core import decode, encode + + +def ToASCII(label: str) -> bytes: + return encode(label) + + +def ToUnicode(label: Union[bytes, bytearray]) -> str: + return decode(label) + + +def nameprep(s: Any) -> None: + raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol") diff --git a/venv/Lib/site-packages/idna/core.py b/venv/Lib/site-packages/idna/core.py new file mode 100644 index 00000000..9115f123 --- /dev/null +++ b/venv/Lib/site-packages/idna/core.py @@ -0,0 +1,437 @@ +import bisect +import re +import unicodedata +from typing import Optional, Union + +from . 
import idnadata +from .intranges import intranges_contain + +_virama_combining_class = 9 +_alabel_prefix = b"xn--" +_unicode_dots_re = re.compile("[\u002e\u3002\uff0e\uff61]") + + +class IDNAError(UnicodeError): + """Base exception for all IDNA-encoding related problems""" + + pass + + +class IDNABidiError(IDNAError): + """Exception when bidirectional requirements are not satisfied""" + + pass + + +class InvalidCodepoint(IDNAError): + """Exception when a disallowed or unallocated codepoint is used""" + + pass + + +class InvalidCodepointContext(IDNAError): + """Exception when the codepoint is not valid in the context it is used""" + + pass + + +def _combining_class(cp: int) -> int: + v = unicodedata.combining(chr(cp)) + if v == 0: + if not unicodedata.name(chr(cp)): + raise ValueError("Unknown character in unicodedata") + return v + + +def _is_script(cp: str, script: str) -> bool: + return intranges_contain(ord(cp), idnadata.scripts[script]) + + +def _punycode(s: str) -> bytes: + return s.encode("punycode") + + +def _unot(s: int) -> str: + return "U+{:04X}".format(s) + + +def valid_label_length(label: Union[bytes, str]) -> bool: + if len(label) > 63: + return False + return True + + +def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool: + if len(label) > (254 if trailing_dot else 253): + return False + return True + + +def check_bidi(label: str, check_ltr: bool = False) -> bool: + # Bidi rules should only be applied if string contains RTL characters + bidi_label = False + for idx, cp in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + if direction == "": + # String likely comes from a newer version of Unicode + raise IDNABidiError("Unknown directionality in label {} at position {}".format(repr(label), idx)) + if direction in ["R", "AL", "AN"]: + bidi_label = True + if not bidi_label and not check_ltr: + return True + + # Bidi rule 1 + direction = unicodedata.bidirectional(label[0]) + if direction in ["R", "AL"]: + rtl = True + elif direction == "L": + rtl = False + else: + raise IDNABidiError("First codepoint in label {} must be directionality L, R or AL".format(repr(label))) + + valid_ending = False + number_type: Optional[str] = None + for idx, cp in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + + if rtl: + # Bidi rule 2 + if direction not in [ + "R", + "AL", + "AN", + "EN", + "ES", + "CS", + "ET", + "ON", + "BN", + "NSM", + ]: + raise IDNABidiError("Invalid direction for codepoint at position {} in a right-to-left label".format(idx)) + # Bidi rule 3 + if direction in ["R", "AL", "EN", "AN"]: + valid_ending = True + elif direction != "NSM": + valid_ending = False + # Bidi rule 4 + if direction in ["AN", "EN"]: + if not number_type: + number_type = direction + else: + if number_type != direction: + raise IDNABidiError("Can not mix numeral types in a right-to-left label") + else: + # Bidi rule 5 + if direction not in ["L", "EN", "ES", "CS", "ET", "ON", "BN", "NSM"]: + raise IDNABidiError("Invalid direction for codepoint at position {} in a left-to-right label".format(idx)) + # Bidi rule 6 + if direction in ["L", "EN"]: + valid_ending = True + elif direction != "NSM": + valid_ending = False + + if not valid_ending: + raise IDNABidiError("Label ends with illegal codepoint directionality") + + return True + + +def check_initial_combiner(label: str) -> bool: + if unicodedata.category(label[0])[0] == "M": + raise IDNAError("Label begins with an illegal combining character") + return True + + +def check_hyphen_ok(label: str) -> bool: 
+ if label[2:4] == "--": + raise IDNAError("Label has disallowed hyphens in 3rd and 4th position") + if label[0] == "-" or label[-1] == "-": + raise IDNAError("Label must not start or end with a hyphen") + return True + + +def check_nfc(label: str) -> None: + if unicodedata.normalize("NFC", label) != label: + raise IDNAError("Label must be in Normalization Form C") + + +def valid_contextj(label: str, pos: int) -> bool: + cp_value = ord(label[pos]) + + if cp_value == 0x200C: + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + + ok = False + for i in range(pos - 1, -1, -1): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord("T"): + continue + elif joining_type in [ord("L"), ord("D")]: + ok = True + break + else: + break + + if not ok: + return False + + ok = False + for i in range(pos + 1, len(label)): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord("T"): + continue + elif joining_type in [ord("R"), ord("D")]: + ok = True + break + else: + break + return ok + + if cp_value == 0x200D: + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + return False + + else: + return False + + +def valid_contexto(label: str, pos: int, exception: bool = False) -> bool: + cp_value = ord(label[pos]) + + if cp_value == 0x00B7: + if 0 < pos < len(label) - 1: + if ord(label[pos - 1]) == 0x006C and ord(label[pos + 1]) == 0x006C: + return True + return False + + elif cp_value == 0x0375: + if pos < len(label) - 1 and len(label) > 1: + return _is_script(label[pos + 1], "Greek") + return False + + elif cp_value == 0x05F3 or cp_value == 0x05F4: + if pos > 0: + return _is_script(label[pos - 1], "Hebrew") + return False + + elif cp_value == 0x30FB: + for cp in label: + if cp == "\u30fb": + continue + if _is_script(cp, "Hiragana") or _is_script(cp, "Katakana") or _is_script(cp, "Han"): + return True + return False + + elif 0x660 <= cp_value <= 0x669: + for cp in label: + if 0x6F0 <= ord(cp) <= 0x06F9: + return False + return True + + elif 0x6F0 <= cp_value <= 0x6F9: + for cp in label: + if 0x660 <= ord(cp) <= 0x0669: + return False + return True + + return False + + +def check_label(label: Union[str, bytes, bytearray]) -> None: + if isinstance(label, (bytes, bytearray)): + label = label.decode("utf-8") + if len(label) == 0: + raise IDNAError("Empty Label") + + check_nfc(label) + check_hyphen_ok(label) + check_initial_combiner(label) + + for pos, cp in enumerate(label): + cp_value = ord(cp) + if intranges_contain(cp_value, idnadata.codepoint_classes["PVALID"]): + continue + elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTJ"]): + try: + if not valid_contextj(label, pos): + raise InvalidCodepointContext( + "Joiner {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label)) + ) + except ValueError: + raise IDNAError( + "Unknown codepoint adjacent to joiner {} at position {} in {}".format( + _unot(cp_value), pos + 1, repr(label) + ) + ) + elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTO"]): + if not valid_contexto(label, pos): + raise InvalidCodepointContext( + "Codepoint {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label)) + ) + else: + raise InvalidCodepoint( + "Codepoint {} at position {} of {} not allowed".format(_unot(cp_value), pos + 1, repr(label)) + ) + + check_bidi(label) + + +def alabel(label: str) -> bytes: + try: + label_bytes = label.encode("ascii") + 
ulabel(label_bytes) + if not valid_label_length(label_bytes): + raise IDNAError("Label too long") + return label_bytes + except UnicodeEncodeError: + pass + + check_label(label) + label_bytes = _alabel_prefix + _punycode(label) + + if not valid_label_length(label_bytes): + raise IDNAError("Label too long") + + return label_bytes + + +def ulabel(label: Union[str, bytes, bytearray]) -> str: + if not isinstance(label, (bytes, bytearray)): + try: + label_bytes = label.encode("ascii") + except UnicodeEncodeError: + check_label(label) + return label + else: + label_bytes = label + + label_bytes = label_bytes.lower() + if label_bytes.startswith(_alabel_prefix): + label_bytes = label_bytes[len(_alabel_prefix) :] + if not label_bytes: + raise IDNAError("Malformed A-label, no Punycode eligible content found") + if label_bytes.decode("ascii")[-1] == "-": + raise IDNAError("A-label must not end with a hyphen") + else: + check_label(label_bytes) + return label_bytes.decode("ascii") + + try: + label = label_bytes.decode("punycode") + except UnicodeError: + raise IDNAError("Invalid A-label") + check_label(label) + return label + + +def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str: + """Re-map the characters in the string according to UTS46 processing.""" + from .uts46data import uts46data + + output = "" + + for pos, char in enumerate(domain): + code_point = ord(char) + try: + uts46row = uts46data[code_point if code_point < 256 else bisect.bisect_left(uts46data, (code_point, "Z")) - 1] + status = uts46row[1] + replacement: Optional[str] = None + if len(uts46row) == 3: + replacement = uts46row[2] + if ( + status == "V" + or (status == "D" and not transitional) + or (status == "3" and not std3_rules and replacement is None) + ): + output += char + elif replacement is not None and ( + status == "M" or (status == "3" and not std3_rules) or (status == "D" and transitional) + ): + output += replacement + elif status != "I": + raise IndexError() + except IndexError: + raise InvalidCodepoint( + "Codepoint {} not allowed at position {} in {}".format(_unot(code_point), pos + 1, repr(domain)) + ) + + return unicodedata.normalize("NFC", output) + + +def encode( + s: Union[str, bytes, bytearray], + strict: bool = False, + uts46: bool = False, + std3_rules: bool = False, + transitional: bool = False, +) -> bytes: + if not isinstance(s, str): + try: + s = str(s, "ascii") + except UnicodeDecodeError: + raise IDNAError("should pass a unicode string to the function rather than a byte string.") + if uts46: + s = uts46_remap(s, std3_rules, transitional) + trailing_dot = False + result = [] + if strict: + labels = s.split(".") + else: + labels = _unicode_dots_re.split(s) + if not labels or labels == [""]: + raise IDNAError("Empty domain") + if labels[-1] == "": + del labels[-1] + trailing_dot = True + for label in labels: + s = alabel(label) + if s: + result.append(s) + else: + raise IDNAError("Empty label") + if trailing_dot: + result.append(b"") + s = b".".join(result) + if not valid_string_length(s, trailing_dot): + raise IDNAError("Domain too long") + return s + + +def decode( + s: Union[str, bytes, bytearray], + strict: bool = False, + uts46: bool = False, + std3_rules: bool = False, +) -> str: + try: + if not isinstance(s, str): + s = str(s, "ascii") + except UnicodeDecodeError: + raise IDNAError("Invalid ASCII in A-label") + if uts46: + s = uts46_remap(s, std3_rules, False) + trailing_dot = False + result = [] + if not strict: + labels = _unicode_dots_re.split(s) + else: + 
labels = s.split(".") + if not labels or labels == [""]: + raise IDNAError("Empty domain") + if not labels[-1]: + del labels[-1] + trailing_dot = True + for label in labels: + s = ulabel(label) + if s: + result.append(s) + else: + raise IDNAError("Empty label") + if trailing_dot: + result.append("") + return ".".join(result) diff --git a/venv/Lib/site-packages/idna/idnadata.py b/venv/Lib/site-packages/idna/idnadata.py new file mode 100644 index 00000000..4be60046 --- /dev/null +++ b/venv/Lib/site-packages/idna/idnadata.py @@ -0,0 +1,4243 @@ +# This file is automatically generated by tools/idna-data + +__version__ = "15.1.0" +scripts = { + "Greek": ( + 0x37000000374, + 0x37500000378, + 0x37A0000037E, + 0x37F00000380, + 0x38400000385, + 0x38600000387, + 0x3880000038B, + 0x38C0000038D, + 0x38E000003A2, + 0x3A3000003E2, + 0x3F000000400, + 0x1D2600001D2B, + 0x1D5D00001D62, + 0x1D6600001D6B, + 0x1DBF00001DC0, + 0x1F0000001F16, + 0x1F1800001F1E, + 0x1F2000001F46, + 0x1F4800001F4E, + 0x1F5000001F58, + 0x1F5900001F5A, + 0x1F5B00001F5C, + 0x1F5D00001F5E, + 0x1F5F00001F7E, + 0x1F8000001FB5, + 0x1FB600001FC5, + 0x1FC600001FD4, + 0x1FD600001FDC, + 0x1FDD00001FF0, + 0x1FF200001FF5, + 0x1FF600001FFF, + 0x212600002127, + 0xAB650000AB66, + 0x101400001018F, + 0x101A0000101A1, + 0x1D2000001D246, + ), + "Han": ( + 0x2E8000002E9A, + 0x2E9B00002EF4, + 0x2F0000002FD6, + 0x300500003006, + 0x300700003008, + 0x30210000302A, + 0x30380000303C, + 0x340000004DC0, + 0x4E000000A000, + 0xF9000000FA6E, + 0xFA700000FADA, + 0x16FE200016FE4, + 0x16FF000016FF2, + 0x200000002A6E0, + 0x2A7000002B73A, + 0x2B7400002B81E, + 0x2B8200002CEA2, + 0x2CEB00002EBE1, + 0x2EBF00002EE5E, + 0x2F8000002FA1E, + 0x300000003134B, + 0x31350000323B0, + ), + "Hebrew": ( + 0x591000005C8, + 0x5D0000005EB, + 0x5EF000005F5, + 0xFB1D0000FB37, + 0xFB380000FB3D, + 0xFB3E0000FB3F, + 0xFB400000FB42, + 0xFB430000FB45, + 0xFB460000FB50, + ), + "Hiragana": ( + 0x304100003097, + 0x309D000030A0, + 0x1B0010001B120, + 0x1B1320001B133, + 0x1B1500001B153, + 0x1F2000001F201, + ), + "Katakana": ( + 0x30A1000030FB, + 0x30FD00003100, + 0x31F000003200, + 0x32D0000032FF, + 0x330000003358, + 0xFF660000FF70, + 0xFF710000FF9E, + 0x1AFF00001AFF4, + 0x1AFF50001AFFC, + 0x1AFFD0001AFFF, + 0x1B0000001B001, + 0x1B1200001B123, + 0x1B1550001B156, + 0x1B1640001B168, + ), +} +joining_types = { + 0xAD: 84, + 0x300: 84, + 0x301: 84, + 0x302: 84, + 0x303: 84, + 0x304: 84, + 0x305: 84, + 0x306: 84, + 0x307: 84, + 0x308: 84, + 0x309: 84, + 0x30A: 84, + 0x30B: 84, + 0x30C: 84, + 0x30D: 84, + 0x30E: 84, + 0x30F: 84, + 0x310: 84, + 0x311: 84, + 0x312: 84, + 0x313: 84, + 0x314: 84, + 0x315: 84, + 0x316: 84, + 0x317: 84, + 0x318: 84, + 0x319: 84, + 0x31A: 84, + 0x31B: 84, + 0x31C: 84, + 0x31D: 84, + 0x31E: 84, + 0x31F: 84, + 0x320: 84, + 0x321: 84, + 0x322: 84, + 0x323: 84, + 0x324: 84, + 0x325: 84, + 0x326: 84, + 0x327: 84, + 0x328: 84, + 0x329: 84, + 0x32A: 84, + 0x32B: 84, + 0x32C: 84, + 0x32D: 84, + 0x32E: 84, + 0x32F: 84, + 0x330: 84, + 0x331: 84, + 0x332: 84, + 0x333: 84, + 0x334: 84, + 0x335: 84, + 0x336: 84, + 0x337: 84, + 0x338: 84, + 0x339: 84, + 0x33A: 84, + 0x33B: 84, + 0x33C: 84, + 0x33D: 84, + 0x33E: 84, + 0x33F: 84, + 0x340: 84, + 0x341: 84, + 0x342: 84, + 0x343: 84, + 0x344: 84, + 0x345: 84, + 0x346: 84, + 0x347: 84, + 0x348: 84, + 0x349: 84, + 0x34A: 84, + 0x34B: 84, + 0x34C: 84, + 0x34D: 84, + 0x34E: 84, + 0x34F: 84, + 0x350: 84, + 0x351: 84, + 0x352: 84, + 0x353: 84, + 0x354: 84, + 0x355: 84, + 0x356: 84, + 0x357: 84, + 0x358: 84, + 0x359: 84, + 0x35A: 84, + 0x35B: 84, + 
0x35C: 84, + 0x35D: 84, + 0x35E: 84, + 0x35F: 84, + 0x360: 84, + 0x361: 84, + 0x362: 84, + 0x363: 84, + 0x364: 84, + 0x365: 84, + 0x366: 84, + 0x367: 84, + 0x368: 84, + 0x369: 84, + 0x36A: 84, + 0x36B: 84, + 0x36C: 84, + 0x36D: 84, + 0x36E: 84, + 0x36F: 84, + 0x483: 84, + 0x484: 84, + 0x485: 84, + 0x486: 84, + 0x487: 84, + 0x488: 84, + 0x489: 84, + 0x591: 84, + 0x592: 84, + 0x593: 84, + 0x594: 84, + 0x595: 84, + 0x596: 84, + 0x597: 84, + 0x598: 84, + 0x599: 84, + 0x59A: 84, + 0x59B: 84, + 0x59C: 84, + 0x59D: 84, + 0x59E: 84, + 0x59F: 84, + 0x5A0: 84, + 0x5A1: 84, + 0x5A2: 84, + 0x5A3: 84, + 0x5A4: 84, + 0x5A5: 84, + 0x5A6: 84, + 0x5A7: 84, + 0x5A8: 84, + 0x5A9: 84, + 0x5AA: 84, + 0x5AB: 84, + 0x5AC: 84, + 0x5AD: 84, + 0x5AE: 84, + 0x5AF: 84, + 0x5B0: 84, + 0x5B1: 84, + 0x5B2: 84, + 0x5B3: 84, + 0x5B4: 84, + 0x5B5: 84, + 0x5B6: 84, + 0x5B7: 84, + 0x5B8: 84, + 0x5B9: 84, + 0x5BA: 84, + 0x5BB: 84, + 0x5BC: 84, + 0x5BD: 84, + 0x5BF: 84, + 0x5C1: 84, + 0x5C2: 84, + 0x5C4: 84, + 0x5C5: 84, + 0x5C7: 84, + 0x610: 84, + 0x611: 84, + 0x612: 84, + 0x613: 84, + 0x614: 84, + 0x615: 84, + 0x616: 84, + 0x617: 84, + 0x618: 84, + 0x619: 84, + 0x61A: 84, + 0x61C: 84, + 0x620: 68, + 0x622: 82, + 0x623: 82, + 0x624: 82, + 0x625: 82, + 0x626: 68, + 0x627: 82, + 0x628: 68, + 0x629: 82, + 0x62A: 68, + 0x62B: 68, + 0x62C: 68, + 0x62D: 68, + 0x62E: 68, + 0x62F: 82, + 0x630: 82, + 0x631: 82, + 0x632: 82, + 0x633: 68, + 0x634: 68, + 0x635: 68, + 0x636: 68, + 0x637: 68, + 0x638: 68, + 0x639: 68, + 0x63A: 68, + 0x63B: 68, + 0x63C: 68, + 0x63D: 68, + 0x63E: 68, + 0x63F: 68, + 0x640: 67, + 0x641: 68, + 0x642: 68, + 0x643: 68, + 0x644: 68, + 0x645: 68, + 0x646: 68, + 0x647: 68, + 0x648: 82, + 0x649: 68, + 0x64A: 68, + 0x64B: 84, + 0x64C: 84, + 0x64D: 84, + 0x64E: 84, + 0x64F: 84, + 0x650: 84, + 0x651: 84, + 0x652: 84, + 0x653: 84, + 0x654: 84, + 0x655: 84, + 0x656: 84, + 0x657: 84, + 0x658: 84, + 0x659: 84, + 0x65A: 84, + 0x65B: 84, + 0x65C: 84, + 0x65D: 84, + 0x65E: 84, + 0x65F: 84, + 0x66E: 68, + 0x66F: 68, + 0x670: 84, + 0x671: 82, + 0x672: 82, + 0x673: 82, + 0x675: 82, + 0x676: 82, + 0x677: 82, + 0x678: 68, + 0x679: 68, + 0x67A: 68, + 0x67B: 68, + 0x67C: 68, + 0x67D: 68, + 0x67E: 68, + 0x67F: 68, + 0x680: 68, + 0x681: 68, + 0x682: 68, + 0x683: 68, + 0x684: 68, + 0x685: 68, + 0x686: 68, + 0x687: 68, + 0x688: 82, + 0x689: 82, + 0x68A: 82, + 0x68B: 82, + 0x68C: 82, + 0x68D: 82, + 0x68E: 82, + 0x68F: 82, + 0x690: 82, + 0x691: 82, + 0x692: 82, + 0x693: 82, + 0x694: 82, + 0x695: 82, + 0x696: 82, + 0x697: 82, + 0x698: 82, + 0x699: 82, + 0x69A: 68, + 0x69B: 68, + 0x69C: 68, + 0x69D: 68, + 0x69E: 68, + 0x69F: 68, + 0x6A0: 68, + 0x6A1: 68, + 0x6A2: 68, + 0x6A3: 68, + 0x6A4: 68, + 0x6A5: 68, + 0x6A6: 68, + 0x6A7: 68, + 0x6A8: 68, + 0x6A9: 68, + 0x6AA: 68, + 0x6AB: 68, + 0x6AC: 68, + 0x6AD: 68, + 0x6AE: 68, + 0x6AF: 68, + 0x6B0: 68, + 0x6B1: 68, + 0x6B2: 68, + 0x6B3: 68, + 0x6B4: 68, + 0x6B5: 68, + 0x6B6: 68, + 0x6B7: 68, + 0x6B8: 68, + 0x6B9: 68, + 0x6BA: 68, + 0x6BB: 68, + 0x6BC: 68, + 0x6BD: 68, + 0x6BE: 68, + 0x6BF: 68, + 0x6C0: 82, + 0x6C1: 68, + 0x6C2: 68, + 0x6C3: 82, + 0x6C4: 82, + 0x6C5: 82, + 0x6C6: 82, + 0x6C7: 82, + 0x6C8: 82, + 0x6C9: 82, + 0x6CA: 82, + 0x6CB: 82, + 0x6CC: 68, + 0x6CD: 82, + 0x6CE: 68, + 0x6CF: 82, + 0x6D0: 68, + 0x6D1: 68, + 0x6D2: 82, + 0x6D3: 82, + 0x6D5: 82, + 0x6D6: 84, + 0x6D7: 84, + 0x6D8: 84, + 0x6D9: 84, + 0x6DA: 84, + 0x6DB: 84, + 0x6DC: 84, + 0x6DF: 84, + 0x6E0: 84, + 0x6E1: 84, + 0x6E2: 84, + 0x6E3: 84, + 0x6E4: 84, + 0x6E7: 84, + 0x6E8: 84, + 0x6EA: 84, + 0x6EB: 84, + 0x6EC: 84, + 
0x6ED: 84, + 0x6EE: 82, + 0x6EF: 82, + 0x6FA: 68, + 0x6FB: 68, + 0x6FC: 68, + 0x6FF: 68, + 0x70F: 84, + 0x710: 82, + 0x711: 84, + 0x712: 68, + 0x713: 68, + 0x714: 68, + 0x715: 82, + 0x716: 82, + 0x717: 82, + 0x718: 82, + 0x719: 82, + 0x71A: 68, + 0x71B: 68, + 0x71C: 68, + 0x71D: 68, + 0x71E: 82, + 0x71F: 68, + 0x720: 68, + 0x721: 68, + 0x722: 68, + 0x723: 68, + 0x724: 68, + 0x725: 68, + 0x726: 68, + 0x727: 68, + 0x728: 82, + 0x729: 68, + 0x72A: 82, + 0x72B: 68, + 0x72C: 82, + 0x72D: 68, + 0x72E: 68, + 0x72F: 82, + 0x730: 84, + 0x731: 84, + 0x732: 84, + 0x733: 84, + 0x734: 84, + 0x735: 84, + 0x736: 84, + 0x737: 84, + 0x738: 84, + 0x739: 84, + 0x73A: 84, + 0x73B: 84, + 0x73C: 84, + 0x73D: 84, + 0x73E: 84, + 0x73F: 84, + 0x740: 84, + 0x741: 84, + 0x742: 84, + 0x743: 84, + 0x744: 84, + 0x745: 84, + 0x746: 84, + 0x747: 84, + 0x748: 84, + 0x749: 84, + 0x74A: 84, + 0x74D: 82, + 0x74E: 68, + 0x74F: 68, + 0x750: 68, + 0x751: 68, + 0x752: 68, + 0x753: 68, + 0x754: 68, + 0x755: 68, + 0x756: 68, + 0x757: 68, + 0x758: 68, + 0x759: 82, + 0x75A: 82, + 0x75B: 82, + 0x75C: 68, + 0x75D: 68, + 0x75E: 68, + 0x75F: 68, + 0x760: 68, + 0x761: 68, + 0x762: 68, + 0x763: 68, + 0x764: 68, + 0x765: 68, + 0x766: 68, + 0x767: 68, + 0x768: 68, + 0x769: 68, + 0x76A: 68, + 0x76B: 82, + 0x76C: 82, + 0x76D: 68, + 0x76E: 68, + 0x76F: 68, + 0x770: 68, + 0x771: 82, + 0x772: 68, + 0x773: 82, + 0x774: 82, + 0x775: 68, + 0x776: 68, + 0x777: 68, + 0x778: 82, + 0x779: 82, + 0x77A: 68, + 0x77B: 68, + 0x77C: 68, + 0x77D: 68, + 0x77E: 68, + 0x77F: 68, + 0x7A6: 84, + 0x7A7: 84, + 0x7A8: 84, + 0x7A9: 84, + 0x7AA: 84, + 0x7AB: 84, + 0x7AC: 84, + 0x7AD: 84, + 0x7AE: 84, + 0x7AF: 84, + 0x7B0: 84, + 0x7CA: 68, + 0x7CB: 68, + 0x7CC: 68, + 0x7CD: 68, + 0x7CE: 68, + 0x7CF: 68, + 0x7D0: 68, + 0x7D1: 68, + 0x7D2: 68, + 0x7D3: 68, + 0x7D4: 68, + 0x7D5: 68, + 0x7D6: 68, + 0x7D7: 68, + 0x7D8: 68, + 0x7D9: 68, + 0x7DA: 68, + 0x7DB: 68, + 0x7DC: 68, + 0x7DD: 68, + 0x7DE: 68, + 0x7DF: 68, + 0x7E0: 68, + 0x7E1: 68, + 0x7E2: 68, + 0x7E3: 68, + 0x7E4: 68, + 0x7E5: 68, + 0x7E6: 68, + 0x7E7: 68, + 0x7E8: 68, + 0x7E9: 68, + 0x7EA: 68, + 0x7EB: 84, + 0x7EC: 84, + 0x7ED: 84, + 0x7EE: 84, + 0x7EF: 84, + 0x7F0: 84, + 0x7F1: 84, + 0x7F2: 84, + 0x7F3: 84, + 0x7FA: 67, + 0x7FD: 84, + 0x816: 84, + 0x817: 84, + 0x818: 84, + 0x819: 84, + 0x81B: 84, + 0x81C: 84, + 0x81D: 84, + 0x81E: 84, + 0x81F: 84, + 0x820: 84, + 0x821: 84, + 0x822: 84, + 0x823: 84, + 0x825: 84, + 0x826: 84, + 0x827: 84, + 0x829: 84, + 0x82A: 84, + 0x82B: 84, + 0x82C: 84, + 0x82D: 84, + 0x840: 82, + 0x841: 68, + 0x842: 68, + 0x843: 68, + 0x844: 68, + 0x845: 68, + 0x846: 82, + 0x847: 82, + 0x848: 68, + 0x849: 82, + 0x84A: 68, + 0x84B: 68, + 0x84C: 68, + 0x84D: 68, + 0x84E: 68, + 0x84F: 68, + 0x850: 68, + 0x851: 68, + 0x852: 68, + 0x853: 68, + 0x854: 82, + 0x855: 68, + 0x856: 82, + 0x857: 82, + 0x858: 82, + 0x859: 84, + 0x85A: 84, + 0x85B: 84, + 0x860: 68, + 0x862: 68, + 0x863: 68, + 0x864: 68, + 0x865: 68, + 0x867: 82, + 0x868: 68, + 0x869: 82, + 0x86A: 82, + 0x870: 82, + 0x871: 82, + 0x872: 82, + 0x873: 82, + 0x874: 82, + 0x875: 82, + 0x876: 82, + 0x877: 82, + 0x878: 82, + 0x879: 82, + 0x87A: 82, + 0x87B: 82, + 0x87C: 82, + 0x87D: 82, + 0x87E: 82, + 0x87F: 82, + 0x880: 82, + 0x881: 82, + 0x882: 82, + 0x883: 67, + 0x884: 67, + 0x885: 67, + 0x886: 68, + 0x889: 68, + 0x88A: 68, + 0x88B: 68, + 0x88C: 68, + 0x88D: 68, + 0x88E: 82, + 0x898: 84, + 0x899: 84, + 0x89A: 84, + 0x89B: 84, + 0x89C: 84, + 0x89D: 84, + 0x89E: 84, + 0x89F: 84, + 0x8A0: 68, + 0x8A1: 68, + 0x8A2: 68, + 0x8A3: 68, + 0x8A4: 68, + 
0x8A5: 68, + 0x8A6: 68, + 0x8A7: 68, + 0x8A8: 68, + 0x8A9: 68, + 0x8AA: 82, + 0x8AB: 82, + 0x8AC: 82, + 0x8AE: 82, + 0x8AF: 68, + 0x8B0: 68, + 0x8B1: 82, + 0x8B2: 82, + 0x8B3: 68, + 0x8B4: 68, + 0x8B5: 68, + 0x8B6: 68, + 0x8B7: 68, + 0x8B8: 68, + 0x8B9: 82, + 0x8BA: 68, + 0x8BB: 68, + 0x8BC: 68, + 0x8BD: 68, + 0x8BE: 68, + 0x8BF: 68, + 0x8C0: 68, + 0x8C1: 68, + 0x8C2: 68, + 0x8C3: 68, + 0x8C4: 68, + 0x8C5: 68, + 0x8C6: 68, + 0x8C7: 68, + 0x8C8: 68, + 0x8CA: 84, + 0x8CB: 84, + 0x8CC: 84, + 0x8CD: 84, + 0x8CE: 84, + 0x8CF: 84, + 0x8D0: 84, + 0x8D1: 84, + 0x8D2: 84, + 0x8D3: 84, + 0x8D4: 84, + 0x8D5: 84, + 0x8D6: 84, + 0x8D7: 84, + 0x8D8: 84, + 0x8D9: 84, + 0x8DA: 84, + 0x8DB: 84, + 0x8DC: 84, + 0x8DD: 84, + 0x8DE: 84, + 0x8DF: 84, + 0x8E0: 84, + 0x8E1: 84, + 0x8E3: 84, + 0x8E4: 84, + 0x8E5: 84, + 0x8E6: 84, + 0x8E7: 84, + 0x8E8: 84, + 0x8E9: 84, + 0x8EA: 84, + 0x8EB: 84, + 0x8EC: 84, + 0x8ED: 84, + 0x8EE: 84, + 0x8EF: 84, + 0x8F0: 84, + 0x8F1: 84, + 0x8F2: 84, + 0x8F3: 84, + 0x8F4: 84, + 0x8F5: 84, + 0x8F6: 84, + 0x8F7: 84, + 0x8F8: 84, + 0x8F9: 84, + 0x8FA: 84, + 0x8FB: 84, + 0x8FC: 84, + 0x8FD: 84, + 0x8FE: 84, + 0x8FF: 84, + 0x900: 84, + 0x901: 84, + 0x902: 84, + 0x93A: 84, + 0x93C: 84, + 0x941: 84, + 0x942: 84, + 0x943: 84, + 0x944: 84, + 0x945: 84, + 0x946: 84, + 0x947: 84, + 0x948: 84, + 0x94D: 84, + 0x951: 84, + 0x952: 84, + 0x953: 84, + 0x954: 84, + 0x955: 84, + 0x956: 84, + 0x957: 84, + 0x962: 84, + 0x963: 84, + 0x981: 84, + 0x9BC: 84, + 0x9C1: 84, + 0x9C2: 84, + 0x9C3: 84, + 0x9C4: 84, + 0x9CD: 84, + 0x9E2: 84, + 0x9E3: 84, + 0x9FE: 84, + 0xA01: 84, + 0xA02: 84, + 0xA3C: 84, + 0xA41: 84, + 0xA42: 84, + 0xA47: 84, + 0xA48: 84, + 0xA4B: 84, + 0xA4C: 84, + 0xA4D: 84, + 0xA51: 84, + 0xA70: 84, + 0xA71: 84, + 0xA75: 84, + 0xA81: 84, + 0xA82: 84, + 0xABC: 84, + 0xAC1: 84, + 0xAC2: 84, + 0xAC3: 84, + 0xAC4: 84, + 0xAC5: 84, + 0xAC7: 84, + 0xAC8: 84, + 0xACD: 84, + 0xAE2: 84, + 0xAE3: 84, + 0xAFA: 84, + 0xAFB: 84, + 0xAFC: 84, + 0xAFD: 84, + 0xAFE: 84, + 0xAFF: 84, + 0xB01: 84, + 0xB3C: 84, + 0xB3F: 84, + 0xB41: 84, + 0xB42: 84, + 0xB43: 84, + 0xB44: 84, + 0xB4D: 84, + 0xB55: 84, + 0xB56: 84, + 0xB62: 84, + 0xB63: 84, + 0xB82: 84, + 0xBC0: 84, + 0xBCD: 84, + 0xC00: 84, + 0xC04: 84, + 0xC3C: 84, + 0xC3E: 84, + 0xC3F: 84, + 0xC40: 84, + 0xC46: 84, + 0xC47: 84, + 0xC48: 84, + 0xC4A: 84, + 0xC4B: 84, + 0xC4C: 84, + 0xC4D: 84, + 0xC55: 84, + 0xC56: 84, + 0xC62: 84, + 0xC63: 84, + 0xC81: 84, + 0xCBC: 84, + 0xCBF: 84, + 0xCC6: 84, + 0xCCC: 84, + 0xCCD: 84, + 0xCE2: 84, + 0xCE3: 84, + 0xD00: 84, + 0xD01: 84, + 0xD3B: 84, + 0xD3C: 84, + 0xD41: 84, + 0xD42: 84, + 0xD43: 84, + 0xD44: 84, + 0xD4D: 84, + 0xD62: 84, + 0xD63: 84, + 0xD81: 84, + 0xDCA: 84, + 0xDD2: 84, + 0xDD3: 84, + 0xDD4: 84, + 0xDD6: 84, + 0xE31: 84, + 0xE34: 84, + 0xE35: 84, + 0xE36: 84, + 0xE37: 84, + 0xE38: 84, + 0xE39: 84, + 0xE3A: 84, + 0xE47: 84, + 0xE48: 84, + 0xE49: 84, + 0xE4A: 84, + 0xE4B: 84, + 0xE4C: 84, + 0xE4D: 84, + 0xE4E: 84, + 0xEB1: 84, + 0xEB4: 84, + 0xEB5: 84, + 0xEB6: 84, + 0xEB7: 84, + 0xEB8: 84, + 0xEB9: 84, + 0xEBA: 84, + 0xEBB: 84, + 0xEBC: 84, + 0xEC8: 84, + 0xEC9: 84, + 0xECA: 84, + 0xECB: 84, + 0xECC: 84, + 0xECD: 84, + 0xECE: 84, + 0xF18: 84, + 0xF19: 84, + 0xF35: 84, + 0xF37: 84, + 0xF39: 84, + 0xF71: 84, + 0xF72: 84, + 0xF73: 84, + 0xF74: 84, + 0xF75: 84, + 0xF76: 84, + 0xF77: 84, + 0xF78: 84, + 0xF79: 84, + 0xF7A: 84, + 0xF7B: 84, + 0xF7C: 84, + 0xF7D: 84, + 0xF7E: 84, + 0xF80: 84, + 0xF81: 84, + 0xF82: 84, + 0xF83: 84, + 0xF84: 84, + 0xF86: 84, + 0xF87: 84, + 0xF8D: 84, + 0xF8E: 84, + 0xF8F: 84, + 
0xF90: 84, + 0xF91: 84, + 0xF92: 84, + 0xF93: 84, + 0xF94: 84, + 0xF95: 84, + 0xF96: 84, + 0xF97: 84, + 0xF99: 84, + 0xF9A: 84, + 0xF9B: 84, + 0xF9C: 84, + 0xF9D: 84, + 0xF9E: 84, + 0xF9F: 84, + 0xFA0: 84, + 0xFA1: 84, + 0xFA2: 84, + 0xFA3: 84, + 0xFA4: 84, + 0xFA5: 84, + 0xFA6: 84, + 0xFA7: 84, + 0xFA8: 84, + 0xFA9: 84, + 0xFAA: 84, + 0xFAB: 84, + 0xFAC: 84, + 0xFAD: 84, + 0xFAE: 84, + 0xFAF: 84, + 0xFB0: 84, + 0xFB1: 84, + 0xFB2: 84, + 0xFB3: 84, + 0xFB4: 84, + 0xFB5: 84, + 0xFB6: 84, + 0xFB7: 84, + 0xFB8: 84, + 0xFB9: 84, + 0xFBA: 84, + 0xFBB: 84, + 0xFBC: 84, + 0xFC6: 84, + 0x102D: 84, + 0x102E: 84, + 0x102F: 84, + 0x1030: 84, + 0x1032: 84, + 0x1033: 84, + 0x1034: 84, + 0x1035: 84, + 0x1036: 84, + 0x1037: 84, + 0x1039: 84, + 0x103A: 84, + 0x103D: 84, + 0x103E: 84, + 0x1058: 84, + 0x1059: 84, + 0x105E: 84, + 0x105F: 84, + 0x1060: 84, + 0x1071: 84, + 0x1072: 84, + 0x1073: 84, + 0x1074: 84, + 0x1082: 84, + 0x1085: 84, + 0x1086: 84, + 0x108D: 84, + 0x109D: 84, + 0x135D: 84, + 0x135E: 84, + 0x135F: 84, + 0x1712: 84, + 0x1713: 84, + 0x1714: 84, + 0x1732: 84, + 0x1733: 84, + 0x1752: 84, + 0x1753: 84, + 0x1772: 84, + 0x1773: 84, + 0x17B4: 84, + 0x17B5: 84, + 0x17B7: 84, + 0x17B8: 84, + 0x17B9: 84, + 0x17BA: 84, + 0x17BB: 84, + 0x17BC: 84, + 0x17BD: 84, + 0x17C6: 84, + 0x17C9: 84, + 0x17CA: 84, + 0x17CB: 84, + 0x17CC: 84, + 0x17CD: 84, + 0x17CE: 84, + 0x17CF: 84, + 0x17D0: 84, + 0x17D1: 84, + 0x17D2: 84, + 0x17D3: 84, + 0x17DD: 84, + 0x1807: 68, + 0x180A: 67, + 0x180B: 84, + 0x180C: 84, + 0x180D: 84, + 0x180F: 84, + 0x1820: 68, + 0x1821: 68, + 0x1822: 68, + 0x1823: 68, + 0x1824: 68, + 0x1825: 68, + 0x1826: 68, + 0x1827: 68, + 0x1828: 68, + 0x1829: 68, + 0x182A: 68, + 0x182B: 68, + 0x182C: 68, + 0x182D: 68, + 0x182E: 68, + 0x182F: 68, + 0x1830: 68, + 0x1831: 68, + 0x1832: 68, + 0x1833: 68, + 0x1834: 68, + 0x1835: 68, + 0x1836: 68, + 0x1837: 68, + 0x1838: 68, + 0x1839: 68, + 0x183A: 68, + 0x183B: 68, + 0x183C: 68, + 0x183D: 68, + 0x183E: 68, + 0x183F: 68, + 0x1840: 68, + 0x1841: 68, + 0x1842: 68, + 0x1843: 68, + 0x1844: 68, + 0x1845: 68, + 0x1846: 68, + 0x1847: 68, + 0x1848: 68, + 0x1849: 68, + 0x184A: 68, + 0x184B: 68, + 0x184C: 68, + 0x184D: 68, + 0x184E: 68, + 0x184F: 68, + 0x1850: 68, + 0x1851: 68, + 0x1852: 68, + 0x1853: 68, + 0x1854: 68, + 0x1855: 68, + 0x1856: 68, + 0x1857: 68, + 0x1858: 68, + 0x1859: 68, + 0x185A: 68, + 0x185B: 68, + 0x185C: 68, + 0x185D: 68, + 0x185E: 68, + 0x185F: 68, + 0x1860: 68, + 0x1861: 68, + 0x1862: 68, + 0x1863: 68, + 0x1864: 68, + 0x1865: 68, + 0x1866: 68, + 0x1867: 68, + 0x1868: 68, + 0x1869: 68, + 0x186A: 68, + 0x186B: 68, + 0x186C: 68, + 0x186D: 68, + 0x186E: 68, + 0x186F: 68, + 0x1870: 68, + 0x1871: 68, + 0x1872: 68, + 0x1873: 68, + 0x1874: 68, + 0x1875: 68, + 0x1876: 68, + 0x1877: 68, + 0x1878: 68, + 0x1885: 84, + 0x1886: 84, + 0x1887: 68, + 0x1888: 68, + 0x1889: 68, + 0x188A: 68, + 0x188B: 68, + 0x188C: 68, + 0x188D: 68, + 0x188E: 68, + 0x188F: 68, + 0x1890: 68, + 0x1891: 68, + 0x1892: 68, + 0x1893: 68, + 0x1894: 68, + 0x1895: 68, + 0x1896: 68, + 0x1897: 68, + 0x1898: 68, + 0x1899: 68, + 0x189A: 68, + 0x189B: 68, + 0x189C: 68, + 0x189D: 68, + 0x189E: 68, + 0x189F: 68, + 0x18A0: 68, + 0x18A1: 68, + 0x18A2: 68, + 0x18A3: 68, + 0x18A4: 68, + 0x18A5: 68, + 0x18A6: 68, + 0x18A7: 68, + 0x18A8: 68, + 0x18A9: 84, + 0x18AA: 68, + 0x1920: 84, + 0x1921: 84, + 0x1922: 84, + 0x1927: 84, + 0x1928: 84, + 0x1932: 84, + 0x1939: 84, + 0x193A: 84, + 0x193B: 84, + 0x1A17: 84, + 0x1A18: 84, + 0x1A1B: 84, + 0x1A56: 84, + 0x1A58: 84, + 0x1A59: 84, + 0x1A5A: 84, + 0x1A5B: 84, + 
0x1A5C: 84, + 0x1A5D: 84, + 0x1A5E: 84, + 0x1A60: 84, + 0x1A62: 84, + 0x1A65: 84, + 0x1A66: 84, + 0x1A67: 84, + 0x1A68: 84, + 0x1A69: 84, + 0x1A6A: 84, + 0x1A6B: 84, + 0x1A6C: 84, + 0x1A73: 84, + 0x1A74: 84, + 0x1A75: 84, + 0x1A76: 84, + 0x1A77: 84, + 0x1A78: 84, + 0x1A79: 84, + 0x1A7A: 84, + 0x1A7B: 84, + 0x1A7C: 84, + 0x1A7F: 84, + 0x1AB0: 84, + 0x1AB1: 84, + 0x1AB2: 84, + 0x1AB3: 84, + 0x1AB4: 84, + 0x1AB5: 84, + 0x1AB6: 84, + 0x1AB7: 84, + 0x1AB8: 84, + 0x1AB9: 84, + 0x1ABA: 84, + 0x1ABB: 84, + 0x1ABC: 84, + 0x1ABD: 84, + 0x1ABE: 84, + 0x1ABF: 84, + 0x1AC0: 84, + 0x1AC1: 84, + 0x1AC2: 84, + 0x1AC3: 84, + 0x1AC4: 84, + 0x1AC5: 84, + 0x1AC6: 84, + 0x1AC7: 84, + 0x1AC8: 84, + 0x1AC9: 84, + 0x1ACA: 84, + 0x1ACB: 84, + 0x1ACC: 84, + 0x1ACD: 84, + 0x1ACE: 84, + 0x1B00: 84, + 0x1B01: 84, + 0x1B02: 84, + 0x1B03: 84, + 0x1B34: 84, + 0x1B36: 84, + 0x1B37: 84, + 0x1B38: 84, + 0x1B39: 84, + 0x1B3A: 84, + 0x1B3C: 84, + 0x1B42: 84, + 0x1B6B: 84, + 0x1B6C: 84, + 0x1B6D: 84, + 0x1B6E: 84, + 0x1B6F: 84, + 0x1B70: 84, + 0x1B71: 84, + 0x1B72: 84, + 0x1B73: 84, + 0x1B80: 84, + 0x1B81: 84, + 0x1BA2: 84, + 0x1BA3: 84, + 0x1BA4: 84, + 0x1BA5: 84, + 0x1BA8: 84, + 0x1BA9: 84, + 0x1BAB: 84, + 0x1BAC: 84, + 0x1BAD: 84, + 0x1BE6: 84, + 0x1BE8: 84, + 0x1BE9: 84, + 0x1BED: 84, + 0x1BEF: 84, + 0x1BF0: 84, + 0x1BF1: 84, + 0x1C2C: 84, + 0x1C2D: 84, + 0x1C2E: 84, + 0x1C2F: 84, + 0x1C30: 84, + 0x1C31: 84, + 0x1C32: 84, + 0x1C33: 84, + 0x1C36: 84, + 0x1C37: 84, + 0x1CD0: 84, + 0x1CD1: 84, + 0x1CD2: 84, + 0x1CD4: 84, + 0x1CD5: 84, + 0x1CD6: 84, + 0x1CD7: 84, + 0x1CD8: 84, + 0x1CD9: 84, + 0x1CDA: 84, + 0x1CDB: 84, + 0x1CDC: 84, + 0x1CDD: 84, + 0x1CDE: 84, + 0x1CDF: 84, + 0x1CE0: 84, + 0x1CE2: 84, + 0x1CE3: 84, + 0x1CE4: 84, + 0x1CE5: 84, + 0x1CE6: 84, + 0x1CE7: 84, + 0x1CE8: 84, + 0x1CED: 84, + 0x1CF4: 84, + 0x1CF8: 84, + 0x1CF9: 84, + 0x1DC0: 84, + 0x1DC1: 84, + 0x1DC2: 84, + 0x1DC3: 84, + 0x1DC4: 84, + 0x1DC5: 84, + 0x1DC6: 84, + 0x1DC7: 84, + 0x1DC8: 84, + 0x1DC9: 84, + 0x1DCA: 84, + 0x1DCB: 84, + 0x1DCC: 84, + 0x1DCD: 84, + 0x1DCE: 84, + 0x1DCF: 84, + 0x1DD0: 84, + 0x1DD1: 84, + 0x1DD2: 84, + 0x1DD3: 84, + 0x1DD4: 84, + 0x1DD5: 84, + 0x1DD6: 84, + 0x1DD7: 84, + 0x1DD8: 84, + 0x1DD9: 84, + 0x1DDA: 84, + 0x1DDB: 84, + 0x1DDC: 84, + 0x1DDD: 84, + 0x1DDE: 84, + 0x1DDF: 84, + 0x1DE0: 84, + 0x1DE1: 84, + 0x1DE2: 84, + 0x1DE3: 84, + 0x1DE4: 84, + 0x1DE5: 84, + 0x1DE6: 84, + 0x1DE7: 84, + 0x1DE8: 84, + 0x1DE9: 84, + 0x1DEA: 84, + 0x1DEB: 84, + 0x1DEC: 84, + 0x1DED: 84, + 0x1DEE: 84, + 0x1DEF: 84, + 0x1DF0: 84, + 0x1DF1: 84, + 0x1DF2: 84, + 0x1DF3: 84, + 0x1DF4: 84, + 0x1DF5: 84, + 0x1DF6: 84, + 0x1DF7: 84, + 0x1DF8: 84, + 0x1DF9: 84, + 0x1DFA: 84, + 0x1DFB: 84, + 0x1DFC: 84, + 0x1DFD: 84, + 0x1DFE: 84, + 0x1DFF: 84, + 0x200B: 84, + 0x200D: 67, + 0x200E: 84, + 0x200F: 84, + 0x202A: 84, + 0x202B: 84, + 0x202C: 84, + 0x202D: 84, + 0x202E: 84, + 0x2060: 84, + 0x2061: 84, + 0x2062: 84, + 0x2063: 84, + 0x2064: 84, + 0x206A: 84, + 0x206B: 84, + 0x206C: 84, + 0x206D: 84, + 0x206E: 84, + 0x206F: 84, + 0x20D0: 84, + 0x20D1: 84, + 0x20D2: 84, + 0x20D3: 84, + 0x20D4: 84, + 0x20D5: 84, + 0x20D6: 84, + 0x20D7: 84, + 0x20D8: 84, + 0x20D9: 84, + 0x20DA: 84, + 0x20DB: 84, + 0x20DC: 84, + 0x20DD: 84, + 0x20DE: 84, + 0x20DF: 84, + 0x20E0: 84, + 0x20E1: 84, + 0x20E2: 84, + 0x20E3: 84, + 0x20E4: 84, + 0x20E5: 84, + 0x20E6: 84, + 0x20E7: 84, + 0x20E8: 84, + 0x20E9: 84, + 0x20EA: 84, + 0x20EB: 84, + 0x20EC: 84, + 0x20ED: 84, + 0x20EE: 84, + 0x20EF: 84, + 0x20F0: 84, + 0x2CEF: 84, + 0x2CF0: 84, + 0x2CF1: 84, + 0x2D7F: 84, + 0x2DE0: 84, + 0x2DE1: 84, 
+ 0x2DE2: 84, + 0x2DE3: 84, + 0x2DE4: 84, + 0x2DE5: 84, + 0x2DE6: 84, + 0x2DE7: 84, + 0x2DE8: 84, + 0x2DE9: 84, + 0x2DEA: 84, + 0x2DEB: 84, + 0x2DEC: 84, + 0x2DED: 84, + 0x2DEE: 84, + 0x2DEF: 84, + 0x2DF0: 84, + 0x2DF1: 84, + 0x2DF2: 84, + 0x2DF3: 84, + 0x2DF4: 84, + 0x2DF5: 84, + 0x2DF6: 84, + 0x2DF7: 84, + 0x2DF8: 84, + 0x2DF9: 84, + 0x2DFA: 84, + 0x2DFB: 84, + 0x2DFC: 84, + 0x2DFD: 84, + 0x2DFE: 84, + 0x2DFF: 84, + 0x302A: 84, + 0x302B: 84, + 0x302C: 84, + 0x302D: 84, + 0x3099: 84, + 0x309A: 84, + 0xA66F: 84, + 0xA670: 84, + 0xA671: 84, + 0xA672: 84, + 0xA674: 84, + 0xA675: 84, + 0xA676: 84, + 0xA677: 84, + 0xA678: 84, + 0xA679: 84, + 0xA67A: 84, + 0xA67B: 84, + 0xA67C: 84, + 0xA67D: 84, + 0xA69E: 84, + 0xA69F: 84, + 0xA6F0: 84, + 0xA6F1: 84, + 0xA802: 84, + 0xA806: 84, + 0xA80B: 84, + 0xA825: 84, + 0xA826: 84, + 0xA82C: 84, + 0xA840: 68, + 0xA841: 68, + 0xA842: 68, + 0xA843: 68, + 0xA844: 68, + 0xA845: 68, + 0xA846: 68, + 0xA847: 68, + 0xA848: 68, + 0xA849: 68, + 0xA84A: 68, + 0xA84B: 68, + 0xA84C: 68, + 0xA84D: 68, + 0xA84E: 68, + 0xA84F: 68, + 0xA850: 68, + 0xA851: 68, + 0xA852: 68, + 0xA853: 68, + 0xA854: 68, + 0xA855: 68, + 0xA856: 68, + 0xA857: 68, + 0xA858: 68, + 0xA859: 68, + 0xA85A: 68, + 0xA85B: 68, + 0xA85C: 68, + 0xA85D: 68, + 0xA85E: 68, + 0xA85F: 68, + 0xA860: 68, + 0xA861: 68, + 0xA862: 68, + 0xA863: 68, + 0xA864: 68, + 0xA865: 68, + 0xA866: 68, + 0xA867: 68, + 0xA868: 68, + 0xA869: 68, + 0xA86A: 68, + 0xA86B: 68, + 0xA86C: 68, + 0xA86D: 68, + 0xA86E: 68, + 0xA86F: 68, + 0xA870: 68, + 0xA871: 68, + 0xA872: 76, + 0xA8C4: 84, + 0xA8C5: 84, + 0xA8E0: 84, + 0xA8E1: 84, + 0xA8E2: 84, + 0xA8E3: 84, + 0xA8E4: 84, + 0xA8E5: 84, + 0xA8E6: 84, + 0xA8E7: 84, + 0xA8E8: 84, + 0xA8E9: 84, + 0xA8EA: 84, + 0xA8EB: 84, + 0xA8EC: 84, + 0xA8ED: 84, + 0xA8EE: 84, + 0xA8EF: 84, + 0xA8F0: 84, + 0xA8F1: 84, + 0xA8FF: 84, + 0xA926: 84, + 0xA927: 84, + 0xA928: 84, + 0xA929: 84, + 0xA92A: 84, + 0xA92B: 84, + 0xA92C: 84, + 0xA92D: 84, + 0xA947: 84, + 0xA948: 84, + 0xA949: 84, + 0xA94A: 84, + 0xA94B: 84, + 0xA94C: 84, + 0xA94D: 84, + 0xA94E: 84, + 0xA94F: 84, + 0xA950: 84, + 0xA951: 84, + 0xA980: 84, + 0xA981: 84, + 0xA982: 84, + 0xA9B3: 84, + 0xA9B6: 84, + 0xA9B7: 84, + 0xA9B8: 84, + 0xA9B9: 84, + 0xA9BC: 84, + 0xA9BD: 84, + 0xA9E5: 84, + 0xAA29: 84, + 0xAA2A: 84, + 0xAA2B: 84, + 0xAA2C: 84, + 0xAA2D: 84, + 0xAA2E: 84, + 0xAA31: 84, + 0xAA32: 84, + 0xAA35: 84, + 0xAA36: 84, + 0xAA43: 84, + 0xAA4C: 84, + 0xAA7C: 84, + 0xAAB0: 84, + 0xAAB2: 84, + 0xAAB3: 84, + 0xAAB4: 84, + 0xAAB7: 84, + 0xAAB8: 84, + 0xAABE: 84, + 0xAABF: 84, + 0xAAC1: 84, + 0xAAEC: 84, + 0xAAED: 84, + 0xAAF6: 84, + 0xABE5: 84, + 0xABE8: 84, + 0xABED: 84, + 0xFB1E: 84, + 0xFE00: 84, + 0xFE01: 84, + 0xFE02: 84, + 0xFE03: 84, + 0xFE04: 84, + 0xFE05: 84, + 0xFE06: 84, + 0xFE07: 84, + 0xFE08: 84, + 0xFE09: 84, + 0xFE0A: 84, + 0xFE0B: 84, + 0xFE0C: 84, + 0xFE0D: 84, + 0xFE0E: 84, + 0xFE0F: 84, + 0xFE20: 84, + 0xFE21: 84, + 0xFE22: 84, + 0xFE23: 84, + 0xFE24: 84, + 0xFE25: 84, + 0xFE26: 84, + 0xFE27: 84, + 0xFE28: 84, + 0xFE29: 84, + 0xFE2A: 84, + 0xFE2B: 84, + 0xFE2C: 84, + 0xFE2D: 84, + 0xFE2E: 84, + 0xFE2F: 84, + 0xFEFF: 84, + 0xFFF9: 84, + 0xFFFA: 84, + 0xFFFB: 84, + 0x101FD: 84, + 0x102E0: 84, + 0x10376: 84, + 0x10377: 84, + 0x10378: 84, + 0x10379: 84, + 0x1037A: 84, + 0x10A01: 84, + 0x10A02: 84, + 0x10A03: 84, + 0x10A05: 84, + 0x10A06: 84, + 0x10A0C: 84, + 0x10A0D: 84, + 0x10A0E: 84, + 0x10A0F: 84, + 0x10A38: 84, + 0x10A39: 84, + 0x10A3A: 84, + 0x10A3F: 84, + 0x10AC0: 68, + 0x10AC1: 68, + 0x10AC2: 68, + 0x10AC3: 68, + 0x10AC4: 68, + 
0x10AC5: 82, + 0x10AC7: 82, + 0x10AC9: 82, + 0x10ACA: 82, + 0x10ACD: 76, + 0x10ACE: 82, + 0x10ACF: 82, + 0x10AD0: 82, + 0x10AD1: 82, + 0x10AD2: 82, + 0x10AD3: 68, + 0x10AD4: 68, + 0x10AD5: 68, + 0x10AD6: 68, + 0x10AD7: 76, + 0x10AD8: 68, + 0x10AD9: 68, + 0x10ADA: 68, + 0x10ADB: 68, + 0x10ADC: 68, + 0x10ADD: 82, + 0x10ADE: 68, + 0x10ADF: 68, + 0x10AE0: 68, + 0x10AE1: 82, + 0x10AE4: 82, + 0x10AE5: 84, + 0x10AE6: 84, + 0x10AEB: 68, + 0x10AEC: 68, + 0x10AED: 68, + 0x10AEE: 68, + 0x10AEF: 82, + 0x10B80: 68, + 0x10B81: 82, + 0x10B82: 68, + 0x10B83: 82, + 0x10B84: 82, + 0x10B85: 82, + 0x10B86: 68, + 0x10B87: 68, + 0x10B88: 68, + 0x10B89: 82, + 0x10B8A: 68, + 0x10B8B: 68, + 0x10B8C: 82, + 0x10B8D: 68, + 0x10B8E: 82, + 0x10B8F: 82, + 0x10B90: 68, + 0x10B91: 82, + 0x10BA9: 82, + 0x10BAA: 82, + 0x10BAB: 82, + 0x10BAC: 82, + 0x10BAD: 68, + 0x10BAE: 68, + 0x10D00: 76, + 0x10D01: 68, + 0x10D02: 68, + 0x10D03: 68, + 0x10D04: 68, + 0x10D05: 68, + 0x10D06: 68, + 0x10D07: 68, + 0x10D08: 68, + 0x10D09: 68, + 0x10D0A: 68, + 0x10D0B: 68, + 0x10D0C: 68, + 0x10D0D: 68, + 0x10D0E: 68, + 0x10D0F: 68, + 0x10D10: 68, + 0x10D11: 68, + 0x10D12: 68, + 0x10D13: 68, + 0x10D14: 68, + 0x10D15: 68, + 0x10D16: 68, + 0x10D17: 68, + 0x10D18: 68, + 0x10D19: 68, + 0x10D1A: 68, + 0x10D1B: 68, + 0x10D1C: 68, + 0x10D1D: 68, + 0x10D1E: 68, + 0x10D1F: 68, + 0x10D20: 68, + 0x10D21: 68, + 0x10D22: 82, + 0x10D23: 68, + 0x10D24: 84, + 0x10D25: 84, + 0x10D26: 84, + 0x10D27: 84, + 0x10EAB: 84, + 0x10EAC: 84, + 0x10EFD: 84, + 0x10EFE: 84, + 0x10EFF: 84, + 0x10F30: 68, + 0x10F31: 68, + 0x10F32: 68, + 0x10F33: 82, + 0x10F34: 68, + 0x10F35: 68, + 0x10F36: 68, + 0x10F37: 68, + 0x10F38: 68, + 0x10F39: 68, + 0x10F3A: 68, + 0x10F3B: 68, + 0x10F3C: 68, + 0x10F3D: 68, + 0x10F3E: 68, + 0x10F3F: 68, + 0x10F40: 68, + 0x10F41: 68, + 0x10F42: 68, + 0x10F43: 68, + 0x10F44: 68, + 0x10F46: 84, + 0x10F47: 84, + 0x10F48: 84, + 0x10F49: 84, + 0x10F4A: 84, + 0x10F4B: 84, + 0x10F4C: 84, + 0x10F4D: 84, + 0x10F4E: 84, + 0x10F4F: 84, + 0x10F50: 84, + 0x10F51: 68, + 0x10F52: 68, + 0x10F53: 68, + 0x10F54: 82, + 0x10F70: 68, + 0x10F71: 68, + 0x10F72: 68, + 0x10F73: 68, + 0x10F74: 82, + 0x10F75: 82, + 0x10F76: 68, + 0x10F77: 68, + 0x10F78: 68, + 0x10F79: 68, + 0x10F7A: 68, + 0x10F7B: 68, + 0x10F7C: 68, + 0x10F7D: 68, + 0x10F7E: 68, + 0x10F7F: 68, + 0x10F80: 68, + 0x10F81: 68, + 0x10F82: 84, + 0x10F83: 84, + 0x10F84: 84, + 0x10F85: 84, + 0x10FB0: 68, + 0x10FB2: 68, + 0x10FB3: 68, + 0x10FB4: 82, + 0x10FB5: 82, + 0x10FB6: 82, + 0x10FB8: 68, + 0x10FB9: 82, + 0x10FBA: 82, + 0x10FBB: 68, + 0x10FBC: 68, + 0x10FBD: 82, + 0x10FBE: 68, + 0x10FBF: 68, + 0x10FC1: 68, + 0x10FC2: 82, + 0x10FC3: 82, + 0x10FC4: 68, + 0x10FC9: 82, + 0x10FCA: 68, + 0x10FCB: 76, + 0x11001: 84, + 0x11038: 84, + 0x11039: 84, + 0x1103A: 84, + 0x1103B: 84, + 0x1103C: 84, + 0x1103D: 84, + 0x1103E: 84, + 0x1103F: 84, + 0x11040: 84, + 0x11041: 84, + 0x11042: 84, + 0x11043: 84, + 0x11044: 84, + 0x11045: 84, + 0x11046: 84, + 0x11070: 84, + 0x11073: 84, + 0x11074: 84, + 0x1107F: 84, + 0x11080: 84, + 0x11081: 84, + 0x110B3: 84, + 0x110B4: 84, + 0x110B5: 84, + 0x110B6: 84, + 0x110B9: 84, + 0x110BA: 84, + 0x110C2: 84, + 0x11100: 84, + 0x11101: 84, + 0x11102: 84, + 0x11127: 84, + 0x11128: 84, + 0x11129: 84, + 0x1112A: 84, + 0x1112B: 84, + 0x1112D: 84, + 0x1112E: 84, + 0x1112F: 84, + 0x11130: 84, + 0x11131: 84, + 0x11132: 84, + 0x11133: 84, + 0x11134: 84, + 0x11173: 84, + 0x11180: 84, + 0x11181: 84, + 0x111B6: 84, + 0x111B7: 84, + 0x111B8: 84, + 0x111B9: 84, + 0x111BA: 84, + 0x111BB: 84, + 0x111BC: 84, + 0x111BD: 84, + 
0x111BE: 84, + 0x111C9: 84, + 0x111CA: 84, + 0x111CB: 84, + 0x111CC: 84, + 0x111CF: 84, + 0x1122F: 84, + 0x11230: 84, + 0x11231: 84, + 0x11234: 84, + 0x11236: 84, + 0x11237: 84, + 0x1123E: 84, + 0x11241: 84, + 0x112DF: 84, + 0x112E3: 84, + 0x112E4: 84, + 0x112E5: 84, + 0x112E6: 84, + 0x112E7: 84, + 0x112E8: 84, + 0x112E9: 84, + 0x112EA: 84, + 0x11300: 84, + 0x11301: 84, + 0x1133B: 84, + 0x1133C: 84, + 0x11340: 84, + 0x11366: 84, + 0x11367: 84, + 0x11368: 84, + 0x11369: 84, + 0x1136A: 84, + 0x1136B: 84, + 0x1136C: 84, + 0x11370: 84, + 0x11371: 84, + 0x11372: 84, + 0x11373: 84, + 0x11374: 84, + 0x11438: 84, + 0x11439: 84, + 0x1143A: 84, + 0x1143B: 84, + 0x1143C: 84, + 0x1143D: 84, + 0x1143E: 84, + 0x1143F: 84, + 0x11442: 84, + 0x11443: 84, + 0x11444: 84, + 0x11446: 84, + 0x1145E: 84, + 0x114B3: 84, + 0x114B4: 84, + 0x114B5: 84, + 0x114B6: 84, + 0x114B7: 84, + 0x114B8: 84, + 0x114BA: 84, + 0x114BF: 84, + 0x114C0: 84, + 0x114C2: 84, + 0x114C3: 84, + 0x115B2: 84, + 0x115B3: 84, + 0x115B4: 84, + 0x115B5: 84, + 0x115BC: 84, + 0x115BD: 84, + 0x115BF: 84, + 0x115C0: 84, + 0x115DC: 84, + 0x115DD: 84, + 0x11633: 84, + 0x11634: 84, + 0x11635: 84, + 0x11636: 84, + 0x11637: 84, + 0x11638: 84, + 0x11639: 84, + 0x1163A: 84, + 0x1163D: 84, + 0x1163F: 84, + 0x11640: 84, + 0x116AB: 84, + 0x116AD: 84, + 0x116B0: 84, + 0x116B1: 84, + 0x116B2: 84, + 0x116B3: 84, + 0x116B4: 84, + 0x116B5: 84, + 0x116B7: 84, + 0x1171D: 84, + 0x1171E: 84, + 0x1171F: 84, + 0x11722: 84, + 0x11723: 84, + 0x11724: 84, + 0x11725: 84, + 0x11727: 84, + 0x11728: 84, + 0x11729: 84, + 0x1172A: 84, + 0x1172B: 84, + 0x1182F: 84, + 0x11830: 84, + 0x11831: 84, + 0x11832: 84, + 0x11833: 84, + 0x11834: 84, + 0x11835: 84, + 0x11836: 84, + 0x11837: 84, + 0x11839: 84, + 0x1183A: 84, + 0x1193B: 84, + 0x1193C: 84, + 0x1193E: 84, + 0x11943: 84, + 0x119D4: 84, + 0x119D5: 84, + 0x119D6: 84, + 0x119D7: 84, + 0x119DA: 84, + 0x119DB: 84, + 0x119E0: 84, + 0x11A01: 84, + 0x11A02: 84, + 0x11A03: 84, + 0x11A04: 84, + 0x11A05: 84, + 0x11A06: 84, + 0x11A07: 84, + 0x11A08: 84, + 0x11A09: 84, + 0x11A0A: 84, + 0x11A33: 84, + 0x11A34: 84, + 0x11A35: 84, + 0x11A36: 84, + 0x11A37: 84, + 0x11A38: 84, + 0x11A3B: 84, + 0x11A3C: 84, + 0x11A3D: 84, + 0x11A3E: 84, + 0x11A47: 84, + 0x11A51: 84, + 0x11A52: 84, + 0x11A53: 84, + 0x11A54: 84, + 0x11A55: 84, + 0x11A56: 84, + 0x11A59: 84, + 0x11A5A: 84, + 0x11A5B: 84, + 0x11A8A: 84, + 0x11A8B: 84, + 0x11A8C: 84, + 0x11A8D: 84, + 0x11A8E: 84, + 0x11A8F: 84, + 0x11A90: 84, + 0x11A91: 84, + 0x11A92: 84, + 0x11A93: 84, + 0x11A94: 84, + 0x11A95: 84, + 0x11A96: 84, + 0x11A98: 84, + 0x11A99: 84, + 0x11C30: 84, + 0x11C31: 84, + 0x11C32: 84, + 0x11C33: 84, + 0x11C34: 84, + 0x11C35: 84, + 0x11C36: 84, + 0x11C38: 84, + 0x11C39: 84, + 0x11C3A: 84, + 0x11C3B: 84, + 0x11C3C: 84, + 0x11C3D: 84, + 0x11C3F: 84, + 0x11C92: 84, + 0x11C93: 84, + 0x11C94: 84, + 0x11C95: 84, + 0x11C96: 84, + 0x11C97: 84, + 0x11C98: 84, + 0x11C99: 84, + 0x11C9A: 84, + 0x11C9B: 84, + 0x11C9C: 84, + 0x11C9D: 84, + 0x11C9E: 84, + 0x11C9F: 84, + 0x11CA0: 84, + 0x11CA1: 84, + 0x11CA2: 84, + 0x11CA3: 84, + 0x11CA4: 84, + 0x11CA5: 84, + 0x11CA6: 84, + 0x11CA7: 84, + 0x11CAA: 84, + 0x11CAB: 84, + 0x11CAC: 84, + 0x11CAD: 84, + 0x11CAE: 84, + 0x11CAF: 84, + 0x11CB0: 84, + 0x11CB2: 84, + 0x11CB3: 84, + 0x11CB5: 84, + 0x11CB6: 84, + 0x11D31: 84, + 0x11D32: 84, + 0x11D33: 84, + 0x11D34: 84, + 0x11D35: 84, + 0x11D36: 84, + 0x11D3A: 84, + 0x11D3C: 84, + 0x11D3D: 84, + 0x11D3F: 84, + 0x11D40: 84, + 0x11D41: 84, + 0x11D42: 84, + 0x11D43: 84, + 0x11D44: 84, + 0x11D45: 84, + 0x11D47: 84, + 
0x11D90: 84, + 0x11D91: 84, + 0x11D95: 84, + 0x11D97: 84, + 0x11EF3: 84, + 0x11EF4: 84, + 0x11F00: 84, + 0x11F01: 84, + 0x11F36: 84, + 0x11F37: 84, + 0x11F38: 84, + 0x11F39: 84, + 0x11F3A: 84, + 0x11F40: 84, + 0x11F42: 84, + 0x13430: 84, + 0x13431: 84, + 0x13432: 84, + 0x13433: 84, + 0x13434: 84, + 0x13435: 84, + 0x13436: 84, + 0x13437: 84, + 0x13438: 84, + 0x13439: 84, + 0x1343A: 84, + 0x1343B: 84, + 0x1343C: 84, + 0x1343D: 84, + 0x1343E: 84, + 0x1343F: 84, + 0x13440: 84, + 0x13447: 84, + 0x13448: 84, + 0x13449: 84, + 0x1344A: 84, + 0x1344B: 84, + 0x1344C: 84, + 0x1344D: 84, + 0x1344E: 84, + 0x1344F: 84, + 0x13450: 84, + 0x13451: 84, + 0x13452: 84, + 0x13453: 84, + 0x13454: 84, + 0x13455: 84, + 0x16AF0: 84, + 0x16AF1: 84, + 0x16AF2: 84, + 0x16AF3: 84, + 0x16AF4: 84, + 0x16B30: 84, + 0x16B31: 84, + 0x16B32: 84, + 0x16B33: 84, + 0x16B34: 84, + 0x16B35: 84, + 0x16B36: 84, + 0x16F4F: 84, + 0x16F8F: 84, + 0x16F90: 84, + 0x16F91: 84, + 0x16F92: 84, + 0x16FE4: 84, + 0x1BC9D: 84, + 0x1BC9E: 84, + 0x1BCA0: 84, + 0x1BCA1: 84, + 0x1BCA2: 84, + 0x1BCA3: 84, + 0x1CF00: 84, + 0x1CF01: 84, + 0x1CF02: 84, + 0x1CF03: 84, + 0x1CF04: 84, + 0x1CF05: 84, + 0x1CF06: 84, + 0x1CF07: 84, + 0x1CF08: 84, + 0x1CF09: 84, + 0x1CF0A: 84, + 0x1CF0B: 84, + 0x1CF0C: 84, + 0x1CF0D: 84, + 0x1CF0E: 84, + 0x1CF0F: 84, + 0x1CF10: 84, + 0x1CF11: 84, + 0x1CF12: 84, + 0x1CF13: 84, + 0x1CF14: 84, + 0x1CF15: 84, + 0x1CF16: 84, + 0x1CF17: 84, + 0x1CF18: 84, + 0x1CF19: 84, + 0x1CF1A: 84, + 0x1CF1B: 84, + 0x1CF1C: 84, + 0x1CF1D: 84, + 0x1CF1E: 84, + 0x1CF1F: 84, + 0x1CF20: 84, + 0x1CF21: 84, + 0x1CF22: 84, + 0x1CF23: 84, + 0x1CF24: 84, + 0x1CF25: 84, + 0x1CF26: 84, + 0x1CF27: 84, + 0x1CF28: 84, + 0x1CF29: 84, + 0x1CF2A: 84, + 0x1CF2B: 84, + 0x1CF2C: 84, + 0x1CF2D: 84, + 0x1CF30: 84, + 0x1CF31: 84, + 0x1CF32: 84, + 0x1CF33: 84, + 0x1CF34: 84, + 0x1CF35: 84, + 0x1CF36: 84, + 0x1CF37: 84, + 0x1CF38: 84, + 0x1CF39: 84, + 0x1CF3A: 84, + 0x1CF3B: 84, + 0x1CF3C: 84, + 0x1CF3D: 84, + 0x1CF3E: 84, + 0x1CF3F: 84, + 0x1CF40: 84, + 0x1CF41: 84, + 0x1CF42: 84, + 0x1CF43: 84, + 0x1CF44: 84, + 0x1CF45: 84, + 0x1CF46: 84, + 0x1D167: 84, + 0x1D168: 84, + 0x1D169: 84, + 0x1D173: 84, + 0x1D174: 84, + 0x1D175: 84, + 0x1D176: 84, + 0x1D177: 84, + 0x1D178: 84, + 0x1D179: 84, + 0x1D17A: 84, + 0x1D17B: 84, + 0x1D17C: 84, + 0x1D17D: 84, + 0x1D17E: 84, + 0x1D17F: 84, + 0x1D180: 84, + 0x1D181: 84, + 0x1D182: 84, + 0x1D185: 84, + 0x1D186: 84, + 0x1D187: 84, + 0x1D188: 84, + 0x1D189: 84, + 0x1D18A: 84, + 0x1D18B: 84, + 0x1D1AA: 84, + 0x1D1AB: 84, + 0x1D1AC: 84, + 0x1D1AD: 84, + 0x1D242: 84, + 0x1D243: 84, + 0x1D244: 84, + 0x1DA00: 84, + 0x1DA01: 84, + 0x1DA02: 84, + 0x1DA03: 84, + 0x1DA04: 84, + 0x1DA05: 84, + 0x1DA06: 84, + 0x1DA07: 84, + 0x1DA08: 84, + 0x1DA09: 84, + 0x1DA0A: 84, + 0x1DA0B: 84, + 0x1DA0C: 84, + 0x1DA0D: 84, + 0x1DA0E: 84, + 0x1DA0F: 84, + 0x1DA10: 84, + 0x1DA11: 84, + 0x1DA12: 84, + 0x1DA13: 84, + 0x1DA14: 84, + 0x1DA15: 84, + 0x1DA16: 84, + 0x1DA17: 84, + 0x1DA18: 84, + 0x1DA19: 84, + 0x1DA1A: 84, + 0x1DA1B: 84, + 0x1DA1C: 84, + 0x1DA1D: 84, + 0x1DA1E: 84, + 0x1DA1F: 84, + 0x1DA20: 84, + 0x1DA21: 84, + 0x1DA22: 84, + 0x1DA23: 84, + 0x1DA24: 84, + 0x1DA25: 84, + 0x1DA26: 84, + 0x1DA27: 84, + 0x1DA28: 84, + 0x1DA29: 84, + 0x1DA2A: 84, + 0x1DA2B: 84, + 0x1DA2C: 84, + 0x1DA2D: 84, + 0x1DA2E: 84, + 0x1DA2F: 84, + 0x1DA30: 84, + 0x1DA31: 84, + 0x1DA32: 84, + 0x1DA33: 84, + 0x1DA34: 84, + 0x1DA35: 84, + 0x1DA36: 84, + 0x1DA3B: 84, + 0x1DA3C: 84, + 0x1DA3D: 84, + 0x1DA3E: 84, + 0x1DA3F: 84, + 0x1DA40: 84, + 0x1DA41: 84, + 0x1DA42: 84, + 0x1DA43: 84, + 
0x1DA44: 84, + 0x1DA45: 84, + 0x1DA46: 84, + 0x1DA47: 84, + 0x1DA48: 84, + 0x1DA49: 84, + 0x1DA4A: 84, + 0x1DA4B: 84, + 0x1DA4C: 84, + 0x1DA4D: 84, + 0x1DA4E: 84, + 0x1DA4F: 84, + 0x1DA50: 84, + 0x1DA51: 84, + 0x1DA52: 84, + 0x1DA53: 84, + 0x1DA54: 84, + 0x1DA55: 84, + 0x1DA56: 84, + 0x1DA57: 84, + 0x1DA58: 84, + 0x1DA59: 84, + 0x1DA5A: 84, + 0x1DA5B: 84, + 0x1DA5C: 84, + 0x1DA5D: 84, + 0x1DA5E: 84, + 0x1DA5F: 84, + 0x1DA60: 84, + 0x1DA61: 84, + 0x1DA62: 84, + 0x1DA63: 84, + 0x1DA64: 84, + 0x1DA65: 84, + 0x1DA66: 84, + 0x1DA67: 84, + 0x1DA68: 84, + 0x1DA69: 84, + 0x1DA6A: 84, + 0x1DA6B: 84, + 0x1DA6C: 84, + 0x1DA75: 84, + 0x1DA84: 84, + 0x1DA9B: 84, + 0x1DA9C: 84, + 0x1DA9D: 84, + 0x1DA9E: 84, + 0x1DA9F: 84, + 0x1DAA1: 84, + 0x1DAA2: 84, + 0x1DAA3: 84, + 0x1DAA4: 84, + 0x1DAA5: 84, + 0x1DAA6: 84, + 0x1DAA7: 84, + 0x1DAA8: 84, + 0x1DAA9: 84, + 0x1DAAA: 84, + 0x1DAAB: 84, + 0x1DAAC: 84, + 0x1DAAD: 84, + 0x1DAAE: 84, + 0x1DAAF: 84, + 0x1E000: 84, + 0x1E001: 84, + 0x1E002: 84, + 0x1E003: 84, + 0x1E004: 84, + 0x1E005: 84, + 0x1E006: 84, + 0x1E008: 84, + 0x1E009: 84, + 0x1E00A: 84, + 0x1E00B: 84, + 0x1E00C: 84, + 0x1E00D: 84, + 0x1E00E: 84, + 0x1E00F: 84, + 0x1E010: 84, + 0x1E011: 84, + 0x1E012: 84, + 0x1E013: 84, + 0x1E014: 84, + 0x1E015: 84, + 0x1E016: 84, + 0x1E017: 84, + 0x1E018: 84, + 0x1E01B: 84, + 0x1E01C: 84, + 0x1E01D: 84, + 0x1E01E: 84, + 0x1E01F: 84, + 0x1E020: 84, + 0x1E021: 84, + 0x1E023: 84, + 0x1E024: 84, + 0x1E026: 84, + 0x1E027: 84, + 0x1E028: 84, + 0x1E029: 84, + 0x1E02A: 84, + 0x1E08F: 84, + 0x1E130: 84, + 0x1E131: 84, + 0x1E132: 84, + 0x1E133: 84, + 0x1E134: 84, + 0x1E135: 84, + 0x1E136: 84, + 0x1E2AE: 84, + 0x1E2EC: 84, + 0x1E2ED: 84, + 0x1E2EE: 84, + 0x1E2EF: 84, + 0x1E4EC: 84, + 0x1E4ED: 84, + 0x1E4EE: 84, + 0x1E4EF: 84, + 0x1E8D0: 84, + 0x1E8D1: 84, + 0x1E8D2: 84, + 0x1E8D3: 84, + 0x1E8D4: 84, + 0x1E8D5: 84, + 0x1E8D6: 84, + 0x1E900: 68, + 0x1E901: 68, + 0x1E902: 68, + 0x1E903: 68, + 0x1E904: 68, + 0x1E905: 68, + 0x1E906: 68, + 0x1E907: 68, + 0x1E908: 68, + 0x1E909: 68, + 0x1E90A: 68, + 0x1E90B: 68, + 0x1E90C: 68, + 0x1E90D: 68, + 0x1E90E: 68, + 0x1E90F: 68, + 0x1E910: 68, + 0x1E911: 68, + 0x1E912: 68, + 0x1E913: 68, + 0x1E914: 68, + 0x1E915: 68, + 0x1E916: 68, + 0x1E917: 68, + 0x1E918: 68, + 0x1E919: 68, + 0x1E91A: 68, + 0x1E91B: 68, + 0x1E91C: 68, + 0x1E91D: 68, + 0x1E91E: 68, + 0x1E91F: 68, + 0x1E920: 68, + 0x1E921: 68, + 0x1E922: 68, + 0x1E923: 68, + 0x1E924: 68, + 0x1E925: 68, + 0x1E926: 68, + 0x1E927: 68, + 0x1E928: 68, + 0x1E929: 68, + 0x1E92A: 68, + 0x1E92B: 68, + 0x1E92C: 68, + 0x1E92D: 68, + 0x1E92E: 68, + 0x1E92F: 68, + 0x1E930: 68, + 0x1E931: 68, + 0x1E932: 68, + 0x1E933: 68, + 0x1E934: 68, + 0x1E935: 68, + 0x1E936: 68, + 0x1E937: 68, + 0x1E938: 68, + 0x1E939: 68, + 0x1E93A: 68, + 0x1E93B: 68, + 0x1E93C: 68, + 0x1E93D: 68, + 0x1E93E: 68, + 0x1E93F: 68, + 0x1E940: 68, + 0x1E941: 68, + 0x1E942: 68, + 0x1E943: 68, + 0x1E944: 84, + 0x1E945: 84, + 0x1E946: 84, + 0x1E947: 84, + 0x1E948: 84, + 0x1E949: 84, + 0x1E94A: 84, + 0x1E94B: 84, + 0xE0001: 84, + 0xE0020: 84, + 0xE0021: 84, + 0xE0022: 84, + 0xE0023: 84, + 0xE0024: 84, + 0xE0025: 84, + 0xE0026: 84, + 0xE0027: 84, + 0xE0028: 84, + 0xE0029: 84, + 0xE002A: 84, + 0xE002B: 84, + 0xE002C: 84, + 0xE002D: 84, + 0xE002E: 84, + 0xE002F: 84, + 0xE0030: 84, + 0xE0031: 84, + 0xE0032: 84, + 0xE0033: 84, + 0xE0034: 84, + 0xE0035: 84, + 0xE0036: 84, + 0xE0037: 84, + 0xE0038: 84, + 0xE0039: 84, + 0xE003A: 84, + 0xE003B: 84, + 0xE003C: 84, + 0xE003D: 84, + 0xE003E: 84, + 0xE003F: 84, + 0xE0040: 84, + 0xE0041: 84, + 0xE0042: 84, + 
0xE0043: 84, + 0xE0044: 84, + 0xE0045: 84, + 0xE0046: 84, + 0xE0047: 84, + 0xE0048: 84, + 0xE0049: 84, + 0xE004A: 84, + 0xE004B: 84, + 0xE004C: 84, + 0xE004D: 84, + 0xE004E: 84, + 0xE004F: 84, + 0xE0050: 84, + 0xE0051: 84, + 0xE0052: 84, + 0xE0053: 84, + 0xE0054: 84, + 0xE0055: 84, + 0xE0056: 84, + 0xE0057: 84, + 0xE0058: 84, + 0xE0059: 84, + 0xE005A: 84, + 0xE005B: 84, + 0xE005C: 84, + 0xE005D: 84, + 0xE005E: 84, + 0xE005F: 84, + 0xE0060: 84, + 0xE0061: 84, + 0xE0062: 84, + 0xE0063: 84, + 0xE0064: 84, + 0xE0065: 84, + 0xE0066: 84, + 0xE0067: 84, + 0xE0068: 84, + 0xE0069: 84, + 0xE006A: 84, + 0xE006B: 84, + 0xE006C: 84, + 0xE006D: 84, + 0xE006E: 84, + 0xE006F: 84, + 0xE0070: 84, + 0xE0071: 84, + 0xE0072: 84, + 0xE0073: 84, + 0xE0074: 84, + 0xE0075: 84, + 0xE0076: 84, + 0xE0077: 84, + 0xE0078: 84, + 0xE0079: 84, + 0xE007A: 84, + 0xE007B: 84, + 0xE007C: 84, + 0xE007D: 84, + 0xE007E: 84, + 0xE007F: 84, + 0xE0100: 84, + 0xE0101: 84, + 0xE0102: 84, + 0xE0103: 84, + 0xE0104: 84, + 0xE0105: 84, + 0xE0106: 84, + 0xE0107: 84, + 0xE0108: 84, + 0xE0109: 84, + 0xE010A: 84, + 0xE010B: 84, + 0xE010C: 84, + 0xE010D: 84, + 0xE010E: 84, + 0xE010F: 84, + 0xE0110: 84, + 0xE0111: 84, + 0xE0112: 84, + 0xE0113: 84, + 0xE0114: 84, + 0xE0115: 84, + 0xE0116: 84, + 0xE0117: 84, + 0xE0118: 84, + 0xE0119: 84, + 0xE011A: 84, + 0xE011B: 84, + 0xE011C: 84, + 0xE011D: 84, + 0xE011E: 84, + 0xE011F: 84, + 0xE0120: 84, + 0xE0121: 84, + 0xE0122: 84, + 0xE0123: 84, + 0xE0124: 84, + 0xE0125: 84, + 0xE0126: 84, + 0xE0127: 84, + 0xE0128: 84, + 0xE0129: 84, + 0xE012A: 84, + 0xE012B: 84, + 0xE012C: 84, + 0xE012D: 84, + 0xE012E: 84, + 0xE012F: 84, + 0xE0130: 84, + 0xE0131: 84, + 0xE0132: 84, + 0xE0133: 84, + 0xE0134: 84, + 0xE0135: 84, + 0xE0136: 84, + 0xE0137: 84, + 0xE0138: 84, + 0xE0139: 84, + 0xE013A: 84, + 0xE013B: 84, + 0xE013C: 84, + 0xE013D: 84, + 0xE013E: 84, + 0xE013F: 84, + 0xE0140: 84, + 0xE0141: 84, + 0xE0142: 84, + 0xE0143: 84, + 0xE0144: 84, + 0xE0145: 84, + 0xE0146: 84, + 0xE0147: 84, + 0xE0148: 84, + 0xE0149: 84, + 0xE014A: 84, + 0xE014B: 84, + 0xE014C: 84, + 0xE014D: 84, + 0xE014E: 84, + 0xE014F: 84, + 0xE0150: 84, + 0xE0151: 84, + 0xE0152: 84, + 0xE0153: 84, + 0xE0154: 84, + 0xE0155: 84, + 0xE0156: 84, + 0xE0157: 84, + 0xE0158: 84, + 0xE0159: 84, + 0xE015A: 84, + 0xE015B: 84, + 0xE015C: 84, + 0xE015D: 84, + 0xE015E: 84, + 0xE015F: 84, + 0xE0160: 84, + 0xE0161: 84, + 0xE0162: 84, + 0xE0163: 84, + 0xE0164: 84, + 0xE0165: 84, + 0xE0166: 84, + 0xE0167: 84, + 0xE0168: 84, + 0xE0169: 84, + 0xE016A: 84, + 0xE016B: 84, + 0xE016C: 84, + 0xE016D: 84, + 0xE016E: 84, + 0xE016F: 84, + 0xE0170: 84, + 0xE0171: 84, + 0xE0172: 84, + 0xE0173: 84, + 0xE0174: 84, + 0xE0175: 84, + 0xE0176: 84, + 0xE0177: 84, + 0xE0178: 84, + 0xE0179: 84, + 0xE017A: 84, + 0xE017B: 84, + 0xE017C: 84, + 0xE017D: 84, + 0xE017E: 84, + 0xE017F: 84, + 0xE0180: 84, + 0xE0181: 84, + 0xE0182: 84, + 0xE0183: 84, + 0xE0184: 84, + 0xE0185: 84, + 0xE0186: 84, + 0xE0187: 84, + 0xE0188: 84, + 0xE0189: 84, + 0xE018A: 84, + 0xE018B: 84, + 0xE018C: 84, + 0xE018D: 84, + 0xE018E: 84, + 0xE018F: 84, + 0xE0190: 84, + 0xE0191: 84, + 0xE0192: 84, + 0xE0193: 84, + 0xE0194: 84, + 0xE0195: 84, + 0xE0196: 84, + 0xE0197: 84, + 0xE0198: 84, + 0xE0199: 84, + 0xE019A: 84, + 0xE019B: 84, + 0xE019C: 84, + 0xE019D: 84, + 0xE019E: 84, + 0xE019F: 84, + 0xE01A0: 84, + 0xE01A1: 84, + 0xE01A2: 84, + 0xE01A3: 84, + 0xE01A4: 84, + 0xE01A5: 84, + 0xE01A6: 84, + 0xE01A7: 84, + 0xE01A8: 84, + 0xE01A9: 84, + 0xE01AA: 84, + 0xE01AB: 84, + 0xE01AC: 84, + 0xE01AD: 84, + 0xE01AE: 84, + 0xE01AF: 84, + 
0xE01B0: 84, + 0xE01B1: 84, + 0xE01B2: 84, + 0xE01B3: 84, + 0xE01B4: 84, + 0xE01B5: 84, + 0xE01B6: 84, + 0xE01B7: 84, + 0xE01B8: 84, + 0xE01B9: 84, + 0xE01BA: 84, + 0xE01BB: 84, + 0xE01BC: 84, + 0xE01BD: 84, + 0xE01BE: 84, + 0xE01BF: 84, + 0xE01C0: 84, + 0xE01C1: 84, + 0xE01C2: 84, + 0xE01C3: 84, + 0xE01C4: 84, + 0xE01C5: 84, + 0xE01C6: 84, + 0xE01C7: 84, + 0xE01C8: 84, + 0xE01C9: 84, + 0xE01CA: 84, + 0xE01CB: 84, + 0xE01CC: 84, + 0xE01CD: 84, + 0xE01CE: 84, + 0xE01CF: 84, + 0xE01D0: 84, + 0xE01D1: 84, + 0xE01D2: 84, + 0xE01D3: 84, + 0xE01D4: 84, + 0xE01D5: 84, + 0xE01D6: 84, + 0xE01D7: 84, + 0xE01D8: 84, + 0xE01D9: 84, + 0xE01DA: 84, + 0xE01DB: 84, + 0xE01DC: 84, + 0xE01DD: 84, + 0xE01DE: 84, + 0xE01DF: 84, + 0xE01E0: 84, + 0xE01E1: 84, + 0xE01E2: 84, + 0xE01E3: 84, + 0xE01E4: 84, + 0xE01E5: 84, + 0xE01E6: 84, + 0xE01E7: 84, + 0xE01E8: 84, + 0xE01E9: 84, + 0xE01EA: 84, + 0xE01EB: 84, + 0xE01EC: 84, + 0xE01ED: 84, + 0xE01EE: 84, + 0xE01EF: 84, +} +codepoint_classes = { + "PVALID": ( + 0x2D0000002E, + 0x300000003A, + 0x610000007B, + 0xDF000000F7, + 0xF800000100, + 0x10100000102, + 0x10300000104, + 0x10500000106, + 0x10700000108, + 0x1090000010A, + 0x10B0000010C, + 0x10D0000010E, + 0x10F00000110, + 0x11100000112, + 0x11300000114, + 0x11500000116, + 0x11700000118, + 0x1190000011A, + 0x11B0000011C, + 0x11D0000011E, + 0x11F00000120, + 0x12100000122, + 0x12300000124, + 0x12500000126, + 0x12700000128, + 0x1290000012A, + 0x12B0000012C, + 0x12D0000012E, + 0x12F00000130, + 0x13100000132, + 0x13500000136, + 0x13700000139, + 0x13A0000013B, + 0x13C0000013D, + 0x13E0000013F, + 0x14200000143, + 0x14400000145, + 0x14600000147, + 0x14800000149, + 0x14B0000014C, + 0x14D0000014E, + 0x14F00000150, + 0x15100000152, + 0x15300000154, + 0x15500000156, + 0x15700000158, + 0x1590000015A, + 0x15B0000015C, + 0x15D0000015E, + 0x15F00000160, + 0x16100000162, + 0x16300000164, + 0x16500000166, + 0x16700000168, + 0x1690000016A, + 0x16B0000016C, + 0x16D0000016E, + 0x16F00000170, + 0x17100000172, + 0x17300000174, + 0x17500000176, + 0x17700000178, + 0x17A0000017B, + 0x17C0000017D, + 0x17E0000017F, + 0x18000000181, + 0x18300000184, + 0x18500000186, + 0x18800000189, + 0x18C0000018E, + 0x19200000193, + 0x19500000196, + 0x1990000019C, + 0x19E0000019F, + 0x1A1000001A2, + 0x1A3000001A4, + 0x1A5000001A6, + 0x1A8000001A9, + 0x1AA000001AC, + 0x1AD000001AE, + 0x1B0000001B1, + 0x1B4000001B5, + 0x1B6000001B7, + 0x1B9000001BC, + 0x1BD000001C4, + 0x1CE000001CF, + 0x1D0000001D1, + 0x1D2000001D3, + 0x1D4000001D5, + 0x1D6000001D7, + 0x1D8000001D9, + 0x1DA000001DB, + 0x1DC000001DE, + 0x1DF000001E0, + 0x1E1000001E2, + 0x1E3000001E4, + 0x1E5000001E6, + 0x1E7000001E8, + 0x1E9000001EA, + 0x1EB000001EC, + 0x1ED000001EE, + 0x1EF000001F1, + 0x1F5000001F6, + 0x1F9000001FA, + 0x1FB000001FC, + 0x1FD000001FE, + 0x1FF00000200, + 0x20100000202, + 0x20300000204, + 0x20500000206, + 0x20700000208, + 0x2090000020A, + 0x20B0000020C, + 0x20D0000020E, + 0x20F00000210, + 0x21100000212, + 0x21300000214, + 0x21500000216, + 0x21700000218, + 0x2190000021A, + 0x21B0000021C, + 0x21D0000021E, + 0x21F00000220, + 0x22100000222, + 0x22300000224, + 0x22500000226, + 0x22700000228, + 0x2290000022A, + 0x22B0000022C, + 0x22D0000022E, + 0x22F00000230, + 0x23100000232, + 0x2330000023A, + 0x23C0000023D, + 0x23F00000241, + 0x24200000243, + 0x24700000248, + 0x2490000024A, + 0x24B0000024C, + 0x24D0000024E, + 0x24F000002B0, + 0x2B9000002C2, + 0x2C6000002D2, + 0x2EC000002ED, + 0x2EE000002EF, + 0x30000000340, + 0x34200000343, + 0x3460000034F, + 0x35000000370, + 0x37100000372, + 
0x37300000374, + 0x37700000378, + 0x37B0000037E, + 0x39000000391, + 0x3AC000003CF, + 0x3D7000003D8, + 0x3D9000003DA, + 0x3DB000003DC, + 0x3DD000003DE, + 0x3DF000003E0, + 0x3E1000003E2, + 0x3E3000003E4, + 0x3E5000003E6, + 0x3E7000003E8, + 0x3E9000003EA, + 0x3EB000003EC, + 0x3ED000003EE, + 0x3EF000003F0, + 0x3F3000003F4, + 0x3F8000003F9, + 0x3FB000003FD, + 0x43000000460, + 0x46100000462, + 0x46300000464, + 0x46500000466, + 0x46700000468, + 0x4690000046A, + 0x46B0000046C, + 0x46D0000046E, + 0x46F00000470, + 0x47100000472, + 0x47300000474, + 0x47500000476, + 0x47700000478, + 0x4790000047A, + 0x47B0000047C, + 0x47D0000047E, + 0x47F00000480, + 0x48100000482, + 0x48300000488, + 0x48B0000048C, + 0x48D0000048E, + 0x48F00000490, + 0x49100000492, + 0x49300000494, + 0x49500000496, + 0x49700000498, + 0x4990000049A, + 0x49B0000049C, + 0x49D0000049E, + 0x49F000004A0, + 0x4A1000004A2, + 0x4A3000004A4, + 0x4A5000004A6, + 0x4A7000004A8, + 0x4A9000004AA, + 0x4AB000004AC, + 0x4AD000004AE, + 0x4AF000004B0, + 0x4B1000004B2, + 0x4B3000004B4, + 0x4B5000004B6, + 0x4B7000004B8, + 0x4B9000004BA, + 0x4BB000004BC, + 0x4BD000004BE, + 0x4BF000004C0, + 0x4C2000004C3, + 0x4C4000004C5, + 0x4C6000004C7, + 0x4C8000004C9, + 0x4CA000004CB, + 0x4CC000004CD, + 0x4CE000004D0, + 0x4D1000004D2, + 0x4D3000004D4, + 0x4D5000004D6, + 0x4D7000004D8, + 0x4D9000004DA, + 0x4DB000004DC, + 0x4DD000004DE, + 0x4DF000004E0, + 0x4E1000004E2, + 0x4E3000004E4, + 0x4E5000004E6, + 0x4E7000004E8, + 0x4E9000004EA, + 0x4EB000004EC, + 0x4ED000004EE, + 0x4EF000004F0, + 0x4F1000004F2, + 0x4F3000004F4, + 0x4F5000004F6, + 0x4F7000004F8, + 0x4F9000004FA, + 0x4FB000004FC, + 0x4FD000004FE, + 0x4FF00000500, + 0x50100000502, + 0x50300000504, + 0x50500000506, + 0x50700000508, + 0x5090000050A, + 0x50B0000050C, + 0x50D0000050E, + 0x50F00000510, + 0x51100000512, + 0x51300000514, + 0x51500000516, + 0x51700000518, + 0x5190000051A, + 0x51B0000051C, + 0x51D0000051E, + 0x51F00000520, + 0x52100000522, + 0x52300000524, + 0x52500000526, + 0x52700000528, + 0x5290000052A, + 0x52B0000052C, + 0x52D0000052E, + 0x52F00000530, + 0x5590000055A, + 0x56000000587, + 0x58800000589, + 0x591000005BE, + 0x5BF000005C0, + 0x5C1000005C3, + 0x5C4000005C6, + 0x5C7000005C8, + 0x5D0000005EB, + 0x5EF000005F3, + 0x6100000061B, + 0x62000000640, + 0x64100000660, + 0x66E00000675, + 0x679000006D4, + 0x6D5000006DD, + 0x6DF000006E9, + 0x6EA000006F0, + 0x6FA00000700, + 0x7100000074B, + 0x74D000007B2, + 0x7C0000007F6, + 0x7FD000007FE, + 0x8000000082E, + 0x8400000085C, + 0x8600000086B, + 0x87000000888, + 0x8890000088F, + 0x898000008E2, + 0x8E300000958, + 0x96000000964, + 0x96600000970, + 0x97100000984, + 0x9850000098D, + 0x98F00000991, + 0x993000009A9, + 0x9AA000009B1, + 0x9B2000009B3, + 0x9B6000009BA, + 0x9BC000009C5, + 0x9C7000009C9, + 0x9CB000009CF, + 0x9D7000009D8, + 0x9E0000009E4, + 0x9E6000009F2, + 0x9FC000009FD, + 0x9FE000009FF, + 0xA0100000A04, + 0xA0500000A0B, + 0xA0F00000A11, + 0xA1300000A29, + 0xA2A00000A31, + 0xA3200000A33, + 0xA3500000A36, + 0xA3800000A3A, + 0xA3C00000A3D, + 0xA3E00000A43, + 0xA4700000A49, + 0xA4B00000A4E, + 0xA5100000A52, + 0xA5C00000A5D, + 0xA6600000A76, + 0xA8100000A84, + 0xA8500000A8E, + 0xA8F00000A92, + 0xA9300000AA9, + 0xAAA00000AB1, + 0xAB200000AB4, + 0xAB500000ABA, + 0xABC00000AC6, + 0xAC700000ACA, + 0xACB00000ACE, + 0xAD000000AD1, + 0xAE000000AE4, + 0xAE600000AF0, + 0xAF900000B00, + 0xB0100000B04, + 0xB0500000B0D, + 0xB0F00000B11, + 0xB1300000B29, + 0xB2A00000B31, + 0xB3200000B34, + 0xB3500000B3A, + 0xB3C00000B45, + 0xB4700000B49, + 0xB4B00000B4E, + 0xB5500000B58, + 
0xB5F00000B64, + 0xB6600000B70, + 0xB7100000B72, + 0xB8200000B84, + 0xB8500000B8B, + 0xB8E00000B91, + 0xB9200000B96, + 0xB9900000B9B, + 0xB9C00000B9D, + 0xB9E00000BA0, + 0xBA300000BA5, + 0xBA800000BAB, + 0xBAE00000BBA, + 0xBBE00000BC3, + 0xBC600000BC9, + 0xBCA00000BCE, + 0xBD000000BD1, + 0xBD700000BD8, + 0xBE600000BF0, + 0xC0000000C0D, + 0xC0E00000C11, + 0xC1200000C29, + 0xC2A00000C3A, + 0xC3C00000C45, + 0xC4600000C49, + 0xC4A00000C4E, + 0xC5500000C57, + 0xC5800000C5B, + 0xC5D00000C5E, + 0xC6000000C64, + 0xC6600000C70, + 0xC8000000C84, + 0xC8500000C8D, + 0xC8E00000C91, + 0xC9200000CA9, + 0xCAA00000CB4, + 0xCB500000CBA, + 0xCBC00000CC5, + 0xCC600000CC9, + 0xCCA00000CCE, + 0xCD500000CD7, + 0xCDD00000CDF, + 0xCE000000CE4, + 0xCE600000CF0, + 0xCF100000CF4, + 0xD0000000D0D, + 0xD0E00000D11, + 0xD1200000D45, + 0xD4600000D49, + 0xD4A00000D4F, + 0xD5400000D58, + 0xD5F00000D64, + 0xD6600000D70, + 0xD7A00000D80, + 0xD8100000D84, + 0xD8500000D97, + 0xD9A00000DB2, + 0xDB300000DBC, + 0xDBD00000DBE, + 0xDC000000DC7, + 0xDCA00000DCB, + 0xDCF00000DD5, + 0xDD600000DD7, + 0xDD800000DE0, + 0xDE600000DF0, + 0xDF200000DF4, + 0xE0100000E33, + 0xE3400000E3B, + 0xE4000000E4F, + 0xE5000000E5A, + 0xE8100000E83, + 0xE8400000E85, + 0xE8600000E8B, + 0xE8C00000EA4, + 0xEA500000EA6, + 0xEA700000EB3, + 0xEB400000EBE, + 0xEC000000EC5, + 0xEC600000EC7, + 0xEC800000ECF, + 0xED000000EDA, + 0xEDE00000EE0, + 0xF0000000F01, + 0xF0B00000F0C, + 0xF1800000F1A, + 0xF2000000F2A, + 0xF3500000F36, + 0xF3700000F38, + 0xF3900000F3A, + 0xF3E00000F43, + 0xF4400000F48, + 0xF4900000F4D, + 0xF4E00000F52, + 0xF5300000F57, + 0xF5800000F5C, + 0xF5D00000F69, + 0xF6A00000F6D, + 0xF7100000F73, + 0xF7400000F75, + 0xF7A00000F81, + 0xF8200000F85, + 0xF8600000F93, + 0xF9400000F98, + 0xF9900000F9D, + 0xF9E00000FA2, + 0xFA300000FA7, + 0xFA800000FAC, + 0xFAD00000FB9, + 0xFBA00000FBD, + 0xFC600000FC7, + 0x10000000104A, + 0x10500000109E, + 0x10D0000010FB, + 0x10FD00001100, + 0x120000001249, + 0x124A0000124E, + 0x125000001257, + 0x125800001259, + 0x125A0000125E, + 0x126000001289, + 0x128A0000128E, + 0x1290000012B1, + 0x12B2000012B6, + 0x12B8000012BF, + 0x12C0000012C1, + 0x12C2000012C6, + 0x12C8000012D7, + 0x12D800001311, + 0x131200001316, + 0x13180000135B, + 0x135D00001360, + 0x138000001390, + 0x13A0000013F6, + 0x14010000166D, + 0x166F00001680, + 0x16810000169B, + 0x16A0000016EB, + 0x16F1000016F9, + 0x170000001716, + 0x171F00001735, + 0x174000001754, + 0x17600000176D, + 0x176E00001771, + 0x177200001774, + 0x1780000017B4, + 0x17B6000017D4, + 0x17D7000017D8, + 0x17DC000017DE, + 0x17E0000017EA, + 0x18100000181A, + 0x182000001879, + 0x1880000018AB, + 0x18B0000018F6, + 0x19000000191F, + 0x19200000192C, + 0x19300000193C, + 0x19460000196E, + 0x197000001975, + 0x1980000019AC, + 0x19B0000019CA, + 0x19D0000019DA, + 0x1A0000001A1C, + 0x1A2000001A5F, + 0x1A6000001A7D, + 0x1A7F00001A8A, + 0x1A9000001A9A, + 0x1AA700001AA8, + 0x1AB000001ABE, + 0x1ABF00001ACF, + 0x1B0000001B4D, + 0x1B5000001B5A, + 0x1B6B00001B74, + 0x1B8000001BF4, + 0x1C0000001C38, + 0x1C4000001C4A, + 0x1C4D00001C7E, + 0x1CD000001CD3, + 0x1CD400001CFB, + 0x1D0000001D2C, + 0x1D2F00001D30, + 0x1D3B00001D3C, + 0x1D4E00001D4F, + 0x1D6B00001D78, + 0x1D7900001D9B, + 0x1DC000001E00, + 0x1E0100001E02, + 0x1E0300001E04, + 0x1E0500001E06, + 0x1E0700001E08, + 0x1E0900001E0A, + 0x1E0B00001E0C, + 0x1E0D00001E0E, + 0x1E0F00001E10, + 0x1E1100001E12, + 0x1E1300001E14, + 0x1E1500001E16, + 0x1E1700001E18, + 0x1E1900001E1A, + 0x1E1B00001E1C, + 0x1E1D00001E1E, + 0x1E1F00001E20, + 0x1E2100001E22, + 0x1E2300001E24, + 
0x1E2500001E26, + 0x1E2700001E28, + 0x1E2900001E2A, + 0x1E2B00001E2C, + 0x1E2D00001E2E, + 0x1E2F00001E30, + 0x1E3100001E32, + 0x1E3300001E34, + 0x1E3500001E36, + 0x1E3700001E38, + 0x1E3900001E3A, + 0x1E3B00001E3C, + 0x1E3D00001E3E, + 0x1E3F00001E40, + 0x1E4100001E42, + 0x1E4300001E44, + 0x1E4500001E46, + 0x1E4700001E48, + 0x1E4900001E4A, + 0x1E4B00001E4C, + 0x1E4D00001E4E, + 0x1E4F00001E50, + 0x1E5100001E52, + 0x1E5300001E54, + 0x1E5500001E56, + 0x1E5700001E58, + 0x1E5900001E5A, + 0x1E5B00001E5C, + 0x1E5D00001E5E, + 0x1E5F00001E60, + 0x1E6100001E62, + 0x1E6300001E64, + 0x1E6500001E66, + 0x1E6700001E68, + 0x1E6900001E6A, + 0x1E6B00001E6C, + 0x1E6D00001E6E, + 0x1E6F00001E70, + 0x1E7100001E72, + 0x1E7300001E74, + 0x1E7500001E76, + 0x1E7700001E78, + 0x1E7900001E7A, + 0x1E7B00001E7C, + 0x1E7D00001E7E, + 0x1E7F00001E80, + 0x1E8100001E82, + 0x1E8300001E84, + 0x1E8500001E86, + 0x1E8700001E88, + 0x1E8900001E8A, + 0x1E8B00001E8C, + 0x1E8D00001E8E, + 0x1E8F00001E90, + 0x1E9100001E92, + 0x1E9300001E94, + 0x1E9500001E9A, + 0x1E9C00001E9E, + 0x1E9F00001EA0, + 0x1EA100001EA2, + 0x1EA300001EA4, + 0x1EA500001EA6, + 0x1EA700001EA8, + 0x1EA900001EAA, + 0x1EAB00001EAC, + 0x1EAD00001EAE, + 0x1EAF00001EB0, + 0x1EB100001EB2, + 0x1EB300001EB4, + 0x1EB500001EB6, + 0x1EB700001EB8, + 0x1EB900001EBA, + 0x1EBB00001EBC, + 0x1EBD00001EBE, + 0x1EBF00001EC0, + 0x1EC100001EC2, + 0x1EC300001EC4, + 0x1EC500001EC6, + 0x1EC700001EC8, + 0x1EC900001ECA, + 0x1ECB00001ECC, + 0x1ECD00001ECE, + 0x1ECF00001ED0, + 0x1ED100001ED2, + 0x1ED300001ED4, + 0x1ED500001ED6, + 0x1ED700001ED8, + 0x1ED900001EDA, + 0x1EDB00001EDC, + 0x1EDD00001EDE, + 0x1EDF00001EE0, + 0x1EE100001EE2, + 0x1EE300001EE4, + 0x1EE500001EE6, + 0x1EE700001EE8, + 0x1EE900001EEA, + 0x1EEB00001EEC, + 0x1EED00001EEE, + 0x1EEF00001EF0, + 0x1EF100001EF2, + 0x1EF300001EF4, + 0x1EF500001EF6, + 0x1EF700001EF8, + 0x1EF900001EFA, + 0x1EFB00001EFC, + 0x1EFD00001EFE, + 0x1EFF00001F08, + 0x1F1000001F16, + 0x1F2000001F28, + 0x1F3000001F38, + 0x1F4000001F46, + 0x1F5000001F58, + 0x1F6000001F68, + 0x1F7000001F71, + 0x1F7200001F73, + 0x1F7400001F75, + 0x1F7600001F77, + 0x1F7800001F79, + 0x1F7A00001F7B, + 0x1F7C00001F7D, + 0x1FB000001FB2, + 0x1FB600001FB7, + 0x1FC600001FC7, + 0x1FD000001FD3, + 0x1FD600001FD8, + 0x1FE000001FE3, + 0x1FE400001FE8, + 0x1FF600001FF7, + 0x214E0000214F, + 0x218400002185, + 0x2C3000002C60, + 0x2C6100002C62, + 0x2C6500002C67, + 0x2C6800002C69, + 0x2C6A00002C6B, + 0x2C6C00002C6D, + 0x2C7100002C72, + 0x2C7300002C75, + 0x2C7600002C7C, + 0x2C8100002C82, + 0x2C8300002C84, + 0x2C8500002C86, + 0x2C8700002C88, + 0x2C8900002C8A, + 0x2C8B00002C8C, + 0x2C8D00002C8E, + 0x2C8F00002C90, + 0x2C9100002C92, + 0x2C9300002C94, + 0x2C9500002C96, + 0x2C9700002C98, + 0x2C9900002C9A, + 0x2C9B00002C9C, + 0x2C9D00002C9E, + 0x2C9F00002CA0, + 0x2CA100002CA2, + 0x2CA300002CA4, + 0x2CA500002CA6, + 0x2CA700002CA8, + 0x2CA900002CAA, + 0x2CAB00002CAC, + 0x2CAD00002CAE, + 0x2CAF00002CB0, + 0x2CB100002CB2, + 0x2CB300002CB4, + 0x2CB500002CB6, + 0x2CB700002CB8, + 0x2CB900002CBA, + 0x2CBB00002CBC, + 0x2CBD00002CBE, + 0x2CBF00002CC0, + 0x2CC100002CC2, + 0x2CC300002CC4, + 0x2CC500002CC6, + 0x2CC700002CC8, + 0x2CC900002CCA, + 0x2CCB00002CCC, + 0x2CCD00002CCE, + 0x2CCF00002CD0, + 0x2CD100002CD2, + 0x2CD300002CD4, + 0x2CD500002CD6, + 0x2CD700002CD8, + 0x2CD900002CDA, + 0x2CDB00002CDC, + 0x2CDD00002CDE, + 0x2CDF00002CE0, + 0x2CE100002CE2, + 0x2CE300002CE5, + 0x2CEC00002CED, + 0x2CEE00002CF2, + 0x2CF300002CF4, + 0x2D0000002D26, + 0x2D2700002D28, + 0x2D2D00002D2E, + 0x2D3000002D68, + 0x2D7F00002D97, + 
0x2DA000002DA7, + 0x2DA800002DAF, + 0x2DB000002DB7, + 0x2DB800002DBF, + 0x2DC000002DC7, + 0x2DC800002DCF, + 0x2DD000002DD7, + 0x2DD800002DDF, + 0x2DE000002E00, + 0x2E2F00002E30, + 0x300500003008, + 0x302A0000302E, + 0x303C0000303D, + 0x304100003097, + 0x30990000309B, + 0x309D0000309F, + 0x30A1000030FB, + 0x30FC000030FF, + 0x310500003130, + 0x31A0000031C0, + 0x31F000003200, + 0x340000004DC0, + 0x4E000000A48D, + 0xA4D00000A4FE, + 0xA5000000A60D, + 0xA6100000A62C, + 0xA6410000A642, + 0xA6430000A644, + 0xA6450000A646, + 0xA6470000A648, + 0xA6490000A64A, + 0xA64B0000A64C, + 0xA64D0000A64E, + 0xA64F0000A650, + 0xA6510000A652, + 0xA6530000A654, + 0xA6550000A656, + 0xA6570000A658, + 0xA6590000A65A, + 0xA65B0000A65C, + 0xA65D0000A65E, + 0xA65F0000A660, + 0xA6610000A662, + 0xA6630000A664, + 0xA6650000A666, + 0xA6670000A668, + 0xA6690000A66A, + 0xA66B0000A66C, + 0xA66D0000A670, + 0xA6740000A67E, + 0xA67F0000A680, + 0xA6810000A682, + 0xA6830000A684, + 0xA6850000A686, + 0xA6870000A688, + 0xA6890000A68A, + 0xA68B0000A68C, + 0xA68D0000A68E, + 0xA68F0000A690, + 0xA6910000A692, + 0xA6930000A694, + 0xA6950000A696, + 0xA6970000A698, + 0xA6990000A69A, + 0xA69B0000A69C, + 0xA69E0000A6E6, + 0xA6F00000A6F2, + 0xA7170000A720, + 0xA7230000A724, + 0xA7250000A726, + 0xA7270000A728, + 0xA7290000A72A, + 0xA72B0000A72C, + 0xA72D0000A72E, + 0xA72F0000A732, + 0xA7330000A734, + 0xA7350000A736, + 0xA7370000A738, + 0xA7390000A73A, + 0xA73B0000A73C, + 0xA73D0000A73E, + 0xA73F0000A740, + 0xA7410000A742, + 0xA7430000A744, + 0xA7450000A746, + 0xA7470000A748, + 0xA7490000A74A, + 0xA74B0000A74C, + 0xA74D0000A74E, + 0xA74F0000A750, + 0xA7510000A752, + 0xA7530000A754, + 0xA7550000A756, + 0xA7570000A758, + 0xA7590000A75A, + 0xA75B0000A75C, + 0xA75D0000A75E, + 0xA75F0000A760, + 0xA7610000A762, + 0xA7630000A764, + 0xA7650000A766, + 0xA7670000A768, + 0xA7690000A76A, + 0xA76B0000A76C, + 0xA76D0000A76E, + 0xA76F0000A770, + 0xA7710000A779, + 0xA77A0000A77B, + 0xA77C0000A77D, + 0xA77F0000A780, + 0xA7810000A782, + 0xA7830000A784, + 0xA7850000A786, + 0xA7870000A789, + 0xA78C0000A78D, + 0xA78E0000A790, + 0xA7910000A792, + 0xA7930000A796, + 0xA7970000A798, + 0xA7990000A79A, + 0xA79B0000A79C, + 0xA79D0000A79E, + 0xA79F0000A7A0, + 0xA7A10000A7A2, + 0xA7A30000A7A4, + 0xA7A50000A7A6, + 0xA7A70000A7A8, + 0xA7A90000A7AA, + 0xA7AF0000A7B0, + 0xA7B50000A7B6, + 0xA7B70000A7B8, + 0xA7B90000A7BA, + 0xA7BB0000A7BC, + 0xA7BD0000A7BE, + 0xA7BF0000A7C0, + 0xA7C10000A7C2, + 0xA7C30000A7C4, + 0xA7C80000A7C9, + 0xA7CA0000A7CB, + 0xA7D10000A7D2, + 0xA7D30000A7D4, + 0xA7D50000A7D6, + 0xA7D70000A7D8, + 0xA7D90000A7DA, + 0xA7F60000A7F8, + 0xA7FA0000A828, + 0xA82C0000A82D, + 0xA8400000A874, + 0xA8800000A8C6, + 0xA8D00000A8DA, + 0xA8E00000A8F8, + 0xA8FB0000A8FC, + 0xA8FD0000A92E, + 0xA9300000A954, + 0xA9800000A9C1, + 0xA9CF0000A9DA, + 0xA9E00000A9FF, + 0xAA000000AA37, + 0xAA400000AA4E, + 0xAA500000AA5A, + 0xAA600000AA77, + 0xAA7A0000AAC3, + 0xAADB0000AADE, + 0xAAE00000AAF0, + 0xAAF20000AAF7, + 0xAB010000AB07, + 0xAB090000AB0F, + 0xAB110000AB17, + 0xAB200000AB27, + 0xAB280000AB2F, + 0xAB300000AB5B, + 0xAB600000AB69, + 0xABC00000ABEB, + 0xABEC0000ABEE, + 0xABF00000ABFA, + 0xAC000000D7A4, + 0xFA0E0000FA10, + 0xFA110000FA12, + 0xFA130000FA15, + 0xFA1F0000FA20, + 0xFA210000FA22, + 0xFA230000FA25, + 0xFA270000FA2A, + 0xFB1E0000FB1F, + 0xFE200000FE30, + 0xFE730000FE74, + 0x100000001000C, + 0x1000D00010027, + 0x100280001003B, + 0x1003C0001003E, + 0x1003F0001004E, + 0x100500001005E, + 0x10080000100FB, + 0x101FD000101FE, + 0x102800001029D, + 0x102A0000102D1, + 0x102E0000102E1, 
+ 0x1030000010320, + 0x1032D00010341, + 0x103420001034A, + 0x103500001037B, + 0x103800001039E, + 0x103A0000103C4, + 0x103C8000103D0, + 0x104280001049E, + 0x104A0000104AA, + 0x104D8000104FC, + 0x1050000010528, + 0x1053000010564, + 0x10597000105A2, + 0x105A3000105B2, + 0x105B3000105BA, + 0x105BB000105BD, + 0x1060000010737, + 0x1074000010756, + 0x1076000010768, + 0x1078000010781, + 0x1080000010806, + 0x1080800010809, + 0x1080A00010836, + 0x1083700010839, + 0x1083C0001083D, + 0x1083F00010856, + 0x1086000010877, + 0x108800001089F, + 0x108E0000108F3, + 0x108F4000108F6, + 0x1090000010916, + 0x109200001093A, + 0x10980000109B8, + 0x109BE000109C0, + 0x10A0000010A04, + 0x10A0500010A07, + 0x10A0C00010A14, + 0x10A1500010A18, + 0x10A1900010A36, + 0x10A3800010A3B, + 0x10A3F00010A40, + 0x10A6000010A7D, + 0x10A8000010A9D, + 0x10AC000010AC8, + 0x10AC900010AE7, + 0x10B0000010B36, + 0x10B4000010B56, + 0x10B6000010B73, + 0x10B8000010B92, + 0x10C0000010C49, + 0x10CC000010CF3, + 0x10D0000010D28, + 0x10D3000010D3A, + 0x10E8000010EAA, + 0x10EAB00010EAD, + 0x10EB000010EB2, + 0x10EFD00010F1D, + 0x10F2700010F28, + 0x10F3000010F51, + 0x10F7000010F86, + 0x10FB000010FC5, + 0x10FE000010FF7, + 0x1100000011047, + 0x1106600011076, + 0x1107F000110BB, + 0x110C2000110C3, + 0x110D0000110E9, + 0x110F0000110FA, + 0x1110000011135, + 0x1113600011140, + 0x1114400011148, + 0x1115000011174, + 0x1117600011177, + 0x11180000111C5, + 0x111C9000111CD, + 0x111CE000111DB, + 0x111DC000111DD, + 0x1120000011212, + 0x1121300011238, + 0x1123E00011242, + 0x1128000011287, + 0x1128800011289, + 0x1128A0001128E, + 0x1128F0001129E, + 0x1129F000112A9, + 0x112B0000112EB, + 0x112F0000112FA, + 0x1130000011304, + 0x113050001130D, + 0x1130F00011311, + 0x1131300011329, + 0x1132A00011331, + 0x1133200011334, + 0x113350001133A, + 0x1133B00011345, + 0x1134700011349, + 0x1134B0001134E, + 0x1135000011351, + 0x1135700011358, + 0x1135D00011364, + 0x113660001136D, + 0x1137000011375, + 0x114000001144B, + 0x114500001145A, + 0x1145E00011462, + 0x11480000114C6, + 0x114C7000114C8, + 0x114D0000114DA, + 0x11580000115B6, + 0x115B8000115C1, + 0x115D8000115DE, + 0x1160000011641, + 0x1164400011645, + 0x116500001165A, + 0x11680000116B9, + 0x116C0000116CA, + 0x117000001171B, + 0x1171D0001172C, + 0x117300001173A, + 0x1174000011747, + 0x118000001183B, + 0x118C0000118EA, + 0x118FF00011907, + 0x119090001190A, + 0x1190C00011914, + 0x1191500011917, + 0x1191800011936, + 0x1193700011939, + 0x1193B00011944, + 0x119500001195A, + 0x119A0000119A8, + 0x119AA000119D8, + 0x119DA000119E2, + 0x119E3000119E5, + 0x11A0000011A3F, + 0x11A4700011A48, + 0x11A5000011A9A, + 0x11A9D00011A9E, + 0x11AB000011AF9, + 0x11C0000011C09, + 0x11C0A00011C37, + 0x11C3800011C41, + 0x11C5000011C5A, + 0x11C7200011C90, + 0x11C9200011CA8, + 0x11CA900011CB7, + 0x11D0000011D07, + 0x11D0800011D0A, + 0x11D0B00011D37, + 0x11D3A00011D3B, + 0x11D3C00011D3E, + 0x11D3F00011D48, + 0x11D5000011D5A, + 0x11D6000011D66, + 0x11D6700011D69, + 0x11D6A00011D8F, + 0x11D9000011D92, + 0x11D9300011D99, + 0x11DA000011DAA, + 0x11EE000011EF7, + 0x11F0000011F11, + 0x11F1200011F3B, + 0x11F3E00011F43, + 0x11F5000011F5A, + 0x11FB000011FB1, + 0x120000001239A, + 0x1248000012544, + 0x12F9000012FF1, + 0x1300000013430, + 0x1344000013456, + 0x1440000014647, + 0x1680000016A39, + 0x16A4000016A5F, + 0x16A6000016A6A, + 0x16A7000016ABF, + 0x16AC000016ACA, + 0x16AD000016AEE, + 0x16AF000016AF5, + 0x16B0000016B37, + 0x16B4000016B44, + 0x16B5000016B5A, + 0x16B6300016B78, + 0x16B7D00016B90, + 0x16E6000016E80, + 0x16F0000016F4B, + 0x16F4F00016F88, + 0x16F8F00016FA0, + 
0x16FE000016FE2, + 0x16FE300016FE5, + 0x16FF000016FF2, + 0x17000000187F8, + 0x1880000018CD6, + 0x18D0000018D09, + 0x1AFF00001AFF4, + 0x1AFF50001AFFC, + 0x1AFFD0001AFFF, + 0x1B0000001B123, + 0x1B1320001B133, + 0x1B1500001B153, + 0x1B1550001B156, + 0x1B1640001B168, + 0x1B1700001B2FC, + 0x1BC000001BC6B, + 0x1BC700001BC7D, + 0x1BC800001BC89, + 0x1BC900001BC9A, + 0x1BC9D0001BC9F, + 0x1CF000001CF2E, + 0x1CF300001CF47, + 0x1DA000001DA37, + 0x1DA3B0001DA6D, + 0x1DA750001DA76, + 0x1DA840001DA85, + 0x1DA9B0001DAA0, + 0x1DAA10001DAB0, + 0x1DF000001DF1F, + 0x1DF250001DF2B, + 0x1E0000001E007, + 0x1E0080001E019, + 0x1E01B0001E022, + 0x1E0230001E025, + 0x1E0260001E02B, + 0x1E08F0001E090, + 0x1E1000001E12D, + 0x1E1300001E13E, + 0x1E1400001E14A, + 0x1E14E0001E14F, + 0x1E2900001E2AF, + 0x1E2C00001E2FA, + 0x1E4D00001E4FA, + 0x1E7E00001E7E7, + 0x1E7E80001E7EC, + 0x1E7ED0001E7EF, + 0x1E7F00001E7FF, + 0x1E8000001E8C5, + 0x1E8D00001E8D7, + 0x1E9220001E94C, + 0x1E9500001E95A, + 0x200000002A6E0, + 0x2A7000002B73A, + 0x2B7400002B81E, + 0x2B8200002CEA2, + 0x2CEB00002EBE1, + 0x2EBF00002EE5E, + 0x300000003134B, + 0x31350000323B0, + ), + "CONTEXTJ": (0x200C0000200E,), + "CONTEXTO": ( + 0xB7000000B8, + 0x37500000376, + 0x5F3000005F5, + 0x6600000066A, + 0x6F0000006FA, + 0x30FB000030FC, + ), +} diff --git a/venv/Lib/site-packages/idna/intranges.py b/venv/Lib/site-packages/idna/intranges.py new file mode 100644 index 00000000..7bfaa8d8 --- /dev/null +++ b/venv/Lib/site-packages/idna/intranges.py @@ -0,0 +1,57 @@ +""" +Given a list of integers, made up of (hopefully) a small number of long runs +of consecutive integers, compute a representation of the form +((start1, end1), (start2, end2) ...). Then answer the question "was x present +in the original list?" in time O(log(# runs)). +""" + +import bisect +from typing import List, Tuple + + +def intranges_from_list(list_: List[int]) -> Tuple[int, ...]: + """Represent a list of integers as a sequence of ranges: + ((start_0, end_0), (start_1, end_1), ...), such that the original + integers are exactly those x such that start_i <= x < end_i for some i. + + Ranges are encoded as single integers (start << 32 | end), not as tuples. 
+ """ + + sorted_list = sorted(list_) + ranges = [] + last_write = -1 + for i in range(len(sorted_list)): + if i + 1 < len(sorted_list): + if sorted_list[i] == sorted_list[i + 1] - 1: + continue + current_range = sorted_list[last_write + 1 : i + 1] + ranges.append(_encode_range(current_range[0], current_range[-1] + 1)) + last_write = i + + return tuple(ranges) + + +def _encode_range(start: int, end: int) -> int: + return (start << 32) | end + + +def _decode_range(r: int) -> Tuple[int, int]: + return (r >> 32), (r & ((1 << 32) - 1)) + + +def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool: + """Determine if `int_` falls into one of the ranges in `ranges`.""" + tuple_ = _encode_range(int_, 0) + pos = bisect.bisect_left(ranges, tuple_) + # we could be immediately ahead of a tuple (start, end) + # with start < int_ <= end + if pos > 0: + left, right = _decode_range(ranges[pos - 1]) + if left <= int_ < right: + return True + # or we could be immediately behind a tuple (int_, end) + if pos < len(ranges): + left, _ = _decode_range(ranges[pos]) + if left == int_: + return True + return False diff --git a/venv/Lib/site-packages/idna/package_data.py b/venv/Lib/site-packages/idna/package_data.py new file mode 100644 index 00000000..514ff7e2 --- /dev/null +++ b/venv/Lib/site-packages/idna/package_data.py @@ -0,0 +1 @@ +__version__ = "3.10" diff --git a/venv/Lib/site-packages/idna/py.typed b/venv/Lib/site-packages/idna/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/idna/uts46data.py b/venv/Lib/site-packages/idna/uts46data.py new file mode 100644 index 00000000..eb894327 --- /dev/null +++ b/venv/Lib/site-packages/idna/uts46data.py @@ -0,0 +1,8681 @@ +# This file is automatically generated by tools/idna-data +# vim: set fileencoding=utf-8 : + +from typing import List, Tuple, Union + +"""IDNA Mapping Table from UTS46.""" + + +__version__ = "15.1.0" + + +def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x0, "3"), + (0x1, "3"), + (0x2, "3"), + (0x3, "3"), + (0x4, "3"), + (0x5, "3"), + (0x6, "3"), + (0x7, "3"), + (0x8, "3"), + (0x9, "3"), + (0xA, "3"), + (0xB, "3"), + (0xC, "3"), + (0xD, "3"), + (0xE, "3"), + (0xF, "3"), + (0x10, "3"), + (0x11, "3"), + (0x12, "3"), + (0x13, "3"), + (0x14, "3"), + (0x15, "3"), + (0x16, "3"), + (0x17, "3"), + (0x18, "3"), + (0x19, "3"), + (0x1A, "3"), + (0x1B, "3"), + (0x1C, "3"), + (0x1D, "3"), + (0x1E, "3"), + (0x1F, "3"), + (0x20, "3"), + (0x21, "3"), + (0x22, "3"), + (0x23, "3"), + (0x24, "3"), + (0x25, "3"), + (0x26, "3"), + (0x27, "3"), + (0x28, "3"), + (0x29, "3"), + (0x2A, "3"), + (0x2B, "3"), + (0x2C, "3"), + (0x2D, "V"), + (0x2E, "V"), + (0x2F, "3"), + (0x30, "V"), + (0x31, "V"), + (0x32, "V"), + (0x33, "V"), + (0x34, "V"), + (0x35, "V"), + (0x36, "V"), + (0x37, "V"), + (0x38, "V"), + (0x39, "V"), + (0x3A, "3"), + (0x3B, "3"), + (0x3C, "3"), + (0x3D, "3"), + (0x3E, "3"), + (0x3F, "3"), + (0x40, "3"), + (0x41, "M", "a"), + (0x42, "M", "b"), + (0x43, "M", "c"), + (0x44, "M", "d"), + (0x45, "M", "e"), + (0x46, "M", "f"), + (0x47, "M", "g"), + (0x48, "M", "h"), + (0x49, "M", "i"), + (0x4A, "M", "j"), + (0x4B, "M", "k"), + (0x4C, "M", "l"), + (0x4D, "M", "m"), + (0x4E, "M", "n"), + (0x4F, "M", "o"), + (0x50, "M", "p"), + (0x51, "M", "q"), + (0x52, "M", "r"), + (0x53, "M", "s"), + (0x54, "M", "t"), + (0x55, "M", "u"), + (0x56, "M", "v"), + (0x57, "M", "w"), + (0x58, "M", "x"), + (0x59, "M", "y"), + (0x5A, "M", "z"), + (0x5B, "3"), + (0x5C, "3"), + (0x5D, "3"), + (0x5E, "3"), + (0x5F, 
"3"), + (0x60, "3"), + (0x61, "V"), + (0x62, "V"), + (0x63, "V"), + ] + + +def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x64, "V"), + (0x65, "V"), + (0x66, "V"), + (0x67, "V"), + (0x68, "V"), + (0x69, "V"), + (0x6A, "V"), + (0x6B, "V"), + (0x6C, "V"), + (0x6D, "V"), + (0x6E, "V"), + (0x6F, "V"), + (0x70, "V"), + (0x71, "V"), + (0x72, "V"), + (0x73, "V"), + (0x74, "V"), + (0x75, "V"), + (0x76, "V"), + (0x77, "V"), + (0x78, "V"), + (0x79, "V"), + (0x7A, "V"), + (0x7B, "3"), + (0x7C, "3"), + (0x7D, "3"), + (0x7E, "3"), + (0x7F, "3"), + (0x80, "X"), + (0x81, "X"), + (0x82, "X"), + (0x83, "X"), + (0x84, "X"), + (0x85, "X"), + (0x86, "X"), + (0x87, "X"), + (0x88, "X"), + (0x89, "X"), + (0x8A, "X"), + (0x8B, "X"), + (0x8C, "X"), + (0x8D, "X"), + (0x8E, "X"), + (0x8F, "X"), + (0x90, "X"), + (0x91, "X"), + (0x92, "X"), + (0x93, "X"), + (0x94, "X"), + (0x95, "X"), + (0x96, "X"), + (0x97, "X"), + (0x98, "X"), + (0x99, "X"), + (0x9A, "X"), + (0x9B, "X"), + (0x9C, "X"), + (0x9D, "X"), + (0x9E, "X"), + (0x9F, "X"), + (0xA0, "3", " "), + (0xA1, "V"), + (0xA2, "V"), + (0xA3, "V"), + (0xA4, "V"), + (0xA5, "V"), + (0xA6, "V"), + (0xA7, "V"), + (0xA8, "3", " ̈"), + (0xA9, "V"), + (0xAA, "M", "a"), + (0xAB, "V"), + (0xAC, "V"), + (0xAD, "I"), + (0xAE, "V"), + (0xAF, "3", " ̄"), + (0xB0, "V"), + (0xB1, "V"), + (0xB2, "M", "2"), + (0xB3, "M", "3"), + (0xB4, "3", " ́"), + (0xB5, "M", "μ"), + (0xB6, "V"), + (0xB7, "V"), + (0xB8, "3", " ̧"), + (0xB9, "M", "1"), + (0xBA, "M", "o"), + (0xBB, "V"), + (0xBC, "M", "1⁄4"), + (0xBD, "M", "1⁄2"), + (0xBE, "M", "3⁄4"), + (0xBF, "V"), + (0xC0, "M", "à"), + (0xC1, "M", "á"), + (0xC2, "M", "â"), + (0xC3, "M", "ã"), + (0xC4, "M", "ä"), + (0xC5, "M", "å"), + (0xC6, "M", "æ"), + (0xC7, "M", "ç"), + ] + + +def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xC8, "M", "è"), + (0xC9, "M", "é"), + (0xCA, "M", "ê"), + (0xCB, "M", "ë"), + (0xCC, "M", "ì"), + (0xCD, "M", "í"), + (0xCE, "M", "î"), + (0xCF, "M", "ï"), + (0xD0, "M", "ð"), + (0xD1, "M", "ñ"), + (0xD2, "M", "ò"), + (0xD3, "M", "ó"), + (0xD4, "M", "ô"), + (0xD5, "M", "õ"), + (0xD6, "M", "ö"), + (0xD7, "V"), + (0xD8, "M", "ø"), + (0xD9, "M", "ù"), + (0xDA, "M", "ú"), + (0xDB, "M", "û"), + (0xDC, "M", "ü"), + (0xDD, "M", "ý"), + (0xDE, "M", "þ"), + (0xDF, "D", "ss"), + (0xE0, "V"), + (0xE1, "V"), + (0xE2, "V"), + (0xE3, "V"), + (0xE4, "V"), + (0xE5, "V"), + (0xE6, "V"), + (0xE7, "V"), + (0xE8, "V"), + (0xE9, "V"), + (0xEA, "V"), + (0xEB, "V"), + (0xEC, "V"), + (0xED, "V"), + (0xEE, "V"), + (0xEF, "V"), + (0xF0, "V"), + (0xF1, "V"), + (0xF2, "V"), + (0xF3, "V"), + (0xF4, "V"), + (0xF5, "V"), + (0xF6, "V"), + (0xF7, "V"), + (0xF8, "V"), + (0xF9, "V"), + (0xFA, "V"), + (0xFB, "V"), + (0xFC, "V"), + (0xFD, "V"), + (0xFE, "V"), + (0xFF, "V"), + (0x100, "M", "ā"), + (0x101, "V"), + (0x102, "M", "ă"), + (0x103, "V"), + (0x104, "M", "ą"), + (0x105, "V"), + (0x106, "M", "ć"), + (0x107, "V"), + (0x108, "M", "ĉ"), + (0x109, "V"), + (0x10A, "M", "ċ"), + (0x10B, "V"), + (0x10C, "M", "č"), + (0x10D, "V"), + (0x10E, "M", "ď"), + (0x10F, "V"), + (0x110, "M", "đ"), + (0x111, "V"), + (0x112, "M", "ē"), + (0x113, "V"), + (0x114, "M", "ĕ"), + (0x115, "V"), + (0x116, "M", "ė"), + (0x117, "V"), + (0x118, "M", "ę"), + (0x119, "V"), + (0x11A, "M", "ě"), + (0x11B, "V"), + (0x11C, "M", "ĝ"), + (0x11D, "V"), + (0x11E, "M", "ğ"), + (0x11F, "V"), + (0x120, "M", "ġ"), + (0x121, "V"), + (0x122, "M", "ģ"), + (0x123, "V"), + (0x124, "M", "ĥ"), + (0x125, "V"), + (0x126, "M", "ħ"), + (0x127, 
"V"), + (0x128, "M", "ĩ"), + (0x129, "V"), + (0x12A, "M", "ī"), + (0x12B, "V"), + ] + + +def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x12C, "M", "ĭ"), + (0x12D, "V"), + (0x12E, "M", "į"), + (0x12F, "V"), + (0x130, "M", "i̇"), + (0x131, "V"), + (0x132, "M", "ij"), + (0x134, "M", "ĵ"), + (0x135, "V"), + (0x136, "M", "ķ"), + (0x137, "V"), + (0x139, "M", "ĺ"), + (0x13A, "V"), + (0x13B, "M", "ļ"), + (0x13C, "V"), + (0x13D, "M", "ľ"), + (0x13E, "V"), + (0x13F, "M", "l·"), + (0x141, "M", "ł"), + (0x142, "V"), + (0x143, "M", "ń"), + (0x144, "V"), + (0x145, "M", "ņ"), + (0x146, "V"), + (0x147, "M", "ň"), + (0x148, "V"), + (0x149, "M", "ʼn"), + (0x14A, "M", "ŋ"), + (0x14B, "V"), + (0x14C, "M", "ō"), + (0x14D, "V"), + (0x14E, "M", "ŏ"), + (0x14F, "V"), + (0x150, "M", "ő"), + (0x151, "V"), + (0x152, "M", "œ"), + (0x153, "V"), + (0x154, "M", "ŕ"), + (0x155, "V"), + (0x156, "M", "ŗ"), + (0x157, "V"), + (0x158, "M", "ř"), + (0x159, "V"), + (0x15A, "M", "ś"), + (0x15B, "V"), + (0x15C, "M", "ŝ"), + (0x15D, "V"), + (0x15E, "M", "ş"), + (0x15F, "V"), + (0x160, "M", "š"), + (0x161, "V"), + (0x162, "M", "ţ"), + (0x163, "V"), + (0x164, "M", "ť"), + (0x165, "V"), + (0x166, "M", "ŧ"), + (0x167, "V"), + (0x168, "M", "ũ"), + (0x169, "V"), + (0x16A, "M", "ū"), + (0x16B, "V"), + (0x16C, "M", "ŭ"), + (0x16D, "V"), + (0x16E, "M", "ů"), + (0x16F, "V"), + (0x170, "M", "ű"), + (0x171, "V"), + (0x172, "M", "ų"), + (0x173, "V"), + (0x174, "M", "ŵ"), + (0x175, "V"), + (0x176, "M", "ŷ"), + (0x177, "V"), + (0x178, "M", "ÿ"), + (0x179, "M", "ź"), + (0x17A, "V"), + (0x17B, "M", "ż"), + (0x17C, "V"), + (0x17D, "M", "ž"), + (0x17E, "V"), + (0x17F, "M", "s"), + (0x180, "V"), + (0x181, "M", "ɓ"), + (0x182, "M", "ƃ"), + (0x183, "V"), + (0x184, "M", "ƅ"), + (0x185, "V"), + (0x186, "M", "ɔ"), + (0x187, "M", "ƈ"), + (0x188, "V"), + (0x189, "M", "ɖ"), + (0x18A, "M", "ɗ"), + (0x18B, "M", "ƌ"), + (0x18C, "V"), + (0x18E, "M", "ǝ"), + (0x18F, "M", "ə"), + (0x190, "M", "ɛ"), + (0x191, "M", "ƒ"), + (0x192, "V"), + (0x193, "M", "ɠ"), + ] + + +def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x194, "M", "ɣ"), + (0x195, "V"), + (0x196, "M", "ɩ"), + (0x197, "M", "ɨ"), + (0x198, "M", "ƙ"), + (0x199, "V"), + (0x19C, "M", "ɯ"), + (0x19D, "M", "ɲ"), + (0x19E, "V"), + (0x19F, "M", "ɵ"), + (0x1A0, "M", "ơ"), + (0x1A1, "V"), + (0x1A2, "M", "ƣ"), + (0x1A3, "V"), + (0x1A4, "M", "ƥ"), + (0x1A5, "V"), + (0x1A6, "M", "ʀ"), + (0x1A7, "M", "ƨ"), + (0x1A8, "V"), + (0x1A9, "M", "ʃ"), + (0x1AA, "V"), + (0x1AC, "M", "ƭ"), + (0x1AD, "V"), + (0x1AE, "M", "ʈ"), + (0x1AF, "M", "ư"), + (0x1B0, "V"), + (0x1B1, "M", "ʊ"), + (0x1B2, "M", "ʋ"), + (0x1B3, "M", "ƴ"), + (0x1B4, "V"), + (0x1B5, "M", "ƶ"), + (0x1B6, "V"), + (0x1B7, "M", "ʒ"), + (0x1B8, "M", "ƹ"), + (0x1B9, "V"), + (0x1BC, "M", "ƽ"), + (0x1BD, "V"), + (0x1C4, "M", "dž"), + (0x1C7, "M", "lj"), + (0x1CA, "M", "nj"), + (0x1CD, "M", "ǎ"), + (0x1CE, "V"), + (0x1CF, "M", "ǐ"), + (0x1D0, "V"), + (0x1D1, "M", "ǒ"), + (0x1D2, "V"), + (0x1D3, "M", "ǔ"), + (0x1D4, "V"), + (0x1D5, "M", "ǖ"), + (0x1D6, "V"), + (0x1D7, "M", "ǘ"), + (0x1D8, "V"), + (0x1D9, "M", "ǚ"), + (0x1DA, "V"), + (0x1DB, "M", "ǜ"), + (0x1DC, "V"), + (0x1DE, "M", "ǟ"), + (0x1DF, "V"), + (0x1E0, "M", "ǡ"), + (0x1E1, "V"), + (0x1E2, "M", "ǣ"), + (0x1E3, "V"), + (0x1E4, "M", "ǥ"), + (0x1E5, "V"), + (0x1E6, "M", "ǧ"), + (0x1E7, "V"), + (0x1E8, "M", "ǩ"), + (0x1E9, "V"), + (0x1EA, "M", "ǫ"), + (0x1EB, "V"), + (0x1EC, "M", "ǭ"), + (0x1ED, "V"), + (0x1EE, "M", "ǯ"), + (0x1EF, "V"), + (0x1F1, 
"M", "dz"), + (0x1F4, "M", "ǵ"), + (0x1F5, "V"), + (0x1F6, "M", "ƕ"), + (0x1F7, "M", "ƿ"), + (0x1F8, "M", "ǹ"), + (0x1F9, "V"), + (0x1FA, "M", "ǻ"), + (0x1FB, "V"), + (0x1FC, "M", "ǽ"), + (0x1FD, "V"), + (0x1FE, "M", "ǿ"), + (0x1FF, "V"), + (0x200, "M", "ȁ"), + (0x201, "V"), + (0x202, "M", "ȃ"), + (0x203, "V"), + (0x204, "M", "ȅ"), + (0x205, "V"), + (0x206, "M", "ȇ"), + (0x207, "V"), + (0x208, "M", "ȉ"), + (0x209, "V"), + (0x20A, "M", "ȋ"), + (0x20B, "V"), + (0x20C, "M", "ȍ"), + ] + + +def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x20D, "V"), + (0x20E, "M", "ȏ"), + (0x20F, "V"), + (0x210, "M", "ȑ"), + (0x211, "V"), + (0x212, "M", "ȓ"), + (0x213, "V"), + (0x214, "M", "ȕ"), + (0x215, "V"), + (0x216, "M", "ȗ"), + (0x217, "V"), + (0x218, "M", "ș"), + (0x219, "V"), + (0x21A, "M", "ț"), + (0x21B, "V"), + (0x21C, "M", "ȝ"), + (0x21D, "V"), + (0x21E, "M", "ȟ"), + (0x21F, "V"), + (0x220, "M", "ƞ"), + (0x221, "V"), + (0x222, "M", "ȣ"), + (0x223, "V"), + (0x224, "M", "ȥ"), + (0x225, "V"), + (0x226, "M", "ȧ"), + (0x227, "V"), + (0x228, "M", "ȩ"), + (0x229, "V"), + (0x22A, "M", "ȫ"), + (0x22B, "V"), + (0x22C, "M", "ȭ"), + (0x22D, "V"), + (0x22E, "M", "ȯ"), + (0x22F, "V"), + (0x230, "M", "ȱ"), + (0x231, "V"), + (0x232, "M", "ȳ"), + (0x233, "V"), + (0x23A, "M", "ⱥ"), + (0x23B, "M", "ȼ"), + (0x23C, "V"), + (0x23D, "M", "ƚ"), + (0x23E, "M", "ⱦ"), + (0x23F, "V"), + (0x241, "M", "ɂ"), + (0x242, "V"), + (0x243, "M", "ƀ"), + (0x244, "M", "ʉ"), + (0x245, "M", "ʌ"), + (0x246, "M", "ɇ"), + (0x247, "V"), + (0x248, "M", "ɉ"), + (0x249, "V"), + (0x24A, "M", "ɋ"), + (0x24B, "V"), + (0x24C, "M", "ɍ"), + (0x24D, "V"), + (0x24E, "M", "ɏ"), + (0x24F, "V"), + (0x2B0, "M", "h"), + (0x2B1, "M", "ɦ"), + (0x2B2, "M", "j"), + (0x2B3, "M", "r"), + (0x2B4, "M", "ɹ"), + (0x2B5, "M", "ɻ"), + (0x2B6, "M", "ʁ"), + (0x2B7, "M", "w"), + (0x2B8, "M", "y"), + (0x2B9, "V"), + (0x2D8, "3", " ̆"), + (0x2D9, "3", " ̇"), + (0x2DA, "3", " ̊"), + (0x2DB, "3", " ̨"), + (0x2DC, "3", " ̃"), + (0x2DD, "3", " ̋"), + (0x2DE, "V"), + (0x2E0, "M", "ɣ"), + (0x2E1, "M", "l"), + (0x2E2, "M", "s"), + (0x2E3, "M", "x"), + (0x2E4, "M", "ʕ"), + (0x2E5, "V"), + (0x340, "M", "̀"), + (0x341, "M", "́"), + (0x342, "V"), + (0x343, "M", "̓"), + (0x344, "M", "̈́"), + (0x345, "M", "ι"), + (0x346, "V"), + (0x34F, "I"), + (0x350, "V"), + (0x370, "M", "ͱ"), + (0x371, "V"), + (0x372, "M", "ͳ"), + (0x373, "V"), + (0x374, "M", "ʹ"), + (0x375, "V"), + (0x376, "M", "ͷ"), + (0x377, "V"), + ] + + +def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x378, "X"), + (0x37A, "3", " ι"), + (0x37B, "V"), + (0x37E, "3", ";"), + (0x37F, "M", "ϳ"), + (0x380, "X"), + (0x384, "3", " ́"), + (0x385, "3", " ̈́"), + (0x386, "M", "ά"), + (0x387, "M", "·"), + (0x388, "M", "έ"), + (0x389, "M", "ή"), + (0x38A, "M", "ί"), + (0x38B, "X"), + (0x38C, "M", "ό"), + (0x38D, "X"), + (0x38E, "M", "ύ"), + (0x38F, "M", "ώ"), + (0x390, "V"), + (0x391, "M", "α"), + (0x392, "M", "β"), + (0x393, "M", "γ"), + (0x394, "M", "δ"), + (0x395, "M", "ε"), + (0x396, "M", "ζ"), + (0x397, "M", "η"), + (0x398, "M", "θ"), + (0x399, "M", "ι"), + (0x39A, "M", "κ"), + (0x39B, "M", "λ"), + (0x39C, "M", "μ"), + (0x39D, "M", "ν"), + (0x39E, "M", "ξ"), + (0x39F, "M", "ο"), + (0x3A0, "M", "π"), + (0x3A1, "M", "ρ"), + (0x3A2, "X"), + (0x3A3, "M", "σ"), + (0x3A4, "M", "τ"), + (0x3A5, "M", "υ"), + (0x3A6, "M", "φ"), + (0x3A7, "M", "χ"), + (0x3A8, "M", "ψ"), + (0x3A9, "M", "ω"), + (0x3AA, "M", "ϊ"), + (0x3AB, "M", "ϋ"), + (0x3AC, "V"), + (0x3C2, "D", "σ"), + (0x3C3, 
"V"), + (0x3CF, "M", "ϗ"), + (0x3D0, "M", "β"), + (0x3D1, "M", "θ"), + (0x3D2, "M", "υ"), + (0x3D3, "M", "ύ"), + (0x3D4, "M", "ϋ"), + (0x3D5, "M", "φ"), + (0x3D6, "M", "π"), + (0x3D7, "V"), + (0x3D8, "M", "ϙ"), + (0x3D9, "V"), + (0x3DA, "M", "ϛ"), + (0x3DB, "V"), + (0x3DC, "M", "ϝ"), + (0x3DD, "V"), + (0x3DE, "M", "ϟ"), + (0x3DF, "V"), + (0x3E0, "M", "ϡ"), + (0x3E1, "V"), + (0x3E2, "M", "ϣ"), + (0x3E3, "V"), + (0x3E4, "M", "ϥ"), + (0x3E5, "V"), + (0x3E6, "M", "ϧ"), + (0x3E7, "V"), + (0x3E8, "M", "ϩ"), + (0x3E9, "V"), + (0x3EA, "M", "ϫ"), + (0x3EB, "V"), + (0x3EC, "M", "ϭ"), + (0x3ED, "V"), + (0x3EE, "M", "ϯ"), + (0x3EF, "V"), + (0x3F0, "M", "κ"), + (0x3F1, "M", "ρ"), + (0x3F2, "M", "σ"), + (0x3F3, "V"), + (0x3F4, "M", "θ"), + (0x3F5, "M", "ε"), + (0x3F6, "V"), + (0x3F7, "M", "ϸ"), + (0x3F8, "V"), + (0x3F9, "M", "σ"), + (0x3FA, "M", "ϻ"), + (0x3FB, "V"), + (0x3FD, "M", "ͻ"), + (0x3FE, "M", "ͼ"), + (0x3FF, "M", "ͽ"), + (0x400, "M", "ѐ"), + (0x401, "M", "ё"), + (0x402, "M", "ђ"), + ] + + +def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x403, "M", "ѓ"), + (0x404, "M", "є"), + (0x405, "M", "ѕ"), + (0x406, "M", "і"), + (0x407, "M", "ї"), + (0x408, "M", "ј"), + (0x409, "M", "љ"), + (0x40A, "M", "њ"), + (0x40B, "M", "ћ"), + (0x40C, "M", "ќ"), + (0x40D, "M", "ѝ"), + (0x40E, "M", "ў"), + (0x40F, "M", "џ"), + (0x410, "M", "а"), + (0x411, "M", "б"), + (0x412, "M", "в"), + (0x413, "M", "г"), + (0x414, "M", "д"), + (0x415, "M", "е"), + (0x416, "M", "ж"), + (0x417, "M", "з"), + (0x418, "M", "и"), + (0x419, "M", "й"), + (0x41A, "M", "к"), + (0x41B, "M", "л"), + (0x41C, "M", "м"), + (0x41D, "M", "н"), + (0x41E, "M", "о"), + (0x41F, "M", "п"), + (0x420, "M", "р"), + (0x421, "M", "с"), + (0x422, "M", "т"), + (0x423, "M", "у"), + (0x424, "M", "ф"), + (0x425, "M", "х"), + (0x426, "M", "ц"), + (0x427, "M", "ч"), + (0x428, "M", "ш"), + (0x429, "M", "щ"), + (0x42A, "M", "ъ"), + (0x42B, "M", "ы"), + (0x42C, "M", "ь"), + (0x42D, "M", "э"), + (0x42E, "M", "ю"), + (0x42F, "M", "я"), + (0x430, "V"), + (0x460, "M", "ѡ"), + (0x461, "V"), + (0x462, "M", "ѣ"), + (0x463, "V"), + (0x464, "M", "ѥ"), + (0x465, "V"), + (0x466, "M", "ѧ"), + (0x467, "V"), + (0x468, "M", "ѩ"), + (0x469, "V"), + (0x46A, "M", "ѫ"), + (0x46B, "V"), + (0x46C, "M", "ѭ"), + (0x46D, "V"), + (0x46E, "M", "ѯ"), + (0x46F, "V"), + (0x470, "M", "ѱ"), + (0x471, "V"), + (0x472, "M", "ѳ"), + (0x473, "V"), + (0x474, "M", "ѵ"), + (0x475, "V"), + (0x476, "M", "ѷ"), + (0x477, "V"), + (0x478, "M", "ѹ"), + (0x479, "V"), + (0x47A, "M", "ѻ"), + (0x47B, "V"), + (0x47C, "M", "ѽ"), + (0x47D, "V"), + (0x47E, "M", "ѿ"), + (0x47F, "V"), + (0x480, "M", "ҁ"), + (0x481, "V"), + (0x48A, "M", "ҋ"), + (0x48B, "V"), + (0x48C, "M", "ҍ"), + (0x48D, "V"), + (0x48E, "M", "ҏ"), + (0x48F, "V"), + (0x490, "M", "ґ"), + (0x491, "V"), + (0x492, "M", "ғ"), + (0x493, "V"), + (0x494, "M", "ҕ"), + (0x495, "V"), + (0x496, "M", "җ"), + (0x497, "V"), + (0x498, "M", "ҙ"), + (0x499, "V"), + (0x49A, "M", "қ"), + (0x49B, "V"), + (0x49C, "M", "ҝ"), + (0x49D, "V"), + ] + + +def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x49E, "M", "ҟ"), + (0x49F, "V"), + (0x4A0, "M", "ҡ"), + (0x4A1, "V"), + (0x4A2, "M", "ң"), + (0x4A3, "V"), + (0x4A4, "M", "ҥ"), + (0x4A5, "V"), + (0x4A6, "M", "ҧ"), + (0x4A7, "V"), + (0x4A8, "M", "ҩ"), + (0x4A9, "V"), + (0x4AA, "M", "ҫ"), + (0x4AB, "V"), + (0x4AC, "M", "ҭ"), + (0x4AD, "V"), + (0x4AE, "M", "ү"), + (0x4AF, "V"), + (0x4B0, "M", "ұ"), + (0x4B1, "V"), + (0x4B2, "M", "ҳ"), + (0x4B3, "V"), + (0x4B4, "M", 
"ҵ"), + (0x4B5, "V"), + (0x4B6, "M", "ҷ"), + (0x4B7, "V"), + (0x4B8, "M", "ҹ"), + (0x4B9, "V"), + (0x4BA, "M", "һ"), + (0x4BB, "V"), + (0x4BC, "M", "ҽ"), + (0x4BD, "V"), + (0x4BE, "M", "ҿ"), + (0x4BF, "V"), + (0x4C0, "X"), + (0x4C1, "M", "ӂ"), + (0x4C2, "V"), + (0x4C3, "M", "ӄ"), + (0x4C4, "V"), + (0x4C5, "M", "ӆ"), + (0x4C6, "V"), + (0x4C7, "M", "ӈ"), + (0x4C8, "V"), + (0x4C9, "M", "ӊ"), + (0x4CA, "V"), + (0x4CB, "M", "ӌ"), + (0x4CC, "V"), + (0x4CD, "M", "ӎ"), + (0x4CE, "V"), + (0x4D0, "M", "ӑ"), + (0x4D1, "V"), + (0x4D2, "M", "ӓ"), + (0x4D3, "V"), + (0x4D4, "M", "ӕ"), + (0x4D5, "V"), + (0x4D6, "M", "ӗ"), + (0x4D7, "V"), + (0x4D8, "M", "ә"), + (0x4D9, "V"), + (0x4DA, "M", "ӛ"), + (0x4DB, "V"), + (0x4DC, "M", "ӝ"), + (0x4DD, "V"), + (0x4DE, "M", "ӟ"), + (0x4DF, "V"), + (0x4E0, "M", "ӡ"), + (0x4E1, "V"), + (0x4E2, "M", "ӣ"), + (0x4E3, "V"), + (0x4E4, "M", "ӥ"), + (0x4E5, "V"), + (0x4E6, "M", "ӧ"), + (0x4E7, "V"), + (0x4E8, "M", "ө"), + (0x4E9, "V"), + (0x4EA, "M", "ӫ"), + (0x4EB, "V"), + (0x4EC, "M", "ӭ"), + (0x4ED, "V"), + (0x4EE, "M", "ӯ"), + (0x4EF, "V"), + (0x4F0, "M", "ӱ"), + (0x4F1, "V"), + (0x4F2, "M", "ӳ"), + (0x4F3, "V"), + (0x4F4, "M", "ӵ"), + (0x4F5, "V"), + (0x4F6, "M", "ӷ"), + (0x4F7, "V"), + (0x4F8, "M", "ӹ"), + (0x4F9, "V"), + (0x4FA, "M", "ӻ"), + (0x4FB, "V"), + (0x4FC, "M", "ӽ"), + (0x4FD, "V"), + (0x4FE, "M", "ӿ"), + (0x4FF, "V"), + (0x500, "M", "ԁ"), + (0x501, "V"), + (0x502, "M", "ԃ"), + ] + + +def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x503, "V"), + (0x504, "M", "ԅ"), + (0x505, "V"), + (0x506, "M", "ԇ"), + (0x507, "V"), + (0x508, "M", "ԉ"), + (0x509, "V"), + (0x50A, "M", "ԋ"), + (0x50B, "V"), + (0x50C, "M", "ԍ"), + (0x50D, "V"), + (0x50E, "M", "ԏ"), + (0x50F, "V"), + (0x510, "M", "ԑ"), + (0x511, "V"), + (0x512, "M", "ԓ"), + (0x513, "V"), + (0x514, "M", "ԕ"), + (0x515, "V"), + (0x516, "M", "ԗ"), + (0x517, "V"), + (0x518, "M", "ԙ"), + (0x519, "V"), + (0x51A, "M", "ԛ"), + (0x51B, "V"), + (0x51C, "M", "ԝ"), + (0x51D, "V"), + (0x51E, "M", "ԟ"), + (0x51F, "V"), + (0x520, "M", "ԡ"), + (0x521, "V"), + (0x522, "M", "ԣ"), + (0x523, "V"), + (0x524, "M", "ԥ"), + (0x525, "V"), + (0x526, "M", "ԧ"), + (0x527, "V"), + (0x528, "M", "ԩ"), + (0x529, "V"), + (0x52A, "M", "ԫ"), + (0x52B, "V"), + (0x52C, "M", "ԭ"), + (0x52D, "V"), + (0x52E, "M", "ԯ"), + (0x52F, "V"), + (0x530, "X"), + (0x531, "M", "ա"), + (0x532, "M", "բ"), + (0x533, "M", "գ"), + (0x534, "M", "դ"), + (0x535, "M", "ե"), + (0x536, "M", "զ"), + (0x537, "M", "է"), + (0x538, "M", "ը"), + (0x539, "M", "թ"), + (0x53A, "M", "ժ"), + (0x53B, "M", "ի"), + (0x53C, "M", "լ"), + (0x53D, "M", "խ"), + (0x53E, "M", "ծ"), + (0x53F, "M", "կ"), + (0x540, "M", "հ"), + (0x541, "M", "ձ"), + (0x542, "M", "ղ"), + (0x543, "M", "ճ"), + (0x544, "M", "մ"), + (0x545, "M", "յ"), + (0x546, "M", "ն"), + (0x547, "M", "շ"), + (0x548, "M", "ո"), + (0x549, "M", "չ"), + (0x54A, "M", "պ"), + (0x54B, "M", "ջ"), + (0x54C, "M", "ռ"), + (0x54D, "M", "ս"), + (0x54E, "M", "վ"), + (0x54F, "M", "տ"), + (0x550, "M", "ր"), + (0x551, "M", "ց"), + (0x552, "M", "ւ"), + (0x553, "M", "փ"), + (0x554, "M", "ք"), + (0x555, "M", "օ"), + (0x556, "M", "ֆ"), + (0x557, "X"), + (0x559, "V"), + (0x587, "M", "եւ"), + (0x588, "V"), + (0x58B, "X"), + (0x58D, "V"), + (0x590, "X"), + (0x591, "V"), + (0x5C8, "X"), + (0x5D0, "V"), + (0x5EB, "X"), + (0x5EF, "V"), + (0x5F5, "X"), + (0x606, "V"), + (0x61C, "X"), + (0x61D, "V"), + ] + + +def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x675, "M", "اٴ"), + (0x676, "M", "وٴ"), 
+ (0x677, "M", "ۇٴ"), + (0x678, "M", "يٴ"), + (0x679, "V"), + (0x6DD, "X"), + (0x6DE, "V"), + (0x70E, "X"), + (0x710, "V"), + (0x74B, "X"), + (0x74D, "V"), + (0x7B2, "X"), + (0x7C0, "V"), + (0x7FB, "X"), + (0x7FD, "V"), + (0x82E, "X"), + (0x830, "V"), + (0x83F, "X"), + (0x840, "V"), + (0x85C, "X"), + (0x85E, "V"), + (0x85F, "X"), + (0x860, "V"), + (0x86B, "X"), + (0x870, "V"), + (0x88F, "X"), + (0x898, "V"), + (0x8E2, "X"), + (0x8E3, "V"), + (0x958, "M", "क़"), + (0x959, "M", "ख़"), + (0x95A, "M", "ग़"), + (0x95B, "M", "ज़"), + (0x95C, "M", "ड़"), + (0x95D, "M", "ढ़"), + (0x95E, "M", "फ़"), + (0x95F, "M", "य़"), + (0x960, "V"), + (0x984, "X"), + (0x985, "V"), + (0x98D, "X"), + (0x98F, "V"), + (0x991, "X"), + (0x993, "V"), + (0x9A9, "X"), + (0x9AA, "V"), + (0x9B1, "X"), + (0x9B2, "V"), + (0x9B3, "X"), + (0x9B6, "V"), + (0x9BA, "X"), + (0x9BC, "V"), + (0x9C5, "X"), + (0x9C7, "V"), + (0x9C9, "X"), + (0x9CB, "V"), + (0x9CF, "X"), + (0x9D7, "V"), + (0x9D8, "X"), + (0x9DC, "M", "ড়"), + (0x9DD, "M", "ঢ়"), + (0x9DE, "X"), + (0x9DF, "M", "য়"), + (0x9E0, "V"), + (0x9E4, "X"), + (0x9E6, "V"), + (0x9FF, "X"), + (0xA01, "V"), + (0xA04, "X"), + (0xA05, "V"), + (0xA0B, "X"), + (0xA0F, "V"), + (0xA11, "X"), + (0xA13, "V"), + (0xA29, "X"), + (0xA2A, "V"), + (0xA31, "X"), + (0xA32, "V"), + (0xA33, "M", "ਲ਼"), + (0xA34, "X"), + (0xA35, "V"), + (0xA36, "M", "ਸ਼"), + (0xA37, "X"), + (0xA38, "V"), + (0xA3A, "X"), + (0xA3C, "V"), + (0xA3D, "X"), + (0xA3E, "V"), + (0xA43, "X"), + (0xA47, "V"), + (0xA49, "X"), + (0xA4B, "V"), + (0xA4E, "X"), + (0xA51, "V"), + (0xA52, "X"), + (0xA59, "M", "ਖ਼"), + (0xA5A, "M", "ਗ਼"), + (0xA5B, "M", "ਜ਼"), + (0xA5C, "V"), + (0xA5D, "X"), + ] + + +def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA5E, "M", "ਫ਼"), + (0xA5F, "X"), + (0xA66, "V"), + (0xA77, "X"), + (0xA81, "V"), + (0xA84, "X"), + (0xA85, "V"), + (0xA8E, "X"), + (0xA8F, "V"), + (0xA92, "X"), + (0xA93, "V"), + (0xAA9, "X"), + (0xAAA, "V"), + (0xAB1, "X"), + (0xAB2, "V"), + (0xAB4, "X"), + (0xAB5, "V"), + (0xABA, "X"), + (0xABC, "V"), + (0xAC6, "X"), + (0xAC7, "V"), + (0xACA, "X"), + (0xACB, "V"), + (0xACE, "X"), + (0xAD0, "V"), + (0xAD1, "X"), + (0xAE0, "V"), + (0xAE4, "X"), + (0xAE6, "V"), + (0xAF2, "X"), + (0xAF9, "V"), + (0xB00, "X"), + (0xB01, "V"), + (0xB04, "X"), + (0xB05, "V"), + (0xB0D, "X"), + (0xB0F, "V"), + (0xB11, "X"), + (0xB13, "V"), + (0xB29, "X"), + (0xB2A, "V"), + (0xB31, "X"), + (0xB32, "V"), + (0xB34, "X"), + (0xB35, "V"), + (0xB3A, "X"), + (0xB3C, "V"), + (0xB45, "X"), + (0xB47, "V"), + (0xB49, "X"), + (0xB4B, "V"), + (0xB4E, "X"), + (0xB55, "V"), + (0xB58, "X"), + (0xB5C, "M", "ଡ଼"), + (0xB5D, "M", "ଢ଼"), + (0xB5E, "X"), + (0xB5F, "V"), + (0xB64, "X"), + (0xB66, "V"), + (0xB78, "X"), + (0xB82, "V"), + (0xB84, "X"), + (0xB85, "V"), + (0xB8B, "X"), + (0xB8E, "V"), + (0xB91, "X"), + (0xB92, "V"), + (0xB96, "X"), + (0xB99, "V"), + (0xB9B, "X"), + (0xB9C, "V"), + (0xB9D, "X"), + (0xB9E, "V"), + (0xBA0, "X"), + (0xBA3, "V"), + (0xBA5, "X"), + (0xBA8, "V"), + (0xBAB, "X"), + (0xBAE, "V"), + (0xBBA, "X"), + (0xBBE, "V"), + (0xBC3, "X"), + (0xBC6, "V"), + (0xBC9, "X"), + (0xBCA, "V"), + (0xBCE, "X"), + (0xBD0, "V"), + (0xBD1, "X"), + (0xBD7, "V"), + (0xBD8, "X"), + (0xBE6, "V"), + (0xBFB, "X"), + (0xC00, "V"), + (0xC0D, "X"), + (0xC0E, "V"), + (0xC11, "X"), + (0xC12, "V"), + (0xC29, "X"), + (0xC2A, "V"), + ] + + +def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xC3A, "X"), + (0xC3C, "V"), + (0xC45, "X"), + (0xC46, "V"), + (0xC49, "X"), + 
(0xC4A, "V"), + (0xC4E, "X"), + (0xC55, "V"), + (0xC57, "X"), + (0xC58, "V"), + (0xC5B, "X"), + (0xC5D, "V"), + (0xC5E, "X"), + (0xC60, "V"), + (0xC64, "X"), + (0xC66, "V"), + (0xC70, "X"), + (0xC77, "V"), + (0xC8D, "X"), + (0xC8E, "V"), + (0xC91, "X"), + (0xC92, "V"), + (0xCA9, "X"), + (0xCAA, "V"), + (0xCB4, "X"), + (0xCB5, "V"), + (0xCBA, "X"), + (0xCBC, "V"), + (0xCC5, "X"), + (0xCC6, "V"), + (0xCC9, "X"), + (0xCCA, "V"), + (0xCCE, "X"), + (0xCD5, "V"), + (0xCD7, "X"), + (0xCDD, "V"), + (0xCDF, "X"), + (0xCE0, "V"), + (0xCE4, "X"), + (0xCE6, "V"), + (0xCF0, "X"), + (0xCF1, "V"), + (0xCF4, "X"), + (0xD00, "V"), + (0xD0D, "X"), + (0xD0E, "V"), + (0xD11, "X"), + (0xD12, "V"), + (0xD45, "X"), + (0xD46, "V"), + (0xD49, "X"), + (0xD4A, "V"), + (0xD50, "X"), + (0xD54, "V"), + (0xD64, "X"), + (0xD66, "V"), + (0xD80, "X"), + (0xD81, "V"), + (0xD84, "X"), + (0xD85, "V"), + (0xD97, "X"), + (0xD9A, "V"), + (0xDB2, "X"), + (0xDB3, "V"), + (0xDBC, "X"), + (0xDBD, "V"), + (0xDBE, "X"), + (0xDC0, "V"), + (0xDC7, "X"), + (0xDCA, "V"), + (0xDCB, "X"), + (0xDCF, "V"), + (0xDD5, "X"), + (0xDD6, "V"), + (0xDD7, "X"), + (0xDD8, "V"), + (0xDE0, "X"), + (0xDE6, "V"), + (0xDF0, "X"), + (0xDF2, "V"), + (0xDF5, "X"), + (0xE01, "V"), + (0xE33, "M", "ํา"), + (0xE34, "V"), + (0xE3B, "X"), + (0xE3F, "V"), + (0xE5C, "X"), + (0xE81, "V"), + (0xE83, "X"), + (0xE84, "V"), + (0xE85, "X"), + (0xE86, "V"), + (0xE8B, "X"), + (0xE8C, "V"), + (0xEA4, "X"), + (0xEA5, "V"), + (0xEA6, "X"), + (0xEA7, "V"), + (0xEB3, "M", "ໍາ"), + (0xEB4, "V"), + ] + + +def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xEBE, "X"), + (0xEC0, "V"), + (0xEC5, "X"), + (0xEC6, "V"), + (0xEC7, "X"), + (0xEC8, "V"), + (0xECF, "X"), + (0xED0, "V"), + (0xEDA, "X"), + (0xEDC, "M", "ຫນ"), + (0xEDD, "M", "ຫມ"), + (0xEDE, "V"), + (0xEE0, "X"), + (0xF00, "V"), + (0xF0C, "M", "་"), + (0xF0D, "V"), + (0xF43, "M", "གྷ"), + (0xF44, "V"), + (0xF48, "X"), + (0xF49, "V"), + (0xF4D, "M", "ཌྷ"), + (0xF4E, "V"), + (0xF52, "M", "དྷ"), + (0xF53, "V"), + (0xF57, "M", "བྷ"), + (0xF58, "V"), + (0xF5C, "M", "ཛྷ"), + (0xF5D, "V"), + (0xF69, "M", "ཀྵ"), + (0xF6A, "V"), + (0xF6D, "X"), + (0xF71, "V"), + (0xF73, "M", "ཱི"), + (0xF74, "V"), + (0xF75, "M", "ཱུ"), + (0xF76, "M", "ྲྀ"), + (0xF77, "M", "ྲཱྀ"), + (0xF78, "M", "ླྀ"), + (0xF79, "M", "ླཱྀ"), + (0xF7A, "V"), + (0xF81, "M", "ཱྀ"), + (0xF82, "V"), + (0xF93, "M", "ྒྷ"), + (0xF94, "V"), + (0xF98, "X"), + (0xF99, "V"), + (0xF9D, "M", "ྜྷ"), + (0xF9E, "V"), + (0xFA2, "M", "ྡྷ"), + (0xFA3, "V"), + (0xFA7, "M", "ྦྷ"), + (0xFA8, "V"), + (0xFAC, "M", "ྫྷ"), + (0xFAD, "V"), + (0xFB9, "M", "ྐྵ"), + (0xFBA, "V"), + (0xFBD, "X"), + (0xFBE, "V"), + (0xFCD, "X"), + (0xFCE, "V"), + (0xFDB, "X"), + (0x1000, "V"), + (0x10A0, "X"), + (0x10C7, "M", "ⴧ"), + (0x10C8, "X"), + (0x10CD, "M", "ⴭ"), + (0x10CE, "X"), + (0x10D0, "V"), + (0x10FC, "M", "ნ"), + (0x10FD, "V"), + (0x115F, "X"), + (0x1161, "V"), + (0x1249, "X"), + (0x124A, "V"), + (0x124E, "X"), + (0x1250, "V"), + (0x1257, "X"), + (0x1258, "V"), + (0x1259, "X"), + (0x125A, "V"), + (0x125E, "X"), + (0x1260, "V"), + (0x1289, "X"), + (0x128A, "V"), + (0x128E, "X"), + (0x1290, "V"), + (0x12B1, "X"), + (0x12B2, "V"), + (0x12B6, "X"), + (0x12B8, "V"), + (0x12BF, "X"), + (0x12C0, "V"), + (0x12C1, "X"), + (0x12C2, "V"), + (0x12C6, "X"), + (0x12C8, "V"), + (0x12D7, "X"), + (0x12D8, "V"), + (0x1311, "X"), + (0x1312, "V"), + ] + + +def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1316, "X"), + (0x1318, "V"), + (0x135B, "X"), + 
(0x135D, "V"), + (0x137D, "X"), + (0x1380, "V"), + (0x139A, "X"), + (0x13A0, "V"), + (0x13F6, "X"), + (0x13F8, "M", "Ᏸ"), + (0x13F9, "M", "Ᏹ"), + (0x13FA, "M", "Ᏺ"), + (0x13FB, "M", "Ᏻ"), + (0x13FC, "M", "Ᏼ"), + (0x13FD, "M", "Ᏽ"), + (0x13FE, "X"), + (0x1400, "V"), + (0x1680, "X"), + (0x1681, "V"), + (0x169D, "X"), + (0x16A0, "V"), + (0x16F9, "X"), + (0x1700, "V"), + (0x1716, "X"), + (0x171F, "V"), + (0x1737, "X"), + (0x1740, "V"), + (0x1754, "X"), + (0x1760, "V"), + (0x176D, "X"), + (0x176E, "V"), + (0x1771, "X"), + (0x1772, "V"), + (0x1774, "X"), + (0x1780, "V"), + (0x17B4, "X"), + (0x17B6, "V"), + (0x17DE, "X"), + (0x17E0, "V"), + (0x17EA, "X"), + (0x17F0, "V"), + (0x17FA, "X"), + (0x1800, "V"), + (0x1806, "X"), + (0x1807, "V"), + (0x180B, "I"), + (0x180E, "X"), + (0x180F, "I"), + (0x1810, "V"), + (0x181A, "X"), + (0x1820, "V"), + (0x1879, "X"), + (0x1880, "V"), + (0x18AB, "X"), + (0x18B0, "V"), + (0x18F6, "X"), + (0x1900, "V"), + (0x191F, "X"), + (0x1920, "V"), + (0x192C, "X"), + (0x1930, "V"), + (0x193C, "X"), + (0x1940, "V"), + (0x1941, "X"), + (0x1944, "V"), + (0x196E, "X"), + (0x1970, "V"), + (0x1975, "X"), + (0x1980, "V"), + (0x19AC, "X"), + (0x19B0, "V"), + (0x19CA, "X"), + (0x19D0, "V"), + (0x19DB, "X"), + (0x19DE, "V"), + (0x1A1C, "X"), + (0x1A1E, "V"), + (0x1A5F, "X"), + (0x1A60, "V"), + (0x1A7D, "X"), + (0x1A7F, "V"), + (0x1A8A, "X"), + (0x1A90, "V"), + (0x1A9A, "X"), + (0x1AA0, "V"), + (0x1AAE, "X"), + (0x1AB0, "V"), + (0x1ACF, "X"), + (0x1B00, "V"), + (0x1B4D, "X"), + (0x1B50, "V"), + (0x1B7F, "X"), + (0x1B80, "V"), + (0x1BF4, "X"), + (0x1BFC, "V"), + (0x1C38, "X"), + (0x1C3B, "V"), + (0x1C4A, "X"), + (0x1C4D, "V"), + (0x1C80, "M", "в"), + ] + + +def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1C81, "M", "д"), + (0x1C82, "M", "о"), + (0x1C83, "M", "с"), + (0x1C84, "M", "т"), + (0x1C86, "M", "ъ"), + (0x1C87, "M", "ѣ"), + (0x1C88, "M", "ꙋ"), + (0x1C89, "X"), + (0x1C90, "M", "ა"), + (0x1C91, "M", "ბ"), + (0x1C92, "M", "გ"), + (0x1C93, "M", "დ"), + (0x1C94, "M", "ე"), + (0x1C95, "M", "ვ"), + (0x1C96, "M", "ზ"), + (0x1C97, "M", "თ"), + (0x1C98, "M", "ი"), + (0x1C99, "M", "კ"), + (0x1C9A, "M", "ლ"), + (0x1C9B, "M", "მ"), + (0x1C9C, "M", "ნ"), + (0x1C9D, "M", "ო"), + (0x1C9E, "M", "პ"), + (0x1C9F, "M", "ჟ"), + (0x1CA0, "M", "რ"), + (0x1CA1, "M", "ს"), + (0x1CA2, "M", "ტ"), + (0x1CA3, "M", "უ"), + (0x1CA4, "M", "ფ"), + (0x1CA5, "M", "ქ"), + (0x1CA6, "M", "ღ"), + (0x1CA7, "M", "ყ"), + (0x1CA8, "M", "შ"), + (0x1CA9, "M", "ჩ"), + (0x1CAA, "M", "ც"), + (0x1CAB, "M", "ძ"), + (0x1CAC, "M", "წ"), + (0x1CAD, "M", "ჭ"), + (0x1CAE, "M", "ხ"), + (0x1CAF, "M", "ჯ"), + (0x1CB0, "M", "ჰ"), + (0x1CB1, "M", "ჱ"), + (0x1CB2, "M", "ჲ"), + (0x1CB3, "M", "ჳ"), + (0x1CB4, "M", "ჴ"), + (0x1CB5, "M", "ჵ"), + (0x1CB6, "M", "ჶ"), + (0x1CB7, "M", "ჷ"), + (0x1CB8, "M", "ჸ"), + (0x1CB9, "M", "ჹ"), + (0x1CBA, "M", "ჺ"), + (0x1CBB, "X"), + (0x1CBD, "M", "ჽ"), + (0x1CBE, "M", "ჾ"), + (0x1CBF, "M", "ჿ"), + (0x1CC0, "V"), + (0x1CC8, "X"), + (0x1CD0, "V"), + (0x1CFB, "X"), + (0x1D00, "V"), + (0x1D2C, "M", "a"), + (0x1D2D, "M", "æ"), + (0x1D2E, "M", "b"), + (0x1D2F, "V"), + (0x1D30, "M", "d"), + (0x1D31, "M", "e"), + (0x1D32, "M", "ǝ"), + (0x1D33, "M", "g"), + (0x1D34, "M", "h"), + (0x1D35, "M", "i"), + (0x1D36, "M", "j"), + (0x1D37, "M", "k"), + (0x1D38, "M", "l"), + (0x1D39, "M", "m"), + (0x1D3A, "M", "n"), + (0x1D3B, "V"), + (0x1D3C, "M", "o"), + (0x1D3D, "M", "ȣ"), + (0x1D3E, "M", "p"), + (0x1D3F, "M", "r"), + (0x1D40, "M", "t"), + (0x1D41, "M", "u"), + (0x1D42, "M", "w"), + 
(0x1D43, "M", "a"), + (0x1D44, "M", "ɐ"), + (0x1D45, "M", "ɑ"), + (0x1D46, "M", "ᴂ"), + (0x1D47, "M", "b"), + (0x1D48, "M", "d"), + (0x1D49, "M", "e"), + (0x1D4A, "M", "ə"), + (0x1D4B, "M", "ɛ"), + (0x1D4C, "M", "ɜ"), + (0x1D4D, "M", "g"), + (0x1D4E, "V"), + (0x1D4F, "M", "k"), + (0x1D50, "M", "m"), + (0x1D51, "M", "ŋ"), + (0x1D52, "M", "o"), + (0x1D53, "M", "ɔ"), + ] + + +def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D54, "M", "ᴖ"), + (0x1D55, "M", "ᴗ"), + (0x1D56, "M", "p"), + (0x1D57, "M", "t"), + (0x1D58, "M", "u"), + (0x1D59, "M", "ᴝ"), + (0x1D5A, "M", "ɯ"), + (0x1D5B, "M", "v"), + (0x1D5C, "M", "ᴥ"), + (0x1D5D, "M", "β"), + (0x1D5E, "M", "γ"), + (0x1D5F, "M", "δ"), + (0x1D60, "M", "φ"), + (0x1D61, "M", "χ"), + (0x1D62, "M", "i"), + (0x1D63, "M", "r"), + (0x1D64, "M", "u"), + (0x1D65, "M", "v"), + (0x1D66, "M", "β"), + (0x1D67, "M", "γ"), + (0x1D68, "M", "ρ"), + (0x1D69, "M", "φ"), + (0x1D6A, "M", "χ"), + (0x1D6B, "V"), + (0x1D78, "M", "н"), + (0x1D79, "V"), + (0x1D9B, "M", "ɒ"), + (0x1D9C, "M", "c"), + (0x1D9D, "M", "ɕ"), + (0x1D9E, "M", "ð"), + (0x1D9F, "M", "ɜ"), + (0x1DA0, "M", "f"), + (0x1DA1, "M", "ɟ"), + (0x1DA2, "M", "ɡ"), + (0x1DA3, "M", "ɥ"), + (0x1DA4, "M", "ɨ"), + (0x1DA5, "M", "ɩ"), + (0x1DA6, "M", "ɪ"), + (0x1DA7, "M", "ᵻ"), + (0x1DA8, "M", "ʝ"), + (0x1DA9, "M", "ɭ"), + (0x1DAA, "M", "ᶅ"), + (0x1DAB, "M", "ʟ"), + (0x1DAC, "M", "ɱ"), + (0x1DAD, "M", "ɰ"), + (0x1DAE, "M", "ɲ"), + (0x1DAF, "M", "ɳ"), + (0x1DB0, "M", "ɴ"), + (0x1DB1, "M", "ɵ"), + (0x1DB2, "M", "ɸ"), + (0x1DB3, "M", "ʂ"), + (0x1DB4, "M", "ʃ"), + (0x1DB5, "M", "ƫ"), + (0x1DB6, "M", "ʉ"), + (0x1DB7, "M", "ʊ"), + (0x1DB8, "M", "ᴜ"), + (0x1DB9, "M", "ʋ"), + (0x1DBA, "M", "ʌ"), + (0x1DBB, "M", "z"), + (0x1DBC, "M", "ʐ"), + (0x1DBD, "M", "ʑ"), + (0x1DBE, "M", "ʒ"), + (0x1DBF, "M", "θ"), + (0x1DC0, "V"), + (0x1E00, "M", "ḁ"), + (0x1E01, "V"), + (0x1E02, "M", "ḃ"), + (0x1E03, "V"), + (0x1E04, "M", "ḅ"), + (0x1E05, "V"), + (0x1E06, "M", "ḇ"), + (0x1E07, "V"), + (0x1E08, "M", "ḉ"), + (0x1E09, "V"), + (0x1E0A, "M", "ḋ"), + (0x1E0B, "V"), + (0x1E0C, "M", "ḍ"), + (0x1E0D, "V"), + (0x1E0E, "M", "ḏ"), + (0x1E0F, "V"), + (0x1E10, "M", "ḑ"), + (0x1E11, "V"), + (0x1E12, "M", "ḓ"), + (0x1E13, "V"), + (0x1E14, "M", "ḕ"), + (0x1E15, "V"), + (0x1E16, "M", "ḗ"), + (0x1E17, "V"), + (0x1E18, "M", "ḙ"), + (0x1E19, "V"), + (0x1E1A, "M", "ḛ"), + (0x1E1B, "V"), + (0x1E1C, "M", "ḝ"), + (0x1E1D, "V"), + (0x1E1E, "M", "ḟ"), + (0x1E1F, "V"), + (0x1E20, "M", "ḡ"), + (0x1E21, "V"), + (0x1E22, "M", "ḣ"), + (0x1E23, "V"), + ] + + +def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E24, "M", "ḥ"), + (0x1E25, "V"), + (0x1E26, "M", "ḧ"), + (0x1E27, "V"), + (0x1E28, "M", "ḩ"), + (0x1E29, "V"), + (0x1E2A, "M", "ḫ"), + (0x1E2B, "V"), + (0x1E2C, "M", "ḭ"), + (0x1E2D, "V"), + (0x1E2E, "M", "ḯ"), + (0x1E2F, "V"), + (0x1E30, "M", "ḱ"), + (0x1E31, "V"), + (0x1E32, "M", "ḳ"), + (0x1E33, "V"), + (0x1E34, "M", "ḵ"), + (0x1E35, "V"), + (0x1E36, "M", "ḷ"), + (0x1E37, "V"), + (0x1E38, "M", "ḹ"), + (0x1E39, "V"), + (0x1E3A, "M", "ḻ"), + (0x1E3B, "V"), + (0x1E3C, "M", "ḽ"), + (0x1E3D, "V"), + (0x1E3E, "M", "ḿ"), + (0x1E3F, "V"), + (0x1E40, "M", "ṁ"), + (0x1E41, "V"), + (0x1E42, "M", "ṃ"), + (0x1E43, "V"), + (0x1E44, "M", "ṅ"), + (0x1E45, "V"), + (0x1E46, "M", "ṇ"), + (0x1E47, "V"), + (0x1E48, "M", "ṉ"), + (0x1E49, "V"), + (0x1E4A, "M", "ṋ"), + (0x1E4B, "V"), + (0x1E4C, "M", "ṍ"), + (0x1E4D, "V"), + (0x1E4E, "M", "ṏ"), + (0x1E4F, "V"), + (0x1E50, "M", "ṑ"), + (0x1E51, "V"), + (0x1E52, "M", 
"ṓ"), + (0x1E53, "V"), + (0x1E54, "M", "ṕ"), + (0x1E55, "V"), + (0x1E56, "M", "ṗ"), + (0x1E57, "V"), + (0x1E58, "M", "ṙ"), + (0x1E59, "V"), + (0x1E5A, "M", "ṛ"), + (0x1E5B, "V"), + (0x1E5C, "M", "ṝ"), + (0x1E5D, "V"), + (0x1E5E, "M", "ṟ"), + (0x1E5F, "V"), + (0x1E60, "M", "ṡ"), + (0x1E61, "V"), + (0x1E62, "M", "ṣ"), + (0x1E63, "V"), + (0x1E64, "M", "ṥ"), + (0x1E65, "V"), + (0x1E66, "M", "ṧ"), + (0x1E67, "V"), + (0x1E68, "M", "ṩ"), + (0x1E69, "V"), + (0x1E6A, "M", "ṫ"), + (0x1E6B, "V"), + (0x1E6C, "M", "ṭ"), + (0x1E6D, "V"), + (0x1E6E, "M", "ṯ"), + (0x1E6F, "V"), + (0x1E70, "M", "ṱ"), + (0x1E71, "V"), + (0x1E72, "M", "ṳ"), + (0x1E73, "V"), + (0x1E74, "M", "ṵ"), + (0x1E75, "V"), + (0x1E76, "M", "ṷ"), + (0x1E77, "V"), + (0x1E78, "M", "ṹ"), + (0x1E79, "V"), + (0x1E7A, "M", "ṻ"), + (0x1E7B, "V"), + (0x1E7C, "M", "ṽ"), + (0x1E7D, "V"), + (0x1E7E, "M", "ṿ"), + (0x1E7F, "V"), + (0x1E80, "M", "ẁ"), + (0x1E81, "V"), + (0x1E82, "M", "ẃ"), + (0x1E83, "V"), + (0x1E84, "M", "ẅ"), + (0x1E85, "V"), + (0x1E86, "M", "ẇ"), + (0x1E87, "V"), + ] + + +def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E88, "M", "ẉ"), + (0x1E89, "V"), + (0x1E8A, "M", "ẋ"), + (0x1E8B, "V"), + (0x1E8C, "M", "ẍ"), + (0x1E8D, "V"), + (0x1E8E, "M", "ẏ"), + (0x1E8F, "V"), + (0x1E90, "M", "ẑ"), + (0x1E91, "V"), + (0x1E92, "M", "ẓ"), + (0x1E93, "V"), + (0x1E94, "M", "ẕ"), + (0x1E95, "V"), + (0x1E9A, "M", "aʾ"), + (0x1E9B, "M", "ṡ"), + (0x1E9C, "V"), + (0x1E9E, "M", "ß"), + (0x1E9F, "V"), + (0x1EA0, "M", "ạ"), + (0x1EA1, "V"), + (0x1EA2, "M", "ả"), + (0x1EA3, "V"), + (0x1EA4, "M", "ấ"), + (0x1EA5, "V"), + (0x1EA6, "M", "ầ"), + (0x1EA7, "V"), + (0x1EA8, "M", "ẩ"), + (0x1EA9, "V"), + (0x1EAA, "M", "ẫ"), + (0x1EAB, "V"), + (0x1EAC, "M", "ậ"), + (0x1EAD, "V"), + (0x1EAE, "M", "ắ"), + (0x1EAF, "V"), + (0x1EB0, "M", "ằ"), + (0x1EB1, "V"), + (0x1EB2, "M", "ẳ"), + (0x1EB3, "V"), + (0x1EB4, "M", "ẵ"), + (0x1EB5, "V"), + (0x1EB6, "M", "ặ"), + (0x1EB7, "V"), + (0x1EB8, "M", "ẹ"), + (0x1EB9, "V"), + (0x1EBA, "M", "ẻ"), + (0x1EBB, "V"), + (0x1EBC, "M", "ẽ"), + (0x1EBD, "V"), + (0x1EBE, "M", "ế"), + (0x1EBF, "V"), + (0x1EC0, "M", "ề"), + (0x1EC1, "V"), + (0x1EC2, "M", "ể"), + (0x1EC3, "V"), + (0x1EC4, "M", "ễ"), + (0x1EC5, "V"), + (0x1EC6, "M", "ệ"), + (0x1EC7, "V"), + (0x1EC8, "M", "ỉ"), + (0x1EC9, "V"), + (0x1ECA, "M", "ị"), + (0x1ECB, "V"), + (0x1ECC, "M", "ọ"), + (0x1ECD, "V"), + (0x1ECE, "M", "ỏ"), + (0x1ECF, "V"), + (0x1ED0, "M", "ố"), + (0x1ED1, "V"), + (0x1ED2, "M", "ồ"), + (0x1ED3, "V"), + (0x1ED4, "M", "ổ"), + (0x1ED5, "V"), + (0x1ED6, "M", "ỗ"), + (0x1ED7, "V"), + (0x1ED8, "M", "ộ"), + (0x1ED9, "V"), + (0x1EDA, "M", "ớ"), + (0x1EDB, "V"), + (0x1EDC, "M", "ờ"), + (0x1EDD, "V"), + (0x1EDE, "M", "ở"), + (0x1EDF, "V"), + (0x1EE0, "M", "ỡ"), + (0x1EE1, "V"), + (0x1EE2, "M", "ợ"), + (0x1EE3, "V"), + (0x1EE4, "M", "ụ"), + (0x1EE5, "V"), + (0x1EE6, "M", "ủ"), + (0x1EE7, "V"), + (0x1EE8, "M", "ứ"), + (0x1EE9, "V"), + (0x1EEA, "M", "ừ"), + (0x1EEB, "V"), + (0x1EEC, "M", "ử"), + (0x1EED, "V"), + (0x1EEE, "M", "ữ"), + (0x1EEF, "V"), + (0x1EF0, "M", "ự"), + ] + + +def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EF1, "V"), + (0x1EF2, "M", "ỳ"), + (0x1EF3, "V"), + (0x1EF4, "M", "ỵ"), + (0x1EF5, "V"), + (0x1EF6, "M", "ỷ"), + (0x1EF7, "V"), + (0x1EF8, "M", "ỹ"), + (0x1EF9, "V"), + (0x1EFA, "M", "ỻ"), + (0x1EFB, "V"), + (0x1EFC, "M", "ỽ"), + (0x1EFD, "V"), + (0x1EFE, "M", "ỿ"), + (0x1EFF, "V"), + (0x1F08, "M", "ἀ"), + (0x1F09, "M", "ἁ"), + (0x1F0A, "M", "ἂ"), + (0x1F0B, "M", "ἃ"), + 
(0x1F0C, "M", "ἄ"), + (0x1F0D, "M", "ἅ"), + (0x1F0E, "M", "ἆ"), + (0x1F0F, "M", "ἇ"), + (0x1F10, "V"), + (0x1F16, "X"), + (0x1F18, "M", "ἐ"), + (0x1F19, "M", "ἑ"), + (0x1F1A, "M", "ἒ"), + (0x1F1B, "M", "ἓ"), + (0x1F1C, "M", "ἔ"), + (0x1F1D, "M", "ἕ"), + (0x1F1E, "X"), + (0x1F20, "V"), + (0x1F28, "M", "ἠ"), + (0x1F29, "M", "ἡ"), + (0x1F2A, "M", "ἢ"), + (0x1F2B, "M", "ἣ"), + (0x1F2C, "M", "ἤ"), + (0x1F2D, "M", "ἥ"), + (0x1F2E, "M", "ἦ"), + (0x1F2F, "M", "ἧ"), + (0x1F30, "V"), + (0x1F38, "M", "ἰ"), + (0x1F39, "M", "ἱ"), + (0x1F3A, "M", "ἲ"), + (0x1F3B, "M", "ἳ"), + (0x1F3C, "M", "ἴ"), + (0x1F3D, "M", "ἵ"), + (0x1F3E, "M", "ἶ"), + (0x1F3F, "M", "ἷ"), + (0x1F40, "V"), + (0x1F46, "X"), + (0x1F48, "M", "ὀ"), + (0x1F49, "M", "ὁ"), + (0x1F4A, "M", "ὂ"), + (0x1F4B, "M", "ὃ"), + (0x1F4C, "M", "ὄ"), + (0x1F4D, "M", "ὅ"), + (0x1F4E, "X"), + (0x1F50, "V"), + (0x1F58, "X"), + (0x1F59, "M", "ὑ"), + (0x1F5A, "X"), + (0x1F5B, "M", "ὓ"), + (0x1F5C, "X"), + (0x1F5D, "M", "ὕ"), + (0x1F5E, "X"), + (0x1F5F, "M", "ὗ"), + (0x1F60, "V"), + (0x1F68, "M", "ὠ"), + (0x1F69, "M", "ὡ"), + (0x1F6A, "M", "ὢ"), + (0x1F6B, "M", "ὣ"), + (0x1F6C, "M", "ὤ"), + (0x1F6D, "M", "ὥ"), + (0x1F6E, "M", "ὦ"), + (0x1F6F, "M", "ὧ"), + (0x1F70, "V"), + (0x1F71, "M", "ά"), + (0x1F72, "V"), + (0x1F73, "M", "έ"), + (0x1F74, "V"), + (0x1F75, "M", "ή"), + (0x1F76, "V"), + (0x1F77, "M", "ί"), + (0x1F78, "V"), + (0x1F79, "M", "ό"), + (0x1F7A, "V"), + (0x1F7B, "M", "ύ"), + (0x1F7C, "V"), + (0x1F7D, "M", "ώ"), + (0x1F7E, "X"), + (0x1F80, "M", "ἀι"), + (0x1F81, "M", "ἁι"), + (0x1F82, "M", "ἂι"), + (0x1F83, "M", "ἃι"), + (0x1F84, "M", "ἄι"), + (0x1F85, "M", "ἅι"), + (0x1F86, "M", "ἆι"), + (0x1F87, "M", "ἇι"), + ] + + +def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F88, "M", "ἀι"), + (0x1F89, "M", "ἁι"), + (0x1F8A, "M", "ἂι"), + (0x1F8B, "M", "ἃι"), + (0x1F8C, "M", "ἄι"), + (0x1F8D, "M", "ἅι"), + (0x1F8E, "M", "ἆι"), + (0x1F8F, "M", "ἇι"), + (0x1F90, "M", "ἠι"), + (0x1F91, "M", "ἡι"), + (0x1F92, "M", "ἢι"), + (0x1F93, "M", "ἣι"), + (0x1F94, "M", "ἤι"), + (0x1F95, "M", "ἥι"), + (0x1F96, "M", "ἦι"), + (0x1F97, "M", "ἧι"), + (0x1F98, "M", "ἠι"), + (0x1F99, "M", "ἡι"), + (0x1F9A, "M", "ἢι"), + (0x1F9B, "M", "ἣι"), + (0x1F9C, "M", "ἤι"), + (0x1F9D, "M", "ἥι"), + (0x1F9E, "M", "ἦι"), + (0x1F9F, "M", "ἧι"), + (0x1FA0, "M", "ὠι"), + (0x1FA1, "M", "ὡι"), + (0x1FA2, "M", "ὢι"), + (0x1FA3, "M", "ὣι"), + (0x1FA4, "M", "ὤι"), + (0x1FA5, "M", "ὥι"), + (0x1FA6, "M", "ὦι"), + (0x1FA7, "M", "ὧι"), + (0x1FA8, "M", "ὠι"), + (0x1FA9, "M", "ὡι"), + (0x1FAA, "M", "ὢι"), + (0x1FAB, "M", "ὣι"), + (0x1FAC, "M", "ὤι"), + (0x1FAD, "M", "ὥι"), + (0x1FAE, "M", "ὦι"), + (0x1FAF, "M", "ὧι"), + (0x1FB0, "V"), + (0x1FB2, "M", "ὰι"), + (0x1FB3, "M", "αι"), + (0x1FB4, "M", "άι"), + (0x1FB5, "X"), + (0x1FB6, "V"), + (0x1FB7, "M", "ᾶι"), + (0x1FB8, "M", "ᾰ"), + (0x1FB9, "M", "ᾱ"), + (0x1FBA, "M", "ὰ"), + (0x1FBB, "M", "ά"), + (0x1FBC, "M", "αι"), + (0x1FBD, "3", " ̓"), + (0x1FBE, "M", "ι"), + (0x1FBF, "3", " ̓"), + (0x1FC0, "3", " ͂"), + (0x1FC1, "3", " ̈͂"), + (0x1FC2, "M", "ὴι"), + (0x1FC3, "M", "ηι"), + (0x1FC4, "M", "ήι"), + (0x1FC5, "X"), + (0x1FC6, "V"), + (0x1FC7, "M", "ῆι"), + (0x1FC8, "M", "ὲ"), + (0x1FC9, "M", "έ"), + (0x1FCA, "M", "ὴ"), + (0x1FCB, "M", "ή"), + (0x1FCC, "M", "ηι"), + (0x1FCD, "3", " ̓̀"), + (0x1FCE, "3", " ̓́"), + (0x1FCF, "3", " ̓͂"), + (0x1FD0, "V"), + (0x1FD3, "M", "ΐ"), + (0x1FD4, "X"), + (0x1FD6, "V"), + (0x1FD8, "M", "ῐ"), + (0x1FD9, "M", "ῑ"), + (0x1FDA, "M", "ὶ"), + (0x1FDB, "M", "ί"), + (0x1FDC, "X"), + (0x1FDD, 
"3", " ̔̀"), + (0x1FDE, "3", " ̔́"), + (0x1FDF, "3", " ̔͂"), + (0x1FE0, "V"), + (0x1FE3, "M", "ΰ"), + (0x1FE4, "V"), + (0x1FE8, "M", "ῠ"), + (0x1FE9, "M", "ῡ"), + (0x1FEA, "M", "ὺ"), + (0x1FEB, "M", "ύ"), + (0x1FEC, "M", "ῥ"), + (0x1FED, "3", " ̈̀"), + (0x1FEE, "3", " ̈́"), + (0x1FEF, "3", "`"), + (0x1FF0, "X"), + (0x1FF2, "M", "ὼι"), + (0x1FF3, "M", "ωι"), + (0x1FF4, "M", "ώι"), + (0x1FF5, "X"), + (0x1FF6, "V"), + ] + + +def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1FF7, "M", "ῶι"), + (0x1FF8, "M", "ὸ"), + (0x1FF9, "M", "ό"), + (0x1FFA, "M", "ὼ"), + (0x1FFB, "M", "ώ"), + (0x1FFC, "M", "ωι"), + (0x1FFD, "3", " ́"), + (0x1FFE, "3", " ̔"), + (0x1FFF, "X"), + (0x2000, "3", " "), + (0x200B, "I"), + (0x200C, "D", ""), + (0x200E, "X"), + (0x2010, "V"), + (0x2011, "M", "‐"), + (0x2012, "V"), + (0x2017, "3", " ̳"), + (0x2018, "V"), + (0x2024, "X"), + (0x2027, "V"), + (0x2028, "X"), + (0x202F, "3", " "), + (0x2030, "V"), + (0x2033, "M", "′′"), + (0x2034, "M", "′′′"), + (0x2035, "V"), + (0x2036, "M", "‵‵"), + (0x2037, "M", "‵‵‵"), + (0x2038, "V"), + (0x203C, "3", "!!"), + (0x203D, "V"), + (0x203E, "3", " ̅"), + (0x203F, "V"), + (0x2047, "3", "??"), + (0x2048, "3", "?!"), + (0x2049, "3", "!?"), + (0x204A, "V"), + (0x2057, "M", "′′′′"), + (0x2058, "V"), + (0x205F, "3", " "), + (0x2060, "I"), + (0x2061, "X"), + (0x2064, "I"), + (0x2065, "X"), + (0x2070, "M", "0"), + (0x2071, "M", "i"), + (0x2072, "X"), + (0x2074, "M", "4"), + (0x2075, "M", "5"), + (0x2076, "M", "6"), + (0x2077, "M", "7"), + (0x2078, "M", "8"), + (0x2079, "M", "9"), + (0x207A, "3", "+"), + (0x207B, "M", "−"), + (0x207C, "3", "="), + (0x207D, "3", "("), + (0x207E, "3", ")"), + (0x207F, "M", "n"), + (0x2080, "M", "0"), + (0x2081, "M", "1"), + (0x2082, "M", "2"), + (0x2083, "M", "3"), + (0x2084, "M", "4"), + (0x2085, "M", "5"), + (0x2086, "M", "6"), + (0x2087, "M", "7"), + (0x2088, "M", "8"), + (0x2089, "M", "9"), + (0x208A, "3", "+"), + (0x208B, "M", "−"), + (0x208C, "3", "="), + (0x208D, "3", "("), + (0x208E, "3", ")"), + (0x208F, "X"), + (0x2090, "M", "a"), + (0x2091, "M", "e"), + (0x2092, "M", "o"), + (0x2093, "M", "x"), + (0x2094, "M", "ə"), + (0x2095, "M", "h"), + (0x2096, "M", "k"), + (0x2097, "M", "l"), + (0x2098, "M", "m"), + (0x2099, "M", "n"), + (0x209A, "M", "p"), + (0x209B, "M", "s"), + (0x209C, "M", "t"), + (0x209D, "X"), + (0x20A0, "V"), + (0x20A8, "M", "rs"), + (0x20A9, "V"), + (0x20C1, "X"), + (0x20D0, "V"), + (0x20F1, "X"), + (0x2100, "3", "a/c"), + (0x2101, "3", "a/s"), + (0x2102, "M", "c"), + (0x2103, "M", "°c"), + (0x2104, "V"), + ] + + +def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2105, "3", "c/o"), + (0x2106, "3", "c/u"), + (0x2107, "M", "ɛ"), + (0x2108, "V"), + (0x2109, "M", "°f"), + (0x210A, "M", "g"), + (0x210B, "M", "h"), + (0x210F, "M", "ħ"), + (0x2110, "M", "i"), + (0x2112, "M", "l"), + (0x2114, "V"), + (0x2115, "M", "n"), + (0x2116, "M", "no"), + (0x2117, "V"), + (0x2119, "M", "p"), + (0x211A, "M", "q"), + (0x211B, "M", "r"), + (0x211E, "V"), + (0x2120, "M", "sm"), + (0x2121, "M", "tel"), + (0x2122, "M", "tm"), + (0x2123, "V"), + (0x2124, "M", "z"), + (0x2125, "V"), + (0x2126, "M", "ω"), + (0x2127, "V"), + (0x2128, "M", "z"), + (0x2129, "V"), + (0x212A, "M", "k"), + (0x212B, "M", "å"), + (0x212C, "M", "b"), + (0x212D, "M", "c"), + (0x212E, "V"), + (0x212F, "M", "e"), + (0x2131, "M", "f"), + (0x2132, "X"), + (0x2133, "M", "m"), + (0x2134, "M", "o"), + (0x2135, "M", "א"), + (0x2136, "M", "ב"), + (0x2137, "M", "ג"), + (0x2138, "M", 
"ד"), + (0x2139, "M", "i"), + (0x213A, "V"), + (0x213B, "M", "fax"), + (0x213C, "M", "π"), + (0x213D, "M", "γ"), + (0x213F, "M", "π"), + (0x2140, "M", "∑"), + (0x2141, "V"), + (0x2145, "M", "d"), + (0x2147, "M", "e"), + (0x2148, "M", "i"), + (0x2149, "M", "j"), + (0x214A, "V"), + (0x2150, "M", "1⁄7"), + (0x2151, "M", "1⁄9"), + (0x2152, "M", "1⁄10"), + (0x2153, "M", "1⁄3"), + (0x2154, "M", "2⁄3"), + (0x2155, "M", "1⁄5"), + (0x2156, "M", "2⁄5"), + (0x2157, "M", "3⁄5"), + (0x2158, "M", "4⁄5"), + (0x2159, "M", "1⁄6"), + (0x215A, "M", "5⁄6"), + (0x215B, "M", "1⁄8"), + (0x215C, "M", "3⁄8"), + (0x215D, "M", "5⁄8"), + (0x215E, "M", "7⁄8"), + (0x215F, "M", "1⁄"), + (0x2160, "M", "i"), + (0x2161, "M", "ii"), + (0x2162, "M", "iii"), + (0x2163, "M", "iv"), + (0x2164, "M", "v"), + (0x2165, "M", "vi"), + (0x2166, "M", "vii"), + (0x2167, "M", "viii"), + (0x2168, "M", "ix"), + (0x2169, "M", "x"), + (0x216A, "M", "xi"), + (0x216B, "M", "xii"), + (0x216C, "M", "l"), + (0x216D, "M", "c"), + (0x216E, "M", "d"), + (0x216F, "M", "m"), + (0x2170, "M", "i"), + (0x2171, "M", "ii"), + (0x2172, "M", "iii"), + (0x2173, "M", "iv"), + (0x2174, "M", "v"), + (0x2175, "M", "vi"), + (0x2176, "M", "vii"), + (0x2177, "M", "viii"), + (0x2178, "M", "ix"), + (0x2179, "M", "x"), + (0x217A, "M", "xi"), + (0x217B, "M", "xii"), + (0x217C, "M", "l"), + ] + + +def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x217D, "M", "c"), + (0x217E, "M", "d"), + (0x217F, "M", "m"), + (0x2180, "V"), + (0x2183, "X"), + (0x2184, "V"), + (0x2189, "M", "0⁄3"), + (0x218A, "V"), + (0x218C, "X"), + (0x2190, "V"), + (0x222C, "M", "∫∫"), + (0x222D, "M", "∫∫∫"), + (0x222E, "V"), + (0x222F, "M", "∮∮"), + (0x2230, "M", "∮∮∮"), + (0x2231, "V"), + (0x2329, "M", "〈"), + (0x232A, "M", "〉"), + (0x232B, "V"), + (0x2427, "X"), + (0x2440, "V"), + (0x244B, "X"), + (0x2460, "M", "1"), + (0x2461, "M", "2"), + (0x2462, "M", "3"), + (0x2463, "M", "4"), + (0x2464, "M", "5"), + (0x2465, "M", "6"), + (0x2466, "M", "7"), + (0x2467, "M", "8"), + (0x2468, "M", "9"), + (0x2469, "M", "10"), + (0x246A, "M", "11"), + (0x246B, "M", "12"), + (0x246C, "M", "13"), + (0x246D, "M", "14"), + (0x246E, "M", "15"), + (0x246F, "M", "16"), + (0x2470, "M", "17"), + (0x2471, "M", "18"), + (0x2472, "M", "19"), + (0x2473, "M", "20"), + (0x2474, "3", "(1)"), + (0x2475, "3", "(2)"), + (0x2476, "3", "(3)"), + (0x2477, "3", "(4)"), + (0x2478, "3", "(5)"), + (0x2479, "3", "(6)"), + (0x247A, "3", "(7)"), + (0x247B, "3", "(8)"), + (0x247C, "3", "(9)"), + (0x247D, "3", "(10)"), + (0x247E, "3", "(11)"), + (0x247F, "3", "(12)"), + (0x2480, "3", "(13)"), + (0x2481, "3", "(14)"), + (0x2482, "3", "(15)"), + (0x2483, "3", "(16)"), + (0x2484, "3", "(17)"), + (0x2485, "3", "(18)"), + (0x2486, "3", "(19)"), + (0x2487, "3", "(20)"), + (0x2488, "X"), + (0x249C, "3", "(a)"), + (0x249D, "3", "(b)"), + (0x249E, "3", "(c)"), + (0x249F, "3", "(d)"), + (0x24A0, "3", "(e)"), + (0x24A1, "3", "(f)"), + (0x24A2, "3", "(g)"), + (0x24A3, "3", "(h)"), + (0x24A4, "3", "(i)"), + (0x24A5, "3", "(j)"), + (0x24A6, "3", "(k)"), + (0x24A7, "3", "(l)"), + (0x24A8, "3", "(m)"), + (0x24A9, "3", "(n)"), + (0x24AA, "3", "(o)"), + (0x24AB, "3", "(p)"), + (0x24AC, "3", "(q)"), + (0x24AD, "3", "(r)"), + (0x24AE, "3", "(s)"), + (0x24AF, "3", "(t)"), + (0x24B0, "3", "(u)"), + (0x24B1, "3", "(v)"), + (0x24B2, "3", "(w)"), + (0x24B3, "3", "(x)"), + (0x24B4, "3", "(y)"), + (0x24B5, "3", "(z)"), + (0x24B6, "M", "a"), + (0x24B7, "M", "b"), + (0x24B8, "M", "c"), + (0x24B9, "M", "d"), + (0x24BA, "M", "e"), + (0x24BB, 
"M", "f"), + (0x24BC, "M", "g"), + (0x24BD, "M", "h"), + (0x24BE, "M", "i"), + (0x24BF, "M", "j"), + (0x24C0, "M", "k"), + ] + + +def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x24C1, "M", "l"), + (0x24C2, "M", "m"), + (0x24C3, "M", "n"), + (0x24C4, "M", "o"), + (0x24C5, "M", "p"), + (0x24C6, "M", "q"), + (0x24C7, "M", "r"), + (0x24C8, "M", "s"), + (0x24C9, "M", "t"), + (0x24CA, "M", "u"), + (0x24CB, "M", "v"), + (0x24CC, "M", "w"), + (0x24CD, "M", "x"), + (0x24CE, "M", "y"), + (0x24CF, "M", "z"), + (0x24D0, "M", "a"), + (0x24D1, "M", "b"), + (0x24D2, "M", "c"), + (0x24D3, "M", "d"), + (0x24D4, "M", "e"), + (0x24D5, "M", "f"), + (0x24D6, "M", "g"), + (0x24D7, "M", "h"), + (0x24D8, "M", "i"), + (0x24D9, "M", "j"), + (0x24DA, "M", "k"), + (0x24DB, "M", "l"), + (0x24DC, "M", "m"), + (0x24DD, "M", "n"), + (0x24DE, "M", "o"), + (0x24DF, "M", "p"), + (0x24E0, "M", "q"), + (0x24E1, "M", "r"), + (0x24E2, "M", "s"), + (0x24E3, "M", "t"), + (0x24E4, "M", "u"), + (0x24E5, "M", "v"), + (0x24E6, "M", "w"), + (0x24E7, "M", "x"), + (0x24E8, "M", "y"), + (0x24E9, "M", "z"), + (0x24EA, "M", "0"), + (0x24EB, "V"), + (0x2A0C, "M", "∫∫∫∫"), + (0x2A0D, "V"), + (0x2A74, "3", "::="), + (0x2A75, "3", "=="), + (0x2A76, "3", "==="), + (0x2A77, "V"), + (0x2ADC, "M", "⫝̸"), + (0x2ADD, "V"), + (0x2B74, "X"), + (0x2B76, "V"), + (0x2B96, "X"), + (0x2B97, "V"), + (0x2C00, "M", "ⰰ"), + (0x2C01, "M", "ⰱ"), + (0x2C02, "M", "ⰲ"), + (0x2C03, "M", "ⰳ"), + (0x2C04, "M", "ⰴ"), + (0x2C05, "M", "ⰵ"), + (0x2C06, "M", "ⰶ"), + (0x2C07, "M", "ⰷ"), + (0x2C08, "M", "ⰸ"), + (0x2C09, "M", "ⰹ"), + (0x2C0A, "M", "ⰺ"), + (0x2C0B, "M", "ⰻ"), + (0x2C0C, "M", "ⰼ"), + (0x2C0D, "M", "ⰽ"), + (0x2C0E, "M", "ⰾ"), + (0x2C0F, "M", "ⰿ"), + (0x2C10, "M", "ⱀ"), + (0x2C11, "M", "ⱁ"), + (0x2C12, "M", "ⱂ"), + (0x2C13, "M", "ⱃ"), + (0x2C14, "M", "ⱄ"), + (0x2C15, "M", "ⱅ"), + (0x2C16, "M", "ⱆ"), + (0x2C17, "M", "ⱇ"), + (0x2C18, "M", "ⱈ"), + (0x2C19, "M", "ⱉ"), + (0x2C1A, "M", "ⱊ"), + (0x2C1B, "M", "ⱋ"), + (0x2C1C, "M", "ⱌ"), + (0x2C1D, "M", "ⱍ"), + (0x2C1E, "M", "ⱎ"), + (0x2C1F, "M", "ⱏ"), + (0x2C20, "M", "ⱐ"), + (0x2C21, "M", "ⱑ"), + (0x2C22, "M", "ⱒ"), + (0x2C23, "M", "ⱓ"), + (0x2C24, "M", "ⱔ"), + (0x2C25, "M", "ⱕ"), + (0x2C26, "M", "ⱖ"), + (0x2C27, "M", "ⱗ"), + (0x2C28, "M", "ⱘ"), + (0x2C29, "M", "ⱙ"), + (0x2C2A, "M", "ⱚ"), + (0x2C2B, "M", "ⱛ"), + (0x2C2C, "M", "ⱜ"), + ] + + +def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2C2D, "M", "ⱝ"), + (0x2C2E, "M", "ⱞ"), + (0x2C2F, "M", "ⱟ"), + (0x2C30, "V"), + (0x2C60, "M", "ⱡ"), + (0x2C61, "V"), + (0x2C62, "M", "ɫ"), + (0x2C63, "M", "ᵽ"), + (0x2C64, "M", "ɽ"), + (0x2C65, "V"), + (0x2C67, "M", "ⱨ"), + (0x2C68, "V"), + (0x2C69, "M", "ⱪ"), + (0x2C6A, "V"), + (0x2C6B, "M", "ⱬ"), + (0x2C6C, "V"), + (0x2C6D, "M", "ɑ"), + (0x2C6E, "M", "ɱ"), + (0x2C6F, "M", "ɐ"), + (0x2C70, "M", "ɒ"), + (0x2C71, "V"), + (0x2C72, "M", "ⱳ"), + (0x2C73, "V"), + (0x2C75, "M", "ⱶ"), + (0x2C76, "V"), + (0x2C7C, "M", "j"), + (0x2C7D, "M", "v"), + (0x2C7E, "M", "ȿ"), + (0x2C7F, "M", "ɀ"), + (0x2C80, "M", "ⲁ"), + (0x2C81, "V"), + (0x2C82, "M", "ⲃ"), + (0x2C83, "V"), + (0x2C84, "M", "ⲅ"), + (0x2C85, "V"), + (0x2C86, "M", "ⲇ"), + (0x2C87, "V"), + (0x2C88, "M", "ⲉ"), + (0x2C89, "V"), + (0x2C8A, "M", "ⲋ"), + (0x2C8B, "V"), + (0x2C8C, "M", "ⲍ"), + (0x2C8D, "V"), + (0x2C8E, "M", "ⲏ"), + (0x2C8F, "V"), + (0x2C90, "M", "ⲑ"), + (0x2C91, "V"), + (0x2C92, "M", "ⲓ"), + (0x2C93, "V"), + (0x2C94, "M", "ⲕ"), + (0x2C95, "V"), + (0x2C96, "M", "ⲗ"), + (0x2C97, "V"), + (0x2C98, "M", "ⲙ"), + 
(0x2C99, "V"), + (0x2C9A, "M", "ⲛ"), + (0x2C9B, "V"), + (0x2C9C, "M", "ⲝ"), + (0x2C9D, "V"), + (0x2C9E, "M", "ⲟ"), + (0x2C9F, "V"), + (0x2CA0, "M", "ⲡ"), + (0x2CA1, "V"), + (0x2CA2, "M", "ⲣ"), + (0x2CA3, "V"), + (0x2CA4, "M", "ⲥ"), + (0x2CA5, "V"), + (0x2CA6, "M", "ⲧ"), + (0x2CA7, "V"), + (0x2CA8, "M", "ⲩ"), + (0x2CA9, "V"), + (0x2CAA, "M", "ⲫ"), + (0x2CAB, "V"), + (0x2CAC, "M", "ⲭ"), + (0x2CAD, "V"), + (0x2CAE, "M", "ⲯ"), + (0x2CAF, "V"), + (0x2CB0, "M", "ⲱ"), + (0x2CB1, "V"), + (0x2CB2, "M", "ⲳ"), + (0x2CB3, "V"), + (0x2CB4, "M", "ⲵ"), + (0x2CB5, "V"), + (0x2CB6, "M", "ⲷ"), + (0x2CB7, "V"), + (0x2CB8, "M", "ⲹ"), + (0x2CB9, "V"), + (0x2CBA, "M", "ⲻ"), + (0x2CBB, "V"), + (0x2CBC, "M", "ⲽ"), + (0x2CBD, "V"), + (0x2CBE, "M", "ⲿ"), + (0x2CBF, "V"), + (0x2CC0, "M", "ⳁ"), + (0x2CC1, "V"), + (0x2CC2, "M", "ⳃ"), + (0x2CC3, "V"), + (0x2CC4, "M", "ⳅ"), + (0x2CC5, "V"), + (0x2CC6, "M", "ⳇ"), + ] + + +def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2CC7, "V"), + (0x2CC8, "M", "ⳉ"), + (0x2CC9, "V"), + (0x2CCA, "M", "ⳋ"), + (0x2CCB, "V"), + (0x2CCC, "M", "ⳍ"), + (0x2CCD, "V"), + (0x2CCE, "M", "ⳏ"), + (0x2CCF, "V"), + (0x2CD0, "M", "ⳑ"), + (0x2CD1, "V"), + (0x2CD2, "M", "ⳓ"), + (0x2CD3, "V"), + (0x2CD4, "M", "ⳕ"), + (0x2CD5, "V"), + (0x2CD6, "M", "ⳗ"), + (0x2CD7, "V"), + (0x2CD8, "M", "ⳙ"), + (0x2CD9, "V"), + (0x2CDA, "M", "ⳛ"), + (0x2CDB, "V"), + (0x2CDC, "M", "ⳝ"), + (0x2CDD, "V"), + (0x2CDE, "M", "ⳟ"), + (0x2CDF, "V"), + (0x2CE0, "M", "ⳡ"), + (0x2CE1, "V"), + (0x2CE2, "M", "ⳣ"), + (0x2CE3, "V"), + (0x2CEB, "M", "ⳬ"), + (0x2CEC, "V"), + (0x2CED, "M", "ⳮ"), + (0x2CEE, "V"), + (0x2CF2, "M", "ⳳ"), + (0x2CF3, "V"), + (0x2CF4, "X"), + (0x2CF9, "V"), + (0x2D26, "X"), + (0x2D27, "V"), + (0x2D28, "X"), + (0x2D2D, "V"), + (0x2D2E, "X"), + (0x2D30, "V"), + (0x2D68, "X"), + (0x2D6F, "M", "ⵡ"), + (0x2D70, "V"), + (0x2D71, "X"), + (0x2D7F, "V"), + (0x2D97, "X"), + (0x2DA0, "V"), + (0x2DA7, "X"), + (0x2DA8, "V"), + (0x2DAF, "X"), + (0x2DB0, "V"), + (0x2DB7, "X"), + (0x2DB8, "V"), + (0x2DBF, "X"), + (0x2DC0, "V"), + (0x2DC7, "X"), + (0x2DC8, "V"), + (0x2DCF, "X"), + (0x2DD0, "V"), + (0x2DD7, "X"), + (0x2DD8, "V"), + (0x2DDF, "X"), + (0x2DE0, "V"), + (0x2E5E, "X"), + (0x2E80, "V"), + (0x2E9A, "X"), + (0x2E9B, "V"), + (0x2E9F, "M", "母"), + (0x2EA0, "V"), + (0x2EF3, "M", "龟"), + (0x2EF4, "X"), + (0x2F00, "M", "一"), + (0x2F01, "M", "丨"), + (0x2F02, "M", "丶"), + (0x2F03, "M", "丿"), + (0x2F04, "M", "乙"), + (0x2F05, "M", "亅"), + (0x2F06, "M", "二"), + (0x2F07, "M", "亠"), + (0x2F08, "M", "人"), + (0x2F09, "M", "儿"), + (0x2F0A, "M", "入"), + (0x2F0B, "M", "八"), + (0x2F0C, "M", "冂"), + (0x2F0D, "M", "冖"), + (0x2F0E, "M", "冫"), + (0x2F0F, "M", "几"), + (0x2F10, "M", "凵"), + (0x2F11, "M", "刀"), + (0x2F12, "M", "力"), + (0x2F13, "M", "勹"), + (0x2F14, "M", "匕"), + (0x2F15, "M", "匚"), + (0x2F16, "M", "匸"), + (0x2F17, "M", "十"), + (0x2F18, "M", "卜"), + (0x2F19, "M", "卩"), + ] + + +def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F1A, "M", "厂"), + (0x2F1B, "M", "厶"), + (0x2F1C, "M", "又"), + (0x2F1D, "M", "口"), + (0x2F1E, "M", "囗"), + (0x2F1F, "M", "土"), + (0x2F20, "M", "士"), + (0x2F21, "M", "夂"), + (0x2F22, "M", "夊"), + (0x2F23, "M", "夕"), + (0x2F24, "M", "大"), + (0x2F25, "M", "女"), + (0x2F26, "M", "子"), + (0x2F27, "M", "宀"), + (0x2F28, "M", "寸"), + (0x2F29, "M", "小"), + (0x2F2A, "M", "尢"), + (0x2F2B, "M", "尸"), + (0x2F2C, "M", "屮"), + (0x2F2D, "M", "山"), + (0x2F2E, "M", "巛"), + (0x2F2F, "M", "工"), + (0x2F30, "M", "己"), + (0x2F31, "M", "巾"), + (0x2F32, "M", "干"), + 
(0x2F33, "M", "幺"), + (0x2F34, "M", "广"), + (0x2F35, "M", "廴"), + (0x2F36, "M", "廾"), + (0x2F37, "M", "弋"), + (0x2F38, "M", "弓"), + (0x2F39, "M", "彐"), + (0x2F3A, "M", "彡"), + (0x2F3B, "M", "彳"), + (0x2F3C, "M", "心"), + (0x2F3D, "M", "戈"), + (0x2F3E, "M", "戶"), + (0x2F3F, "M", "手"), + (0x2F40, "M", "支"), + (0x2F41, "M", "攴"), + (0x2F42, "M", "文"), + (0x2F43, "M", "斗"), + (0x2F44, "M", "斤"), + (0x2F45, "M", "方"), + (0x2F46, "M", "无"), + (0x2F47, "M", "日"), + (0x2F48, "M", "曰"), + (0x2F49, "M", "月"), + (0x2F4A, "M", "木"), + (0x2F4B, "M", "欠"), + (0x2F4C, "M", "止"), + (0x2F4D, "M", "歹"), + (0x2F4E, "M", "殳"), + (0x2F4F, "M", "毋"), + (0x2F50, "M", "比"), + (0x2F51, "M", "毛"), + (0x2F52, "M", "氏"), + (0x2F53, "M", "气"), + (0x2F54, "M", "水"), + (0x2F55, "M", "火"), + (0x2F56, "M", "爪"), + (0x2F57, "M", "父"), + (0x2F58, "M", "爻"), + (0x2F59, "M", "爿"), + (0x2F5A, "M", "片"), + (0x2F5B, "M", "牙"), + (0x2F5C, "M", "牛"), + (0x2F5D, "M", "犬"), + (0x2F5E, "M", "玄"), + (0x2F5F, "M", "玉"), + (0x2F60, "M", "瓜"), + (0x2F61, "M", "瓦"), + (0x2F62, "M", "甘"), + (0x2F63, "M", "生"), + (0x2F64, "M", "用"), + (0x2F65, "M", "田"), + (0x2F66, "M", "疋"), + (0x2F67, "M", "疒"), + (0x2F68, "M", "癶"), + (0x2F69, "M", "白"), + (0x2F6A, "M", "皮"), + (0x2F6B, "M", "皿"), + (0x2F6C, "M", "目"), + (0x2F6D, "M", "矛"), + (0x2F6E, "M", "矢"), + (0x2F6F, "M", "石"), + (0x2F70, "M", "示"), + (0x2F71, "M", "禸"), + (0x2F72, "M", "禾"), + (0x2F73, "M", "穴"), + (0x2F74, "M", "立"), + (0x2F75, "M", "竹"), + (0x2F76, "M", "米"), + (0x2F77, "M", "糸"), + (0x2F78, "M", "缶"), + (0x2F79, "M", "网"), + (0x2F7A, "M", "羊"), + (0x2F7B, "M", "羽"), + (0x2F7C, "M", "老"), + (0x2F7D, "M", "而"), + ] + + +def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F7E, "M", "耒"), + (0x2F7F, "M", "耳"), + (0x2F80, "M", "聿"), + (0x2F81, "M", "肉"), + (0x2F82, "M", "臣"), + (0x2F83, "M", "自"), + (0x2F84, "M", "至"), + (0x2F85, "M", "臼"), + (0x2F86, "M", "舌"), + (0x2F87, "M", "舛"), + (0x2F88, "M", "舟"), + (0x2F89, "M", "艮"), + (0x2F8A, "M", "色"), + (0x2F8B, "M", "艸"), + (0x2F8C, "M", "虍"), + (0x2F8D, "M", "虫"), + (0x2F8E, "M", "血"), + (0x2F8F, "M", "行"), + (0x2F90, "M", "衣"), + (0x2F91, "M", "襾"), + (0x2F92, "M", "見"), + (0x2F93, "M", "角"), + (0x2F94, "M", "言"), + (0x2F95, "M", "谷"), + (0x2F96, "M", "豆"), + (0x2F97, "M", "豕"), + (0x2F98, "M", "豸"), + (0x2F99, "M", "貝"), + (0x2F9A, "M", "赤"), + (0x2F9B, "M", "走"), + (0x2F9C, "M", "足"), + (0x2F9D, "M", "身"), + (0x2F9E, "M", "車"), + (0x2F9F, "M", "辛"), + (0x2FA0, "M", "辰"), + (0x2FA1, "M", "辵"), + (0x2FA2, "M", "邑"), + (0x2FA3, "M", "酉"), + (0x2FA4, "M", "釆"), + (0x2FA5, "M", "里"), + (0x2FA6, "M", "金"), + (0x2FA7, "M", "長"), + (0x2FA8, "M", "門"), + (0x2FA9, "M", "阜"), + (0x2FAA, "M", "隶"), + (0x2FAB, "M", "隹"), + (0x2FAC, "M", "雨"), + (0x2FAD, "M", "靑"), + (0x2FAE, "M", "非"), + (0x2FAF, "M", "面"), + (0x2FB0, "M", "革"), + (0x2FB1, "M", "韋"), + (0x2FB2, "M", "韭"), + (0x2FB3, "M", "音"), + (0x2FB4, "M", "頁"), + (0x2FB5, "M", "風"), + (0x2FB6, "M", "飛"), + (0x2FB7, "M", "食"), + (0x2FB8, "M", "首"), + (0x2FB9, "M", "香"), + (0x2FBA, "M", "馬"), + (0x2FBB, "M", "骨"), + (0x2FBC, "M", "高"), + (0x2FBD, "M", "髟"), + (0x2FBE, "M", "鬥"), + (0x2FBF, "M", "鬯"), + (0x2FC0, "M", "鬲"), + (0x2FC1, "M", "鬼"), + (0x2FC2, "M", "魚"), + (0x2FC3, "M", "鳥"), + (0x2FC4, "M", "鹵"), + (0x2FC5, "M", "鹿"), + (0x2FC6, "M", "麥"), + (0x2FC7, "M", "麻"), + (0x2FC8, "M", "黃"), + (0x2FC9, "M", "黍"), + (0x2FCA, "M", "黑"), + (0x2FCB, "M", "黹"), + (0x2FCC, "M", "黽"), + (0x2FCD, "M", "鼎"), + (0x2FCE, "M", "鼓"), + (0x2FCF, "M", "鼠"), + (0x2FD0, 
"M", "鼻"), + (0x2FD1, "M", "齊"), + (0x2FD2, "M", "齒"), + (0x2FD3, "M", "龍"), + (0x2FD4, "M", "龜"), + (0x2FD5, "M", "龠"), + (0x2FD6, "X"), + (0x3000, "3", " "), + (0x3001, "V"), + (0x3002, "M", "."), + (0x3003, "V"), + (0x3036, "M", "〒"), + (0x3037, "V"), + (0x3038, "M", "十"), + (0x3039, "M", "卄"), + (0x303A, "M", "卅"), + (0x303B, "V"), + (0x3040, "X"), + ] + + +def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3041, "V"), + (0x3097, "X"), + (0x3099, "V"), + (0x309B, "3", " ゙"), + (0x309C, "3", " ゚"), + (0x309D, "V"), + (0x309F, "M", "より"), + (0x30A0, "V"), + (0x30FF, "M", "コト"), + (0x3100, "X"), + (0x3105, "V"), + (0x3130, "X"), + (0x3131, "M", "ᄀ"), + (0x3132, "M", "ᄁ"), + (0x3133, "M", "ᆪ"), + (0x3134, "M", "ᄂ"), + (0x3135, "M", "ᆬ"), + (0x3136, "M", "ᆭ"), + (0x3137, "M", "ᄃ"), + (0x3138, "M", "ᄄ"), + (0x3139, "M", "ᄅ"), + (0x313A, "M", "ᆰ"), + (0x313B, "M", "ᆱ"), + (0x313C, "M", "ᆲ"), + (0x313D, "M", "ᆳ"), + (0x313E, "M", "ᆴ"), + (0x313F, "M", "ᆵ"), + (0x3140, "M", "ᄚ"), + (0x3141, "M", "ᄆ"), + (0x3142, "M", "ᄇ"), + (0x3143, "M", "ᄈ"), + (0x3144, "M", "ᄡ"), + (0x3145, "M", "ᄉ"), + (0x3146, "M", "ᄊ"), + (0x3147, "M", "ᄋ"), + (0x3148, "M", "ᄌ"), + (0x3149, "M", "ᄍ"), + (0x314A, "M", "ᄎ"), + (0x314B, "M", "ᄏ"), + (0x314C, "M", "ᄐ"), + (0x314D, "M", "ᄑ"), + (0x314E, "M", "ᄒ"), + (0x314F, "M", "ᅡ"), + (0x3150, "M", "ᅢ"), + (0x3151, "M", "ᅣ"), + (0x3152, "M", "ᅤ"), + (0x3153, "M", "ᅥ"), + (0x3154, "M", "ᅦ"), + (0x3155, "M", "ᅧ"), + (0x3156, "M", "ᅨ"), + (0x3157, "M", "ᅩ"), + (0x3158, "M", "ᅪ"), + (0x3159, "M", "ᅫ"), + (0x315A, "M", "ᅬ"), + (0x315B, "M", "ᅭ"), + (0x315C, "M", "ᅮ"), + (0x315D, "M", "ᅯ"), + (0x315E, "M", "ᅰ"), + (0x315F, "M", "ᅱ"), + (0x3160, "M", "ᅲ"), + (0x3161, "M", "ᅳ"), + (0x3162, "M", "ᅴ"), + (0x3163, "M", "ᅵ"), + (0x3164, "X"), + (0x3165, "M", "ᄔ"), + (0x3166, "M", "ᄕ"), + (0x3167, "M", "ᇇ"), + (0x3168, "M", "ᇈ"), + (0x3169, "M", "ᇌ"), + (0x316A, "M", "ᇎ"), + (0x316B, "M", "ᇓ"), + (0x316C, "M", "ᇗ"), + (0x316D, "M", "ᇙ"), + (0x316E, "M", "ᄜ"), + (0x316F, "M", "ᇝ"), + (0x3170, "M", "ᇟ"), + (0x3171, "M", "ᄝ"), + (0x3172, "M", "ᄞ"), + (0x3173, "M", "ᄠ"), + (0x3174, "M", "ᄢ"), + (0x3175, "M", "ᄣ"), + (0x3176, "M", "ᄧ"), + (0x3177, "M", "ᄩ"), + (0x3178, "M", "ᄫ"), + (0x3179, "M", "ᄬ"), + (0x317A, "M", "ᄭ"), + (0x317B, "M", "ᄮ"), + (0x317C, "M", "ᄯ"), + (0x317D, "M", "ᄲ"), + (0x317E, "M", "ᄶ"), + (0x317F, "M", "ᅀ"), + (0x3180, "M", "ᅇ"), + (0x3181, "M", "ᅌ"), + (0x3182, "M", "ᇱ"), + (0x3183, "M", "ᇲ"), + (0x3184, "M", "ᅗ"), + (0x3185, "M", "ᅘ"), + (0x3186, "M", "ᅙ"), + (0x3187, "M", "ᆄ"), + (0x3188, "M", "ᆅ"), + ] + + +def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3189, "M", "ᆈ"), + (0x318A, "M", "ᆑ"), + (0x318B, "M", "ᆒ"), + (0x318C, "M", "ᆔ"), + (0x318D, "M", "ᆞ"), + (0x318E, "M", "ᆡ"), + (0x318F, "X"), + (0x3190, "V"), + (0x3192, "M", "一"), + (0x3193, "M", "二"), + (0x3194, "M", "三"), + (0x3195, "M", "四"), + (0x3196, "M", "上"), + (0x3197, "M", "中"), + (0x3198, "M", "下"), + (0x3199, "M", "甲"), + (0x319A, "M", "乙"), + (0x319B, "M", "丙"), + (0x319C, "M", "丁"), + (0x319D, "M", "天"), + (0x319E, "M", "地"), + (0x319F, "M", "人"), + (0x31A0, "V"), + (0x31E4, "X"), + (0x31F0, "V"), + (0x3200, "3", "(ᄀ)"), + (0x3201, "3", "(ᄂ)"), + (0x3202, "3", "(ᄃ)"), + (0x3203, "3", "(ᄅ)"), + (0x3204, "3", "(ᄆ)"), + (0x3205, "3", "(ᄇ)"), + (0x3206, "3", "(ᄉ)"), + (0x3207, "3", "(ᄋ)"), + (0x3208, "3", "(ᄌ)"), + (0x3209, "3", "(ᄎ)"), + (0x320A, "3", "(ᄏ)"), + (0x320B, "3", "(ᄐ)"), + (0x320C, "3", "(ᄑ)"), + (0x320D, "3", "(ᄒ)"), + 
(0x320E, "3", "(가)"), + (0x320F, "3", "(나)"), + (0x3210, "3", "(다)"), + (0x3211, "3", "(라)"), + (0x3212, "3", "(마)"), + (0x3213, "3", "(바)"), + (0x3214, "3", "(사)"), + (0x3215, "3", "(아)"), + (0x3216, "3", "(자)"), + (0x3217, "3", "(차)"), + (0x3218, "3", "(카)"), + (0x3219, "3", "(타)"), + (0x321A, "3", "(파)"), + (0x321B, "3", "(하)"), + (0x321C, "3", "(주)"), + (0x321D, "3", "(오전)"), + (0x321E, "3", "(오후)"), + (0x321F, "X"), + (0x3220, "3", "(一)"), + (0x3221, "3", "(二)"), + (0x3222, "3", "(三)"), + (0x3223, "3", "(四)"), + (0x3224, "3", "(五)"), + (0x3225, "3", "(六)"), + (0x3226, "3", "(七)"), + (0x3227, "3", "(八)"), + (0x3228, "3", "(九)"), + (0x3229, "3", "(十)"), + (0x322A, "3", "(月)"), + (0x322B, "3", "(火)"), + (0x322C, "3", "(水)"), + (0x322D, "3", "(木)"), + (0x322E, "3", "(金)"), + (0x322F, "3", "(土)"), + (0x3230, "3", "(日)"), + (0x3231, "3", "(株)"), + (0x3232, "3", "(有)"), + (0x3233, "3", "(社)"), + (0x3234, "3", "(名)"), + (0x3235, "3", "(特)"), + (0x3236, "3", "(財)"), + (0x3237, "3", "(祝)"), + (0x3238, "3", "(労)"), + (0x3239, "3", "(代)"), + (0x323A, "3", "(呼)"), + (0x323B, "3", "(学)"), + (0x323C, "3", "(監)"), + (0x323D, "3", "(企)"), + (0x323E, "3", "(資)"), + (0x323F, "3", "(協)"), + (0x3240, "3", "(祭)"), + (0x3241, "3", "(休)"), + (0x3242, "3", "(自)"), + (0x3243, "3", "(至)"), + (0x3244, "M", "問"), + (0x3245, "M", "幼"), + (0x3246, "M", "文"), + (0x3247, "M", "箏"), + (0x3248, "V"), + (0x3250, "M", "pte"), + (0x3251, "M", "21"), + ] + + +def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3252, "M", "22"), + (0x3253, "M", "23"), + (0x3254, "M", "24"), + (0x3255, "M", "25"), + (0x3256, "M", "26"), + (0x3257, "M", "27"), + (0x3258, "M", "28"), + (0x3259, "M", "29"), + (0x325A, "M", "30"), + (0x325B, "M", "31"), + (0x325C, "M", "32"), + (0x325D, "M", "33"), + (0x325E, "M", "34"), + (0x325F, "M", "35"), + (0x3260, "M", "ᄀ"), + (0x3261, "M", "ᄂ"), + (0x3262, "M", "ᄃ"), + (0x3263, "M", "ᄅ"), + (0x3264, "M", "ᄆ"), + (0x3265, "M", "ᄇ"), + (0x3266, "M", "ᄉ"), + (0x3267, "M", "ᄋ"), + (0x3268, "M", "ᄌ"), + (0x3269, "M", "ᄎ"), + (0x326A, "M", "ᄏ"), + (0x326B, "M", "ᄐ"), + (0x326C, "M", "ᄑ"), + (0x326D, "M", "ᄒ"), + (0x326E, "M", "가"), + (0x326F, "M", "나"), + (0x3270, "M", "다"), + (0x3271, "M", "라"), + (0x3272, "M", "마"), + (0x3273, "M", "바"), + (0x3274, "M", "사"), + (0x3275, "M", "아"), + (0x3276, "M", "자"), + (0x3277, "M", "차"), + (0x3278, "M", "카"), + (0x3279, "M", "타"), + (0x327A, "M", "파"), + (0x327B, "M", "하"), + (0x327C, "M", "참고"), + (0x327D, "M", "주의"), + (0x327E, "M", "우"), + (0x327F, "V"), + (0x3280, "M", "一"), + (0x3281, "M", "二"), + (0x3282, "M", "三"), + (0x3283, "M", "四"), + (0x3284, "M", "五"), + (0x3285, "M", "六"), + (0x3286, "M", "七"), + (0x3287, "M", "八"), + (0x3288, "M", "九"), + (0x3289, "M", "十"), + (0x328A, "M", "月"), + (0x328B, "M", "火"), + (0x328C, "M", "水"), + (0x328D, "M", "木"), + (0x328E, "M", "金"), + (0x328F, "M", "土"), + (0x3290, "M", "日"), + (0x3291, "M", "株"), + (0x3292, "M", "有"), + (0x3293, "M", "社"), + (0x3294, "M", "名"), + (0x3295, "M", "特"), + (0x3296, "M", "財"), + (0x3297, "M", "祝"), + (0x3298, "M", "労"), + (0x3299, "M", "秘"), + (0x329A, "M", "男"), + (0x329B, "M", "女"), + (0x329C, "M", "適"), + (0x329D, "M", "優"), + (0x329E, "M", "印"), + (0x329F, "M", "注"), + (0x32A0, "M", "項"), + (0x32A1, "M", "休"), + (0x32A2, "M", "写"), + (0x32A3, "M", "正"), + (0x32A4, "M", "上"), + (0x32A5, "M", "中"), + (0x32A6, "M", "下"), + (0x32A7, "M", "左"), + (0x32A8, "M", "右"), + (0x32A9, "M", "医"), + (0x32AA, "M", "宗"), + (0x32AB, "M", "学"), + (0x32AC, "M", "監"), + (0x32AD, 
"M", "企"), + (0x32AE, "M", "資"), + (0x32AF, "M", "協"), + (0x32B0, "M", "夜"), + (0x32B1, "M", "36"), + (0x32B2, "M", "37"), + (0x32B3, "M", "38"), + (0x32B4, "M", "39"), + (0x32B5, "M", "40"), + ] + + +def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x32B6, "M", "41"), + (0x32B7, "M", "42"), + (0x32B8, "M", "43"), + (0x32B9, "M", "44"), + (0x32BA, "M", "45"), + (0x32BB, "M", "46"), + (0x32BC, "M", "47"), + (0x32BD, "M", "48"), + (0x32BE, "M", "49"), + (0x32BF, "M", "50"), + (0x32C0, "M", "1月"), + (0x32C1, "M", "2月"), + (0x32C2, "M", "3月"), + (0x32C3, "M", "4月"), + (0x32C4, "M", "5月"), + (0x32C5, "M", "6月"), + (0x32C6, "M", "7月"), + (0x32C7, "M", "8月"), + (0x32C8, "M", "9月"), + (0x32C9, "M", "10月"), + (0x32CA, "M", "11月"), + (0x32CB, "M", "12月"), + (0x32CC, "M", "hg"), + (0x32CD, "M", "erg"), + (0x32CE, "M", "ev"), + (0x32CF, "M", "ltd"), + (0x32D0, "M", "ア"), + (0x32D1, "M", "イ"), + (0x32D2, "M", "ウ"), + (0x32D3, "M", "エ"), + (0x32D4, "M", "オ"), + (0x32D5, "M", "カ"), + (0x32D6, "M", "キ"), + (0x32D7, "M", "ク"), + (0x32D8, "M", "ケ"), + (0x32D9, "M", "コ"), + (0x32DA, "M", "サ"), + (0x32DB, "M", "シ"), + (0x32DC, "M", "ス"), + (0x32DD, "M", "セ"), + (0x32DE, "M", "ソ"), + (0x32DF, "M", "タ"), + (0x32E0, "M", "チ"), + (0x32E1, "M", "ツ"), + (0x32E2, "M", "テ"), + (0x32E3, "M", "ト"), + (0x32E4, "M", "ナ"), + (0x32E5, "M", "ニ"), + (0x32E6, "M", "ヌ"), + (0x32E7, "M", "ネ"), + (0x32E8, "M", "ノ"), + (0x32E9, "M", "ハ"), + (0x32EA, "M", "ヒ"), + (0x32EB, "M", "フ"), + (0x32EC, "M", "ヘ"), + (0x32ED, "M", "ホ"), + (0x32EE, "M", "マ"), + (0x32EF, "M", "ミ"), + (0x32F0, "M", "ム"), + (0x32F1, "M", "メ"), + (0x32F2, "M", "モ"), + (0x32F3, "M", "ヤ"), + (0x32F4, "M", "ユ"), + (0x32F5, "M", "ヨ"), + (0x32F6, "M", "ラ"), + (0x32F7, "M", "リ"), + (0x32F8, "M", "ル"), + (0x32F9, "M", "レ"), + (0x32FA, "M", "ロ"), + (0x32FB, "M", "ワ"), + (0x32FC, "M", "ヰ"), + (0x32FD, "M", "ヱ"), + (0x32FE, "M", "ヲ"), + (0x32FF, "M", "令和"), + (0x3300, "M", "アパート"), + (0x3301, "M", "アルファ"), + (0x3302, "M", "アンペア"), + (0x3303, "M", "アール"), + (0x3304, "M", "イニング"), + (0x3305, "M", "インチ"), + (0x3306, "M", "ウォン"), + (0x3307, "M", "エスクード"), + (0x3308, "M", "エーカー"), + (0x3309, "M", "オンス"), + (0x330A, "M", "オーム"), + (0x330B, "M", "カイリ"), + (0x330C, "M", "カラット"), + (0x330D, "M", "カロリー"), + (0x330E, "M", "ガロン"), + (0x330F, "M", "ガンマ"), + (0x3310, "M", "ギガ"), + (0x3311, "M", "ギニー"), + (0x3312, "M", "キュリー"), + (0x3313, "M", "ギルダー"), + (0x3314, "M", "キロ"), + (0x3315, "M", "キログラム"), + (0x3316, "M", "キロメートル"), + (0x3317, "M", "キロワット"), + (0x3318, "M", "グラム"), + (0x3319, "M", "グラムトン"), + ] + + +def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x331A, "M", "クルゼイロ"), + (0x331B, "M", "クローネ"), + (0x331C, "M", "ケース"), + (0x331D, "M", "コルナ"), + (0x331E, "M", "コーポ"), + (0x331F, "M", "サイクル"), + (0x3320, "M", "サンチーム"), + (0x3321, "M", "シリング"), + (0x3322, "M", "センチ"), + (0x3323, "M", "セント"), + (0x3324, "M", "ダース"), + (0x3325, "M", "デシ"), + (0x3326, "M", "ドル"), + (0x3327, "M", "トン"), + (0x3328, "M", "ナノ"), + (0x3329, "M", "ノット"), + (0x332A, "M", "ハイツ"), + (0x332B, "M", "パーセント"), + (0x332C, "M", "パーツ"), + (0x332D, "M", "バーレル"), + (0x332E, "M", "ピアストル"), + (0x332F, "M", "ピクル"), + (0x3330, "M", "ピコ"), + (0x3331, "M", "ビル"), + (0x3332, "M", "ファラッド"), + (0x3333, "M", "フィート"), + (0x3334, "M", "ブッシェル"), + (0x3335, "M", "フラン"), + (0x3336, "M", "ヘクタール"), + (0x3337, "M", "ペソ"), + (0x3338, "M", "ペニヒ"), + (0x3339, "M", "ヘルツ"), + (0x333A, "M", "ペンス"), + (0x333B, "M", "ページ"), + (0x333C, "M", "ベータ"), + (0x333D, "M", "ポイント"), + 
(0x333E, "M", "ボルト"), + (0x333F, "M", "ホン"), + (0x3340, "M", "ポンド"), + (0x3341, "M", "ホール"), + (0x3342, "M", "ホーン"), + (0x3343, "M", "マイクロ"), + (0x3344, "M", "マイル"), + (0x3345, "M", "マッハ"), + (0x3346, "M", "マルク"), + (0x3347, "M", "マンション"), + (0x3348, "M", "ミクロン"), + (0x3349, "M", "ミリ"), + (0x334A, "M", "ミリバール"), + (0x334B, "M", "メガ"), + (0x334C, "M", "メガトン"), + (0x334D, "M", "メートル"), + (0x334E, "M", "ヤード"), + (0x334F, "M", "ヤール"), + (0x3350, "M", "ユアン"), + (0x3351, "M", "リットル"), + (0x3352, "M", "リラ"), + (0x3353, "M", "ルピー"), + (0x3354, "M", "ルーブル"), + (0x3355, "M", "レム"), + (0x3356, "M", "レントゲン"), + (0x3357, "M", "ワット"), + (0x3358, "M", "0点"), + (0x3359, "M", "1点"), + (0x335A, "M", "2点"), + (0x335B, "M", "3点"), + (0x335C, "M", "4点"), + (0x335D, "M", "5点"), + (0x335E, "M", "6点"), + (0x335F, "M", "7点"), + (0x3360, "M", "8点"), + (0x3361, "M", "9点"), + (0x3362, "M", "10点"), + (0x3363, "M", "11点"), + (0x3364, "M", "12点"), + (0x3365, "M", "13点"), + (0x3366, "M", "14点"), + (0x3367, "M", "15点"), + (0x3368, "M", "16点"), + (0x3369, "M", "17点"), + (0x336A, "M", "18点"), + (0x336B, "M", "19点"), + (0x336C, "M", "20点"), + (0x336D, "M", "21点"), + (0x336E, "M", "22点"), + (0x336F, "M", "23点"), + (0x3370, "M", "24点"), + (0x3371, "M", "hpa"), + (0x3372, "M", "da"), + (0x3373, "M", "au"), + (0x3374, "M", "bar"), + (0x3375, "M", "ov"), + (0x3376, "M", "pc"), + (0x3377, "M", "dm"), + (0x3378, "M", "dm2"), + (0x3379, "M", "dm3"), + (0x337A, "M", "iu"), + (0x337B, "M", "平成"), + (0x337C, "M", "昭和"), + (0x337D, "M", "大正"), + ] + + +def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x337E, "M", "明治"), + (0x337F, "M", "株式会社"), + (0x3380, "M", "pa"), + (0x3381, "M", "na"), + (0x3382, "M", "μa"), + (0x3383, "M", "ma"), + (0x3384, "M", "ka"), + (0x3385, "M", "kb"), + (0x3386, "M", "mb"), + (0x3387, "M", "gb"), + (0x3388, "M", "cal"), + (0x3389, "M", "kcal"), + (0x338A, "M", "pf"), + (0x338B, "M", "nf"), + (0x338C, "M", "μf"), + (0x338D, "M", "μg"), + (0x338E, "M", "mg"), + (0x338F, "M", "kg"), + (0x3390, "M", "hz"), + (0x3391, "M", "khz"), + (0x3392, "M", "mhz"), + (0x3393, "M", "ghz"), + (0x3394, "M", "thz"), + (0x3395, "M", "μl"), + (0x3396, "M", "ml"), + (0x3397, "M", "dl"), + (0x3398, "M", "kl"), + (0x3399, "M", "fm"), + (0x339A, "M", "nm"), + (0x339B, "M", "μm"), + (0x339C, "M", "mm"), + (0x339D, "M", "cm"), + (0x339E, "M", "km"), + (0x339F, "M", "mm2"), + (0x33A0, "M", "cm2"), + (0x33A1, "M", "m2"), + (0x33A2, "M", "km2"), + (0x33A3, "M", "mm3"), + (0x33A4, "M", "cm3"), + (0x33A5, "M", "m3"), + (0x33A6, "M", "km3"), + (0x33A7, "M", "m∕s"), + (0x33A8, "M", "m∕s2"), + (0x33A9, "M", "pa"), + (0x33AA, "M", "kpa"), + (0x33AB, "M", "mpa"), + (0x33AC, "M", "gpa"), + (0x33AD, "M", "rad"), + (0x33AE, "M", "rad∕s"), + (0x33AF, "M", "rad∕s2"), + (0x33B0, "M", "ps"), + (0x33B1, "M", "ns"), + (0x33B2, "M", "μs"), + (0x33B3, "M", "ms"), + (0x33B4, "M", "pv"), + (0x33B5, "M", "nv"), + (0x33B6, "M", "μv"), + (0x33B7, "M", "mv"), + (0x33B8, "M", "kv"), + (0x33B9, "M", "mv"), + (0x33BA, "M", "pw"), + (0x33BB, "M", "nw"), + (0x33BC, "M", "μw"), + (0x33BD, "M", "mw"), + (0x33BE, "M", "kw"), + (0x33BF, "M", "mw"), + (0x33C0, "M", "kω"), + (0x33C1, "M", "mω"), + (0x33C2, "X"), + (0x33C3, "M", "bq"), + (0x33C4, "M", "cc"), + (0x33C5, "M", "cd"), + (0x33C6, "M", "c∕kg"), + (0x33C7, "X"), + (0x33C8, "M", "db"), + (0x33C9, "M", "gy"), + (0x33CA, "M", "ha"), + (0x33CB, "M", "hp"), + (0x33CC, "M", "in"), + (0x33CD, "M", "kk"), + (0x33CE, "M", "km"), + (0x33CF, "M", "kt"), + (0x33D0, "M", "lm"), + (0x33D1, "M", 
"ln"), + (0x33D2, "M", "log"), + (0x33D3, "M", "lx"), + (0x33D4, "M", "mb"), + (0x33D5, "M", "mil"), + (0x33D6, "M", "mol"), + (0x33D7, "M", "ph"), + (0x33D8, "X"), + (0x33D9, "M", "ppm"), + (0x33DA, "M", "pr"), + (0x33DB, "M", "sr"), + (0x33DC, "M", "sv"), + (0x33DD, "M", "wb"), + (0x33DE, "M", "v∕m"), + (0x33DF, "M", "a∕m"), + (0x33E0, "M", "1日"), + (0x33E1, "M", "2日"), + ] + + +def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x33E2, "M", "3日"), + (0x33E3, "M", "4日"), + (0x33E4, "M", "5日"), + (0x33E5, "M", "6日"), + (0x33E6, "M", "7日"), + (0x33E7, "M", "8日"), + (0x33E8, "M", "9日"), + (0x33E9, "M", "10日"), + (0x33EA, "M", "11日"), + (0x33EB, "M", "12日"), + (0x33EC, "M", "13日"), + (0x33ED, "M", "14日"), + (0x33EE, "M", "15日"), + (0x33EF, "M", "16日"), + (0x33F0, "M", "17日"), + (0x33F1, "M", "18日"), + (0x33F2, "M", "19日"), + (0x33F3, "M", "20日"), + (0x33F4, "M", "21日"), + (0x33F5, "M", "22日"), + (0x33F6, "M", "23日"), + (0x33F7, "M", "24日"), + (0x33F8, "M", "25日"), + (0x33F9, "M", "26日"), + (0x33FA, "M", "27日"), + (0x33FB, "M", "28日"), + (0x33FC, "M", "29日"), + (0x33FD, "M", "30日"), + (0x33FE, "M", "31日"), + (0x33FF, "M", "gal"), + (0x3400, "V"), + (0xA48D, "X"), + (0xA490, "V"), + (0xA4C7, "X"), + (0xA4D0, "V"), + (0xA62C, "X"), + (0xA640, "M", "ꙁ"), + (0xA641, "V"), + (0xA642, "M", "ꙃ"), + (0xA643, "V"), + (0xA644, "M", "ꙅ"), + (0xA645, "V"), + (0xA646, "M", "ꙇ"), + (0xA647, "V"), + (0xA648, "M", "ꙉ"), + (0xA649, "V"), + (0xA64A, "M", "ꙋ"), + (0xA64B, "V"), + (0xA64C, "M", "ꙍ"), + (0xA64D, "V"), + (0xA64E, "M", "ꙏ"), + (0xA64F, "V"), + (0xA650, "M", "ꙑ"), + (0xA651, "V"), + (0xA652, "M", "ꙓ"), + (0xA653, "V"), + (0xA654, "M", "ꙕ"), + (0xA655, "V"), + (0xA656, "M", "ꙗ"), + (0xA657, "V"), + (0xA658, "M", "ꙙ"), + (0xA659, "V"), + (0xA65A, "M", "ꙛ"), + (0xA65B, "V"), + (0xA65C, "M", "ꙝ"), + (0xA65D, "V"), + (0xA65E, "M", "ꙟ"), + (0xA65F, "V"), + (0xA660, "M", "ꙡ"), + (0xA661, "V"), + (0xA662, "M", "ꙣ"), + (0xA663, "V"), + (0xA664, "M", "ꙥ"), + (0xA665, "V"), + (0xA666, "M", "ꙧ"), + (0xA667, "V"), + (0xA668, "M", "ꙩ"), + (0xA669, "V"), + (0xA66A, "M", "ꙫ"), + (0xA66B, "V"), + (0xA66C, "M", "ꙭ"), + (0xA66D, "V"), + (0xA680, "M", "ꚁ"), + (0xA681, "V"), + (0xA682, "M", "ꚃ"), + (0xA683, "V"), + (0xA684, "M", "ꚅ"), + (0xA685, "V"), + (0xA686, "M", "ꚇ"), + (0xA687, "V"), + (0xA688, "M", "ꚉ"), + (0xA689, "V"), + (0xA68A, "M", "ꚋ"), + (0xA68B, "V"), + (0xA68C, "M", "ꚍ"), + (0xA68D, "V"), + (0xA68E, "M", "ꚏ"), + (0xA68F, "V"), + (0xA690, "M", "ꚑ"), + (0xA691, "V"), + ] + + +def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA692, "M", "ꚓ"), + (0xA693, "V"), + (0xA694, "M", "ꚕ"), + (0xA695, "V"), + (0xA696, "M", "ꚗ"), + (0xA697, "V"), + (0xA698, "M", "ꚙ"), + (0xA699, "V"), + (0xA69A, "M", "ꚛ"), + (0xA69B, "V"), + (0xA69C, "M", "ъ"), + (0xA69D, "M", "ь"), + (0xA69E, "V"), + (0xA6F8, "X"), + (0xA700, "V"), + (0xA722, "M", "ꜣ"), + (0xA723, "V"), + (0xA724, "M", "ꜥ"), + (0xA725, "V"), + (0xA726, "M", "ꜧ"), + (0xA727, "V"), + (0xA728, "M", "ꜩ"), + (0xA729, "V"), + (0xA72A, "M", "ꜫ"), + (0xA72B, "V"), + (0xA72C, "M", "ꜭ"), + (0xA72D, "V"), + (0xA72E, "M", "ꜯ"), + (0xA72F, "V"), + (0xA732, "M", "ꜳ"), + (0xA733, "V"), + (0xA734, "M", "ꜵ"), + (0xA735, "V"), + (0xA736, "M", "ꜷ"), + (0xA737, "V"), + (0xA738, "M", "ꜹ"), + (0xA739, "V"), + (0xA73A, "M", "ꜻ"), + (0xA73B, "V"), + (0xA73C, "M", "ꜽ"), + (0xA73D, "V"), + (0xA73E, "M", "ꜿ"), + (0xA73F, "V"), + (0xA740, "M", "ꝁ"), + (0xA741, "V"), + (0xA742, "M", "ꝃ"), + (0xA743, "V"), + (0xA744, "M", "ꝅ"), + 
(0xA745, "V"), + (0xA746, "M", "ꝇ"), + (0xA747, "V"), + (0xA748, "M", "ꝉ"), + (0xA749, "V"), + (0xA74A, "M", "ꝋ"), + (0xA74B, "V"), + (0xA74C, "M", "ꝍ"), + (0xA74D, "V"), + (0xA74E, "M", "ꝏ"), + (0xA74F, "V"), + (0xA750, "M", "ꝑ"), + (0xA751, "V"), + (0xA752, "M", "ꝓ"), + (0xA753, "V"), + (0xA754, "M", "ꝕ"), + (0xA755, "V"), + (0xA756, "M", "ꝗ"), + (0xA757, "V"), + (0xA758, "M", "ꝙ"), + (0xA759, "V"), + (0xA75A, "M", "ꝛ"), + (0xA75B, "V"), + (0xA75C, "M", "ꝝ"), + (0xA75D, "V"), + (0xA75E, "M", "ꝟ"), + (0xA75F, "V"), + (0xA760, "M", "ꝡ"), + (0xA761, "V"), + (0xA762, "M", "ꝣ"), + (0xA763, "V"), + (0xA764, "M", "ꝥ"), + (0xA765, "V"), + (0xA766, "M", "ꝧ"), + (0xA767, "V"), + (0xA768, "M", "ꝩ"), + (0xA769, "V"), + (0xA76A, "M", "ꝫ"), + (0xA76B, "V"), + (0xA76C, "M", "ꝭ"), + (0xA76D, "V"), + (0xA76E, "M", "ꝯ"), + (0xA76F, "V"), + (0xA770, "M", "ꝯ"), + (0xA771, "V"), + (0xA779, "M", "ꝺ"), + (0xA77A, "V"), + (0xA77B, "M", "ꝼ"), + (0xA77C, "V"), + (0xA77D, "M", "ᵹ"), + (0xA77E, "M", "ꝿ"), + (0xA77F, "V"), + ] + + +def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA780, "M", "ꞁ"), + (0xA781, "V"), + (0xA782, "M", "ꞃ"), + (0xA783, "V"), + (0xA784, "M", "ꞅ"), + (0xA785, "V"), + (0xA786, "M", "ꞇ"), + (0xA787, "V"), + (0xA78B, "M", "ꞌ"), + (0xA78C, "V"), + (0xA78D, "M", "ɥ"), + (0xA78E, "V"), + (0xA790, "M", "ꞑ"), + (0xA791, "V"), + (0xA792, "M", "ꞓ"), + (0xA793, "V"), + (0xA796, "M", "ꞗ"), + (0xA797, "V"), + (0xA798, "M", "ꞙ"), + (0xA799, "V"), + (0xA79A, "M", "ꞛ"), + (0xA79B, "V"), + (0xA79C, "M", "ꞝ"), + (0xA79D, "V"), + (0xA79E, "M", "ꞟ"), + (0xA79F, "V"), + (0xA7A0, "M", "ꞡ"), + (0xA7A1, "V"), + (0xA7A2, "M", "ꞣ"), + (0xA7A3, "V"), + (0xA7A4, "M", "ꞥ"), + (0xA7A5, "V"), + (0xA7A6, "M", "ꞧ"), + (0xA7A7, "V"), + (0xA7A8, "M", "ꞩ"), + (0xA7A9, "V"), + (0xA7AA, "M", "ɦ"), + (0xA7AB, "M", "ɜ"), + (0xA7AC, "M", "ɡ"), + (0xA7AD, "M", "ɬ"), + (0xA7AE, "M", "ɪ"), + (0xA7AF, "V"), + (0xA7B0, "M", "ʞ"), + (0xA7B1, "M", "ʇ"), + (0xA7B2, "M", "ʝ"), + (0xA7B3, "M", "ꭓ"), + (0xA7B4, "M", "ꞵ"), + (0xA7B5, "V"), + (0xA7B6, "M", "ꞷ"), + (0xA7B7, "V"), + (0xA7B8, "M", "ꞹ"), + (0xA7B9, "V"), + (0xA7BA, "M", "ꞻ"), + (0xA7BB, "V"), + (0xA7BC, "M", "ꞽ"), + (0xA7BD, "V"), + (0xA7BE, "M", "ꞿ"), + (0xA7BF, "V"), + (0xA7C0, "M", "ꟁ"), + (0xA7C1, "V"), + (0xA7C2, "M", "ꟃ"), + (0xA7C3, "V"), + (0xA7C4, "M", "ꞔ"), + (0xA7C5, "M", "ʂ"), + (0xA7C6, "M", "ᶎ"), + (0xA7C7, "M", "ꟈ"), + (0xA7C8, "V"), + (0xA7C9, "M", "ꟊ"), + (0xA7CA, "V"), + (0xA7CB, "X"), + (0xA7D0, "M", "ꟑ"), + (0xA7D1, "V"), + (0xA7D2, "X"), + (0xA7D3, "V"), + (0xA7D4, "X"), + (0xA7D5, "V"), + (0xA7D6, "M", "ꟗ"), + (0xA7D7, "V"), + (0xA7D8, "M", "ꟙ"), + (0xA7D9, "V"), + (0xA7DA, "X"), + (0xA7F2, "M", "c"), + (0xA7F3, "M", "f"), + (0xA7F4, "M", "q"), + (0xA7F5, "M", "ꟶ"), + (0xA7F6, "V"), + (0xA7F8, "M", "ħ"), + (0xA7F9, "M", "œ"), + (0xA7FA, "V"), + (0xA82D, "X"), + (0xA830, "V"), + (0xA83A, "X"), + (0xA840, "V"), + (0xA878, "X"), + (0xA880, "V"), + (0xA8C6, "X"), + (0xA8CE, "V"), + (0xA8DA, "X"), + (0xA8E0, "V"), + (0xA954, "X"), + ] + + +def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA95F, "V"), + (0xA97D, "X"), + (0xA980, "V"), + (0xA9CE, "X"), + (0xA9CF, "V"), + (0xA9DA, "X"), + (0xA9DE, "V"), + (0xA9FF, "X"), + (0xAA00, "V"), + (0xAA37, "X"), + (0xAA40, "V"), + (0xAA4E, "X"), + (0xAA50, "V"), + (0xAA5A, "X"), + (0xAA5C, "V"), + (0xAAC3, "X"), + (0xAADB, "V"), + (0xAAF7, "X"), + (0xAB01, "V"), + (0xAB07, "X"), + (0xAB09, "V"), + (0xAB0F, "X"), + (0xAB11, "V"), + (0xAB17, "X"), + (0xAB20, 
"V"), + (0xAB27, "X"), + (0xAB28, "V"), + (0xAB2F, "X"), + (0xAB30, "V"), + (0xAB5C, "M", "ꜧ"), + (0xAB5D, "M", "ꬷ"), + (0xAB5E, "M", "ɫ"), + (0xAB5F, "M", "ꭒ"), + (0xAB60, "V"), + (0xAB69, "M", "ʍ"), + (0xAB6A, "V"), + (0xAB6C, "X"), + (0xAB70, "M", "Ꭰ"), + (0xAB71, "M", "Ꭱ"), + (0xAB72, "M", "Ꭲ"), + (0xAB73, "M", "Ꭳ"), + (0xAB74, "M", "Ꭴ"), + (0xAB75, "M", "Ꭵ"), + (0xAB76, "M", "Ꭶ"), + (0xAB77, "M", "Ꭷ"), + (0xAB78, "M", "Ꭸ"), + (0xAB79, "M", "Ꭹ"), + (0xAB7A, "M", "Ꭺ"), + (0xAB7B, "M", "Ꭻ"), + (0xAB7C, "M", "Ꭼ"), + (0xAB7D, "M", "Ꭽ"), + (0xAB7E, "M", "Ꭾ"), + (0xAB7F, "M", "Ꭿ"), + (0xAB80, "M", "Ꮀ"), + (0xAB81, "M", "Ꮁ"), + (0xAB82, "M", "Ꮂ"), + (0xAB83, "M", "Ꮃ"), + (0xAB84, "M", "Ꮄ"), + (0xAB85, "M", "Ꮅ"), + (0xAB86, "M", "Ꮆ"), + (0xAB87, "M", "Ꮇ"), + (0xAB88, "M", "Ꮈ"), + (0xAB89, "M", "Ꮉ"), + (0xAB8A, "M", "Ꮊ"), + (0xAB8B, "M", "Ꮋ"), + (0xAB8C, "M", "Ꮌ"), + (0xAB8D, "M", "Ꮍ"), + (0xAB8E, "M", "Ꮎ"), + (0xAB8F, "M", "Ꮏ"), + (0xAB90, "M", "Ꮐ"), + (0xAB91, "M", "Ꮑ"), + (0xAB92, "M", "Ꮒ"), + (0xAB93, "M", "Ꮓ"), + (0xAB94, "M", "Ꮔ"), + (0xAB95, "M", "Ꮕ"), + (0xAB96, "M", "Ꮖ"), + (0xAB97, "M", "Ꮗ"), + (0xAB98, "M", "Ꮘ"), + (0xAB99, "M", "Ꮙ"), + (0xAB9A, "M", "Ꮚ"), + (0xAB9B, "M", "Ꮛ"), + (0xAB9C, "M", "Ꮜ"), + (0xAB9D, "M", "Ꮝ"), + (0xAB9E, "M", "Ꮞ"), + (0xAB9F, "M", "Ꮟ"), + (0xABA0, "M", "Ꮠ"), + (0xABA1, "M", "Ꮡ"), + (0xABA2, "M", "Ꮢ"), + (0xABA3, "M", "Ꮣ"), + (0xABA4, "M", "Ꮤ"), + (0xABA5, "M", "Ꮥ"), + (0xABA6, "M", "Ꮦ"), + (0xABA7, "M", "Ꮧ"), + (0xABA8, "M", "Ꮨ"), + (0xABA9, "M", "Ꮩ"), + (0xABAA, "M", "Ꮪ"), + (0xABAB, "M", "Ꮫ"), + (0xABAC, "M", "Ꮬ"), + (0xABAD, "M", "Ꮭ"), + (0xABAE, "M", "Ꮮ"), + ] + + +def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xABAF, "M", "Ꮯ"), + (0xABB0, "M", "Ꮰ"), + (0xABB1, "M", "Ꮱ"), + (0xABB2, "M", "Ꮲ"), + (0xABB3, "M", "Ꮳ"), + (0xABB4, "M", "Ꮴ"), + (0xABB5, "M", "Ꮵ"), + (0xABB6, "M", "Ꮶ"), + (0xABB7, "M", "Ꮷ"), + (0xABB8, "M", "Ꮸ"), + (0xABB9, "M", "Ꮹ"), + (0xABBA, "M", "Ꮺ"), + (0xABBB, "M", "Ꮻ"), + (0xABBC, "M", "Ꮼ"), + (0xABBD, "M", "Ꮽ"), + (0xABBE, "M", "Ꮾ"), + (0xABBF, "M", "Ꮿ"), + (0xABC0, "V"), + (0xABEE, "X"), + (0xABF0, "V"), + (0xABFA, "X"), + (0xAC00, "V"), + (0xD7A4, "X"), + (0xD7B0, "V"), + (0xD7C7, "X"), + (0xD7CB, "V"), + (0xD7FC, "X"), + (0xF900, "M", "豈"), + (0xF901, "M", "更"), + (0xF902, "M", "車"), + (0xF903, "M", "賈"), + (0xF904, "M", "滑"), + (0xF905, "M", "串"), + (0xF906, "M", "句"), + (0xF907, "M", "龜"), + (0xF909, "M", "契"), + (0xF90A, "M", "金"), + (0xF90B, "M", "喇"), + (0xF90C, "M", "奈"), + (0xF90D, "M", "懶"), + (0xF90E, "M", "癩"), + (0xF90F, "M", "羅"), + (0xF910, "M", "蘿"), + (0xF911, "M", "螺"), + (0xF912, "M", "裸"), + (0xF913, "M", "邏"), + (0xF914, "M", "樂"), + (0xF915, "M", "洛"), + (0xF916, "M", "烙"), + (0xF917, "M", "珞"), + (0xF918, "M", "落"), + (0xF919, "M", "酪"), + (0xF91A, "M", "駱"), + (0xF91B, "M", "亂"), + (0xF91C, "M", "卵"), + (0xF91D, "M", "欄"), + (0xF91E, "M", "爛"), + (0xF91F, "M", "蘭"), + (0xF920, "M", "鸞"), + (0xF921, "M", "嵐"), + (0xF922, "M", "濫"), + (0xF923, "M", "藍"), + (0xF924, "M", "襤"), + (0xF925, "M", "拉"), + (0xF926, "M", "臘"), + (0xF927, "M", "蠟"), + (0xF928, "M", "廊"), + (0xF929, "M", "朗"), + (0xF92A, "M", "浪"), + (0xF92B, "M", "狼"), + (0xF92C, "M", "郎"), + (0xF92D, "M", "來"), + (0xF92E, "M", "冷"), + (0xF92F, "M", "勞"), + (0xF930, "M", "擄"), + (0xF931, "M", "櫓"), + (0xF932, "M", "爐"), + (0xF933, "M", "盧"), + (0xF934, "M", "老"), + (0xF935, "M", "蘆"), + (0xF936, "M", "虜"), + (0xF937, "M", "路"), + (0xF938, "M", "露"), + (0xF939, "M", "魯"), + (0xF93A, "M", "鷺"), + (0xF93B, "M", "碌"), + 
(0xF93C, "M", "祿"), + (0xF93D, "M", "綠"), + (0xF93E, "M", "菉"), + (0xF93F, "M", "錄"), + (0xF940, "M", "鹿"), + (0xF941, "M", "論"), + (0xF942, "M", "壟"), + (0xF943, "M", "弄"), + (0xF944, "M", "籠"), + (0xF945, "M", "聾"), + (0xF946, "M", "牢"), + (0xF947, "M", "磊"), + (0xF948, "M", "賂"), + (0xF949, "M", "雷"), + ] + + +def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xF94A, "M", "壘"), + (0xF94B, "M", "屢"), + (0xF94C, "M", "樓"), + (0xF94D, "M", "淚"), + (0xF94E, "M", "漏"), + (0xF94F, "M", "累"), + (0xF950, "M", "縷"), + (0xF951, "M", "陋"), + (0xF952, "M", "勒"), + (0xF953, "M", "肋"), + (0xF954, "M", "凜"), + (0xF955, "M", "凌"), + (0xF956, "M", "稜"), + (0xF957, "M", "綾"), + (0xF958, "M", "菱"), + (0xF959, "M", "陵"), + (0xF95A, "M", "讀"), + (0xF95B, "M", "拏"), + (0xF95C, "M", "樂"), + (0xF95D, "M", "諾"), + (0xF95E, "M", "丹"), + (0xF95F, "M", "寧"), + (0xF960, "M", "怒"), + (0xF961, "M", "率"), + (0xF962, "M", "異"), + (0xF963, "M", "北"), + (0xF964, "M", "磻"), + (0xF965, "M", "便"), + (0xF966, "M", "復"), + (0xF967, "M", "不"), + (0xF968, "M", "泌"), + (0xF969, "M", "數"), + (0xF96A, "M", "索"), + (0xF96B, "M", "參"), + (0xF96C, "M", "塞"), + (0xF96D, "M", "省"), + (0xF96E, "M", "葉"), + (0xF96F, "M", "說"), + (0xF970, "M", "殺"), + (0xF971, "M", "辰"), + (0xF972, "M", "沈"), + (0xF973, "M", "拾"), + (0xF974, "M", "若"), + (0xF975, "M", "掠"), + (0xF976, "M", "略"), + (0xF977, "M", "亮"), + (0xF978, "M", "兩"), + (0xF979, "M", "凉"), + (0xF97A, "M", "梁"), + (0xF97B, "M", "糧"), + (0xF97C, "M", "良"), + (0xF97D, "M", "諒"), + (0xF97E, "M", "量"), + (0xF97F, "M", "勵"), + (0xF980, "M", "呂"), + (0xF981, "M", "女"), + (0xF982, "M", "廬"), + (0xF983, "M", "旅"), + (0xF984, "M", "濾"), + (0xF985, "M", "礪"), + (0xF986, "M", "閭"), + (0xF987, "M", "驪"), + (0xF988, "M", "麗"), + (0xF989, "M", "黎"), + (0xF98A, "M", "力"), + (0xF98B, "M", "曆"), + (0xF98C, "M", "歷"), + (0xF98D, "M", "轢"), + (0xF98E, "M", "年"), + (0xF98F, "M", "憐"), + (0xF990, "M", "戀"), + (0xF991, "M", "撚"), + (0xF992, "M", "漣"), + (0xF993, "M", "煉"), + (0xF994, "M", "璉"), + (0xF995, "M", "秊"), + (0xF996, "M", "練"), + (0xF997, "M", "聯"), + (0xF998, "M", "輦"), + (0xF999, "M", "蓮"), + (0xF99A, "M", "連"), + (0xF99B, "M", "鍊"), + (0xF99C, "M", "列"), + (0xF99D, "M", "劣"), + (0xF99E, "M", "咽"), + (0xF99F, "M", "烈"), + (0xF9A0, "M", "裂"), + (0xF9A1, "M", "說"), + (0xF9A2, "M", "廉"), + (0xF9A3, "M", "念"), + (0xF9A4, "M", "捻"), + (0xF9A5, "M", "殮"), + (0xF9A6, "M", "簾"), + (0xF9A7, "M", "獵"), + (0xF9A8, "M", "令"), + (0xF9A9, "M", "囹"), + (0xF9AA, "M", "寧"), + (0xF9AB, "M", "嶺"), + (0xF9AC, "M", "怜"), + (0xF9AD, "M", "玲"), + ] + + +def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xF9AE, "M", "瑩"), + (0xF9AF, "M", "羚"), + (0xF9B0, "M", "聆"), + (0xF9B1, "M", "鈴"), + (0xF9B2, "M", "零"), + (0xF9B3, "M", "靈"), + (0xF9B4, "M", "領"), + (0xF9B5, "M", "例"), + (0xF9B6, "M", "禮"), + (0xF9B7, "M", "醴"), + (0xF9B8, "M", "隸"), + (0xF9B9, "M", "惡"), + (0xF9BA, "M", "了"), + (0xF9BB, "M", "僚"), + (0xF9BC, "M", "寮"), + (0xF9BD, "M", "尿"), + (0xF9BE, "M", "料"), + (0xF9BF, "M", "樂"), + (0xF9C0, "M", "燎"), + (0xF9C1, "M", "療"), + (0xF9C2, "M", "蓼"), + (0xF9C3, "M", "遼"), + (0xF9C4, "M", "龍"), + (0xF9C5, "M", "暈"), + (0xF9C6, "M", "阮"), + (0xF9C7, "M", "劉"), + (0xF9C8, "M", "杻"), + (0xF9C9, "M", "柳"), + (0xF9CA, "M", "流"), + (0xF9CB, "M", "溜"), + (0xF9CC, "M", "琉"), + (0xF9CD, "M", "留"), + (0xF9CE, "M", "硫"), + (0xF9CF, "M", "紐"), + (0xF9D0, "M", "類"), + (0xF9D1, "M", "六"), + (0xF9D2, "M", "戮"), + (0xF9D3, "M", "陸"), + (0xF9D4, "M", "倫"), + (0xF9D5, 
"M", "崙"), + (0xF9D6, "M", "淪"), + (0xF9D7, "M", "輪"), + (0xF9D8, "M", "律"), + (0xF9D9, "M", "慄"), + (0xF9DA, "M", "栗"), + (0xF9DB, "M", "率"), + (0xF9DC, "M", "隆"), + (0xF9DD, "M", "利"), + (0xF9DE, "M", "吏"), + (0xF9DF, "M", "履"), + (0xF9E0, "M", "易"), + (0xF9E1, "M", "李"), + (0xF9E2, "M", "梨"), + (0xF9E3, "M", "泥"), + (0xF9E4, "M", "理"), + (0xF9E5, "M", "痢"), + (0xF9E6, "M", "罹"), + (0xF9E7, "M", "裏"), + (0xF9E8, "M", "裡"), + (0xF9E9, "M", "里"), + (0xF9EA, "M", "離"), + (0xF9EB, "M", "匿"), + (0xF9EC, "M", "溺"), + (0xF9ED, "M", "吝"), + (0xF9EE, "M", "燐"), + (0xF9EF, "M", "璘"), + (0xF9F0, "M", "藺"), + (0xF9F1, "M", "隣"), + (0xF9F2, "M", "鱗"), + (0xF9F3, "M", "麟"), + (0xF9F4, "M", "林"), + (0xF9F5, "M", "淋"), + (0xF9F6, "M", "臨"), + (0xF9F7, "M", "立"), + (0xF9F8, "M", "笠"), + (0xF9F9, "M", "粒"), + (0xF9FA, "M", "狀"), + (0xF9FB, "M", "炙"), + (0xF9FC, "M", "識"), + (0xF9FD, "M", "什"), + (0xF9FE, "M", "茶"), + (0xF9FF, "M", "刺"), + (0xFA00, "M", "切"), + (0xFA01, "M", "度"), + (0xFA02, "M", "拓"), + (0xFA03, "M", "糖"), + (0xFA04, "M", "宅"), + (0xFA05, "M", "洞"), + (0xFA06, "M", "暴"), + (0xFA07, "M", "輻"), + (0xFA08, "M", "行"), + (0xFA09, "M", "降"), + (0xFA0A, "M", "見"), + (0xFA0B, "M", "廓"), + (0xFA0C, "M", "兀"), + (0xFA0D, "M", "嗀"), + (0xFA0E, "V"), + (0xFA10, "M", "塚"), + (0xFA11, "V"), + (0xFA12, "M", "晴"), + ] + + +def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFA13, "V"), + (0xFA15, "M", "凞"), + (0xFA16, "M", "猪"), + (0xFA17, "M", "益"), + (0xFA18, "M", "礼"), + (0xFA19, "M", "神"), + (0xFA1A, "M", "祥"), + (0xFA1B, "M", "福"), + (0xFA1C, "M", "靖"), + (0xFA1D, "M", "精"), + (0xFA1E, "M", "羽"), + (0xFA1F, "V"), + (0xFA20, "M", "蘒"), + (0xFA21, "V"), + (0xFA22, "M", "諸"), + (0xFA23, "V"), + (0xFA25, "M", "逸"), + (0xFA26, "M", "都"), + (0xFA27, "V"), + (0xFA2A, "M", "飯"), + (0xFA2B, "M", "飼"), + (0xFA2C, "M", "館"), + (0xFA2D, "M", "鶴"), + (0xFA2E, "M", "郞"), + (0xFA2F, "M", "隷"), + (0xFA30, "M", "侮"), + (0xFA31, "M", "僧"), + (0xFA32, "M", "免"), + (0xFA33, "M", "勉"), + (0xFA34, "M", "勤"), + (0xFA35, "M", "卑"), + (0xFA36, "M", "喝"), + (0xFA37, "M", "嘆"), + (0xFA38, "M", "器"), + (0xFA39, "M", "塀"), + (0xFA3A, "M", "墨"), + (0xFA3B, "M", "層"), + (0xFA3C, "M", "屮"), + (0xFA3D, "M", "悔"), + (0xFA3E, "M", "慨"), + (0xFA3F, "M", "憎"), + (0xFA40, "M", "懲"), + (0xFA41, "M", "敏"), + (0xFA42, "M", "既"), + (0xFA43, "M", "暑"), + (0xFA44, "M", "梅"), + (0xFA45, "M", "海"), + (0xFA46, "M", "渚"), + (0xFA47, "M", "漢"), + (0xFA48, "M", "煮"), + (0xFA49, "M", "爫"), + (0xFA4A, "M", "琢"), + (0xFA4B, "M", "碑"), + (0xFA4C, "M", "社"), + (0xFA4D, "M", "祉"), + (0xFA4E, "M", "祈"), + (0xFA4F, "M", "祐"), + (0xFA50, "M", "祖"), + (0xFA51, "M", "祝"), + (0xFA52, "M", "禍"), + (0xFA53, "M", "禎"), + (0xFA54, "M", "穀"), + (0xFA55, "M", "突"), + (0xFA56, "M", "節"), + (0xFA57, "M", "練"), + (0xFA58, "M", "縉"), + (0xFA59, "M", "繁"), + (0xFA5A, "M", "署"), + (0xFA5B, "M", "者"), + (0xFA5C, "M", "臭"), + (0xFA5D, "M", "艹"), + (0xFA5F, "M", "著"), + (0xFA60, "M", "褐"), + (0xFA61, "M", "視"), + (0xFA62, "M", "謁"), + (0xFA63, "M", "謹"), + (0xFA64, "M", "賓"), + (0xFA65, "M", "贈"), + (0xFA66, "M", "辶"), + (0xFA67, "M", "逸"), + (0xFA68, "M", "難"), + (0xFA69, "M", "響"), + (0xFA6A, "M", "頻"), + (0xFA6B, "M", "恵"), + (0xFA6C, "M", "𤋮"), + (0xFA6D, "M", "舘"), + (0xFA6E, "X"), + (0xFA70, "M", "並"), + (0xFA71, "M", "况"), + (0xFA72, "M", "全"), + (0xFA73, "M", "侀"), + (0xFA74, "M", "充"), + (0xFA75, "M", "冀"), + (0xFA76, "M", "勇"), + (0xFA77, "M", "勺"), + (0xFA78, "M", "喝"), + (0xFA79, "M", "啕"), + (0xFA7A, "M", "喙"), + (0xFA7B, "M", 
"嗢"), + (0xFA7C, "M", "塚"), + ] + + +def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFA7D, "M", "墳"), + (0xFA7E, "M", "奄"), + (0xFA7F, "M", "奔"), + (0xFA80, "M", "婢"), + (0xFA81, "M", "嬨"), + (0xFA82, "M", "廒"), + (0xFA83, "M", "廙"), + (0xFA84, "M", "彩"), + (0xFA85, "M", "徭"), + (0xFA86, "M", "惘"), + (0xFA87, "M", "慎"), + (0xFA88, "M", "愈"), + (0xFA89, "M", "憎"), + (0xFA8A, "M", "慠"), + (0xFA8B, "M", "懲"), + (0xFA8C, "M", "戴"), + (0xFA8D, "M", "揄"), + (0xFA8E, "M", "搜"), + (0xFA8F, "M", "摒"), + (0xFA90, "M", "敖"), + (0xFA91, "M", "晴"), + (0xFA92, "M", "朗"), + (0xFA93, "M", "望"), + (0xFA94, "M", "杖"), + (0xFA95, "M", "歹"), + (0xFA96, "M", "殺"), + (0xFA97, "M", "流"), + (0xFA98, "M", "滛"), + (0xFA99, "M", "滋"), + (0xFA9A, "M", "漢"), + (0xFA9B, "M", "瀞"), + (0xFA9C, "M", "煮"), + (0xFA9D, "M", "瞧"), + (0xFA9E, "M", "爵"), + (0xFA9F, "M", "犯"), + (0xFAA0, "M", "猪"), + (0xFAA1, "M", "瑱"), + (0xFAA2, "M", "甆"), + (0xFAA3, "M", "画"), + (0xFAA4, "M", "瘝"), + (0xFAA5, "M", "瘟"), + (0xFAA6, "M", "益"), + (0xFAA7, "M", "盛"), + (0xFAA8, "M", "直"), + (0xFAA9, "M", "睊"), + (0xFAAA, "M", "着"), + (0xFAAB, "M", "磌"), + (0xFAAC, "M", "窱"), + (0xFAAD, "M", "節"), + (0xFAAE, "M", "类"), + (0xFAAF, "M", "絛"), + (0xFAB0, "M", "練"), + (0xFAB1, "M", "缾"), + (0xFAB2, "M", "者"), + (0xFAB3, "M", "荒"), + (0xFAB4, "M", "華"), + (0xFAB5, "M", "蝹"), + (0xFAB6, "M", "襁"), + (0xFAB7, "M", "覆"), + (0xFAB8, "M", "視"), + (0xFAB9, "M", "調"), + (0xFABA, "M", "諸"), + (0xFABB, "M", "請"), + (0xFABC, "M", "謁"), + (0xFABD, "M", "諾"), + (0xFABE, "M", "諭"), + (0xFABF, "M", "謹"), + (0xFAC0, "M", "變"), + (0xFAC1, "M", "贈"), + (0xFAC2, "M", "輸"), + (0xFAC3, "M", "遲"), + (0xFAC4, "M", "醙"), + (0xFAC5, "M", "鉶"), + (0xFAC6, "M", "陼"), + (0xFAC7, "M", "難"), + (0xFAC8, "M", "靖"), + (0xFAC9, "M", "韛"), + (0xFACA, "M", "響"), + (0xFACB, "M", "頋"), + (0xFACC, "M", "頻"), + (0xFACD, "M", "鬒"), + (0xFACE, "M", "龜"), + (0xFACF, "M", "𢡊"), + (0xFAD0, "M", "𢡄"), + (0xFAD1, "M", "𣏕"), + (0xFAD2, "M", "㮝"), + (0xFAD3, "M", "䀘"), + (0xFAD4, "M", "䀹"), + (0xFAD5, "M", "𥉉"), + (0xFAD6, "M", "𥳐"), + (0xFAD7, "M", "𧻓"), + (0xFAD8, "M", "齃"), + (0xFAD9, "M", "龎"), + (0xFADA, "X"), + (0xFB00, "M", "ff"), + (0xFB01, "M", "fi"), + (0xFB02, "M", "fl"), + (0xFB03, "M", "ffi"), + (0xFB04, "M", "ffl"), + (0xFB05, "M", "st"), + ] + + +def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFB07, "X"), + (0xFB13, "M", "մն"), + (0xFB14, "M", "մե"), + (0xFB15, "M", "մի"), + (0xFB16, "M", "վն"), + (0xFB17, "M", "մխ"), + (0xFB18, "X"), + (0xFB1D, "M", "יִ"), + (0xFB1E, "V"), + (0xFB1F, "M", "ײַ"), + (0xFB20, "M", "ע"), + (0xFB21, "M", "א"), + (0xFB22, "M", "ד"), + (0xFB23, "M", "ה"), + (0xFB24, "M", "כ"), + (0xFB25, "M", "ל"), + (0xFB26, "M", "ם"), + (0xFB27, "M", "ר"), + (0xFB28, "M", "ת"), + (0xFB29, "3", "+"), + (0xFB2A, "M", "שׁ"), + (0xFB2B, "M", "שׂ"), + (0xFB2C, "M", "שּׁ"), + (0xFB2D, "M", "שּׂ"), + (0xFB2E, "M", "אַ"), + (0xFB2F, "M", "אָ"), + (0xFB30, "M", "אּ"), + (0xFB31, "M", "בּ"), + (0xFB32, "M", "גּ"), + (0xFB33, "M", "דּ"), + (0xFB34, "M", "הּ"), + (0xFB35, "M", "וּ"), + (0xFB36, "M", "זּ"), + (0xFB37, "X"), + (0xFB38, "M", "טּ"), + (0xFB39, "M", "יּ"), + (0xFB3A, "M", "ךּ"), + (0xFB3B, "M", "כּ"), + (0xFB3C, "M", "לּ"), + (0xFB3D, "X"), + (0xFB3E, "M", "מּ"), + (0xFB3F, "X"), + (0xFB40, "M", "נּ"), + (0xFB41, "M", "סּ"), + (0xFB42, "X"), + (0xFB43, "M", "ףּ"), + (0xFB44, "M", "פּ"), + (0xFB45, "X"), + (0xFB46, "M", "צּ"), + (0xFB47, "M", "קּ"), + (0xFB48, "M", "רּ"), + (0xFB49, "M", "שּ"), + 
(0xFB4A, "M", "תּ"), + (0xFB4B, "M", "וֹ"), + (0xFB4C, "M", "בֿ"), + (0xFB4D, "M", "כֿ"), + (0xFB4E, "M", "פֿ"), + (0xFB4F, "M", "אל"), + (0xFB50, "M", "ٱ"), + (0xFB52, "M", "ٻ"), + (0xFB56, "M", "پ"), + (0xFB5A, "M", "ڀ"), + (0xFB5E, "M", "ٺ"), + (0xFB62, "M", "ٿ"), + (0xFB66, "M", "ٹ"), + (0xFB6A, "M", "ڤ"), + (0xFB6E, "M", "ڦ"), + (0xFB72, "M", "ڄ"), + (0xFB76, "M", "ڃ"), + (0xFB7A, "M", "چ"), + (0xFB7E, "M", "ڇ"), + (0xFB82, "M", "ڍ"), + (0xFB84, "M", "ڌ"), + (0xFB86, "M", "ڎ"), + (0xFB88, "M", "ڈ"), + (0xFB8A, "M", "ژ"), + (0xFB8C, "M", "ڑ"), + (0xFB8E, "M", "ک"), + (0xFB92, "M", "گ"), + (0xFB96, "M", "ڳ"), + (0xFB9A, "M", "ڱ"), + (0xFB9E, "M", "ں"), + (0xFBA0, "M", "ڻ"), + (0xFBA4, "M", "ۀ"), + (0xFBA6, "M", "ہ"), + (0xFBAA, "M", "ھ"), + (0xFBAE, "M", "ے"), + (0xFBB0, "M", "ۓ"), + (0xFBB2, "V"), + (0xFBC3, "X"), + (0xFBD3, "M", "ڭ"), + (0xFBD7, "M", "ۇ"), + (0xFBD9, "M", "ۆ"), + (0xFBDB, "M", "ۈ"), + (0xFBDD, "M", "ۇٴ"), + (0xFBDE, "M", "ۋ"), + (0xFBE0, "M", "ۅ"), + (0xFBE2, "M", "ۉ"), + (0xFBE4, "M", "ې"), + (0xFBE8, "M", "ى"), + ] + + +def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFBEA, "M", "ئا"), + (0xFBEC, "M", "ئە"), + (0xFBEE, "M", "ئو"), + (0xFBF0, "M", "ئۇ"), + (0xFBF2, "M", "ئۆ"), + (0xFBF4, "M", "ئۈ"), + (0xFBF6, "M", "ئې"), + (0xFBF9, "M", "ئى"), + (0xFBFC, "M", "ی"), + (0xFC00, "M", "ئج"), + (0xFC01, "M", "ئح"), + (0xFC02, "M", "ئم"), + (0xFC03, "M", "ئى"), + (0xFC04, "M", "ئي"), + (0xFC05, "M", "بج"), + (0xFC06, "M", "بح"), + (0xFC07, "M", "بخ"), + (0xFC08, "M", "بم"), + (0xFC09, "M", "بى"), + (0xFC0A, "M", "بي"), + (0xFC0B, "M", "تج"), + (0xFC0C, "M", "تح"), + (0xFC0D, "M", "تخ"), + (0xFC0E, "M", "تم"), + (0xFC0F, "M", "تى"), + (0xFC10, "M", "تي"), + (0xFC11, "M", "ثج"), + (0xFC12, "M", "ثم"), + (0xFC13, "M", "ثى"), + (0xFC14, "M", "ثي"), + (0xFC15, "M", "جح"), + (0xFC16, "M", "جم"), + (0xFC17, "M", "حج"), + (0xFC18, "M", "حم"), + (0xFC19, "M", "خج"), + (0xFC1A, "M", "خح"), + (0xFC1B, "M", "خم"), + (0xFC1C, "M", "سج"), + (0xFC1D, "M", "سح"), + (0xFC1E, "M", "سخ"), + (0xFC1F, "M", "سم"), + (0xFC20, "M", "صح"), + (0xFC21, "M", "صم"), + (0xFC22, "M", "ضج"), + (0xFC23, "M", "ضح"), + (0xFC24, "M", "ضخ"), + (0xFC25, "M", "ضم"), + (0xFC26, "M", "طح"), + (0xFC27, "M", "طم"), + (0xFC28, "M", "ظم"), + (0xFC29, "M", "عج"), + (0xFC2A, "M", "عم"), + (0xFC2B, "M", "غج"), + (0xFC2C, "M", "غم"), + (0xFC2D, "M", "فج"), + (0xFC2E, "M", "فح"), + (0xFC2F, "M", "فخ"), + (0xFC30, "M", "فم"), + (0xFC31, "M", "فى"), + (0xFC32, "M", "في"), + (0xFC33, "M", "قح"), + (0xFC34, "M", "قم"), + (0xFC35, "M", "قى"), + (0xFC36, "M", "قي"), + (0xFC37, "M", "كا"), + (0xFC38, "M", "كج"), + (0xFC39, "M", "كح"), + (0xFC3A, "M", "كخ"), + (0xFC3B, "M", "كل"), + (0xFC3C, "M", "كم"), + (0xFC3D, "M", "كى"), + (0xFC3E, "M", "كي"), + (0xFC3F, "M", "لج"), + (0xFC40, "M", "لح"), + (0xFC41, "M", "لخ"), + (0xFC42, "M", "لم"), + (0xFC43, "M", "لى"), + (0xFC44, "M", "لي"), + (0xFC45, "M", "مج"), + (0xFC46, "M", "مح"), + (0xFC47, "M", "مخ"), + (0xFC48, "M", "مم"), + (0xFC49, "M", "مى"), + (0xFC4A, "M", "مي"), + (0xFC4B, "M", "نج"), + (0xFC4C, "M", "نح"), + (0xFC4D, "M", "نخ"), + (0xFC4E, "M", "نم"), + (0xFC4F, "M", "نى"), + (0xFC50, "M", "ني"), + (0xFC51, "M", "هج"), + (0xFC52, "M", "هم"), + (0xFC53, "M", "هى"), + (0xFC54, "M", "هي"), + (0xFC55, "M", "يج"), + (0xFC56, "M", "يح"), + (0xFC57, "M", "يخ"), + (0xFC58, "M", "يم"), + (0xFC59, "M", "يى"), + (0xFC5A, "M", "يي"), + ] + + +def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFC5B, "M", "ذٰ"), + 
(0xFC5C, "M", "رٰ"), + (0xFC5D, "M", "ىٰ"), + (0xFC5E, "3", " ٌّ"), + (0xFC5F, "3", " ٍّ"), + (0xFC60, "3", " َّ"), + (0xFC61, "3", " ُّ"), + (0xFC62, "3", " ِّ"), + (0xFC63, "3", " ّٰ"), + (0xFC64, "M", "ئر"), + (0xFC65, "M", "ئز"), + (0xFC66, "M", "ئم"), + (0xFC67, "M", "ئن"), + (0xFC68, "M", "ئى"), + (0xFC69, "M", "ئي"), + (0xFC6A, "M", "بر"), + (0xFC6B, "M", "بز"), + (0xFC6C, "M", "بم"), + (0xFC6D, "M", "بن"), + (0xFC6E, "M", "بى"), + (0xFC6F, "M", "بي"), + (0xFC70, "M", "تر"), + (0xFC71, "M", "تز"), + (0xFC72, "M", "تم"), + (0xFC73, "M", "تن"), + (0xFC74, "M", "تى"), + (0xFC75, "M", "تي"), + (0xFC76, "M", "ثر"), + (0xFC77, "M", "ثز"), + (0xFC78, "M", "ثم"), + (0xFC79, "M", "ثن"), + (0xFC7A, "M", "ثى"), + (0xFC7B, "M", "ثي"), + (0xFC7C, "M", "فى"), + (0xFC7D, "M", "في"), + (0xFC7E, "M", "قى"), + (0xFC7F, "M", "قي"), + (0xFC80, "M", "كا"), + (0xFC81, "M", "كل"), + (0xFC82, "M", "كم"), + (0xFC83, "M", "كى"), + (0xFC84, "M", "كي"), + (0xFC85, "M", "لم"), + (0xFC86, "M", "لى"), + (0xFC87, "M", "لي"), + (0xFC88, "M", "ما"), + (0xFC89, "M", "مم"), + (0xFC8A, "M", "نر"), + (0xFC8B, "M", "نز"), + (0xFC8C, "M", "نم"), + (0xFC8D, "M", "نن"), + (0xFC8E, "M", "نى"), + (0xFC8F, "M", "ني"), + (0xFC90, "M", "ىٰ"), + (0xFC91, "M", "ير"), + (0xFC92, "M", "يز"), + (0xFC93, "M", "يم"), + (0xFC94, "M", "ين"), + (0xFC95, "M", "يى"), + (0xFC96, "M", "يي"), + (0xFC97, "M", "ئج"), + (0xFC98, "M", "ئح"), + (0xFC99, "M", "ئخ"), + (0xFC9A, "M", "ئم"), + (0xFC9B, "M", "ئه"), + (0xFC9C, "M", "بج"), + (0xFC9D, "M", "بح"), + (0xFC9E, "M", "بخ"), + (0xFC9F, "M", "بم"), + (0xFCA0, "M", "به"), + (0xFCA1, "M", "تج"), + (0xFCA2, "M", "تح"), + (0xFCA3, "M", "تخ"), + (0xFCA4, "M", "تم"), + (0xFCA5, "M", "ته"), + (0xFCA6, "M", "ثم"), + (0xFCA7, "M", "جح"), + (0xFCA8, "M", "جم"), + (0xFCA9, "M", "حج"), + (0xFCAA, "M", "حم"), + (0xFCAB, "M", "خج"), + (0xFCAC, "M", "خم"), + (0xFCAD, "M", "سج"), + (0xFCAE, "M", "سح"), + (0xFCAF, "M", "سخ"), + (0xFCB0, "M", "سم"), + (0xFCB1, "M", "صح"), + (0xFCB2, "M", "صخ"), + (0xFCB3, "M", "صم"), + (0xFCB4, "M", "ضج"), + (0xFCB5, "M", "ضح"), + (0xFCB6, "M", "ضخ"), + (0xFCB7, "M", "ضم"), + (0xFCB8, "M", "طح"), + (0xFCB9, "M", "ظم"), + (0xFCBA, "M", "عج"), + (0xFCBB, "M", "عم"), + (0xFCBC, "M", "غج"), + (0xFCBD, "M", "غم"), + (0xFCBE, "M", "فج"), + ] + + +def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFCBF, "M", "فح"), + (0xFCC0, "M", "فخ"), + (0xFCC1, "M", "فم"), + (0xFCC2, "M", "قح"), + (0xFCC3, "M", "قم"), + (0xFCC4, "M", "كج"), + (0xFCC5, "M", "كح"), + (0xFCC6, "M", "كخ"), + (0xFCC7, "M", "كل"), + (0xFCC8, "M", "كم"), + (0xFCC9, "M", "لج"), + (0xFCCA, "M", "لح"), + (0xFCCB, "M", "لخ"), + (0xFCCC, "M", "لم"), + (0xFCCD, "M", "له"), + (0xFCCE, "M", "مج"), + (0xFCCF, "M", "مح"), + (0xFCD0, "M", "مخ"), + (0xFCD1, "M", "مم"), + (0xFCD2, "M", "نج"), + (0xFCD3, "M", "نح"), + (0xFCD4, "M", "نخ"), + (0xFCD5, "M", "نم"), + (0xFCD6, "M", "نه"), + (0xFCD7, "M", "هج"), + (0xFCD8, "M", "هم"), + (0xFCD9, "M", "هٰ"), + (0xFCDA, "M", "يج"), + (0xFCDB, "M", "يح"), + (0xFCDC, "M", "يخ"), + (0xFCDD, "M", "يم"), + (0xFCDE, "M", "يه"), + (0xFCDF, "M", "ئم"), + (0xFCE0, "M", "ئه"), + (0xFCE1, "M", "بم"), + (0xFCE2, "M", "به"), + (0xFCE3, "M", "تم"), + (0xFCE4, "M", "ته"), + (0xFCE5, "M", "ثم"), + (0xFCE6, "M", "ثه"), + (0xFCE7, "M", "سم"), + (0xFCE8, "M", "سه"), + (0xFCE9, "M", "شم"), + (0xFCEA, "M", "شه"), + (0xFCEB, "M", "كل"), + (0xFCEC, "M", "كم"), + (0xFCED, "M", "لم"), + (0xFCEE, "M", "نم"), + (0xFCEF, "M", "نه"), + (0xFCF0, "M", "يم"), + (0xFCF1, "M", "يه"), + (0xFCF2, 
"M", "ـَّ"), + (0xFCF3, "M", "ـُّ"), + (0xFCF4, "M", "ـِّ"), + (0xFCF5, "M", "طى"), + (0xFCF6, "M", "طي"), + (0xFCF7, "M", "عى"), + (0xFCF8, "M", "عي"), + (0xFCF9, "M", "غى"), + (0xFCFA, "M", "غي"), + (0xFCFB, "M", "سى"), + (0xFCFC, "M", "سي"), + (0xFCFD, "M", "شى"), + (0xFCFE, "M", "شي"), + (0xFCFF, "M", "حى"), + (0xFD00, "M", "حي"), + (0xFD01, "M", "جى"), + (0xFD02, "M", "جي"), + (0xFD03, "M", "خى"), + (0xFD04, "M", "خي"), + (0xFD05, "M", "صى"), + (0xFD06, "M", "صي"), + (0xFD07, "M", "ضى"), + (0xFD08, "M", "ضي"), + (0xFD09, "M", "شج"), + (0xFD0A, "M", "شح"), + (0xFD0B, "M", "شخ"), + (0xFD0C, "M", "شم"), + (0xFD0D, "M", "شر"), + (0xFD0E, "M", "سر"), + (0xFD0F, "M", "صر"), + (0xFD10, "M", "ضر"), + (0xFD11, "M", "طى"), + (0xFD12, "M", "طي"), + (0xFD13, "M", "عى"), + (0xFD14, "M", "عي"), + (0xFD15, "M", "غى"), + (0xFD16, "M", "غي"), + (0xFD17, "M", "سى"), + (0xFD18, "M", "سي"), + (0xFD19, "M", "شى"), + (0xFD1A, "M", "شي"), + (0xFD1B, "M", "حى"), + (0xFD1C, "M", "حي"), + (0xFD1D, "M", "جى"), + (0xFD1E, "M", "جي"), + (0xFD1F, "M", "خى"), + (0xFD20, "M", "خي"), + (0xFD21, "M", "صى"), + (0xFD22, "M", "صي"), + ] + + +def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFD23, "M", "ضى"), + (0xFD24, "M", "ضي"), + (0xFD25, "M", "شج"), + (0xFD26, "M", "شح"), + (0xFD27, "M", "شخ"), + (0xFD28, "M", "شم"), + (0xFD29, "M", "شر"), + (0xFD2A, "M", "سر"), + (0xFD2B, "M", "صر"), + (0xFD2C, "M", "ضر"), + (0xFD2D, "M", "شج"), + (0xFD2E, "M", "شح"), + (0xFD2F, "M", "شخ"), + (0xFD30, "M", "شم"), + (0xFD31, "M", "سه"), + (0xFD32, "M", "شه"), + (0xFD33, "M", "طم"), + (0xFD34, "M", "سج"), + (0xFD35, "M", "سح"), + (0xFD36, "M", "سخ"), + (0xFD37, "M", "شج"), + (0xFD38, "M", "شح"), + (0xFD39, "M", "شخ"), + (0xFD3A, "M", "طم"), + (0xFD3B, "M", "ظم"), + (0xFD3C, "M", "اً"), + (0xFD3E, "V"), + (0xFD50, "M", "تجم"), + (0xFD51, "M", "تحج"), + (0xFD53, "M", "تحم"), + (0xFD54, "M", "تخم"), + (0xFD55, "M", "تمج"), + (0xFD56, "M", "تمح"), + (0xFD57, "M", "تمخ"), + (0xFD58, "M", "جمح"), + (0xFD5A, "M", "حمي"), + (0xFD5B, "M", "حمى"), + (0xFD5C, "M", "سحج"), + (0xFD5D, "M", "سجح"), + (0xFD5E, "M", "سجى"), + (0xFD5F, "M", "سمح"), + (0xFD61, "M", "سمج"), + (0xFD62, "M", "سمم"), + (0xFD64, "M", "صحح"), + (0xFD66, "M", "صمم"), + (0xFD67, "M", "شحم"), + (0xFD69, "M", "شجي"), + (0xFD6A, "M", "شمخ"), + (0xFD6C, "M", "شمم"), + (0xFD6E, "M", "ضحى"), + (0xFD6F, "M", "ضخم"), + (0xFD71, "M", "طمح"), + (0xFD73, "M", "طمم"), + (0xFD74, "M", "طمي"), + (0xFD75, "M", "عجم"), + (0xFD76, "M", "عمم"), + (0xFD78, "M", "عمى"), + (0xFD79, "M", "غمم"), + (0xFD7A, "M", "غمي"), + (0xFD7B, "M", "غمى"), + (0xFD7C, "M", "فخم"), + (0xFD7E, "M", "قمح"), + (0xFD7F, "M", "قمم"), + (0xFD80, "M", "لحم"), + (0xFD81, "M", "لحي"), + (0xFD82, "M", "لحى"), + (0xFD83, "M", "لجج"), + (0xFD85, "M", "لخم"), + (0xFD87, "M", "لمح"), + (0xFD89, "M", "محج"), + (0xFD8A, "M", "محم"), + (0xFD8B, "M", "محي"), + (0xFD8C, "M", "مجح"), + (0xFD8D, "M", "مجم"), + (0xFD8E, "M", "مخج"), + (0xFD8F, "M", "مخم"), + (0xFD90, "X"), + (0xFD92, "M", "مجخ"), + (0xFD93, "M", "همج"), + (0xFD94, "M", "همم"), + (0xFD95, "M", "نحم"), + (0xFD96, "M", "نحى"), + (0xFD97, "M", "نجم"), + (0xFD99, "M", "نجى"), + (0xFD9A, "M", "نمي"), + (0xFD9B, "M", "نمى"), + (0xFD9C, "M", "يمم"), + (0xFD9E, "M", "بخي"), + (0xFD9F, "M", "تجي"), + (0xFDA0, "M", "تجى"), + (0xFDA1, "M", "تخي"), + (0xFDA2, "M", "تخى"), + (0xFDA3, "M", "تمي"), + (0xFDA4, "M", "تمى"), + (0xFDA5, "M", "جمي"), + (0xFDA6, "M", "جحى"), + (0xFDA7, "M", "جمى"), + (0xFDA8, "M", "سخى"), + (0xFDA9, "M", "صحي"), + (0xFDAA, 
"M", "شحي"), + ] + + +def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFDAB, "M", "ضحي"), + (0xFDAC, "M", "لجي"), + (0xFDAD, "M", "لمي"), + (0xFDAE, "M", "يحي"), + (0xFDAF, "M", "يجي"), + (0xFDB0, "M", "يمي"), + (0xFDB1, "M", "ممي"), + (0xFDB2, "M", "قمي"), + (0xFDB3, "M", "نحي"), + (0xFDB4, "M", "قمح"), + (0xFDB5, "M", "لحم"), + (0xFDB6, "M", "عمي"), + (0xFDB7, "M", "كمي"), + (0xFDB8, "M", "نجح"), + (0xFDB9, "M", "مخي"), + (0xFDBA, "M", "لجم"), + (0xFDBB, "M", "كمم"), + (0xFDBC, "M", "لجم"), + (0xFDBD, "M", "نجح"), + (0xFDBE, "M", "جحي"), + (0xFDBF, "M", "حجي"), + (0xFDC0, "M", "مجي"), + (0xFDC1, "M", "فمي"), + (0xFDC2, "M", "بحي"), + (0xFDC3, "M", "كمم"), + (0xFDC4, "M", "عجم"), + (0xFDC5, "M", "صمم"), + (0xFDC6, "M", "سخي"), + (0xFDC7, "M", "نجي"), + (0xFDC8, "X"), + (0xFDCF, "V"), + (0xFDD0, "X"), + (0xFDF0, "M", "صلے"), + (0xFDF1, "M", "قلے"), + (0xFDF2, "M", "الله"), + (0xFDF3, "M", "اكبر"), + (0xFDF4, "M", "محمد"), + (0xFDF5, "M", "صلعم"), + (0xFDF6, "M", "رسول"), + (0xFDF7, "M", "عليه"), + (0xFDF8, "M", "وسلم"), + (0xFDF9, "M", "صلى"), + (0xFDFA, "3", "صلى الله عليه وسلم"), + (0xFDFB, "3", "جل جلاله"), + (0xFDFC, "M", "ریال"), + (0xFDFD, "V"), + (0xFE00, "I"), + (0xFE10, "3", ","), + (0xFE11, "M", "、"), + (0xFE12, "X"), + (0xFE13, "3", ":"), + (0xFE14, "3", ";"), + (0xFE15, "3", "!"), + (0xFE16, "3", "?"), + (0xFE17, "M", "〖"), + (0xFE18, "M", "〗"), + (0xFE19, "X"), + (0xFE20, "V"), + (0xFE30, "X"), + (0xFE31, "M", "—"), + (0xFE32, "M", "–"), + (0xFE33, "3", "_"), + (0xFE35, "3", "("), + (0xFE36, "3", ")"), + (0xFE37, "3", "{"), + (0xFE38, "3", "}"), + (0xFE39, "M", "〔"), + (0xFE3A, "M", "〕"), + (0xFE3B, "M", "【"), + (0xFE3C, "M", "】"), + (0xFE3D, "M", "《"), + (0xFE3E, "M", "》"), + (0xFE3F, "M", "〈"), + (0xFE40, "M", "〉"), + (0xFE41, "M", "「"), + (0xFE42, "M", "」"), + (0xFE43, "M", "『"), + (0xFE44, "M", "』"), + (0xFE45, "V"), + (0xFE47, "3", "["), + (0xFE48, "3", "]"), + (0xFE49, "3", " ̅"), + (0xFE4D, "3", "_"), + (0xFE50, "3", ","), + (0xFE51, "M", "、"), + (0xFE52, "X"), + (0xFE54, "3", ";"), + (0xFE55, "3", ":"), + (0xFE56, "3", "?"), + (0xFE57, "3", "!"), + (0xFE58, "M", "—"), + (0xFE59, "3", "("), + (0xFE5A, "3", ")"), + (0xFE5B, "3", "{"), + (0xFE5C, "3", "}"), + (0xFE5D, "M", "〔"), + (0xFE5E, "M", "〕"), + (0xFE5F, "3", "#"), + (0xFE60, "3", "&"), + (0xFE61, "3", "*"), + ] + + +def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFE62, "3", "+"), + (0xFE63, "M", "-"), + (0xFE64, "3", "<"), + (0xFE65, "3", ">"), + (0xFE66, "3", "="), + (0xFE67, "X"), + (0xFE68, "3", "\\"), + (0xFE69, "3", "$"), + (0xFE6A, "3", "%"), + (0xFE6B, "3", "@"), + (0xFE6C, "X"), + (0xFE70, "3", " ً"), + (0xFE71, "M", "ـً"), + (0xFE72, "3", " ٌ"), + (0xFE73, "V"), + (0xFE74, "3", " ٍ"), + (0xFE75, "X"), + (0xFE76, "3", " َ"), + (0xFE77, "M", "ـَ"), + (0xFE78, "3", " ُ"), + (0xFE79, "M", "ـُ"), + (0xFE7A, "3", " ِ"), + (0xFE7B, "M", "ـِ"), + (0xFE7C, "3", " ّ"), + (0xFE7D, "M", "ـّ"), + (0xFE7E, "3", " ْ"), + (0xFE7F, "M", "ـْ"), + (0xFE80, "M", "ء"), + (0xFE81, "M", "آ"), + (0xFE83, "M", "أ"), + (0xFE85, "M", "ؤ"), + (0xFE87, "M", "إ"), + (0xFE89, "M", "ئ"), + (0xFE8D, "M", "ا"), + (0xFE8F, "M", "ب"), + (0xFE93, "M", "ة"), + (0xFE95, "M", "ت"), + (0xFE99, "M", "ث"), + (0xFE9D, "M", "ج"), + (0xFEA1, "M", "ح"), + (0xFEA5, "M", "خ"), + (0xFEA9, "M", "د"), + (0xFEAB, "M", "ذ"), + (0xFEAD, "M", "ر"), + (0xFEAF, "M", "ز"), + (0xFEB1, "M", "س"), + (0xFEB5, "M", "ش"), + (0xFEB9, "M", "ص"), + (0xFEBD, "M", "ض"), + (0xFEC1, "M", "ط"), + (0xFEC5, 
"M", "ظ"), + (0xFEC9, "M", "ع"), + (0xFECD, "M", "غ"), + (0xFED1, "M", "ف"), + (0xFED5, "M", "ق"), + (0xFED9, "M", "ك"), + (0xFEDD, "M", "ل"), + (0xFEE1, "M", "م"), + (0xFEE5, "M", "ن"), + (0xFEE9, "M", "ه"), + (0xFEED, "M", "و"), + (0xFEEF, "M", "ى"), + (0xFEF1, "M", "ي"), + (0xFEF5, "M", "لآ"), + (0xFEF7, "M", "لأ"), + (0xFEF9, "M", "لإ"), + (0xFEFB, "M", "لا"), + (0xFEFD, "X"), + (0xFEFF, "I"), + (0xFF00, "X"), + (0xFF01, "3", "!"), + (0xFF02, "3", '"'), + (0xFF03, "3", "#"), + (0xFF04, "3", "$"), + (0xFF05, "3", "%"), + (0xFF06, "3", "&"), + (0xFF07, "3", "'"), + (0xFF08, "3", "("), + (0xFF09, "3", ")"), + (0xFF0A, "3", "*"), + (0xFF0B, "3", "+"), + (0xFF0C, "3", ","), + (0xFF0D, "M", "-"), + (0xFF0E, "M", "."), + (0xFF0F, "3", "/"), + (0xFF10, "M", "0"), + (0xFF11, "M", "1"), + (0xFF12, "M", "2"), + (0xFF13, "M", "3"), + (0xFF14, "M", "4"), + (0xFF15, "M", "5"), + (0xFF16, "M", "6"), + (0xFF17, "M", "7"), + (0xFF18, "M", "8"), + (0xFF19, "M", "9"), + (0xFF1A, "3", ":"), + (0xFF1B, "3", ";"), + (0xFF1C, "3", "<"), + (0xFF1D, "3", "="), + (0xFF1E, "3", ">"), + ] + + +def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFF1F, "3", "?"), + (0xFF20, "3", "@"), + (0xFF21, "M", "a"), + (0xFF22, "M", "b"), + (0xFF23, "M", "c"), + (0xFF24, "M", "d"), + (0xFF25, "M", "e"), + (0xFF26, "M", "f"), + (0xFF27, "M", "g"), + (0xFF28, "M", "h"), + (0xFF29, "M", "i"), + (0xFF2A, "M", "j"), + (0xFF2B, "M", "k"), + (0xFF2C, "M", "l"), + (0xFF2D, "M", "m"), + (0xFF2E, "M", "n"), + (0xFF2F, "M", "o"), + (0xFF30, "M", "p"), + (0xFF31, "M", "q"), + (0xFF32, "M", "r"), + (0xFF33, "M", "s"), + (0xFF34, "M", "t"), + (0xFF35, "M", "u"), + (0xFF36, "M", "v"), + (0xFF37, "M", "w"), + (0xFF38, "M", "x"), + (0xFF39, "M", "y"), + (0xFF3A, "M", "z"), + (0xFF3B, "3", "["), + (0xFF3C, "3", "\\"), + (0xFF3D, "3", "]"), + (0xFF3E, "3", "^"), + (0xFF3F, "3", "_"), + (0xFF40, "3", "`"), + (0xFF41, "M", "a"), + (0xFF42, "M", "b"), + (0xFF43, "M", "c"), + (0xFF44, "M", "d"), + (0xFF45, "M", "e"), + (0xFF46, "M", "f"), + (0xFF47, "M", "g"), + (0xFF48, "M", "h"), + (0xFF49, "M", "i"), + (0xFF4A, "M", "j"), + (0xFF4B, "M", "k"), + (0xFF4C, "M", "l"), + (0xFF4D, "M", "m"), + (0xFF4E, "M", "n"), + (0xFF4F, "M", "o"), + (0xFF50, "M", "p"), + (0xFF51, "M", "q"), + (0xFF52, "M", "r"), + (0xFF53, "M", "s"), + (0xFF54, "M", "t"), + (0xFF55, "M", "u"), + (0xFF56, "M", "v"), + (0xFF57, "M", "w"), + (0xFF58, "M", "x"), + (0xFF59, "M", "y"), + (0xFF5A, "M", "z"), + (0xFF5B, "3", "{"), + (0xFF5C, "3", "|"), + (0xFF5D, "3", "}"), + (0xFF5E, "3", "~"), + (0xFF5F, "M", "⦅"), + (0xFF60, "M", "⦆"), + (0xFF61, "M", "."), + (0xFF62, "M", "「"), + (0xFF63, "M", "」"), + (0xFF64, "M", "、"), + (0xFF65, "M", "・"), + (0xFF66, "M", "ヲ"), + (0xFF67, "M", "ァ"), + (0xFF68, "M", "ィ"), + (0xFF69, "M", "ゥ"), + (0xFF6A, "M", "ェ"), + (0xFF6B, "M", "ォ"), + (0xFF6C, "M", "ャ"), + (0xFF6D, "M", "ュ"), + (0xFF6E, "M", "ョ"), + (0xFF6F, "M", "ッ"), + (0xFF70, "M", "ー"), + (0xFF71, "M", "ア"), + (0xFF72, "M", "イ"), + (0xFF73, "M", "ウ"), + (0xFF74, "M", "エ"), + (0xFF75, "M", "オ"), + (0xFF76, "M", "カ"), + (0xFF77, "M", "キ"), + (0xFF78, "M", "ク"), + (0xFF79, "M", "ケ"), + (0xFF7A, "M", "コ"), + (0xFF7B, "M", "サ"), + (0xFF7C, "M", "シ"), + (0xFF7D, "M", "ス"), + (0xFF7E, "M", "セ"), + (0xFF7F, "M", "ソ"), + (0xFF80, "M", "タ"), + (0xFF81, "M", "チ"), + (0xFF82, "M", "ツ"), + ] + + +def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFF83, "M", "テ"), + (0xFF84, "M", "ト"), + (0xFF85, "M", "ナ"), + (0xFF86, "M", "ニ"), + 
(0xFF87, "M", "ヌ"), + (0xFF88, "M", "ネ"), + (0xFF89, "M", "ノ"), + (0xFF8A, "M", "ハ"), + (0xFF8B, "M", "ヒ"), + (0xFF8C, "M", "フ"), + (0xFF8D, "M", "ヘ"), + (0xFF8E, "M", "ホ"), + (0xFF8F, "M", "マ"), + (0xFF90, "M", "ミ"), + (0xFF91, "M", "ム"), + (0xFF92, "M", "メ"), + (0xFF93, "M", "モ"), + (0xFF94, "M", "ヤ"), + (0xFF95, "M", "ユ"), + (0xFF96, "M", "ヨ"), + (0xFF97, "M", "ラ"), + (0xFF98, "M", "リ"), + (0xFF99, "M", "ル"), + (0xFF9A, "M", "レ"), + (0xFF9B, "M", "ロ"), + (0xFF9C, "M", "ワ"), + (0xFF9D, "M", "ン"), + (0xFF9E, "M", "゙"), + (0xFF9F, "M", "゚"), + (0xFFA0, "X"), + (0xFFA1, "M", "ᄀ"), + (0xFFA2, "M", "ᄁ"), + (0xFFA3, "M", "ᆪ"), + (0xFFA4, "M", "ᄂ"), + (0xFFA5, "M", "ᆬ"), + (0xFFA6, "M", "ᆭ"), + (0xFFA7, "M", "ᄃ"), + (0xFFA8, "M", "ᄄ"), + (0xFFA9, "M", "ᄅ"), + (0xFFAA, "M", "ᆰ"), + (0xFFAB, "M", "ᆱ"), + (0xFFAC, "M", "ᆲ"), + (0xFFAD, "M", "ᆳ"), + (0xFFAE, "M", "ᆴ"), + (0xFFAF, "M", "ᆵ"), + (0xFFB0, "M", "ᄚ"), + (0xFFB1, "M", "ᄆ"), + (0xFFB2, "M", "ᄇ"), + (0xFFB3, "M", "ᄈ"), + (0xFFB4, "M", "ᄡ"), + (0xFFB5, "M", "ᄉ"), + (0xFFB6, "M", "ᄊ"), + (0xFFB7, "M", "ᄋ"), + (0xFFB8, "M", "ᄌ"), + (0xFFB9, "M", "ᄍ"), + (0xFFBA, "M", "ᄎ"), + (0xFFBB, "M", "ᄏ"), + (0xFFBC, "M", "ᄐ"), + (0xFFBD, "M", "ᄑ"), + (0xFFBE, "M", "ᄒ"), + (0xFFBF, "X"), + (0xFFC2, "M", "ᅡ"), + (0xFFC3, "M", "ᅢ"), + (0xFFC4, "M", "ᅣ"), + (0xFFC5, "M", "ᅤ"), + (0xFFC6, "M", "ᅥ"), + (0xFFC7, "M", "ᅦ"), + (0xFFC8, "X"), + (0xFFCA, "M", "ᅧ"), + (0xFFCB, "M", "ᅨ"), + (0xFFCC, "M", "ᅩ"), + (0xFFCD, "M", "ᅪ"), + (0xFFCE, "M", "ᅫ"), + (0xFFCF, "M", "ᅬ"), + (0xFFD0, "X"), + (0xFFD2, "M", "ᅭ"), + (0xFFD3, "M", "ᅮ"), + (0xFFD4, "M", "ᅯ"), + (0xFFD5, "M", "ᅰ"), + (0xFFD6, "M", "ᅱ"), + (0xFFD7, "M", "ᅲ"), + (0xFFD8, "X"), + (0xFFDA, "M", "ᅳ"), + (0xFFDB, "M", "ᅴ"), + (0xFFDC, "M", "ᅵ"), + (0xFFDD, "X"), + (0xFFE0, "M", "¢"), + (0xFFE1, "M", "£"), + (0xFFE2, "M", "¬"), + (0xFFE3, "3", " ̄"), + (0xFFE4, "M", "¦"), + (0xFFE5, "M", "¥"), + (0xFFE6, "M", "₩"), + (0xFFE7, "X"), + (0xFFE8, "M", "│"), + (0xFFE9, "M", "←"), + (0xFFEA, "M", "↑"), + (0xFFEB, "M", "→"), + (0xFFEC, "M", "↓"), + (0xFFED, "M", "■"), + ] + + +def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFFEE, "M", "○"), + (0xFFEF, "X"), + (0x10000, "V"), + (0x1000C, "X"), + (0x1000D, "V"), + (0x10027, "X"), + (0x10028, "V"), + (0x1003B, "X"), + (0x1003C, "V"), + (0x1003E, "X"), + (0x1003F, "V"), + (0x1004E, "X"), + (0x10050, "V"), + (0x1005E, "X"), + (0x10080, "V"), + (0x100FB, "X"), + (0x10100, "V"), + (0x10103, "X"), + (0x10107, "V"), + (0x10134, "X"), + (0x10137, "V"), + (0x1018F, "X"), + (0x10190, "V"), + (0x1019D, "X"), + (0x101A0, "V"), + (0x101A1, "X"), + (0x101D0, "V"), + (0x101FE, "X"), + (0x10280, "V"), + (0x1029D, "X"), + (0x102A0, "V"), + (0x102D1, "X"), + (0x102E0, "V"), + (0x102FC, "X"), + (0x10300, "V"), + (0x10324, "X"), + (0x1032D, "V"), + (0x1034B, "X"), + (0x10350, "V"), + (0x1037B, "X"), + (0x10380, "V"), + (0x1039E, "X"), + (0x1039F, "V"), + (0x103C4, "X"), + (0x103C8, "V"), + (0x103D6, "X"), + (0x10400, "M", "𐐨"), + (0x10401, "M", "𐐩"), + (0x10402, "M", "𐐪"), + (0x10403, "M", "𐐫"), + (0x10404, "M", "𐐬"), + (0x10405, "M", "𐐭"), + (0x10406, "M", "𐐮"), + (0x10407, "M", "𐐯"), + (0x10408, "M", "𐐰"), + (0x10409, "M", "𐐱"), + (0x1040A, "M", "𐐲"), + (0x1040B, "M", "𐐳"), + (0x1040C, "M", "𐐴"), + (0x1040D, "M", "𐐵"), + (0x1040E, "M", "𐐶"), + (0x1040F, "M", "𐐷"), + (0x10410, "M", "𐐸"), + (0x10411, "M", "𐐹"), + (0x10412, "M", "𐐺"), + (0x10413, "M", "𐐻"), + (0x10414, "M", "𐐼"), + (0x10415, "M", "𐐽"), + (0x10416, "M", "𐐾"), + (0x10417, "M", "𐐿"), + 
(0x10418, "M", "𐑀"), + (0x10419, "M", "𐑁"), + (0x1041A, "M", "𐑂"), + (0x1041B, "M", "𐑃"), + (0x1041C, "M", "𐑄"), + (0x1041D, "M", "𐑅"), + (0x1041E, "M", "𐑆"), + (0x1041F, "M", "𐑇"), + (0x10420, "M", "𐑈"), + (0x10421, "M", "𐑉"), + (0x10422, "M", "𐑊"), + (0x10423, "M", "𐑋"), + (0x10424, "M", "𐑌"), + (0x10425, "M", "𐑍"), + (0x10426, "M", "𐑎"), + (0x10427, "M", "𐑏"), + (0x10428, "V"), + (0x1049E, "X"), + (0x104A0, "V"), + (0x104AA, "X"), + (0x104B0, "M", "𐓘"), + (0x104B1, "M", "𐓙"), + (0x104B2, "M", "𐓚"), + (0x104B3, "M", "𐓛"), + (0x104B4, "M", "𐓜"), + (0x104B5, "M", "𐓝"), + (0x104B6, "M", "𐓞"), + (0x104B7, "M", "𐓟"), + (0x104B8, "M", "𐓠"), + (0x104B9, "M", "𐓡"), + ] + + +def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x104BA, "M", "𐓢"), + (0x104BB, "M", "𐓣"), + (0x104BC, "M", "𐓤"), + (0x104BD, "M", "𐓥"), + (0x104BE, "M", "𐓦"), + (0x104BF, "M", "𐓧"), + (0x104C0, "M", "𐓨"), + (0x104C1, "M", "𐓩"), + (0x104C2, "M", "𐓪"), + (0x104C3, "M", "𐓫"), + (0x104C4, "M", "𐓬"), + (0x104C5, "M", "𐓭"), + (0x104C6, "M", "𐓮"), + (0x104C7, "M", "𐓯"), + (0x104C8, "M", "𐓰"), + (0x104C9, "M", "𐓱"), + (0x104CA, "M", "𐓲"), + (0x104CB, "M", "𐓳"), + (0x104CC, "M", "𐓴"), + (0x104CD, "M", "𐓵"), + (0x104CE, "M", "𐓶"), + (0x104CF, "M", "𐓷"), + (0x104D0, "M", "𐓸"), + (0x104D1, "M", "𐓹"), + (0x104D2, "M", "𐓺"), + (0x104D3, "M", "𐓻"), + (0x104D4, "X"), + (0x104D8, "V"), + (0x104FC, "X"), + (0x10500, "V"), + (0x10528, "X"), + (0x10530, "V"), + (0x10564, "X"), + (0x1056F, "V"), + (0x10570, "M", "𐖗"), + (0x10571, "M", "𐖘"), + (0x10572, "M", "𐖙"), + (0x10573, "M", "𐖚"), + (0x10574, "M", "𐖛"), + (0x10575, "M", "𐖜"), + (0x10576, "M", "𐖝"), + (0x10577, "M", "𐖞"), + (0x10578, "M", "𐖟"), + (0x10579, "M", "𐖠"), + (0x1057A, "M", "𐖡"), + (0x1057B, "X"), + (0x1057C, "M", "𐖣"), + (0x1057D, "M", "𐖤"), + (0x1057E, "M", "𐖥"), + (0x1057F, "M", "𐖦"), + (0x10580, "M", "𐖧"), + (0x10581, "M", "𐖨"), + (0x10582, "M", "𐖩"), + (0x10583, "M", "𐖪"), + (0x10584, "M", "𐖫"), + (0x10585, "M", "𐖬"), + (0x10586, "M", "𐖭"), + (0x10587, "M", "𐖮"), + (0x10588, "M", "𐖯"), + (0x10589, "M", "𐖰"), + (0x1058A, "M", "𐖱"), + (0x1058B, "X"), + (0x1058C, "M", "𐖳"), + (0x1058D, "M", "𐖴"), + (0x1058E, "M", "𐖵"), + (0x1058F, "M", "𐖶"), + (0x10590, "M", "𐖷"), + (0x10591, "M", "𐖸"), + (0x10592, "M", "𐖹"), + (0x10593, "X"), + (0x10594, "M", "𐖻"), + (0x10595, "M", "𐖼"), + (0x10596, "X"), + (0x10597, "V"), + (0x105A2, "X"), + (0x105A3, "V"), + (0x105B2, "X"), + (0x105B3, "V"), + (0x105BA, "X"), + (0x105BB, "V"), + (0x105BD, "X"), + (0x10600, "V"), + (0x10737, "X"), + (0x10740, "V"), + (0x10756, "X"), + (0x10760, "V"), + (0x10768, "X"), + (0x10780, "V"), + (0x10781, "M", "ː"), + (0x10782, "M", "ˑ"), + (0x10783, "M", "æ"), + (0x10784, "M", "ʙ"), + (0x10785, "M", "ɓ"), + (0x10786, "X"), + (0x10787, "M", "ʣ"), + (0x10788, "M", "ꭦ"), + (0x10789, "M", "ʥ"), + (0x1078A, "M", "ʤ"), + (0x1078B, "M", "ɖ"), + (0x1078C, "M", "ɗ"), + ] + + +def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1078D, "M", "ᶑ"), + (0x1078E, "M", "ɘ"), + (0x1078F, "M", "ɞ"), + (0x10790, "M", "ʩ"), + (0x10791, "M", "ɤ"), + (0x10792, "M", "ɢ"), + (0x10793, "M", "ɠ"), + (0x10794, "M", "ʛ"), + (0x10795, "M", "ħ"), + (0x10796, "M", "ʜ"), + (0x10797, "M", "ɧ"), + (0x10798, "M", "ʄ"), + (0x10799, "M", "ʪ"), + (0x1079A, "M", "ʫ"), + (0x1079B, "M", "ɬ"), + (0x1079C, "M", "𝼄"), + (0x1079D, "M", "ꞎ"), + (0x1079E, "M", "ɮ"), + (0x1079F, "M", "𝼅"), + (0x107A0, "M", "ʎ"), + (0x107A1, "M", "𝼆"), + (0x107A2, "M", "ø"), + (0x107A3, "M", "ɶ"), + (0x107A4, "M", 
"ɷ"), + (0x107A5, "M", "q"), + (0x107A6, "M", "ɺ"), + (0x107A7, "M", "𝼈"), + (0x107A8, "M", "ɽ"), + (0x107A9, "M", "ɾ"), + (0x107AA, "M", "ʀ"), + (0x107AB, "M", "ʨ"), + (0x107AC, "M", "ʦ"), + (0x107AD, "M", "ꭧ"), + (0x107AE, "M", "ʧ"), + (0x107AF, "M", "ʈ"), + (0x107B0, "M", "ⱱ"), + (0x107B1, "X"), + (0x107B2, "M", "ʏ"), + (0x107B3, "M", "ʡ"), + (0x107B4, "M", "ʢ"), + (0x107B5, "M", "ʘ"), + (0x107B6, "M", "ǀ"), + (0x107B7, "M", "ǁ"), + (0x107B8, "M", "ǂ"), + (0x107B9, "M", "𝼊"), + (0x107BA, "M", "𝼞"), + (0x107BB, "X"), + (0x10800, "V"), + (0x10806, "X"), + (0x10808, "V"), + (0x10809, "X"), + (0x1080A, "V"), + (0x10836, "X"), + (0x10837, "V"), + (0x10839, "X"), + (0x1083C, "V"), + (0x1083D, "X"), + (0x1083F, "V"), + (0x10856, "X"), + (0x10857, "V"), + (0x1089F, "X"), + (0x108A7, "V"), + (0x108B0, "X"), + (0x108E0, "V"), + (0x108F3, "X"), + (0x108F4, "V"), + (0x108F6, "X"), + (0x108FB, "V"), + (0x1091C, "X"), + (0x1091F, "V"), + (0x1093A, "X"), + (0x1093F, "V"), + (0x10940, "X"), + (0x10980, "V"), + (0x109B8, "X"), + (0x109BC, "V"), + (0x109D0, "X"), + (0x109D2, "V"), + (0x10A04, "X"), + (0x10A05, "V"), + (0x10A07, "X"), + (0x10A0C, "V"), + (0x10A14, "X"), + (0x10A15, "V"), + (0x10A18, "X"), + (0x10A19, "V"), + (0x10A36, "X"), + (0x10A38, "V"), + (0x10A3B, "X"), + (0x10A3F, "V"), + (0x10A49, "X"), + (0x10A50, "V"), + (0x10A59, "X"), + (0x10A60, "V"), + (0x10AA0, "X"), + (0x10AC0, "V"), + (0x10AE7, "X"), + (0x10AEB, "V"), + (0x10AF7, "X"), + (0x10B00, "V"), + ] + + +def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x10B36, "X"), + (0x10B39, "V"), + (0x10B56, "X"), + (0x10B58, "V"), + (0x10B73, "X"), + (0x10B78, "V"), + (0x10B92, "X"), + (0x10B99, "V"), + (0x10B9D, "X"), + (0x10BA9, "V"), + (0x10BB0, "X"), + (0x10C00, "V"), + (0x10C49, "X"), + (0x10C80, "M", "𐳀"), + (0x10C81, "M", "𐳁"), + (0x10C82, "M", "𐳂"), + (0x10C83, "M", "𐳃"), + (0x10C84, "M", "𐳄"), + (0x10C85, "M", "𐳅"), + (0x10C86, "M", "𐳆"), + (0x10C87, "M", "𐳇"), + (0x10C88, "M", "𐳈"), + (0x10C89, "M", "𐳉"), + (0x10C8A, "M", "𐳊"), + (0x10C8B, "M", "𐳋"), + (0x10C8C, "M", "𐳌"), + (0x10C8D, "M", "𐳍"), + (0x10C8E, "M", "𐳎"), + (0x10C8F, "M", "𐳏"), + (0x10C90, "M", "𐳐"), + (0x10C91, "M", "𐳑"), + (0x10C92, "M", "𐳒"), + (0x10C93, "M", "𐳓"), + (0x10C94, "M", "𐳔"), + (0x10C95, "M", "𐳕"), + (0x10C96, "M", "𐳖"), + (0x10C97, "M", "𐳗"), + (0x10C98, "M", "𐳘"), + (0x10C99, "M", "𐳙"), + (0x10C9A, "M", "𐳚"), + (0x10C9B, "M", "𐳛"), + (0x10C9C, "M", "𐳜"), + (0x10C9D, "M", "𐳝"), + (0x10C9E, "M", "𐳞"), + (0x10C9F, "M", "𐳟"), + (0x10CA0, "M", "𐳠"), + (0x10CA1, "M", "𐳡"), + (0x10CA2, "M", "𐳢"), + (0x10CA3, "M", "𐳣"), + (0x10CA4, "M", "𐳤"), + (0x10CA5, "M", "𐳥"), + (0x10CA6, "M", "𐳦"), + (0x10CA7, "M", "𐳧"), + (0x10CA8, "M", "𐳨"), + (0x10CA9, "M", "𐳩"), + (0x10CAA, "M", "𐳪"), + (0x10CAB, "M", "𐳫"), + (0x10CAC, "M", "𐳬"), + (0x10CAD, "M", "𐳭"), + (0x10CAE, "M", "𐳮"), + (0x10CAF, "M", "𐳯"), + (0x10CB0, "M", "𐳰"), + (0x10CB1, "M", "𐳱"), + (0x10CB2, "M", "𐳲"), + (0x10CB3, "X"), + (0x10CC0, "V"), + (0x10CF3, "X"), + (0x10CFA, "V"), + (0x10D28, "X"), + (0x10D30, "V"), + (0x10D3A, "X"), + (0x10E60, "V"), + (0x10E7F, "X"), + (0x10E80, "V"), + (0x10EAA, "X"), + (0x10EAB, "V"), + (0x10EAE, "X"), + (0x10EB0, "V"), + (0x10EB2, "X"), + (0x10EFD, "V"), + (0x10F28, "X"), + (0x10F30, "V"), + (0x10F5A, "X"), + (0x10F70, "V"), + (0x10F8A, "X"), + (0x10FB0, "V"), + (0x10FCC, "X"), + (0x10FE0, "V"), + (0x10FF7, "X"), + (0x11000, "V"), + (0x1104E, "X"), + (0x11052, "V"), + (0x11076, "X"), + (0x1107F, "V"), + (0x110BD, "X"), + (0x110BE, "V"), + 
(0x110C3, "X"), + (0x110D0, "V"), + (0x110E9, "X"), + (0x110F0, "V"), + ] + + +def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x110FA, "X"), + (0x11100, "V"), + (0x11135, "X"), + (0x11136, "V"), + (0x11148, "X"), + (0x11150, "V"), + (0x11177, "X"), + (0x11180, "V"), + (0x111E0, "X"), + (0x111E1, "V"), + (0x111F5, "X"), + (0x11200, "V"), + (0x11212, "X"), + (0x11213, "V"), + (0x11242, "X"), + (0x11280, "V"), + (0x11287, "X"), + (0x11288, "V"), + (0x11289, "X"), + (0x1128A, "V"), + (0x1128E, "X"), + (0x1128F, "V"), + (0x1129E, "X"), + (0x1129F, "V"), + (0x112AA, "X"), + (0x112B0, "V"), + (0x112EB, "X"), + (0x112F0, "V"), + (0x112FA, "X"), + (0x11300, "V"), + (0x11304, "X"), + (0x11305, "V"), + (0x1130D, "X"), + (0x1130F, "V"), + (0x11311, "X"), + (0x11313, "V"), + (0x11329, "X"), + (0x1132A, "V"), + (0x11331, "X"), + (0x11332, "V"), + (0x11334, "X"), + (0x11335, "V"), + (0x1133A, "X"), + (0x1133B, "V"), + (0x11345, "X"), + (0x11347, "V"), + (0x11349, "X"), + (0x1134B, "V"), + (0x1134E, "X"), + (0x11350, "V"), + (0x11351, "X"), + (0x11357, "V"), + (0x11358, "X"), + (0x1135D, "V"), + (0x11364, "X"), + (0x11366, "V"), + (0x1136D, "X"), + (0x11370, "V"), + (0x11375, "X"), + (0x11400, "V"), + (0x1145C, "X"), + (0x1145D, "V"), + (0x11462, "X"), + (0x11480, "V"), + (0x114C8, "X"), + (0x114D0, "V"), + (0x114DA, "X"), + (0x11580, "V"), + (0x115B6, "X"), + (0x115B8, "V"), + (0x115DE, "X"), + (0x11600, "V"), + (0x11645, "X"), + (0x11650, "V"), + (0x1165A, "X"), + (0x11660, "V"), + (0x1166D, "X"), + (0x11680, "V"), + (0x116BA, "X"), + (0x116C0, "V"), + (0x116CA, "X"), + (0x11700, "V"), + (0x1171B, "X"), + (0x1171D, "V"), + (0x1172C, "X"), + (0x11730, "V"), + (0x11747, "X"), + (0x11800, "V"), + (0x1183C, "X"), + (0x118A0, "M", "𑣀"), + (0x118A1, "M", "𑣁"), + (0x118A2, "M", "𑣂"), + (0x118A3, "M", "𑣃"), + (0x118A4, "M", "𑣄"), + (0x118A5, "M", "𑣅"), + (0x118A6, "M", "𑣆"), + (0x118A7, "M", "𑣇"), + (0x118A8, "M", "𑣈"), + (0x118A9, "M", "𑣉"), + (0x118AA, "M", "𑣊"), + ] + + +def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x118AB, "M", "𑣋"), + (0x118AC, "M", "𑣌"), + (0x118AD, "M", "𑣍"), + (0x118AE, "M", "𑣎"), + (0x118AF, "M", "𑣏"), + (0x118B0, "M", "𑣐"), + (0x118B1, "M", "𑣑"), + (0x118B2, "M", "𑣒"), + (0x118B3, "M", "𑣓"), + (0x118B4, "M", "𑣔"), + (0x118B5, "M", "𑣕"), + (0x118B6, "M", "𑣖"), + (0x118B7, "M", "𑣗"), + (0x118B8, "M", "𑣘"), + (0x118B9, "M", "𑣙"), + (0x118BA, "M", "𑣚"), + (0x118BB, "M", "𑣛"), + (0x118BC, "M", "𑣜"), + (0x118BD, "M", "𑣝"), + (0x118BE, "M", "𑣞"), + (0x118BF, "M", "𑣟"), + (0x118C0, "V"), + (0x118F3, "X"), + (0x118FF, "V"), + (0x11907, "X"), + (0x11909, "V"), + (0x1190A, "X"), + (0x1190C, "V"), + (0x11914, "X"), + (0x11915, "V"), + (0x11917, "X"), + (0x11918, "V"), + (0x11936, "X"), + (0x11937, "V"), + (0x11939, "X"), + (0x1193B, "V"), + (0x11947, "X"), + (0x11950, "V"), + (0x1195A, "X"), + (0x119A0, "V"), + (0x119A8, "X"), + (0x119AA, "V"), + (0x119D8, "X"), + (0x119DA, "V"), + (0x119E5, "X"), + (0x11A00, "V"), + (0x11A48, "X"), + (0x11A50, "V"), + (0x11AA3, "X"), + (0x11AB0, "V"), + (0x11AF9, "X"), + (0x11B00, "V"), + (0x11B0A, "X"), + (0x11C00, "V"), + (0x11C09, "X"), + (0x11C0A, "V"), + (0x11C37, "X"), + (0x11C38, "V"), + (0x11C46, "X"), + (0x11C50, "V"), + (0x11C6D, "X"), + (0x11C70, "V"), + (0x11C90, "X"), + (0x11C92, "V"), + (0x11CA8, "X"), + (0x11CA9, "V"), + (0x11CB7, "X"), + (0x11D00, "V"), + (0x11D07, "X"), + (0x11D08, "V"), + (0x11D0A, "X"), + (0x11D0B, "V"), + (0x11D37, "X"), + (0x11D3A, "V"), + (0x11D3B, 
"X"), + (0x11D3C, "V"), + (0x11D3E, "X"), + (0x11D3F, "V"), + (0x11D48, "X"), + (0x11D50, "V"), + (0x11D5A, "X"), + (0x11D60, "V"), + (0x11D66, "X"), + (0x11D67, "V"), + (0x11D69, "X"), + (0x11D6A, "V"), + (0x11D8F, "X"), + (0x11D90, "V"), + (0x11D92, "X"), + (0x11D93, "V"), + (0x11D99, "X"), + (0x11DA0, "V"), + (0x11DAA, "X"), + (0x11EE0, "V"), + (0x11EF9, "X"), + (0x11F00, "V"), + (0x11F11, "X"), + (0x11F12, "V"), + (0x11F3B, "X"), + (0x11F3E, "V"), + ] + + +def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x11F5A, "X"), + (0x11FB0, "V"), + (0x11FB1, "X"), + (0x11FC0, "V"), + (0x11FF2, "X"), + (0x11FFF, "V"), + (0x1239A, "X"), + (0x12400, "V"), + (0x1246F, "X"), + (0x12470, "V"), + (0x12475, "X"), + (0x12480, "V"), + (0x12544, "X"), + (0x12F90, "V"), + (0x12FF3, "X"), + (0x13000, "V"), + (0x13430, "X"), + (0x13440, "V"), + (0x13456, "X"), + (0x14400, "V"), + (0x14647, "X"), + (0x16800, "V"), + (0x16A39, "X"), + (0x16A40, "V"), + (0x16A5F, "X"), + (0x16A60, "V"), + (0x16A6A, "X"), + (0x16A6E, "V"), + (0x16ABF, "X"), + (0x16AC0, "V"), + (0x16ACA, "X"), + (0x16AD0, "V"), + (0x16AEE, "X"), + (0x16AF0, "V"), + (0x16AF6, "X"), + (0x16B00, "V"), + (0x16B46, "X"), + (0x16B50, "V"), + (0x16B5A, "X"), + (0x16B5B, "V"), + (0x16B62, "X"), + (0x16B63, "V"), + (0x16B78, "X"), + (0x16B7D, "V"), + (0x16B90, "X"), + (0x16E40, "M", "𖹠"), + (0x16E41, "M", "𖹡"), + (0x16E42, "M", "𖹢"), + (0x16E43, "M", "𖹣"), + (0x16E44, "M", "𖹤"), + (0x16E45, "M", "𖹥"), + (0x16E46, "M", "𖹦"), + (0x16E47, "M", "𖹧"), + (0x16E48, "M", "𖹨"), + (0x16E49, "M", "𖹩"), + (0x16E4A, "M", "𖹪"), + (0x16E4B, "M", "𖹫"), + (0x16E4C, "M", "𖹬"), + (0x16E4D, "M", "𖹭"), + (0x16E4E, "M", "𖹮"), + (0x16E4F, "M", "𖹯"), + (0x16E50, "M", "𖹰"), + (0x16E51, "M", "𖹱"), + (0x16E52, "M", "𖹲"), + (0x16E53, "M", "𖹳"), + (0x16E54, "M", "𖹴"), + (0x16E55, "M", "𖹵"), + (0x16E56, "M", "𖹶"), + (0x16E57, "M", "𖹷"), + (0x16E58, "M", "𖹸"), + (0x16E59, "M", "𖹹"), + (0x16E5A, "M", "𖹺"), + (0x16E5B, "M", "𖹻"), + (0x16E5C, "M", "𖹼"), + (0x16E5D, "M", "𖹽"), + (0x16E5E, "M", "𖹾"), + (0x16E5F, "M", "𖹿"), + (0x16E60, "V"), + (0x16E9B, "X"), + (0x16F00, "V"), + (0x16F4B, "X"), + (0x16F4F, "V"), + (0x16F88, "X"), + (0x16F8F, "V"), + (0x16FA0, "X"), + (0x16FE0, "V"), + (0x16FE5, "X"), + (0x16FF0, "V"), + (0x16FF2, "X"), + (0x17000, "V"), + (0x187F8, "X"), + (0x18800, "V"), + (0x18CD6, "X"), + (0x18D00, "V"), + (0x18D09, "X"), + (0x1AFF0, "V"), + (0x1AFF4, "X"), + (0x1AFF5, "V"), + (0x1AFFC, "X"), + (0x1AFFD, "V"), + ] + + +def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1AFFF, "X"), + (0x1B000, "V"), + (0x1B123, "X"), + (0x1B132, "V"), + (0x1B133, "X"), + (0x1B150, "V"), + (0x1B153, "X"), + (0x1B155, "V"), + (0x1B156, "X"), + (0x1B164, "V"), + (0x1B168, "X"), + (0x1B170, "V"), + (0x1B2FC, "X"), + (0x1BC00, "V"), + (0x1BC6B, "X"), + (0x1BC70, "V"), + (0x1BC7D, "X"), + (0x1BC80, "V"), + (0x1BC89, "X"), + (0x1BC90, "V"), + (0x1BC9A, "X"), + (0x1BC9C, "V"), + (0x1BCA0, "I"), + (0x1BCA4, "X"), + (0x1CF00, "V"), + (0x1CF2E, "X"), + (0x1CF30, "V"), + (0x1CF47, "X"), + (0x1CF50, "V"), + (0x1CFC4, "X"), + (0x1D000, "V"), + (0x1D0F6, "X"), + (0x1D100, "V"), + (0x1D127, "X"), + (0x1D129, "V"), + (0x1D15E, "M", "𝅗𝅥"), + (0x1D15F, "M", "𝅘𝅥"), + (0x1D160, "M", "𝅘𝅥𝅮"), + (0x1D161, "M", "𝅘𝅥𝅯"), + (0x1D162, "M", "𝅘𝅥𝅰"), + (0x1D163, "M", "𝅘𝅥𝅱"), + (0x1D164, "M", "𝅘𝅥𝅲"), + (0x1D165, "V"), + (0x1D173, "X"), + (0x1D17B, "V"), + (0x1D1BB, "M", "𝆹𝅥"), + (0x1D1BC, "M", "𝆺𝅥"), + (0x1D1BD, "M", "𝆹𝅥𝅮"), + (0x1D1BE, "M", "𝆺𝅥𝅮"), + 
(0x1D1BF, "M", "𝆹𝅥𝅯"), + (0x1D1C0, "M", "𝆺𝅥𝅯"), + (0x1D1C1, "V"), + (0x1D1EB, "X"), + (0x1D200, "V"), + (0x1D246, "X"), + (0x1D2C0, "V"), + (0x1D2D4, "X"), + (0x1D2E0, "V"), + (0x1D2F4, "X"), + (0x1D300, "V"), + (0x1D357, "X"), + (0x1D360, "V"), + (0x1D379, "X"), + (0x1D400, "M", "a"), + (0x1D401, "M", "b"), + (0x1D402, "M", "c"), + (0x1D403, "M", "d"), + (0x1D404, "M", "e"), + (0x1D405, "M", "f"), + (0x1D406, "M", "g"), + (0x1D407, "M", "h"), + (0x1D408, "M", "i"), + (0x1D409, "M", "j"), + (0x1D40A, "M", "k"), + (0x1D40B, "M", "l"), + (0x1D40C, "M", "m"), + (0x1D40D, "M", "n"), + (0x1D40E, "M", "o"), + (0x1D40F, "M", "p"), + (0x1D410, "M", "q"), + (0x1D411, "M", "r"), + (0x1D412, "M", "s"), + (0x1D413, "M", "t"), + (0x1D414, "M", "u"), + (0x1D415, "M", "v"), + (0x1D416, "M", "w"), + (0x1D417, "M", "x"), + (0x1D418, "M", "y"), + (0x1D419, "M", "z"), + (0x1D41A, "M", "a"), + (0x1D41B, "M", "b"), + (0x1D41C, "M", "c"), + (0x1D41D, "M", "d"), + (0x1D41E, "M", "e"), + (0x1D41F, "M", "f"), + (0x1D420, "M", "g"), + (0x1D421, "M", "h"), + (0x1D422, "M", "i"), + (0x1D423, "M", "j"), + (0x1D424, "M", "k"), + ] + + +def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D425, "M", "l"), + (0x1D426, "M", "m"), + (0x1D427, "M", "n"), + (0x1D428, "M", "o"), + (0x1D429, "M", "p"), + (0x1D42A, "M", "q"), + (0x1D42B, "M", "r"), + (0x1D42C, "M", "s"), + (0x1D42D, "M", "t"), + (0x1D42E, "M", "u"), + (0x1D42F, "M", "v"), + (0x1D430, "M", "w"), + (0x1D431, "M", "x"), + (0x1D432, "M", "y"), + (0x1D433, "M", "z"), + (0x1D434, "M", "a"), + (0x1D435, "M", "b"), + (0x1D436, "M", "c"), + (0x1D437, "M", "d"), + (0x1D438, "M", "e"), + (0x1D439, "M", "f"), + (0x1D43A, "M", "g"), + (0x1D43B, "M", "h"), + (0x1D43C, "M", "i"), + (0x1D43D, "M", "j"), + (0x1D43E, "M", "k"), + (0x1D43F, "M", "l"), + (0x1D440, "M", "m"), + (0x1D441, "M", "n"), + (0x1D442, "M", "o"), + (0x1D443, "M", "p"), + (0x1D444, "M", "q"), + (0x1D445, "M", "r"), + (0x1D446, "M", "s"), + (0x1D447, "M", "t"), + (0x1D448, "M", "u"), + (0x1D449, "M", "v"), + (0x1D44A, "M", "w"), + (0x1D44B, "M", "x"), + (0x1D44C, "M", "y"), + (0x1D44D, "M", "z"), + (0x1D44E, "M", "a"), + (0x1D44F, "M", "b"), + (0x1D450, "M", "c"), + (0x1D451, "M", "d"), + (0x1D452, "M", "e"), + (0x1D453, "M", "f"), + (0x1D454, "M", "g"), + (0x1D455, "X"), + (0x1D456, "M", "i"), + (0x1D457, "M", "j"), + (0x1D458, "M", "k"), + (0x1D459, "M", "l"), + (0x1D45A, "M", "m"), + (0x1D45B, "M", "n"), + (0x1D45C, "M", "o"), + (0x1D45D, "M", "p"), + (0x1D45E, "M", "q"), + (0x1D45F, "M", "r"), + (0x1D460, "M", "s"), + (0x1D461, "M", "t"), + (0x1D462, "M", "u"), + (0x1D463, "M", "v"), + (0x1D464, "M", "w"), + (0x1D465, "M", "x"), + (0x1D466, "M", "y"), + (0x1D467, "M", "z"), + (0x1D468, "M", "a"), + (0x1D469, "M", "b"), + (0x1D46A, "M", "c"), + (0x1D46B, "M", "d"), + (0x1D46C, "M", "e"), + (0x1D46D, "M", "f"), + (0x1D46E, "M", "g"), + (0x1D46F, "M", "h"), + (0x1D470, "M", "i"), + (0x1D471, "M", "j"), + (0x1D472, "M", "k"), + (0x1D473, "M", "l"), + (0x1D474, "M", "m"), + (0x1D475, "M", "n"), + (0x1D476, "M", "o"), + (0x1D477, "M", "p"), + (0x1D478, "M", "q"), + (0x1D479, "M", "r"), + (0x1D47A, "M", "s"), + (0x1D47B, "M", "t"), + (0x1D47C, "M", "u"), + (0x1D47D, "M", "v"), + (0x1D47E, "M", "w"), + (0x1D47F, "M", "x"), + (0x1D480, "M", "y"), + (0x1D481, "M", "z"), + (0x1D482, "M", "a"), + (0x1D483, "M", "b"), + (0x1D484, "M", "c"), + (0x1D485, "M", "d"), + (0x1D486, "M", "e"), + (0x1D487, "M", "f"), + (0x1D488, "M", "g"), + ] + + +def _seg_62() -> List[Union[Tuple[int, str], 
Tuple[int, str, str]]]: + return [ + (0x1D489, "M", "h"), + (0x1D48A, "M", "i"), + (0x1D48B, "M", "j"), + (0x1D48C, "M", "k"), + (0x1D48D, "M", "l"), + (0x1D48E, "M", "m"), + (0x1D48F, "M", "n"), + (0x1D490, "M", "o"), + (0x1D491, "M", "p"), + (0x1D492, "M", "q"), + (0x1D493, "M", "r"), + (0x1D494, "M", "s"), + (0x1D495, "M", "t"), + (0x1D496, "M", "u"), + (0x1D497, "M", "v"), + (0x1D498, "M", "w"), + (0x1D499, "M", "x"), + (0x1D49A, "M", "y"), + (0x1D49B, "M", "z"), + (0x1D49C, "M", "a"), + (0x1D49D, "X"), + (0x1D49E, "M", "c"), + (0x1D49F, "M", "d"), + (0x1D4A0, "X"), + (0x1D4A2, "M", "g"), + (0x1D4A3, "X"), + (0x1D4A5, "M", "j"), + (0x1D4A6, "M", "k"), + (0x1D4A7, "X"), + (0x1D4A9, "M", "n"), + (0x1D4AA, "M", "o"), + (0x1D4AB, "M", "p"), + (0x1D4AC, "M", "q"), + (0x1D4AD, "X"), + (0x1D4AE, "M", "s"), + (0x1D4AF, "M", "t"), + (0x1D4B0, "M", "u"), + (0x1D4B1, "M", "v"), + (0x1D4B2, "M", "w"), + (0x1D4B3, "M", "x"), + (0x1D4B4, "M", "y"), + (0x1D4B5, "M", "z"), + (0x1D4B6, "M", "a"), + (0x1D4B7, "M", "b"), + (0x1D4B8, "M", "c"), + (0x1D4B9, "M", "d"), + (0x1D4BA, "X"), + (0x1D4BB, "M", "f"), + (0x1D4BC, "X"), + (0x1D4BD, "M", "h"), + (0x1D4BE, "M", "i"), + (0x1D4BF, "M", "j"), + (0x1D4C0, "M", "k"), + (0x1D4C1, "M", "l"), + (0x1D4C2, "M", "m"), + (0x1D4C3, "M", "n"), + (0x1D4C4, "X"), + (0x1D4C5, "M", "p"), + (0x1D4C6, "M", "q"), + (0x1D4C7, "M", "r"), + (0x1D4C8, "M", "s"), + (0x1D4C9, "M", "t"), + (0x1D4CA, "M", "u"), + (0x1D4CB, "M", "v"), + (0x1D4CC, "M", "w"), + (0x1D4CD, "M", "x"), + (0x1D4CE, "M", "y"), + (0x1D4CF, "M", "z"), + (0x1D4D0, "M", "a"), + (0x1D4D1, "M", "b"), + (0x1D4D2, "M", "c"), + (0x1D4D3, "M", "d"), + (0x1D4D4, "M", "e"), + (0x1D4D5, "M", "f"), + (0x1D4D6, "M", "g"), + (0x1D4D7, "M", "h"), + (0x1D4D8, "M", "i"), + (0x1D4D9, "M", "j"), + (0x1D4DA, "M", "k"), + (0x1D4DB, "M", "l"), + (0x1D4DC, "M", "m"), + (0x1D4DD, "M", "n"), + (0x1D4DE, "M", "o"), + (0x1D4DF, "M", "p"), + (0x1D4E0, "M", "q"), + (0x1D4E1, "M", "r"), + (0x1D4E2, "M", "s"), + (0x1D4E3, "M", "t"), + (0x1D4E4, "M", "u"), + (0x1D4E5, "M", "v"), + (0x1D4E6, "M", "w"), + (0x1D4E7, "M", "x"), + (0x1D4E8, "M", "y"), + (0x1D4E9, "M", "z"), + (0x1D4EA, "M", "a"), + (0x1D4EB, "M", "b"), + (0x1D4EC, "M", "c"), + (0x1D4ED, "M", "d"), + (0x1D4EE, "M", "e"), + (0x1D4EF, "M", "f"), + ] + + +def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D4F0, "M", "g"), + (0x1D4F1, "M", "h"), + (0x1D4F2, "M", "i"), + (0x1D4F3, "M", "j"), + (0x1D4F4, "M", "k"), + (0x1D4F5, "M", "l"), + (0x1D4F6, "M", "m"), + (0x1D4F7, "M", "n"), + (0x1D4F8, "M", "o"), + (0x1D4F9, "M", "p"), + (0x1D4FA, "M", "q"), + (0x1D4FB, "M", "r"), + (0x1D4FC, "M", "s"), + (0x1D4FD, "M", "t"), + (0x1D4FE, "M", "u"), + (0x1D4FF, "M", "v"), + (0x1D500, "M", "w"), + (0x1D501, "M", "x"), + (0x1D502, "M", "y"), + (0x1D503, "M", "z"), + (0x1D504, "M", "a"), + (0x1D505, "M", "b"), + (0x1D506, "X"), + (0x1D507, "M", "d"), + (0x1D508, "M", "e"), + (0x1D509, "M", "f"), + (0x1D50A, "M", "g"), + (0x1D50B, "X"), + (0x1D50D, "M", "j"), + (0x1D50E, "M", "k"), + (0x1D50F, "M", "l"), + (0x1D510, "M", "m"), + (0x1D511, "M", "n"), + (0x1D512, "M", "o"), + (0x1D513, "M", "p"), + (0x1D514, "M", "q"), + (0x1D515, "X"), + (0x1D516, "M", "s"), + (0x1D517, "M", "t"), + (0x1D518, "M", "u"), + (0x1D519, "M", "v"), + (0x1D51A, "M", "w"), + (0x1D51B, "M", "x"), + (0x1D51C, "M", "y"), + (0x1D51D, "X"), + (0x1D51E, "M", "a"), + (0x1D51F, "M", "b"), + (0x1D520, "M", "c"), + (0x1D521, "M", "d"), + (0x1D522, "M", "e"), + (0x1D523, "M", "f"), + (0x1D524, "M", 
"g"), + (0x1D525, "M", "h"), + (0x1D526, "M", "i"), + (0x1D527, "M", "j"), + (0x1D528, "M", "k"), + (0x1D529, "M", "l"), + (0x1D52A, "M", "m"), + (0x1D52B, "M", "n"), + (0x1D52C, "M", "o"), + (0x1D52D, "M", "p"), + (0x1D52E, "M", "q"), + (0x1D52F, "M", "r"), + (0x1D530, "M", "s"), + (0x1D531, "M", "t"), + (0x1D532, "M", "u"), + (0x1D533, "M", "v"), + (0x1D534, "M", "w"), + (0x1D535, "M", "x"), + (0x1D536, "M", "y"), + (0x1D537, "M", "z"), + (0x1D538, "M", "a"), + (0x1D539, "M", "b"), + (0x1D53A, "X"), + (0x1D53B, "M", "d"), + (0x1D53C, "M", "e"), + (0x1D53D, "M", "f"), + (0x1D53E, "M", "g"), + (0x1D53F, "X"), + (0x1D540, "M", "i"), + (0x1D541, "M", "j"), + (0x1D542, "M", "k"), + (0x1D543, "M", "l"), + (0x1D544, "M", "m"), + (0x1D545, "X"), + (0x1D546, "M", "o"), + (0x1D547, "X"), + (0x1D54A, "M", "s"), + (0x1D54B, "M", "t"), + (0x1D54C, "M", "u"), + (0x1D54D, "M", "v"), + (0x1D54E, "M", "w"), + (0x1D54F, "M", "x"), + (0x1D550, "M", "y"), + (0x1D551, "X"), + (0x1D552, "M", "a"), + (0x1D553, "M", "b"), + (0x1D554, "M", "c"), + (0x1D555, "M", "d"), + (0x1D556, "M", "e"), + ] + + +def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D557, "M", "f"), + (0x1D558, "M", "g"), + (0x1D559, "M", "h"), + (0x1D55A, "M", "i"), + (0x1D55B, "M", "j"), + (0x1D55C, "M", "k"), + (0x1D55D, "M", "l"), + (0x1D55E, "M", "m"), + (0x1D55F, "M", "n"), + (0x1D560, "M", "o"), + (0x1D561, "M", "p"), + (0x1D562, "M", "q"), + (0x1D563, "M", "r"), + (0x1D564, "M", "s"), + (0x1D565, "M", "t"), + (0x1D566, "M", "u"), + (0x1D567, "M", "v"), + (0x1D568, "M", "w"), + (0x1D569, "M", "x"), + (0x1D56A, "M", "y"), + (0x1D56B, "M", "z"), + (0x1D56C, "M", "a"), + (0x1D56D, "M", "b"), + (0x1D56E, "M", "c"), + (0x1D56F, "M", "d"), + (0x1D570, "M", "e"), + (0x1D571, "M", "f"), + (0x1D572, "M", "g"), + (0x1D573, "M", "h"), + (0x1D574, "M", "i"), + (0x1D575, "M", "j"), + (0x1D576, "M", "k"), + (0x1D577, "M", "l"), + (0x1D578, "M", "m"), + (0x1D579, "M", "n"), + (0x1D57A, "M", "o"), + (0x1D57B, "M", "p"), + (0x1D57C, "M", "q"), + (0x1D57D, "M", "r"), + (0x1D57E, "M", "s"), + (0x1D57F, "M", "t"), + (0x1D580, "M", "u"), + (0x1D581, "M", "v"), + (0x1D582, "M", "w"), + (0x1D583, "M", "x"), + (0x1D584, "M", "y"), + (0x1D585, "M", "z"), + (0x1D586, "M", "a"), + (0x1D587, "M", "b"), + (0x1D588, "M", "c"), + (0x1D589, "M", "d"), + (0x1D58A, "M", "e"), + (0x1D58B, "M", "f"), + (0x1D58C, "M", "g"), + (0x1D58D, "M", "h"), + (0x1D58E, "M", "i"), + (0x1D58F, "M", "j"), + (0x1D590, "M", "k"), + (0x1D591, "M", "l"), + (0x1D592, "M", "m"), + (0x1D593, "M", "n"), + (0x1D594, "M", "o"), + (0x1D595, "M", "p"), + (0x1D596, "M", "q"), + (0x1D597, "M", "r"), + (0x1D598, "M", "s"), + (0x1D599, "M", "t"), + (0x1D59A, "M", "u"), + (0x1D59B, "M", "v"), + (0x1D59C, "M", "w"), + (0x1D59D, "M", "x"), + (0x1D59E, "M", "y"), + (0x1D59F, "M", "z"), + (0x1D5A0, "M", "a"), + (0x1D5A1, "M", "b"), + (0x1D5A2, "M", "c"), + (0x1D5A3, "M", "d"), + (0x1D5A4, "M", "e"), + (0x1D5A5, "M", "f"), + (0x1D5A6, "M", "g"), + (0x1D5A7, "M", "h"), + (0x1D5A8, "M", "i"), + (0x1D5A9, "M", "j"), + (0x1D5AA, "M", "k"), + (0x1D5AB, "M", "l"), + (0x1D5AC, "M", "m"), + (0x1D5AD, "M", "n"), + (0x1D5AE, "M", "o"), + (0x1D5AF, "M", "p"), + (0x1D5B0, "M", "q"), + (0x1D5B1, "M", "r"), + (0x1D5B2, "M", "s"), + (0x1D5B3, "M", "t"), + (0x1D5B4, "M", "u"), + (0x1D5B5, "M", "v"), + (0x1D5B6, "M", "w"), + (0x1D5B7, "M", "x"), + (0x1D5B8, "M", "y"), + (0x1D5B9, "M", "z"), + (0x1D5BA, "M", "a"), + ] + + +def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + 
return [ + (0x1D5BB, "M", "b"), + (0x1D5BC, "M", "c"), + (0x1D5BD, "M", "d"), + (0x1D5BE, "M", "e"), + (0x1D5BF, "M", "f"), + (0x1D5C0, "M", "g"), + (0x1D5C1, "M", "h"), + (0x1D5C2, "M", "i"), + (0x1D5C3, "M", "j"), + (0x1D5C4, "M", "k"), + (0x1D5C5, "M", "l"), + (0x1D5C6, "M", "m"), + (0x1D5C7, "M", "n"), + (0x1D5C8, "M", "o"), + (0x1D5C9, "M", "p"), + (0x1D5CA, "M", "q"), + (0x1D5CB, "M", "r"), + (0x1D5CC, "M", "s"), + (0x1D5CD, "M", "t"), + (0x1D5CE, "M", "u"), + (0x1D5CF, "M", "v"), + (0x1D5D0, "M", "w"), + (0x1D5D1, "M", "x"), + (0x1D5D2, "M", "y"), + (0x1D5D3, "M", "z"), + (0x1D5D4, "M", "a"), + (0x1D5D5, "M", "b"), + (0x1D5D6, "M", "c"), + (0x1D5D7, "M", "d"), + (0x1D5D8, "M", "e"), + (0x1D5D9, "M", "f"), + (0x1D5DA, "M", "g"), + (0x1D5DB, "M", "h"), + (0x1D5DC, "M", "i"), + (0x1D5DD, "M", "j"), + (0x1D5DE, "M", "k"), + (0x1D5DF, "M", "l"), + (0x1D5E0, "M", "m"), + (0x1D5E1, "M", "n"), + (0x1D5E2, "M", "o"), + (0x1D5E3, "M", "p"), + (0x1D5E4, "M", "q"), + (0x1D5E5, "M", "r"), + (0x1D5E6, "M", "s"), + (0x1D5E7, "M", "t"), + (0x1D5E8, "M", "u"), + (0x1D5E9, "M", "v"), + (0x1D5EA, "M", "w"), + (0x1D5EB, "M", "x"), + (0x1D5EC, "M", "y"), + (0x1D5ED, "M", "z"), + (0x1D5EE, "M", "a"), + (0x1D5EF, "M", "b"), + (0x1D5F0, "M", "c"), + (0x1D5F1, "M", "d"), + (0x1D5F2, "M", "e"), + (0x1D5F3, "M", "f"), + (0x1D5F4, "M", "g"), + (0x1D5F5, "M", "h"), + (0x1D5F6, "M", "i"), + (0x1D5F7, "M", "j"), + (0x1D5F8, "M", "k"), + (0x1D5F9, "M", "l"), + (0x1D5FA, "M", "m"), + (0x1D5FB, "M", "n"), + (0x1D5FC, "M", "o"), + (0x1D5FD, "M", "p"), + (0x1D5FE, "M", "q"), + (0x1D5FF, "M", "r"), + (0x1D600, "M", "s"), + (0x1D601, "M", "t"), + (0x1D602, "M", "u"), + (0x1D603, "M", "v"), + (0x1D604, "M", "w"), + (0x1D605, "M", "x"), + (0x1D606, "M", "y"), + (0x1D607, "M", "z"), + (0x1D608, "M", "a"), + (0x1D609, "M", "b"), + (0x1D60A, "M", "c"), + (0x1D60B, "M", "d"), + (0x1D60C, "M", "e"), + (0x1D60D, "M", "f"), + (0x1D60E, "M", "g"), + (0x1D60F, "M", "h"), + (0x1D610, "M", "i"), + (0x1D611, "M", "j"), + (0x1D612, "M", "k"), + (0x1D613, "M", "l"), + (0x1D614, "M", "m"), + (0x1D615, "M", "n"), + (0x1D616, "M", "o"), + (0x1D617, "M", "p"), + (0x1D618, "M", "q"), + (0x1D619, "M", "r"), + (0x1D61A, "M", "s"), + (0x1D61B, "M", "t"), + (0x1D61C, "M", "u"), + (0x1D61D, "M", "v"), + (0x1D61E, "M", "w"), + ] + + +def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D61F, "M", "x"), + (0x1D620, "M", "y"), + (0x1D621, "M", "z"), + (0x1D622, "M", "a"), + (0x1D623, "M", "b"), + (0x1D624, "M", "c"), + (0x1D625, "M", "d"), + (0x1D626, "M", "e"), + (0x1D627, "M", "f"), + (0x1D628, "M", "g"), + (0x1D629, "M", "h"), + (0x1D62A, "M", "i"), + (0x1D62B, "M", "j"), + (0x1D62C, "M", "k"), + (0x1D62D, "M", "l"), + (0x1D62E, "M", "m"), + (0x1D62F, "M", "n"), + (0x1D630, "M", "o"), + (0x1D631, "M", "p"), + (0x1D632, "M", "q"), + (0x1D633, "M", "r"), + (0x1D634, "M", "s"), + (0x1D635, "M", "t"), + (0x1D636, "M", "u"), + (0x1D637, "M", "v"), + (0x1D638, "M", "w"), + (0x1D639, "M", "x"), + (0x1D63A, "M", "y"), + (0x1D63B, "M", "z"), + (0x1D63C, "M", "a"), + (0x1D63D, "M", "b"), + (0x1D63E, "M", "c"), + (0x1D63F, "M", "d"), + (0x1D640, "M", "e"), + (0x1D641, "M", "f"), + (0x1D642, "M", "g"), + (0x1D643, "M", "h"), + (0x1D644, "M", "i"), + (0x1D645, "M", "j"), + (0x1D646, "M", "k"), + (0x1D647, "M", "l"), + (0x1D648, "M", "m"), + (0x1D649, "M", "n"), + (0x1D64A, "M", "o"), + (0x1D64B, "M", "p"), + (0x1D64C, "M", "q"), + (0x1D64D, "M", "r"), + (0x1D64E, "M", "s"), + (0x1D64F, "M", "t"), + (0x1D650, "M", "u"), + 
(0x1D651, "M", "v"), + (0x1D652, "M", "w"), + (0x1D653, "M", "x"), + (0x1D654, "M", "y"), + (0x1D655, "M", "z"), + (0x1D656, "M", "a"), + (0x1D657, "M", "b"), + (0x1D658, "M", "c"), + (0x1D659, "M", "d"), + (0x1D65A, "M", "e"), + (0x1D65B, "M", "f"), + (0x1D65C, "M", "g"), + (0x1D65D, "M", "h"), + (0x1D65E, "M", "i"), + (0x1D65F, "M", "j"), + (0x1D660, "M", "k"), + (0x1D661, "M", "l"), + (0x1D662, "M", "m"), + (0x1D663, "M", "n"), + (0x1D664, "M", "o"), + (0x1D665, "M", "p"), + (0x1D666, "M", "q"), + (0x1D667, "M", "r"), + (0x1D668, "M", "s"), + (0x1D669, "M", "t"), + (0x1D66A, "M", "u"), + (0x1D66B, "M", "v"), + (0x1D66C, "M", "w"), + (0x1D66D, "M", "x"), + (0x1D66E, "M", "y"), + (0x1D66F, "M", "z"), + (0x1D670, "M", "a"), + (0x1D671, "M", "b"), + (0x1D672, "M", "c"), + (0x1D673, "M", "d"), + (0x1D674, "M", "e"), + (0x1D675, "M", "f"), + (0x1D676, "M", "g"), + (0x1D677, "M", "h"), + (0x1D678, "M", "i"), + (0x1D679, "M", "j"), + (0x1D67A, "M", "k"), + (0x1D67B, "M", "l"), + (0x1D67C, "M", "m"), + (0x1D67D, "M", "n"), + (0x1D67E, "M", "o"), + (0x1D67F, "M", "p"), + (0x1D680, "M", "q"), + (0x1D681, "M", "r"), + (0x1D682, "M", "s"), + ] + + +def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D683, "M", "t"), + (0x1D684, "M", "u"), + (0x1D685, "M", "v"), + (0x1D686, "M", "w"), + (0x1D687, "M", "x"), + (0x1D688, "M", "y"), + (0x1D689, "M", "z"), + (0x1D68A, "M", "a"), + (0x1D68B, "M", "b"), + (0x1D68C, "M", "c"), + (0x1D68D, "M", "d"), + (0x1D68E, "M", "e"), + (0x1D68F, "M", "f"), + (0x1D690, "M", "g"), + (0x1D691, "M", "h"), + (0x1D692, "M", "i"), + (0x1D693, "M", "j"), + (0x1D694, "M", "k"), + (0x1D695, "M", "l"), + (0x1D696, "M", "m"), + (0x1D697, "M", "n"), + (0x1D698, "M", "o"), + (0x1D699, "M", "p"), + (0x1D69A, "M", "q"), + (0x1D69B, "M", "r"), + (0x1D69C, "M", "s"), + (0x1D69D, "M", "t"), + (0x1D69E, "M", "u"), + (0x1D69F, "M", "v"), + (0x1D6A0, "M", "w"), + (0x1D6A1, "M", "x"), + (0x1D6A2, "M", "y"), + (0x1D6A3, "M", "z"), + (0x1D6A4, "M", "ı"), + (0x1D6A5, "M", "ȷ"), + (0x1D6A6, "X"), + (0x1D6A8, "M", "α"), + (0x1D6A9, "M", "β"), + (0x1D6AA, "M", "γ"), + (0x1D6AB, "M", "δ"), + (0x1D6AC, "M", "ε"), + (0x1D6AD, "M", "ζ"), + (0x1D6AE, "M", "η"), + (0x1D6AF, "M", "θ"), + (0x1D6B0, "M", "ι"), + (0x1D6B1, "M", "κ"), + (0x1D6B2, "M", "λ"), + (0x1D6B3, "M", "μ"), + (0x1D6B4, "M", "ν"), + (0x1D6B5, "M", "ξ"), + (0x1D6B6, "M", "ο"), + (0x1D6B7, "M", "π"), + (0x1D6B8, "M", "ρ"), + (0x1D6B9, "M", "θ"), + (0x1D6BA, "M", "σ"), + (0x1D6BB, "M", "τ"), + (0x1D6BC, "M", "υ"), + (0x1D6BD, "M", "φ"), + (0x1D6BE, "M", "χ"), + (0x1D6BF, "M", "ψ"), + (0x1D6C0, "M", "ω"), + (0x1D6C1, "M", "∇"), + (0x1D6C2, "M", "α"), + (0x1D6C3, "M", "β"), + (0x1D6C4, "M", "γ"), + (0x1D6C5, "M", "δ"), + (0x1D6C6, "M", "ε"), + (0x1D6C7, "M", "ζ"), + (0x1D6C8, "M", "η"), + (0x1D6C9, "M", "θ"), + (0x1D6CA, "M", "ι"), + (0x1D6CB, "M", "κ"), + (0x1D6CC, "M", "λ"), + (0x1D6CD, "M", "μ"), + (0x1D6CE, "M", "ν"), + (0x1D6CF, "M", "ξ"), + (0x1D6D0, "M", "ο"), + (0x1D6D1, "M", "π"), + (0x1D6D2, "M", "ρ"), + (0x1D6D3, "M", "σ"), + (0x1D6D5, "M", "τ"), + (0x1D6D6, "M", "υ"), + (0x1D6D7, "M", "φ"), + (0x1D6D8, "M", "χ"), + (0x1D6D9, "M", "ψ"), + (0x1D6DA, "M", "ω"), + (0x1D6DB, "M", "∂"), + (0x1D6DC, "M", "ε"), + (0x1D6DD, "M", "θ"), + (0x1D6DE, "M", "κ"), + (0x1D6DF, "M", "φ"), + (0x1D6E0, "M", "ρ"), + (0x1D6E1, "M", "π"), + (0x1D6E2, "M", "α"), + (0x1D6E3, "M", "β"), + (0x1D6E4, "M", "γ"), + (0x1D6E5, "M", "δ"), + (0x1D6E6, "M", "ε"), + (0x1D6E7, "M", "ζ"), + (0x1D6E8, "M", "η"), + ] + + +def _seg_68() 
-> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D6E9, "M", "θ"), + (0x1D6EA, "M", "ι"), + (0x1D6EB, "M", "κ"), + (0x1D6EC, "M", "λ"), + (0x1D6ED, "M", "μ"), + (0x1D6EE, "M", "ν"), + (0x1D6EF, "M", "ξ"), + (0x1D6F0, "M", "ο"), + (0x1D6F1, "M", "π"), + (0x1D6F2, "M", "ρ"), + (0x1D6F3, "M", "θ"), + (0x1D6F4, "M", "σ"), + (0x1D6F5, "M", "τ"), + (0x1D6F6, "M", "υ"), + (0x1D6F7, "M", "φ"), + (0x1D6F8, "M", "χ"), + (0x1D6F9, "M", "ψ"), + (0x1D6FA, "M", "ω"), + (0x1D6FB, "M", "∇"), + (0x1D6FC, "M", "α"), + (0x1D6FD, "M", "β"), + (0x1D6FE, "M", "γ"), + (0x1D6FF, "M", "δ"), + (0x1D700, "M", "ε"), + (0x1D701, "M", "ζ"), + (0x1D702, "M", "η"), + (0x1D703, "M", "θ"), + (0x1D704, "M", "ι"), + (0x1D705, "M", "κ"), + (0x1D706, "M", "λ"), + (0x1D707, "M", "μ"), + (0x1D708, "M", "ν"), + (0x1D709, "M", "ξ"), + (0x1D70A, "M", "ο"), + (0x1D70B, "M", "π"), + (0x1D70C, "M", "ρ"), + (0x1D70D, "M", "σ"), + (0x1D70F, "M", "τ"), + (0x1D710, "M", "υ"), + (0x1D711, "M", "φ"), + (0x1D712, "M", "χ"), + (0x1D713, "M", "ψ"), + (0x1D714, "M", "ω"), + (0x1D715, "M", "∂"), + (0x1D716, "M", "ε"), + (0x1D717, "M", "θ"), + (0x1D718, "M", "κ"), + (0x1D719, "M", "φ"), + (0x1D71A, "M", "ρ"), + (0x1D71B, "M", "π"), + (0x1D71C, "M", "α"), + (0x1D71D, "M", "β"), + (0x1D71E, "M", "γ"), + (0x1D71F, "M", "δ"), + (0x1D720, "M", "ε"), + (0x1D721, "M", "ζ"), + (0x1D722, "M", "η"), + (0x1D723, "M", "θ"), + (0x1D724, "M", "ι"), + (0x1D725, "M", "κ"), + (0x1D726, "M", "λ"), + (0x1D727, "M", "μ"), + (0x1D728, "M", "ν"), + (0x1D729, "M", "ξ"), + (0x1D72A, "M", "ο"), + (0x1D72B, "M", "π"), + (0x1D72C, "M", "ρ"), + (0x1D72D, "M", "θ"), + (0x1D72E, "M", "σ"), + (0x1D72F, "M", "τ"), + (0x1D730, "M", "υ"), + (0x1D731, "M", "φ"), + (0x1D732, "M", "χ"), + (0x1D733, "M", "ψ"), + (0x1D734, "M", "ω"), + (0x1D735, "M", "∇"), + (0x1D736, "M", "α"), + (0x1D737, "M", "β"), + (0x1D738, "M", "γ"), + (0x1D739, "M", "δ"), + (0x1D73A, "M", "ε"), + (0x1D73B, "M", "ζ"), + (0x1D73C, "M", "η"), + (0x1D73D, "M", "θ"), + (0x1D73E, "M", "ι"), + (0x1D73F, "M", "κ"), + (0x1D740, "M", "λ"), + (0x1D741, "M", "μ"), + (0x1D742, "M", "ν"), + (0x1D743, "M", "ξ"), + (0x1D744, "M", "ο"), + (0x1D745, "M", "π"), + (0x1D746, "M", "ρ"), + (0x1D747, "M", "σ"), + (0x1D749, "M", "τ"), + (0x1D74A, "M", "υ"), + (0x1D74B, "M", "φ"), + (0x1D74C, "M", "χ"), + (0x1D74D, "M", "ψ"), + (0x1D74E, "M", "ω"), + ] + + +def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D74F, "M", "∂"), + (0x1D750, "M", "ε"), + (0x1D751, "M", "θ"), + (0x1D752, "M", "κ"), + (0x1D753, "M", "φ"), + (0x1D754, "M", "ρ"), + (0x1D755, "M", "π"), + (0x1D756, "M", "α"), + (0x1D757, "M", "β"), + (0x1D758, "M", "γ"), + (0x1D759, "M", "δ"), + (0x1D75A, "M", "ε"), + (0x1D75B, "M", "ζ"), + (0x1D75C, "M", "η"), + (0x1D75D, "M", "θ"), + (0x1D75E, "M", "ι"), + (0x1D75F, "M", "κ"), + (0x1D760, "M", "λ"), + (0x1D761, "M", "μ"), + (0x1D762, "M", "ν"), + (0x1D763, "M", "ξ"), + (0x1D764, "M", "ο"), + (0x1D765, "M", "π"), + (0x1D766, "M", "ρ"), + (0x1D767, "M", "θ"), + (0x1D768, "M", "σ"), + (0x1D769, "M", "τ"), + (0x1D76A, "M", "υ"), + (0x1D76B, "M", "φ"), + (0x1D76C, "M", "χ"), + (0x1D76D, "M", "ψ"), + (0x1D76E, "M", "ω"), + (0x1D76F, "M", "∇"), + (0x1D770, "M", "α"), + (0x1D771, "M", "β"), + (0x1D772, "M", "γ"), + (0x1D773, "M", "δ"), + (0x1D774, "M", "ε"), + (0x1D775, "M", "ζ"), + (0x1D776, "M", "η"), + (0x1D777, "M", "θ"), + (0x1D778, "M", "ι"), + (0x1D779, "M", "κ"), + (0x1D77A, "M", "λ"), + (0x1D77B, "M", "μ"), + (0x1D77C, "M", "ν"), + (0x1D77D, "M", "ξ"), + (0x1D77E, "M", 
"ο"), + (0x1D77F, "M", "π"), + (0x1D780, "M", "ρ"), + (0x1D781, "M", "σ"), + (0x1D783, "M", "τ"), + (0x1D784, "M", "υ"), + (0x1D785, "M", "φ"), + (0x1D786, "M", "χ"), + (0x1D787, "M", "ψ"), + (0x1D788, "M", "ω"), + (0x1D789, "M", "∂"), + (0x1D78A, "M", "ε"), + (0x1D78B, "M", "θ"), + (0x1D78C, "M", "κ"), + (0x1D78D, "M", "φ"), + (0x1D78E, "M", "ρ"), + (0x1D78F, "M", "π"), + (0x1D790, "M", "α"), + (0x1D791, "M", "β"), + (0x1D792, "M", "γ"), + (0x1D793, "M", "δ"), + (0x1D794, "M", "ε"), + (0x1D795, "M", "ζ"), + (0x1D796, "M", "η"), + (0x1D797, "M", "θ"), + (0x1D798, "M", "ι"), + (0x1D799, "M", "κ"), + (0x1D79A, "M", "λ"), + (0x1D79B, "M", "μ"), + (0x1D79C, "M", "ν"), + (0x1D79D, "M", "ξ"), + (0x1D79E, "M", "ο"), + (0x1D79F, "M", "π"), + (0x1D7A0, "M", "ρ"), + (0x1D7A1, "M", "θ"), + (0x1D7A2, "M", "σ"), + (0x1D7A3, "M", "τ"), + (0x1D7A4, "M", "υ"), + (0x1D7A5, "M", "φ"), + (0x1D7A6, "M", "χ"), + (0x1D7A7, "M", "ψ"), + (0x1D7A8, "M", "ω"), + (0x1D7A9, "M", "∇"), + (0x1D7AA, "M", "α"), + (0x1D7AB, "M", "β"), + (0x1D7AC, "M", "γ"), + (0x1D7AD, "M", "δ"), + (0x1D7AE, "M", "ε"), + (0x1D7AF, "M", "ζ"), + (0x1D7B0, "M", "η"), + (0x1D7B1, "M", "θ"), + (0x1D7B2, "M", "ι"), + (0x1D7B3, "M", "κ"), + ] + + +def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D7B4, "M", "λ"), + (0x1D7B5, "M", "μ"), + (0x1D7B6, "M", "ν"), + (0x1D7B7, "M", "ξ"), + (0x1D7B8, "M", "ο"), + (0x1D7B9, "M", "π"), + (0x1D7BA, "M", "ρ"), + (0x1D7BB, "M", "σ"), + (0x1D7BD, "M", "τ"), + (0x1D7BE, "M", "υ"), + (0x1D7BF, "M", "φ"), + (0x1D7C0, "M", "χ"), + (0x1D7C1, "M", "ψ"), + (0x1D7C2, "M", "ω"), + (0x1D7C3, "M", "∂"), + (0x1D7C4, "M", "ε"), + (0x1D7C5, "M", "θ"), + (0x1D7C6, "M", "κ"), + (0x1D7C7, "M", "φ"), + (0x1D7C8, "M", "ρ"), + (0x1D7C9, "M", "π"), + (0x1D7CA, "M", "ϝ"), + (0x1D7CC, "X"), + (0x1D7CE, "M", "0"), + (0x1D7CF, "M", "1"), + (0x1D7D0, "M", "2"), + (0x1D7D1, "M", "3"), + (0x1D7D2, "M", "4"), + (0x1D7D3, "M", "5"), + (0x1D7D4, "M", "6"), + (0x1D7D5, "M", "7"), + (0x1D7D6, "M", "8"), + (0x1D7D7, "M", "9"), + (0x1D7D8, "M", "0"), + (0x1D7D9, "M", "1"), + (0x1D7DA, "M", "2"), + (0x1D7DB, "M", "3"), + (0x1D7DC, "M", "4"), + (0x1D7DD, "M", "5"), + (0x1D7DE, "M", "6"), + (0x1D7DF, "M", "7"), + (0x1D7E0, "M", "8"), + (0x1D7E1, "M", "9"), + (0x1D7E2, "M", "0"), + (0x1D7E3, "M", "1"), + (0x1D7E4, "M", "2"), + (0x1D7E5, "M", "3"), + (0x1D7E6, "M", "4"), + (0x1D7E7, "M", "5"), + (0x1D7E8, "M", "6"), + (0x1D7E9, "M", "7"), + (0x1D7EA, "M", "8"), + (0x1D7EB, "M", "9"), + (0x1D7EC, "M", "0"), + (0x1D7ED, "M", "1"), + (0x1D7EE, "M", "2"), + (0x1D7EF, "M", "3"), + (0x1D7F0, "M", "4"), + (0x1D7F1, "M", "5"), + (0x1D7F2, "M", "6"), + (0x1D7F3, "M", "7"), + (0x1D7F4, "M", "8"), + (0x1D7F5, "M", "9"), + (0x1D7F6, "M", "0"), + (0x1D7F7, "M", "1"), + (0x1D7F8, "M", "2"), + (0x1D7F9, "M", "3"), + (0x1D7FA, "M", "4"), + (0x1D7FB, "M", "5"), + (0x1D7FC, "M", "6"), + (0x1D7FD, "M", "7"), + (0x1D7FE, "M", "8"), + (0x1D7FF, "M", "9"), + (0x1D800, "V"), + (0x1DA8C, "X"), + (0x1DA9B, "V"), + (0x1DAA0, "X"), + (0x1DAA1, "V"), + (0x1DAB0, "X"), + (0x1DF00, "V"), + (0x1DF1F, "X"), + (0x1DF25, "V"), + (0x1DF2B, "X"), + (0x1E000, "V"), + (0x1E007, "X"), + (0x1E008, "V"), + (0x1E019, "X"), + (0x1E01B, "V"), + (0x1E022, "X"), + (0x1E023, "V"), + (0x1E025, "X"), + (0x1E026, "V"), + (0x1E02B, "X"), + (0x1E030, "M", "а"), + (0x1E031, "M", "б"), + (0x1E032, "M", "в"), + (0x1E033, "M", "г"), + (0x1E034, "M", "д"), + (0x1E035, "M", "е"), + (0x1E036, "M", "ж"), + ] + + +def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, 
str, str]]]: + return [ + (0x1E037, "M", "з"), + (0x1E038, "M", "и"), + (0x1E039, "M", "к"), + (0x1E03A, "M", "л"), + (0x1E03B, "M", "м"), + (0x1E03C, "M", "о"), + (0x1E03D, "M", "п"), + (0x1E03E, "M", "р"), + (0x1E03F, "M", "с"), + (0x1E040, "M", "т"), + (0x1E041, "M", "у"), + (0x1E042, "M", "ф"), + (0x1E043, "M", "х"), + (0x1E044, "M", "ц"), + (0x1E045, "M", "ч"), + (0x1E046, "M", "ш"), + (0x1E047, "M", "ы"), + (0x1E048, "M", "э"), + (0x1E049, "M", "ю"), + (0x1E04A, "M", "ꚉ"), + (0x1E04B, "M", "ә"), + (0x1E04C, "M", "і"), + (0x1E04D, "M", "ј"), + (0x1E04E, "M", "ө"), + (0x1E04F, "M", "ү"), + (0x1E050, "M", "ӏ"), + (0x1E051, "M", "а"), + (0x1E052, "M", "б"), + (0x1E053, "M", "в"), + (0x1E054, "M", "г"), + (0x1E055, "M", "д"), + (0x1E056, "M", "е"), + (0x1E057, "M", "ж"), + (0x1E058, "M", "з"), + (0x1E059, "M", "и"), + (0x1E05A, "M", "к"), + (0x1E05B, "M", "л"), + (0x1E05C, "M", "о"), + (0x1E05D, "M", "п"), + (0x1E05E, "M", "с"), + (0x1E05F, "M", "у"), + (0x1E060, "M", "ф"), + (0x1E061, "M", "х"), + (0x1E062, "M", "ц"), + (0x1E063, "M", "ч"), + (0x1E064, "M", "ш"), + (0x1E065, "M", "ъ"), + (0x1E066, "M", "ы"), + (0x1E067, "M", "ґ"), + (0x1E068, "M", "і"), + (0x1E069, "M", "ѕ"), + (0x1E06A, "M", "џ"), + (0x1E06B, "M", "ҫ"), + (0x1E06C, "M", "ꙑ"), + (0x1E06D, "M", "ұ"), + (0x1E06E, "X"), + (0x1E08F, "V"), + (0x1E090, "X"), + (0x1E100, "V"), + (0x1E12D, "X"), + (0x1E130, "V"), + (0x1E13E, "X"), + (0x1E140, "V"), + (0x1E14A, "X"), + (0x1E14E, "V"), + (0x1E150, "X"), + (0x1E290, "V"), + (0x1E2AF, "X"), + (0x1E2C0, "V"), + (0x1E2FA, "X"), + (0x1E2FF, "V"), + (0x1E300, "X"), + (0x1E4D0, "V"), + (0x1E4FA, "X"), + (0x1E7E0, "V"), + (0x1E7E7, "X"), + (0x1E7E8, "V"), + (0x1E7EC, "X"), + (0x1E7ED, "V"), + (0x1E7EF, "X"), + (0x1E7F0, "V"), + (0x1E7FF, "X"), + (0x1E800, "V"), + (0x1E8C5, "X"), + (0x1E8C7, "V"), + (0x1E8D7, "X"), + (0x1E900, "M", "𞤢"), + (0x1E901, "M", "𞤣"), + (0x1E902, "M", "𞤤"), + (0x1E903, "M", "𞤥"), + (0x1E904, "M", "𞤦"), + (0x1E905, "M", "𞤧"), + (0x1E906, "M", "𞤨"), + (0x1E907, "M", "𞤩"), + (0x1E908, "M", "𞤪"), + (0x1E909, "M", "𞤫"), + (0x1E90A, "M", "𞤬"), + (0x1E90B, "M", "𞤭"), + (0x1E90C, "M", "𞤮"), + (0x1E90D, "M", "𞤯"), + ] + + +def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E90E, "M", "𞤰"), + (0x1E90F, "M", "𞤱"), + (0x1E910, "M", "𞤲"), + (0x1E911, "M", "𞤳"), + (0x1E912, "M", "𞤴"), + (0x1E913, "M", "𞤵"), + (0x1E914, "M", "𞤶"), + (0x1E915, "M", "𞤷"), + (0x1E916, "M", "𞤸"), + (0x1E917, "M", "𞤹"), + (0x1E918, "M", "𞤺"), + (0x1E919, "M", "𞤻"), + (0x1E91A, "M", "𞤼"), + (0x1E91B, "M", "𞤽"), + (0x1E91C, "M", "𞤾"), + (0x1E91D, "M", "𞤿"), + (0x1E91E, "M", "𞥀"), + (0x1E91F, "M", "𞥁"), + (0x1E920, "M", "𞥂"), + (0x1E921, "M", "𞥃"), + (0x1E922, "V"), + (0x1E94C, "X"), + (0x1E950, "V"), + (0x1E95A, "X"), + (0x1E95E, "V"), + (0x1E960, "X"), + (0x1EC71, "V"), + (0x1ECB5, "X"), + (0x1ED01, "V"), + (0x1ED3E, "X"), + (0x1EE00, "M", "ا"), + (0x1EE01, "M", "ب"), + (0x1EE02, "M", "ج"), + (0x1EE03, "M", "د"), + (0x1EE04, "X"), + (0x1EE05, "M", "و"), + (0x1EE06, "M", "ز"), + (0x1EE07, "M", "ح"), + (0x1EE08, "M", "ط"), + (0x1EE09, "M", "ي"), + (0x1EE0A, "M", "ك"), + (0x1EE0B, "M", "ل"), + (0x1EE0C, "M", "م"), + (0x1EE0D, "M", "ن"), + (0x1EE0E, "M", "س"), + (0x1EE0F, "M", "ع"), + (0x1EE10, "M", "ف"), + (0x1EE11, "M", "ص"), + (0x1EE12, "M", "ق"), + (0x1EE13, "M", "ر"), + (0x1EE14, "M", "ش"), + (0x1EE15, "M", "ت"), + (0x1EE16, "M", "ث"), + (0x1EE17, "M", "خ"), + (0x1EE18, "M", "ذ"), + (0x1EE19, "M", "ض"), + (0x1EE1A, "M", "ظ"), + (0x1EE1B, "M", "غ"), + (0x1EE1C, "M", 
"ٮ"), + (0x1EE1D, "M", "ں"), + (0x1EE1E, "M", "ڡ"), + (0x1EE1F, "M", "ٯ"), + (0x1EE20, "X"), + (0x1EE21, "M", "ب"), + (0x1EE22, "M", "ج"), + (0x1EE23, "X"), + (0x1EE24, "M", "ه"), + (0x1EE25, "X"), + (0x1EE27, "M", "ح"), + (0x1EE28, "X"), + (0x1EE29, "M", "ي"), + (0x1EE2A, "M", "ك"), + (0x1EE2B, "M", "ل"), + (0x1EE2C, "M", "م"), + (0x1EE2D, "M", "ن"), + (0x1EE2E, "M", "س"), + (0x1EE2F, "M", "ع"), + (0x1EE30, "M", "ف"), + (0x1EE31, "M", "ص"), + (0x1EE32, "M", "ق"), + (0x1EE33, "X"), + (0x1EE34, "M", "ش"), + (0x1EE35, "M", "ت"), + (0x1EE36, "M", "ث"), + (0x1EE37, "M", "خ"), + (0x1EE38, "X"), + (0x1EE39, "M", "ض"), + (0x1EE3A, "X"), + (0x1EE3B, "M", "غ"), + (0x1EE3C, "X"), + (0x1EE42, "M", "ج"), + (0x1EE43, "X"), + (0x1EE47, "M", "ح"), + (0x1EE48, "X"), + (0x1EE49, "M", "ي"), + (0x1EE4A, "X"), + (0x1EE4B, "M", "ل"), + (0x1EE4C, "X"), + (0x1EE4D, "M", "ن"), + (0x1EE4E, "M", "س"), + ] + + +def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EE4F, "M", "ع"), + (0x1EE50, "X"), + (0x1EE51, "M", "ص"), + (0x1EE52, "M", "ق"), + (0x1EE53, "X"), + (0x1EE54, "M", "ش"), + (0x1EE55, "X"), + (0x1EE57, "M", "خ"), + (0x1EE58, "X"), + (0x1EE59, "M", "ض"), + (0x1EE5A, "X"), + (0x1EE5B, "M", "غ"), + (0x1EE5C, "X"), + (0x1EE5D, "M", "ں"), + (0x1EE5E, "X"), + (0x1EE5F, "M", "ٯ"), + (0x1EE60, "X"), + (0x1EE61, "M", "ب"), + (0x1EE62, "M", "ج"), + (0x1EE63, "X"), + (0x1EE64, "M", "ه"), + (0x1EE65, "X"), + (0x1EE67, "M", "ح"), + (0x1EE68, "M", "ط"), + (0x1EE69, "M", "ي"), + (0x1EE6A, "M", "ك"), + (0x1EE6B, "X"), + (0x1EE6C, "M", "م"), + (0x1EE6D, "M", "ن"), + (0x1EE6E, "M", "س"), + (0x1EE6F, "M", "ع"), + (0x1EE70, "M", "ف"), + (0x1EE71, "M", "ص"), + (0x1EE72, "M", "ق"), + (0x1EE73, "X"), + (0x1EE74, "M", "ش"), + (0x1EE75, "M", "ت"), + (0x1EE76, "M", "ث"), + (0x1EE77, "M", "خ"), + (0x1EE78, "X"), + (0x1EE79, "M", "ض"), + (0x1EE7A, "M", "ظ"), + (0x1EE7B, "M", "غ"), + (0x1EE7C, "M", "ٮ"), + (0x1EE7D, "X"), + (0x1EE7E, "M", "ڡ"), + (0x1EE7F, "X"), + (0x1EE80, "M", "ا"), + (0x1EE81, "M", "ب"), + (0x1EE82, "M", "ج"), + (0x1EE83, "M", "د"), + (0x1EE84, "M", "ه"), + (0x1EE85, "M", "و"), + (0x1EE86, "M", "ز"), + (0x1EE87, "M", "ح"), + (0x1EE88, "M", "ط"), + (0x1EE89, "M", "ي"), + (0x1EE8A, "X"), + (0x1EE8B, "M", "ل"), + (0x1EE8C, "M", "م"), + (0x1EE8D, "M", "ن"), + (0x1EE8E, "M", "س"), + (0x1EE8F, "M", "ع"), + (0x1EE90, "M", "ف"), + (0x1EE91, "M", "ص"), + (0x1EE92, "M", "ق"), + (0x1EE93, "M", "ر"), + (0x1EE94, "M", "ش"), + (0x1EE95, "M", "ت"), + (0x1EE96, "M", "ث"), + (0x1EE97, "M", "خ"), + (0x1EE98, "M", "ذ"), + (0x1EE99, "M", "ض"), + (0x1EE9A, "M", "ظ"), + (0x1EE9B, "M", "غ"), + (0x1EE9C, "X"), + (0x1EEA1, "M", "ب"), + (0x1EEA2, "M", "ج"), + (0x1EEA3, "M", "د"), + (0x1EEA4, "X"), + (0x1EEA5, "M", "و"), + (0x1EEA6, "M", "ز"), + (0x1EEA7, "M", "ح"), + (0x1EEA8, "M", "ط"), + (0x1EEA9, "M", "ي"), + (0x1EEAA, "X"), + (0x1EEAB, "M", "ل"), + (0x1EEAC, "M", "م"), + (0x1EEAD, "M", "ن"), + (0x1EEAE, "M", "س"), + (0x1EEAF, "M", "ع"), + (0x1EEB0, "M", "ف"), + (0x1EEB1, "M", "ص"), + (0x1EEB2, "M", "ق"), + (0x1EEB3, "M", "ر"), + (0x1EEB4, "M", "ش"), + (0x1EEB5, "M", "ت"), + (0x1EEB6, "M", "ث"), + (0x1EEB7, "M", "خ"), + (0x1EEB8, "M", "ذ"), + ] + + +def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EEB9, "M", "ض"), + (0x1EEBA, "M", "ظ"), + (0x1EEBB, "M", "غ"), + (0x1EEBC, "X"), + (0x1EEF0, "V"), + (0x1EEF2, "X"), + (0x1F000, "V"), + (0x1F02C, "X"), + (0x1F030, "V"), + (0x1F094, "X"), + (0x1F0A0, "V"), + (0x1F0AF, "X"), + (0x1F0B1, "V"), + (0x1F0C0, "X"), + (0x1F0C1, 
"V"), + (0x1F0D0, "X"), + (0x1F0D1, "V"), + (0x1F0F6, "X"), + (0x1F101, "3", "0,"), + (0x1F102, "3", "1,"), + (0x1F103, "3", "2,"), + (0x1F104, "3", "3,"), + (0x1F105, "3", "4,"), + (0x1F106, "3", "5,"), + (0x1F107, "3", "6,"), + (0x1F108, "3", "7,"), + (0x1F109, "3", "8,"), + (0x1F10A, "3", "9,"), + (0x1F10B, "V"), + (0x1F110, "3", "(a)"), + (0x1F111, "3", "(b)"), + (0x1F112, "3", "(c)"), + (0x1F113, "3", "(d)"), + (0x1F114, "3", "(e)"), + (0x1F115, "3", "(f)"), + (0x1F116, "3", "(g)"), + (0x1F117, "3", "(h)"), + (0x1F118, "3", "(i)"), + (0x1F119, "3", "(j)"), + (0x1F11A, "3", "(k)"), + (0x1F11B, "3", "(l)"), + (0x1F11C, "3", "(m)"), + (0x1F11D, "3", "(n)"), + (0x1F11E, "3", "(o)"), + (0x1F11F, "3", "(p)"), + (0x1F120, "3", "(q)"), + (0x1F121, "3", "(r)"), + (0x1F122, "3", "(s)"), + (0x1F123, "3", "(t)"), + (0x1F124, "3", "(u)"), + (0x1F125, "3", "(v)"), + (0x1F126, "3", "(w)"), + (0x1F127, "3", "(x)"), + (0x1F128, "3", "(y)"), + (0x1F129, "3", "(z)"), + (0x1F12A, "M", "〔s〕"), + (0x1F12B, "M", "c"), + (0x1F12C, "M", "r"), + (0x1F12D, "M", "cd"), + (0x1F12E, "M", "wz"), + (0x1F12F, "V"), + (0x1F130, "M", "a"), + (0x1F131, "M", "b"), + (0x1F132, "M", "c"), + (0x1F133, "M", "d"), + (0x1F134, "M", "e"), + (0x1F135, "M", "f"), + (0x1F136, "M", "g"), + (0x1F137, "M", "h"), + (0x1F138, "M", "i"), + (0x1F139, "M", "j"), + (0x1F13A, "M", "k"), + (0x1F13B, "M", "l"), + (0x1F13C, "M", "m"), + (0x1F13D, "M", "n"), + (0x1F13E, "M", "o"), + (0x1F13F, "M", "p"), + (0x1F140, "M", "q"), + (0x1F141, "M", "r"), + (0x1F142, "M", "s"), + (0x1F143, "M", "t"), + (0x1F144, "M", "u"), + (0x1F145, "M", "v"), + (0x1F146, "M", "w"), + (0x1F147, "M", "x"), + (0x1F148, "M", "y"), + (0x1F149, "M", "z"), + (0x1F14A, "M", "hv"), + (0x1F14B, "M", "mv"), + (0x1F14C, "M", "sd"), + (0x1F14D, "M", "ss"), + (0x1F14E, "M", "ppv"), + (0x1F14F, "M", "wc"), + (0x1F150, "V"), + (0x1F16A, "M", "mc"), + (0x1F16B, "M", "md"), + (0x1F16C, "M", "mr"), + (0x1F16D, "V"), + (0x1F190, "M", "dj"), + (0x1F191, "V"), + ] + + +def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F1AE, "X"), + (0x1F1E6, "V"), + (0x1F200, "M", "ほか"), + (0x1F201, "M", "ココ"), + (0x1F202, "M", "サ"), + (0x1F203, "X"), + (0x1F210, "M", "手"), + (0x1F211, "M", "字"), + (0x1F212, "M", "双"), + (0x1F213, "M", "デ"), + (0x1F214, "M", "二"), + (0x1F215, "M", "多"), + (0x1F216, "M", "解"), + (0x1F217, "M", "天"), + (0x1F218, "M", "交"), + (0x1F219, "M", "映"), + (0x1F21A, "M", "無"), + (0x1F21B, "M", "料"), + (0x1F21C, "M", "前"), + (0x1F21D, "M", "後"), + (0x1F21E, "M", "再"), + (0x1F21F, "M", "新"), + (0x1F220, "M", "初"), + (0x1F221, "M", "終"), + (0x1F222, "M", "生"), + (0x1F223, "M", "販"), + (0x1F224, "M", "声"), + (0x1F225, "M", "吹"), + (0x1F226, "M", "演"), + (0x1F227, "M", "投"), + (0x1F228, "M", "捕"), + (0x1F229, "M", "一"), + (0x1F22A, "M", "三"), + (0x1F22B, "M", "遊"), + (0x1F22C, "M", "左"), + (0x1F22D, "M", "中"), + (0x1F22E, "M", "右"), + (0x1F22F, "M", "指"), + (0x1F230, "M", "走"), + (0x1F231, "M", "打"), + (0x1F232, "M", "禁"), + (0x1F233, "M", "空"), + (0x1F234, "M", "合"), + (0x1F235, "M", "満"), + (0x1F236, "M", "有"), + (0x1F237, "M", "月"), + (0x1F238, "M", "申"), + (0x1F239, "M", "割"), + (0x1F23A, "M", "営"), + (0x1F23B, "M", "配"), + (0x1F23C, "X"), + (0x1F240, "M", "〔本〕"), + (0x1F241, "M", "〔三〕"), + (0x1F242, "M", "〔二〕"), + (0x1F243, "M", "〔安〕"), + (0x1F244, "M", "〔点〕"), + (0x1F245, "M", "〔打〕"), + (0x1F246, "M", "〔盗〕"), + (0x1F247, "M", "〔勝〕"), + (0x1F248, "M", "〔敗〕"), + (0x1F249, "X"), + (0x1F250, "M", "得"), + (0x1F251, "M", "可"), + (0x1F252, "X"), + 
(0x1F260, "V"), + (0x1F266, "X"), + (0x1F300, "V"), + (0x1F6D8, "X"), + (0x1F6DC, "V"), + (0x1F6ED, "X"), + (0x1F6F0, "V"), + (0x1F6FD, "X"), + (0x1F700, "V"), + (0x1F777, "X"), + (0x1F77B, "V"), + (0x1F7DA, "X"), + (0x1F7E0, "V"), + (0x1F7EC, "X"), + (0x1F7F0, "V"), + (0x1F7F1, "X"), + (0x1F800, "V"), + (0x1F80C, "X"), + (0x1F810, "V"), + (0x1F848, "X"), + (0x1F850, "V"), + (0x1F85A, "X"), + (0x1F860, "V"), + (0x1F888, "X"), + (0x1F890, "V"), + (0x1F8AE, "X"), + (0x1F8B0, "V"), + (0x1F8B2, "X"), + (0x1F900, "V"), + (0x1FA54, "X"), + (0x1FA60, "V"), + (0x1FA6E, "X"), + (0x1FA70, "V"), + (0x1FA7D, "X"), + (0x1FA80, "V"), + (0x1FA89, "X"), + ] + + +def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1FA90, "V"), + (0x1FABE, "X"), + (0x1FABF, "V"), + (0x1FAC6, "X"), + (0x1FACE, "V"), + (0x1FADC, "X"), + (0x1FAE0, "V"), + (0x1FAE9, "X"), + (0x1FAF0, "V"), + (0x1FAF9, "X"), + (0x1FB00, "V"), + (0x1FB93, "X"), + (0x1FB94, "V"), + (0x1FBCB, "X"), + (0x1FBF0, "M", "0"), + (0x1FBF1, "M", "1"), + (0x1FBF2, "M", "2"), + (0x1FBF3, "M", "3"), + (0x1FBF4, "M", "4"), + (0x1FBF5, "M", "5"), + (0x1FBF6, "M", "6"), + (0x1FBF7, "M", "7"), + (0x1FBF8, "M", "8"), + (0x1FBF9, "M", "9"), + (0x1FBFA, "X"), + (0x20000, "V"), + (0x2A6E0, "X"), + (0x2A700, "V"), + (0x2B73A, "X"), + (0x2B740, "V"), + (0x2B81E, "X"), + (0x2B820, "V"), + (0x2CEA2, "X"), + (0x2CEB0, "V"), + (0x2EBE1, "X"), + (0x2EBF0, "V"), + (0x2EE5E, "X"), + (0x2F800, "M", "丽"), + (0x2F801, "M", "丸"), + (0x2F802, "M", "乁"), + (0x2F803, "M", "𠄢"), + (0x2F804, "M", "你"), + (0x2F805, "M", "侮"), + (0x2F806, "M", "侻"), + (0x2F807, "M", "倂"), + (0x2F808, "M", "偺"), + (0x2F809, "M", "備"), + (0x2F80A, "M", "僧"), + (0x2F80B, "M", "像"), + (0x2F80C, "M", "㒞"), + (0x2F80D, "M", "𠘺"), + (0x2F80E, "M", "免"), + (0x2F80F, "M", "兔"), + (0x2F810, "M", "兤"), + (0x2F811, "M", "具"), + (0x2F812, "M", "𠔜"), + (0x2F813, "M", "㒹"), + (0x2F814, "M", "內"), + (0x2F815, "M", "再"), + (0x2F816, "M", "𠕋"), + (0x2F817, "M", "冗"), + (0x2F818, "M", "冤"), + (0x2F819, "M", "仌"), + (0x2F81A, "M", "冬"), + (0x2F81B, "M", "况"), + (0x2F81C, "M", "𩇟"), + (0x2F81D, "M", "凵"), + (0x2F81E, "M", "刃"), + (0x2F81F, "M", "㓟"), + (0x2F820, "M", "刻"), + (0x2F821, "M", "剆"), + (0x2F822, "M", "割"), + (0x2F823, "M", "剷"), + (0x2F824, "M", "㔕"), + (0x2F825, "M", "勇"), + (0x2F826, "M", "勉"), + (0x2F827, "M", "勤"), + (0x2F828, "M", "勺"), + (0x2F829, "M", "包"), + (0x2F82A, "M", "匆"), + (0x2F82B, "M", "北"), + (0x2F82C, "M", "卉"), + (0x2F82D, "M", "卑"), + (0x2F82E, "M", "博"), + (0x2F82F, "M", "即"), + (0x2F830, "M", "卽"), + (0x2F831, "M", "卿"), + (0x2F834, "M", "𠨬"), + (0x2F835, "M", "灰"), + (0x2F836, "M", "及"), + (0x2F837, "M", "叟"), + (0x2F838, "M", "𠭣"), + (0x2F839, "M", "叫"), + (0x2F83A, "M", "叱"), + (0x2F83B, "M", "吆"), + (0x2F83C, "M", "咞"), + (0x2F83D, "M", "吸"), + (0x2F83E, "M", "呈"), + (0x2F83F, "M", "周"), + (0x2F840, "M", "咢"), + ] + + +def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F841, "M", "哶"), + (0x2F842, "M", "唐"), + (0x2F843, "M", "啓"), + (0x2F844, "M", "啣"), + (0x2F845, "M", "善"), + (0x2F847, "M", "喙"), + (0x2F848, "M", "喫"), + (0x2F849, "M", "喳"), + (0x2F84A, "M", "嗂"), + (0x2F84B, "M", "圖"), + (0x2F84C, "M", "嘆"), + (0x2F84D, "M", "圗"), + (0x2F84E, "M", "噑"), + (0x2F84F, "M", "噴"), + (0x2F850, "M", "切"), + (0x2F851, "M", "壮"), + (0x2F852, "M", "城"), + (0x2F853, "M", "埴"), + (0x2F854, "M", "堍"), + (0x2F855, "M", "型"), + (0x2F856, "M", "堲"), + (0x2F857, "M", "報"), + (0x2F858, "M", "墬"), + (0x2F859, "M", "𡓤"), + (0x2F85A, 
"M", "売"), + (0x2F85B, "M", "壷"), + (0x2F85C, "M", "夆"), + (0x2F85D, "M", "多"), + (0x2F85E, "M", "夢"), + (0x2F85F, "M", "奢"), + (0x2F860, "M", "𡚨"), + (0x2F861, "M", "𡛪"), + (0x2F862, "M", "姬"), + (0x2F863, "M", "娛"), + (0x2F864, "M", "娧"), + (0x2F865, "M", "姘"), + (0x2F866, "M", "婦"), + (0x2F867, "M", "㛮"), + (0x2F868, "X"), + (0x2F869, "M", "嬈"), + (0x2F86A, "M", "嬾"), + (0x2F86C, "M", "𡧈"), + (0x2F86D, "M", "寃"), + (0x2F86E, "M", "寘"), + (0x2F86F, "M", "寧"), + (0x2F870, "M", "寳"), + (0x2F871, "M", "𡬘"), + (0x2F872, "M", "寿"), + (0x2F873, "M", "将"), + (0x2F874, "X"), + (0x2F875, "M", "尢"), + (0x2F876, "M", "㞁"), + (0x2F877, "M", "屠"), + (0x2F878, "M", "屮"), + (0x2F879, "M", "峀"), + (0x2F87A, "M", "岍"), + (0x2F87B, "M", "𡷤"), + (0x2F87C, "M", "嵃"), + (0x2F87D, "M", "𡷦"), + (0x2F87E, "M", "嵮"), + (0x2F87F, "M", "嵫"), + (0x2F880, "M", "嵼"), + (0x2F881, "M", "巡"), + (0x2F882, "M", "巢"), + (0x2F883, "M", "㠯"), + (0x2F884, "M", "巽"), + (0x2F885, "M", "帨"), + (0x2F886, "M", "帽"), + (0x2F887, "M", "幩"), + (0x2F888, "M", "㡢"), + (0x2F889, "M", "𢆃"), + (0x2F88A, "M", "㡼"), + (0x2F88B, "M", "庰"), + (0x2F88C, "M", "庳"), + (0x2F88D, "M", "庶"), + (0x2F88E, "M", "廊"), + (0x2F88F, "M", "𪎒"), + (0x2F890, "M", "廾"), + (0x2F891, "M", "𢌱"), + (0x2F893, "M", "舁"), + (0x2F894, "M", "弢"), + (0x2F896, "M", "㣇"), + (0x2F897, "M", "𣊸"), + (0x2F898, "M", "𦇚"), + (0x2F899, "M", "形"), + (0x2F89A, "M", "彫"), + (0x2F89B, "M", "㣣"), + (0x2F89C, "M", "徚"), + (0x2F89D, "M", "忍"), + (0x2F89E, "M", "志"), + (0x2F89F, "M", "忹"), + (0x2F8A0, "M", "悁"), + (0x2F8A1, "M", "㤺"), + (0x2F8A2, "M", "㤜"), + (0x2F8A3, "M", "悔"), + (0x2F8A4, "M", "𢛔"), + (0x2F8A5, "M", "惇"), + (0x2F8A6, "M", "慈"), + (0x2F8A7, "M", "慌"), + (0x2F8A8, "M", "慎"), + ] + + +def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F8A9, "M", "慌"), + (0x2F8AA, "M", "慺"), + (0x2F8AB, "M", "憎"), + (0x2F8AC, "M", "憲"), + (0x2F8AD, "M", "憤"), + (0x2F8AE, "M", "憯"), + (0x2F8AF, "M", "懞"), + (0x2F8B0, "M", "懲"), + (0x2F8B1, "M", "懶"), + (0x2F8B2, "M", "成"), + (0x2F8B3, "M", "戛"), + (0x2F8B4, "M", "扝"), + (0x2F8B5, "M", "抱"), + (0x2F8B6, "M", "拔"), + (0x2F8B7, "M", "捐"), + (0x2F8B8, "M", "𢬌"), + (0x2F8B9, "M", "挽"), + (0x2F8BA, "M", "拼"), + (0x2F8BB, "M", "捨"), + (0x2F8BC, "M", "掃"), + (0x2F8BD, "M", "揤"), + (0x2F8BE, "M", "𢯱"), + (0x2F8BF, "M", "搢"), + (0x2F8C0, "M", "揅"), + (0x2F8C1, "M", "掩"), + (0x2F8C2, "M", "㨮"), + (0x2F8C3, "M", "摩"), + (0x2F8C4, "M", "摾"), + (0x2F8C5, "M", "撝"), + (0x2F8C6, "M", "摷"), + (0x2F8C7, "M", "㩬"), + (0x2F8C8, "M", "敏"), + (0x2F8C9, "M", "敬"), + (0x2F8CA, "M", "𣀊"), + (0x2F8CB, "M", "旣"), + (0x2F8CC, "M", "書"), + (0x2F8CD, "M", "晉"), + (0x2F8CE, "M", "㬙"), + (0x2F8CF, "M", "暑"), + (0x2F8D0, "M", "㬈"), + (0x2F8D1, "M", "㫤"), + (0x2F8D2, "M", "冒"), + (0x2F8D3, "M", "冕"), + (0x2F8D4, "M", "最"), + (0x2F8D5, "M", "暜"), + (0x2F8D6, "M", "肭"), + (0x2F8D7, "M", "䏙"), + (0x2F8D8, "M", "朗"), + (0x2F8D9, "M", "望"), + (0x2F8DA, "M", "朡"), + (0x2F8DB, "M", "杞"), + (0x2F8DC, "M", "杓"), + (0x2F8DD, "M", "𣏃"), + (0x2F8DE, "M", "㭉"), + (0x2F8DF, "M", "柺"), + (0x2F8E0, "M", "枅"), + (0x2F8E1, "M", "桒"), + (0x2F8E2, "M", "梅"), + (0x2F8E3, "M", "𣑭"), + (0x2F8E4, "M", "梎"), + (0x2F8E5, "M", "栟"), + (0x2F8E6, "M", "椔"), + (0x2F8E7, "M", "㮝"), + (0x2F8E8, "M", "楂"), + (0x2F8E9, "M", "榣"), + (0x2F8EA, "M", "槪"), + (0x2F8EB, "M", "檨"), + (0x2F8EC, "M", "𣚣"), + (0x2F8ED, "M", "櫛"), + (0x2F8EE, "M", "㰘"), + (0x2F8EF, "M", "次"), + (0x2F8F0, "M", "𣢧"), + (0x2F8F1, "M", "歔"), + (0x2F8F2, "M", "㱎"), + (0x2F8F3, "M", "歲"), + (0x2F8F4, 
"M", "殟"), + (0x2F8F5, "M", "殺"), + (0x2F8F6, "M", "殻"), + (0x2F8F7, "M", "𣪍"), + (0x2F8F8, "M", "𡴋"), + (0x2F8F9, "M", "𣫺"), + (0x2F8FA, "M", "汎"), + (0x2F8FB, "M", "𣲼"), + (0x2F8FC, "M", "沿"), + (0x2F8FD, "M", "泍"), + (0x2F8FE, "M", "汧"), + (0x2F8FF, "M", "洖"), + (0x2F900, "M", "派"), + (0x2F901, "M", "海"), + (0x2F902, "M", "流"), + (0x2F903, "M", "浩"), + (0x2F904, "M", "浸"), + (0x2F905, "M", "涅"), + (0x2F906, "M", "𣴞"), + (0x2F907, "M", "洴"), + (0x2F908, "M", "港"), + (0x2F909, "M", "湮"), + (0x2F90A, "M", "㴳"), + (0x2F90B, "M", "滋"), + (0x2F90C, "M", "滇"), + ] + + +def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F90D, "M", "𣻑"), + (0x2F90E, "M", "淹"), + (0x2F90F, "M", "潮"), + (0x2F910, "M", "𣽞"), + (0x2F911, "M", "𣾎"), + (0x2F912, "M", "濆"), + (0x2F913, "M", "瀹"), + (0x2F914, "M", "瀞"), + (0x2F915, "M", "瀛"), + (0x2F916, "M", "㶖"), + (0x2F917, "M", "灊"), + (0x2F918, "M", "災"), + (0x2F919, "M", "灷"), + (0x2F91A, "M", "炭"), + (0x2F91B, "M", "𠔥"), + (0x2F91C, "M", "煅"), + (0x2F91D, "M", "𤉣"), + (0x2F91E, "M", "熜"), + (0x2F91F, "X"), + (0x2F920, "M", "爨"), + (0x2F921, "M", "爵"), + (0x2F922, "M", "牐"), + (0x2F923, "M", "𤘈"), + (0x2F924, "M", "犀"), + (0x2F925, "M", "犕"), + (0x2F926, "M", "𤜵"), + (0x2F927, "M", "𤠔"), + (0x2F928, "M", "獺"), + (0x2F929, "M", "王"), + (0x2F92A, "M", "㺬"), + (0x2F92B, "M", "玥"), + (0x2F92C, "M", "㺸"), + (0x2F92E, "M", "瑇"), + (0x2F92F, "M", "瑜"), + (0x2F930, "M", "瑱"), + (0x2F931, "M", "璅"), + (0x2F932, "M", "瓊"), + (0x2F933, "M", "㼛"), + (0x2F934, "M", "甤"), + (0x2F935, "M", "𤰶"), + (0x2F936, "M", "甾"), + (0x2F937, "M", "𤲒"), + (0x2F938, "M", "異"), + (0x2F939, "M", "𢆟"), + (0x2F93A, "M", "瘐"), + (0x2F93B, "M", "𤾡"), + (0x2F93C, "M", "𤾸"), + (0x2F93D, "M", "𥁄"), + (0x2F93E, "M", "㿼"), + (0x2F93F, "M", "䀈"), + (0x2F940, "M", "直"), + (0x2F941, "M", "𥃳"), + (0x2F942, "M", "𥃲"), + (0x2F943, "M", "𥄙"), + (0x2F944, "M", "𥄳"), + (0x2F945, "M", "眞"), + (0x2F946, "M", "真"), + (0x2F948, "M", "睊"), + (0x2F949, "M", "䀹"), + (0x2F94A, "M", "瞋"), + (0x2F94B, "M", "䁆"), + (0x2F94C, "M", "䂖"), + (0x2F94D, "M", "𥐝"), + (0x2F94E, "M", "硎"), + (0x2F94F, "M", "碌"), + (0x2F950, "M", "磌"), + (0x2F951, "M", "䃣"), + (0x2F952, "M", "𥘦"), + (0x2F953, "M", "祖"), + (0x2F954, "M", "𥚚"), + (0x2F955, "M", "𥛅"), + (0x2F956, "M", "福"), + (0x2F957, "M", "秫"), + (0x2F958, "M", "䄯"), + (0x2F959, "M", "穀"), + (0x2F95A, "M", "穊"), + (0x2F95B, "M", "穏"), + (0x2F95C, "M", "𥥼"), + (0x2F95D, "M", "𥪧"), + (0x2F95F, "X"), + (0x2F960, "M", "䈂"), + (0x2F961, "M", "𥮫"), + (0x2F962, "M", "篆"), + (0x2F963, "M", "築"), + (0x2F964, "M", "䈧"), + (0x2F965, "M", "𥲀"), + (0x2F966, "M", "糒"), + (0x2F967, "M", "䊠"), + (0x2F968, "M", "糨"), + (0x2F969, "M", "糣"), + (0x2F96A, "M", "紀"), + (0x2F96B, "M", "𥾆"), + (0x2F96C, "M", "絣"), + (0x2F96D, "M", "䌁"), + (0x2F96E, "M", "緇"), + (0x2F96F, "M", "縂"), + (0x2F970, "M", "繅"), + (0x2F971, "M", "䌴"), + (0x2F972, "M", "𦈨"), + (0x2F973, "M", "𦉇"), + ] + + +def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F974, "M", "䍙"), + (0x2F975, "M", "𦋙"), + (0x2F976, "M", "罺"), + (0x2F977, "M", "𦌾"), + (0x2F978, "M", "羕"), + (0x2F979, "M", "翺"), + (0x2F97A, "M", "者"), + (0x2F97B, "M", "𦓚"), + (0x2F97C, "M", "𦔣"), + (0x2F97D, "M", "聠"), + (0x2F97E, "M", "𦖨"), + (0x2F97F, "M", "聰"), + (0x2F980, "M", "𣍟"), + (0x2F981, "M", "䏕"), + (0x2F982, "M", "育"), + (0x2F983, "M", "脃"), + (0x2F984, "M", "䐋"), + (0x2F985, "M", "脾"), + (0x2F986, "M", "媵"), + (0x2F987, "M", "𦞧"), + (0x2F988, "M", "𦞵"), + (0x2F989, "M", "𣎓"), + (0x2F98A, "M", 
"𣎜"), + (0x2F98B, "M", "舁"), + (0x2F98C, "M", "舄"), + (0x2F98D, "M", "辞"), + (0x2F98E, "M", "䑫"), + (0x2F98F, "M", "芑"), + (0x2F990, "M", "芋"), + (0x2F991, "M", "芝"), + (0x2F992, "M", "劳"), + (0x2F993, "M", "花"), + (0x2F994, "M", "芳"), + (0x2F995, "M", "芽"), + (0x2F996, "M", "苦"), + (0x2F997, "M", "𦬼"), + (0x2F998, "M", "若"), + (0x2F999, "M", "茝"), + (0x2F99A, "M", "荣"), + (0x2F99B, "M", "莭"), + (0x2F99C, "M", "茣"), + (0x2F99D, "M", "莽"), + (0x2F99E, "M", "菧"), + (0x2F99F, "M", "著"), + (0x2F9A0, "M", "荓"), + (0x2F9A1, "M", "菊"), + (0x2F9A2, "M", "菌"), + (0x2F9A3, "M", "菜"), + (0x2F9A4, "M", "𦰶"), + (0x2F9A5, "M", "𦵫"), + (0x2F9A6, "M", "𦳕"), + (0x2F9A7, "M", "䔫"), + (0x2F9A8, "M", "蓱"), + (0x2F9A9, "M", "蓳"), + (0x2F9AA, "M", "蔖"), + (0x2F9AB, "M", "𧏊"), + (0x2F9AC, "M", "蕤"), + (0x2F9AD, "M", "𦼬"), + (0x2F9AE, "M", "䕝"), + (0x2F9AF, "M", "䕡"), + (0x2F9B0, "M", "𦾱"), + (0x2F9B1, "M", "𧃒"), + (0x2F9B2, "M", "䕫"), + (0x2F9B3, "M", "虐"), + (0x2F9B4, "M", "虜"), + (0x2F9B5, "M", "虧"), + (0x2F9B6, "M", "虩"), + (0x2F9B7, "M", "蚩"), + (0x2F9B8, "M", "蚈"), + (0x2F9B9, "M", "蜎"), + (0x2F9BA, "M", "蛢"), + (0x2F9BB, "M", "蝹"), + (0x2F9BC, "M", "蜨"), + (0x2F9BD, "M", "蝫"), + (0x2F9BE, "M", "螆"), + (0x2F9BF, "X"), + (0x2F9C0, "M", "蟡"), + (0x2F9C1, "M", "蠁"), + (0x2F9C2, "M", "䗹"), + (0x2F9C3, "M", "衠"), + (0x2F9C4, "M", "衣"), + (0x2F9C5, "M", "𧙧"), + (0x2F9C6, "M", "裗"), + (0x2F9C7, "M", "裞"), + (0x2F9C8, "M", "䘵"), + (0x2F9C9, "M", "裺"), + (0x2F9CA, "M", "㒻"), + (0x2F9CB, "M", "𧢮"), + (0x2F9CC, "M", "𧥦"), + (0x2F9CD, "M", "䚾"), + (0x2F9CE, "M", "䛇"), + (0x2F9CF, "M", "誠"), + (0x2F9D0, "M", "諭"), + (0x2F9D1, "M", "變"), + (0x2F9D2, "M", "豕"), + (0x2F9D3, "M", "𧲨"), + (0x2F9D4, "M", "貫"), + (0x2F9D5, "M", "賁"), + (0x2F9D6, "M", "贛"), + (0x2F9D7, "M", "起"), + ] + + +def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F9D8, "M", "𧼯"), + (0x2F9D9, "M", "𠠄"), + (0x2F9DA, "M", "跋"), + (0x2F9DB, "M", "趼"), + (0x2F9DC, "M", "跰"), + (0x2F9DD, "M", "𠣞"), + (0x2F9DE, "M", "軔"), + (0x2F9DF, "M", "輸"), + (0x2F9E0, "M", "𨗒"), + (0x2F9E1, "M", "𨗭"), + (0x2F9E2, "M", "邔"), + (0x2F9E3, "M", "郱"), + (0x2F9E4, "M", "鄑"), + (0x2F9E5, "M", "𨜮"), + (0x2F9E6, "M", "鄛"), + (0x2F9E7, "M", "鈸"), + (0x2F9E8, "M", "鋗"), + (0x2F9E9, "M", "鋘"), + (0x2F9EA, "M", "鉼"), + (0x2F9EB, "M", "鏹"), + (0x2F9EC, "M", "鐕"), + (0x2F9ED, "M", "𨯺"), + (0x2F9EE, "M", "開"), + (0x2F9EF, "M", "䦕"), + (0x2F9F0, "M", "閷"), + (0x2F9F1, "M", "𨵷"), + (0x2F9F2, "M", "䧦"), + (0x2F9F3, "M", "雃"), + (0x2F9F4, "M", "嶲"), + (0x2F9F5, "M", "霣"), + (0x2F9F6, "M", "𩅅"), + (0x2F9F7, "M", "𩈚"), + (0x2F9F8, "M", "䩮"), + (0x2F9F9, "M", "䩶"), + (0x2F9FA, "M", "韠"), + (0x2F9FB, "M", "𩐊"), + (0x2F9FC, "M", "䪲"), + (0x2F9FD, "M", "𩒖"), + (0x2F9FE, "M", "頋"), + (0x2FA00, "M", "頩"), + (0x2FA01, "M", "𩖶"), + (0x2FA02, "M", "飢"), + (0x2FA03, "M", "䬳"), + (0x2FA04, "M", "餩"), + (0x2FA05, "M", "馧"), + (0x2FA06, "M", "駂"), + (0x2FA07, "M", "駾"), + (0x2FA08, "M", "䯎"), + (0x2FA09, "M", "𩬰"), + (0x2FA0A, "M", "鬒"), + (0x2FA0B, "M", "鱀"), + (0x2FA0C, "M", "鳽"), + (0x2FA0D, "M", "䳎"), + (0x2FA0E, "M", "䳭"), + (0x2FA0F, "M", "鵧"), + (0x2FA10, "M", "𪃎"), + (0x2FA11, "M", "䳸"), + (0x2FA12, "M", "𪄅"), + (0x2FA13, "M", "𪈎"), + (0x2FA14, "M", "𪊑"), + (0x2FA15, "M", "麻"), + (0x2FA16, "M", "䵖"), + (0x2FA17, "M", "黹"), + (0x2FA18, "M", "黾"), + (0x2FA19, "M", "鼅"), + (0x2FA1A, "M", "鼏"), + (0x2FA1B, "M", "鼖"), + (0x2FA1C, "M", "鼻"), + (0x2FA1D, "M", "𪘀"), + (0x2FA1E, "X"), + (0x30000, "V"), + (0x3134B, "X"), + (0x31350, "V"), + (0x323B0, "X"), + (0xE0100, 
"I"), + (0xE01F0, "X"), + ] + + +uts46data = tuple( + _seg_0() + + _seg_1() + + _seg_2() + + _seg_3() + + _seg_4() + + _seg_5() + + _seg_6() + + _seg_7() + + _seg_8() + + _seg_9() + + _seg_10() + + _seg_11() + + _seg_12() + + _seg_13() + + _seg_14() + + _seg_15() + + _seg_16() + + _seg_17() + + _seg_18() + + _seg_19() + + _seg_20() + + _seg_21() + + _seg_22() + + _seg_23() + + _seg_24() + + _seg_25() + + _seg_26() + + _seg_27() + + _seg_28() + + _seg_29() + + _seg_30() + + _seg_31() + + _seg_32() + + _seg_33() + + _seg_34() + + _seg_35() + + _seg_36() + + _seg_37() + + _seg_38() + + _seg_39() + + _seg_40() + + _seg_41() + + _seg_42() + + _seg_43() + + _seg_44() + + _seg_45() + + _seg_46() + + _seg_47() + + _seg_48() + + _seg_49() + + _seg_50() + + _seg_51() + + _seg_52() + + _seg_53() + + _seg_54() + + _seg_55() + + _seg_56() + + _seg_57() + + _seg_58() + + _seg_59() + + _seg_60() + + _seg_61() + + _seg_62() + + _seg_63() + + _seg_64() + + _seg_65() + + _seg_66() + + _seg_67() + + _seg_68() + + _seg_69() + + _seg_70() + + _seg_71() + + _seg_72() + + _seg_73() + + _seg_74() + + _seg_75() + + _seg_76() + + _seg_77() + + _seg_78() + + _seg_79() + + _seg_80() + + _seg_81() +) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] diff --git a/venv/Lib/site-packages/jiter-0.9.0.dist-info/INSTALLER b/venv/Lib/site-packages/jiter-0.9.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/jiter-0.9.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/jiter-0.9.0.dist-info/METADATA b/venv/Lib/site-packages/jiter-0.9.0.dist-info/METADATA new file mode 100644 index 00000000..611a1333 --- /dev/null +++ b/venv/Lib/site-packages/jiter-0.9.0.dist-info/METADATA @@ -0,0 +1,145 @@ +Metadata-Version: 2.4 +Name: jiter +Version: 0.9.0 +Classifier: Development Status :: 4 - Beta +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: Unix +Classifier: Operating System :: POSIX :: Linux +Classifier: Environment :: Console +Classifier: Environment :: MacOS X +Classifier: Topic :: File Formats :: JSON +Classifier: Framework :: Pydantic :: 2 +Summary: Fast iterable JSON parser. 
+Keywords: JSON,parsing,deserialization,iter
+Home-Page: https://github.com/pydantic/jiter/
+Author: Samuel Colvin
+Author-email: Samuel Colvin
+License: MIT
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
+Project-URL: Source Code, https://github.com/pydantic/jiter/
+
+# jiter
+
+[![CI](https://github.com/pydantic/jiter/workflows/CI/badge.svg?event=push)](https://github.com/pydantic/jiter/actions?query=event%3Apush+branch%3Amain+workflow%3ACI)
+[![pypi](https://img.shields.io/pypi/v/jiter.svg)](https://pypi.python.org/pypi/jiter)
+[![versions](https://img.shields.io/pypi/pyversions/jiter.svg)](https://github.com/pydantic/jiter)
+[![license](https://img.shields.io/github/license/pydantic/jiter.svg)](https://github.com/pydantic/jiter/blob/main/LICENSE)
+
+This is a standalone version of the JSON parser used in `pydantic-core`. We recommend using this package directly only if you do not use `pydantic`.
+
+The API is extremely minimal:
+
+```python
+def from_json(
+    json_data: bytes,
+    /,
+    *,
+    allow_inf_nan: bool = True,
+    cache_mode: Literal[True, False, "all", "keys", "none"] = "all",
+    partial_mode: Literal[True, False, "off", "on", "trailing-strings"] = False,
+    catch_duplicate_keys: bool = False,
+    float_mode: Literal["float", "decimal", "lossless-float"] = False,
+) -> Any:
+    """
+    Parse input bytes into a JSON object.
+
+    Arguments:
+        json_data: The JSON data to parse
+        allow_inf_nan: Whether to allow infinity (`Infinity` and `-Infinity`) and `NaN` values in float fields.
+            Defaults to True.
+        cache_mode: cache Python strings to improve performance at the cost of some memory usage
+            - True / 'all' - cache all strings
+            - 'keys' - cache only object keys
+            - False / 'none' - cache nothing
+        partial_mode: How to handle incomplete strings:
+            - False / 'off' - raise an exception if the input is incomplete
+            - True / 'on' - allow incomplete JSON but discard the last string if it is incomplete
+            - 'trailing-strings' - allow incomplete JSON, and include the last incomplete string in the output
+        catch_duplicate_keys: if True, raise an exception if objects contain the same key multiple times
+        float_mode: How to return floats: as a `float`, `Decimal` or `LosslessFloat`
+
+    Returns:
+        Python object built from the JSON input.
+    """
+
+def cache_clear() -> None:
+    """
+    Reset the string cache.
+    """
+
+def cache_usage() -> int:
+    """
+    Get the size of the string cache.
+
+    Returns:
+        Size of the string cache in bytes.
+    """
+```
+
+## Examples
+
+The main function provided by jiter is `from_json()`, which accepts a bytes object containing JSON and returns a Python dictionary, list or other value.
+
+```python
+import jiter
+
+json_data = b'{"name": "John", "age": 30}'
+parsed_data = jiter.from_json(json_data)
+print(parsed_data)  # Output: {'name': 'John', 'age': 30}
+```
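+
+### Float Modes and the String Cache
+
+The `float_mode=` parameter above selects the Python type used for JSON numbers with a fractional part. A brief sketch of the two most common modes (the outputs shown are what the parameter description implies, not copied from the upstream docs):
+
+```python
+import jiter
+
+json_data = b'{"price": 123.456}'
+
+# Default mode: JSON floats are returned as Python floats
+print(jiter.from_json(json_data))  # Output: {'price': 123.456}
+
+# "decimal" mode: JSON floats are returned as decimal.Decimal values,
+# avoiding binary floating-point rounding
+print(jiter.from_json(json_data, float_mode="decimal"))
+# Output: {'price': Decimal('123.456')}
+```
+
+Similarly, the `cache_usage()` and `cache_clear()` helpers documented above can be used to monitor and bound the memory held by the string cache; a minimal sketch:
+
+```python
+import jiter
+
+jiter.from_json(b'{"name": "John"}')
+print(jiter.cache_usage())  # a small positive number of bytes
+
+jiter.cache_clear()
+print(jiter.cache_usage())  # Output: 0
+```
+
+### Handling Partial JSON
+
+Incomplete JSON objects can be parsed using the `partial_mode=` parameter.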
+
+```python
+import jiter
+
+partial_json = b'{"name": "John", "age": 30, "city": "New Yor'
+
+# Raise error on incomplete JSON
+try:
+    jiter.from_json(partial_json, partial_mode=False)
+except ValueError as e:
+    print(f"Error: {e}")
+
+# Parse incomplete JSON, discarding the incomplete last field
+result = jiter.from_json(partial_json, partial_mode=True)
+print(result)  # Output: {'name': 'John', 'age': 30}
+
+# Parse incomplete JSON, including the incomplete last field
+result = jiter.from_json(partial_json, partial_mode='trailing-strings')
+print(result)  # Output: {'name': 'John', 'age': 30, 'city': 'New Yor'}
+```
+
+### Catching Duplicate Keys
+
+The `catch_duplicate_keys=True` option can be used to raise a `ValueError` if an object contains duplicate keys.
+
+```python
+import jiter
+
+json_with_dupes = b'{"foo": 1, "foo": 2}'
+
+# Default behavior (last value wins)
+result = jiter.from_json(json_with_dupes)
+print(result)  # Output: {'foo': 2}
+
+# Catch duplicate keys
+try:
+    jiter.from_json(json_with_dupes, catch_duplicate_keys=True)
+except ValueError as e:
+    print(f"Error: {e}")
+```
+
diff --git a/venv/Lib/site-packages/jiter-0.9.0.dist-info/RECORD b/venv/Lib/site-packages/jiter-0.9.0.dist-info/RECORD
new file mode 100644
index 00000000..6ea4bc4f
--- /dev/null
+++ b/venv/Lib/site-packages/jiter-0.9.0.dist-info/RECORD
@@ -0,0 +1,9 @@
+jiter-0.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+jiter-0.9.0.dist-info/METADATA,sha256=2hQKefnS3Sh7nDwZQFqDBcdNtbgGgV9_CGx4guzlHQ8,5289
+jiter-0.9.0.dist-info/RECORD,,
+jiter-0.9.0.dist-info/WHEEL,sha256=tpW5AN9B-9qsM9WW2FXG2r193YXiqexDadpKp0A2daI,96
+jiter/__init__.py,sha256=Fp9HkOixiYYDSiC_80vmiJ_sCoCGT8OAh48yltm0lP0,103
+jiter/__init__.pyi,sha256=TdIK8dlOWZXDjCYbWNUaro21YmAibxK9CHxclnnR-u0,2433
+jiter/__pycache__/__init__.cpython-312.pyc,,
+jiter/jiter.cp312-win_amd64.pyd,sha256=KKRJPDkG04JV5xE7PVMCq_QrWnPw-P7dBXk1hPdUHjE,481280
+jiter/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/venv/Lib/site-packages/jiter-0.9.0.dist-info/WHEEL b/venv/Lib/site-packages/jiter-0.9.0.dist-info/WHEEL
new file mode 100644
index 00000000..adf7e875
--- /dev/null
+++ b/venv/Lib/site-packages/jiter-0.9.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: maturin (1.8.2)
+Root-Is-Purelib: false
+Tag: cp312-cp312-win_amd64
diff --git a/venv/Lib/site-packages/jiter/__init__.py b/venv/Lib/site-packages/jiter/__init__.py
new file mode 100644
index 00000000..3d17192f
--- /dev/null
+++ b/venv/Lib/site-packages/jiter/__init__.py
@@ -0,0 +1,5 @@
+from .jiter import *
+
+__doc__ = jiter.__doc__
+if hasattr(jiter, "__all__"):
+    __all__ = jiter.__all__
\ No newline at end of file
diff --git a/venv/Lib/site-packages/jiter/__init__.pyi b/venv/Lib/site-packages/jiter/__init__.pyi
new file mode 100644
index 00000000..928fa751
--- /dev/null
+++ b/venv/Lib/site-packages/jiter/__init__.pyi
@@ -0,0 +1,70 @@
+import decimal
+from typing import Any, Literal
+
+def from_json(
+    json_data: bytes,
+    /,
+    *,
+    allow_inf_nan: bool = True,
+    cache_mode: Literal[True, False, "all", "keys", "none"] = "all",
+    partial_mode: Literal[True, False, "off", "on", "trailing-strings"] = False,
+    catch_duplicate_keys: bool = False,
+    float_mode: Literal["float", "decimal", "lossless-float"] = False,
+) -> Any:
+    """
+    Parse input bytes into a JSON object.
+
+    Arguments:
+        json_data: The JSON data to parse
+        allow_inf_nan: Whether to allow infinity (`Infinity` and `-Infinity`) and `NaN` values in float fields.
+            Defaults to True.
+        cache_mode: cache Python strings to improve performance at the cost of some memory usage
+            - True / 'all' - cache all strings
+            - 'keys' - cache only object keys
+            - False / 'none' - cache nothing
+        partial_mode: How to handle incomplete strings:
+            - False / 'off' - raise an exception if the input is incomplete
+            - True / 'on' - allow incomplete JSON but discard the last string if it is incomplete
+            - 'trailing-strings' - allow incomplete JSON, and include the last incomplete string in the output
+        catch_duplicate_keys: if True, raise an exception if objects contain the same key multiple times
+        float_mode: How to return floats: as a `float`, `Decimal` or `LosslessFloat`
+
+    Returns:
+        Python object built from the JSON input.
+    """
+
+def cache_clear() -> None:
+    """
+    Reset the string cache.
+    """
+
+def cache_usage() -> int:
+    """
+    Get the size of the string cache.
+
+    Returns:
+        Size of the string cache in bytes.
+    """
+
+
+class LosslessFloat:
+    """
+    Represents a float from JSON by holding the underlying JSON bytes.
+    """
+    def __init__(self, json_float: bytes):
+        """Construct a LosslessFloat object from a JSON bytes slice"""
+
+    def as_decimal(self) -> decimal.Decimal:
+        """Construct a Python Decimal from the JSON bytes slice"""
+
+    def __float__(self) -> float:
+        """Construct a Python float from the JSON bytes slice"""
+
+    def __bytes__(self) -> bytes:
+        """Return the JSON bytes slice as bytes"""
+
+    def __str__(self):
+        """Return the JSON bytes slice as a string"""
+
+    def __repr__(self):
+        ...
diff --git a/venv/Lib/site-packages/jiter/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/jiter/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..6061343a
Binary files /dev/null and b/venv/Lib/site-packages/jiter/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/jiter/jiter.cp312-win_amd64.pyd b/venv/Lib/site-packages/jiter/jiter.cp312-win_amd64.pyd
new file mode 100644
index 00000000..c7555999
Binary files /dev/null and b/venv/Lib/site-packages/jiter/jiter.cp312-win_amd64.pyd differ
diff --git a/venv/Lib/site-packages/jiter/py.typed b/venv/Lib/site-packages/jiter/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/venv/Lib/site-packages/jsonpatch-1.33.dist-info/AUTHORS b/venv/Lib/site-packages/jsonpatch-1.33.dist-info/AUTHORS
new file mode 100644
index 00000000..f5f6363e
--- /dev/null
+++ b/venv/Lib/site-packages/jsonpatch-1.33.dist-info/AUTHORS
@@ -0,0 +1,4 @@
+Stefan Kögl
+Alexander Shorin
+Byron Ruth
+William Kral
diff --git a/venv/Lib/site-packages/jsonpatch-1.33.dist-info/INSTALLER b/venv/Lib/site-packages/jsonpatch-1.33.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/venv/Lib/site-packages/jsonpatch-1.33.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/jsonpatch-1.33.dist-info/LICENSE b/venv/Lib/site-packages/jsonpatch-1.33.dist-info/LICENSE
new file mode 100644
index 00000000..c8fc60f3
--- /dev/null
+++ b/venv/Lib/site-packages/jsonpatch-1.33.dist-info/LICENSE
@@ -0,0 +1,11 @@
+Copyright (c) 2011 Stefan Kögl
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/venv/Lib/site-packages/jsonpatch-1.33.dist-info/METADATA b/venv/Lib/site-packages/jsonpatch-1.33.dist-info/METADATA
new file mode 100644
index 00000000..f73d29d6
--- /dev/null
+++ b/venv/Lib/site-packages/jsonpatch-1.33.dist-info/METADATA
@@ -0,0 +1,66 @@
+Metadata-Version: 2.1
+Name: jsonpatch
+Version: 1.33
+Summary: Apply JSON-Patches (RFC 6902)
+Home-page: https://github.com/stefankoegl/python-json-patch
+Author: Stefan Kögl
+Author-email: stefan@skoegl.net
+License: Modified BSD License
+Project-URL: Website, https://github.com/stefankoegl/python-json-patch
+Project-URL: Repository, https://github.com/stefankoegl/python-json-patch.git
+Project-URL: Documentation, https://python-json-patch.readthedocs.org/
+Project-URL: PyPI, https://pypi.org/pypi/jsonpatch
+Project-URL: Tests, https://travis-ci.org/stefankoegl/python-json-patch
+Project-URL: Test Coverage, https://coveralls.io/r/stefankoegl/python-json-patch
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*
+License-File: LICENSE
+License-File: AUTHORS
+Requires-Dist: jsonpointer (>=1.9)
+
+python-json-patch
+=================
+
+[![PyPI version](https://img.shields.io/pypi/v/jsonpatch.svg)](https://pypi.python.org/pypi/jsonpatch/)
+[![Supported Python versions](https://img.shields.io/pypi/pyversions/jsonpatch.svg)](https://pypi.python.org/pypi/jsonpatch/)
+[![Build Status](https://travis-ci.org/stefankoegl/python-json-patch.png?branch=master)](https://travis-ci.org/stefankoegl/python-json-patch)
+[![Coverage Status](https://coveralls.io/repos/stefankoegl/python-json-patch/badge.png?branch=master)](https://coveralls.io/r/stefankoegl/python-json-patch?branch=master)
+
+Applying JSON Patches in Python
+-------------------------------
+
+Library to apply JSON Patches according to
+[RFC 6902](http://tools.ietf.org/html/rfc6902).
+
+See the source code for examples, and the usage sketch at the end of this README.
+
+* Website: https://github.com/stefankoegl/python-json-patch
+* Repository: https://github.com/stefankoegl/python-json-patch.git
+* Documentation: https://python-json-patch.readthedocs.org/
+* PyPI: https://pypi.python.org/pypi/jsonpatch
+* Travis CI: https://travis-ci.org/stefankoegl/python-json-patch
+* Coveralls: https://coveralls.io/r/stefankoegl/python-json-patch
+
+Running external tests
+----------------------
+To run external tests (such as those from https://github.com/json-patch/json-patch-tests), use ext_tests.py:
+
+    ./ext_tests.py ../json-patch-tests/tests.json
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. The name of the author may not be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +""" Apply JSON-Patches (RFC 6902) """ + +from __future__ import unicode_literals + +import collections +import copy +import functools +import json +import sys + +try: + from collections.abc import Sequence +except ImportError: # Python 3 + from collections import Sequence + +try: + from types import MappingProxyType +except ImportError: + # Python < 3.3 + MappingProxyType = dict + +from jsonpointer import JsonPointer, JsonPointerException + + +_ST_ADD = 0 +_ST_REMOVE = 1 + + +try: + from collections.abc import MutableMapping, MutableSequence + +except ImportError: + from collections import MutableMapping, MutableSequence + str = unicode + +# Will be parsed by setup.py to determine package metadata +__author__ = 'Stefan Kögl ' +__version__ = '1.33' +__website__ = 'https://github.com/stefankoegl/python-json-patch' +__license__ = 'Modified BSD License' + + +# pylint: disable=E0611,W0404 +if sys.version_info >= (3, 0): + basestring = (bytes, str) # pylint: disable=C0103,W0622 + + +class JsonPatchException(Exception): + """Base Json Patch exception""" + + +class InvalidJsonPatch(JsonPatchException): + """ Raised if an invalid JSON Patch is created """ + + +class JsonPatchConflict(JsonPatchException): + """Raised if patch could not be applied due to conflict situation such as: + - attempt to add object key when it already exists; + - attempt to operate with nonexistence object key; + - attempt to insert value to array at position beyond its size; + - etc. + """ + + +class JsonPatchTestFailed(JsonPatchException, AssertionError): + """ A Test operation failed """ + + +def multidict(ordered_pairs): + """Convert duplicate keys values to lists.""" + # read all values into lists + mdict = collections.defaultdict(list) + for key, value in ordered_pairs: + mdict[key].append(value) + + return dict( + # unpack lists that have only 1 item + (key, values[0] if len(values) == 1 else values) + for key, values in mdict.items() + ) + + +# The "object_pairs_hook" parameter is used to handle duplicate keys when +# loading a JSON object. +_jsonloads = functools.partial(json.loads, object_pairs_hook=multidict) + + +def apply_patch(doc, patch, in_place=False, pointer_cls=JsonPointer): + """Apply list of patches to specified json document. + + :param doc: Document object. + :type doc: dict + + :param patch: JSON patch as list of dicts or raw JSON-encoded string. 
+ :type patch: list or str + + :param in_place: While :const:`True` patch will modify target document. + By default patch will be applied to document copy. + :type in_place: bool + + :param pointer_cls: JSON pointer class to use. + :type pointer_cls: Type[JsonPointer] + + :return: Patched document object. + :rtype: dict + + >>> doc = {'foo': 'bar'} + >>> patch = [{'op': 'add', 'path': '/baz', 'value': 'qux'}] + >>> other = apply_patch(doc, patch) + >>> doc is not other + True + >>> other == {'foo': 'bar', 'baz': 'qux'} + True + >>> patch = [{'op': 'add', 'path': '/baz', 'value': 'qux'}] + >>> apply_patch(doc, patch, in_place=True) == {'foo': 'bar', 'baz': 'qux'} + True + >>> doc == other + True + """ + + if isinstance(patch, basestring): + patch = JsonPatch.from_string(patch, pointer_cls=pointer_cls) + else: + patch = JsonPatch(patch, pointer_cls=pointer_cls) + return patch.apply(doc, in_place) + + +def make_patch(src, dst, pointer_cls=JsonPointer): + """Generates patch by comparing two document objects. Actually is + a proxy to :meth:`JsonPatch.from_diff` method. + + :param src: Data source document object. + :type src: dict + + :param dst: Data source document object. + :type dst: dict + + :param pointer_cls: JSON pointer class to use. + :type pointer_cls: Type[JsonPointer] + + >>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]} + >>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]} + >>> patch = make_patch(src, dst) + >>> new = patch.apply(src) + >>> new == dst + True + """ + + return JsonPatch.from_diff(src, dst, pointer_cls=pointer_cls) + + +class PatchOperation(object): + """A single operation inside a JSON Patch.""" + + def __init__(self, operation, pointer_cls=JsonPointer): + self.pointer_cls = pointer_cls + + if not operation.__contains__('path'): + raise InvalidJsonPatch("Operation must have a 'path' member") + + if isinstance(operation['path'], self.pointer_cls): + self.location = operation['path'].path + self.pointer = operation['path'] + else: + self.location = operation['path'] + try: + self.pointer = self.pointer_cls(self.location) + except TypeError as ex: + raise InvalidJsonPatch("Invalid 'path'") + + self.operation = operation + + def apply(self, obj): + """Abstract method that applies a patch operation to the specified object.""" + raise NotImplementedError('should implement the patch operation.') + + def __hash__(self): + return hash(frozenset(self.operation.items())) + + def __eq__(self, other): + if not isinstance(other, PatchOperation): + return False + return self.operation == other.operation + + def __ne__(self, other): + return not(self == other) + + @property + def path(self): + return '/'.join(self.pointer.parts[:-1]) + + @property + def key(self): + try: + return int(self.pointer.parts[-1]) + except ValueError: + return self.pointer.parts[-1] + + @key.setter + def key(self, value): + self.pointer.parts[-1] = str(value) + self.location = self.pointer.path + self.operation['path'] = self.location + + +class RemoveOperation(PatchOperation): + """Removes an object property or an array element.""" + + def apply(self, obj): + subobj, part = self.pointer.to_last(obj) + + if isinstance(subobj, Sequence) and not isinstance(part, int): + raise JsonPointerException("invalid array index '{0}'".format(part)) + + try: + del subobj[part] + except (KeyError, IndexError) as ex: + msg = "can't remove a non-existent object '{0}'".format(part) + raise JsonPatchConflict(msg) + + return obj + + def _on_undo_remove(self, path, key): + if self.path == path: + if self.key >= key: + self.key += 
1 + else: + key -= 1 + return key + + def _on_undo_add(self, path, key): + if self.path == path: + if self.key > key: + self.key -= 1 + else: + key -= 1 + return key + + +class AddOperation(PatchOperation): + """Adds an object property or an array element.""" + + def apply(self, obj): + try: + value = self.operation["value"] + except KeyError as ex: + raise InvalidJsonPatch( + "The operation does not contain a 'value' member") + + subobj, part = self.pointer.to_last(obj) + + if isinstance(subobj, MutableSequence): + if part == '-': + subobj.append(value) # pylint: disable=E1103 + + elif part > len(subobj) or part < 0: + raise JsonPatchConflict("can't insert outside of list") + + else: + subobj.insert(part, value) # pylint: disable=E1103 + + elif isinstance(subobj, MutableMapping): + if part is None: + obj = value # we're replacing the root + else: + subobj[part] = value + + else: + if part is None: + raise TypeError("invalid document type {0}".format(type(subobj))) + else: + raise JsonPatchConflict("unable to fully resolve json pointer {0}, part {1}".format(self.location, part)) + return obj + + def _on_undo_remove(self, path, key): + if self.path == path: + if self.key > key: + self.key += 1 + else: + key += 1 + return key + + def _on_undo_add(self, path, key): + if self.path == path: + if self.key > key: + self.key -= 1 + else: + key += 1 + return key + + +class ReplaceOperation(PatchOperation): + """Replaces an object property or an array element by a new value.""" + + def apply(self, obj): + try: + value = self.operation["value"] + except KeyError as ex: + raise InvalidJsonPatch( + "The operation does not contain a 'value' member") + + subobj, part = self.pointer.to_last(obj) + + if part is None: + return value + + if part == "-": + raise InvalidJsonPatch("'path' with '-' can't be applied to 'replace' operation") + + if isinstance(subobj, MutableSequence): + if part >= len(subobj) or part < 0: + raise JsonPatchConflict("can't replace outside of list") + + elif isinstance(subobj, MutableMapping): + if part not in subobj: + msg = "can't replace a non-existent object '{0}'".format(part) + raise JsonPatchConflict(msg) + else: + if part is None: + raise TypeError("invalid document type {0}".format(type(subobj))) + else: + raise JsonPatchConflict("unable to fully resolve json pointer {0}, part {1}".format(self.location, part)) + + subobj[part] = value + return obj + + def _on_undo_remove(self, path, key): + return key + + def _on_undo_add(self, path, key): + return key + + +class MoveOperation(PatchOperation): + """Moves an object property or an array element to a new location.""" + + def apply(self, obj): + try: + if isinstance(self.operation['from'], self.pointer_cls): + from_ptr = self.operation['from'] + else: + from_ptr = self.pointer_cls(self.operation['from']) + except KeyError as ex: + raise InvalidJsonPatch( + "The operation does not contain a 'from' member") + + subobj, part = from_ptr.to_last(obj) + try: + value = subobj[part] + except (KeyError, IndexError) as ex: + raise JsonPatchConflict(str(ex)) + + # If source and target are equal, this is a no-op + if self.pointer == from_ptr: + return obj + + if isinstance(subobj, MutableMapping) and \ + self.pointer.contains(from_ptr): + raise JsonPatchConflict('Cannot move values into their own children') + + obj = RemoveOperation({ + 'op': 'remove', + 'path': self.operation['from'] + }, pointer_cls=self.pointer_cls).apply(obj) + + obj = AddOperation({ + 'op': 'add', + 'path': self.location, + 'value': value + }, 
pointer_cls=self.pointer_cls).apply(obj) + + return obj + + @property + def from_path(self): + from_ptr = self.pointer_cls(self.operation['from']) + return '/'.join(from_ptr.parts[:-1]) + + @property + def from_key(self): + from_ptr = self.pointer_cls(self.operation['from']) + try: + return int(from_ptr.parts[-1]) + except TypeError: + return from_ptr.parts[-1] + + @from_key.setter + def from_key(self, value): + from_ptr = self.pointer_cls(self.operation['from']) + from_ptr.parts[-1] = str(value) + self.operation['from'] = from_ptr.path + + def _on_undo_remove(self, path, key): + if self.from_path == path: + if self.from_key >= key: + self.from_key += 1 + else: + key -= 1 + if self.path == path: + if self.key > key: + self.key += 1 + else: + key += 1 + return key + + def _on_undo_add(self, path, key): + if self.from_path == path: + if self.from_key > key: + self.from_key -= 1 + else: + key -= 1 + if self.path == path: + if self.key > key: + self.key -= 1 + else: + key += 1 + return key + + +class TestOperation(PatchOperation): + """Test value by specified location.""" + + def apply(self, obj): + try: + subobj, part = self.pointer.to_last(obj) + if part is None: + val = subobj + else: + val = self.pointer.walk(subobj, part) + except JsonPointerException as ex: + raise JsonPatchTestFailed(str(ex)) + + try: + value = self.operation['value'] + except KeyError as ex: + raise InvalidJsonPatch( + "The operation does not contain a 'value' member") + + if val != value: + msg = '{0} ({1}) is not equal to tested value {2} ({3})' + raise JsonPatchTestFailed(msg.format(val, type(val), + value, type(value))) + + return obj + + +class CopyOperation(PatchOperation): + """ Copies an object property or an array element to a new location """ + + def apply(self, obj): + try: + from_ptr = self.pointer_cls(self.operation['from']) + except KeyError as ex: + raise InvalidJsonPatch( + "The operation does not contain a 'from' member") + + subobj, part = from_ptr.to_last(obj) + try: + value = copy.deepcopy(subobj[part]) + except (KeyError, IndexError) as ex: + raise JsonPatchConflict(str(ex)) + + obj = AddOperation({ + 'op': 'add', + 'path': self.location, + 'value': value + }, pointer_cls=self.pointer_cls).apply(obj) + + return obj + + +class JsonPatch(object): + json_dumper = staticmethod(json.dumps) + json_loader = staticmethod(_jsonloads) + + operations = MappingProxyType({ + 'remove': RemoveOperation, + 'add': AddOperation, + 'replace': ReplaceOperation, + 'move': MoveOperation, + 'test': TestOperation, + 'copy': CopyOperation, + }) + + """A JSON Patch is a list of Patch Operations. + + >>> patch = JsonPatch([ + ... {'op': 'add', 'path': '/foo', 'value': 'bar'}, + ... {'op': 'add', 'path': '/baz', 'value': [1, 2, 3]}, + ... {'op': 'remove', 'path': '/baz/1'}, + ... {'op': 'test', 'path': '/baz', 'value': [1, 3]}, + ... {'op': 'replace', 'path': '/baz/0', 'value': 42}, + ... {'op': 'remove', 'path': '/baz/1'}, + ... 
]) + >>> doc = {} + >>> result = patch.apply(doc) + >>> expected = {'foo': 'bar', 'baz': [42]} + >>> result == expected + True + + JsonPatch object is iterable, so you can easily access each patch + statement in a loop: + + >>> lpatch = list(patch) + >>> expected = {'op': 'add', 'path': '/foo', 'value': 'bar'} + >>> lpatch[0] == expected + True + >>> lpatch == patch.patch + True + + Also JsonPatch could be converted directly to :class:`bool` if it contains + any operation statements: + + >>> bool(patch) + True + >>> bool(JsonPatch([])) + False + + This behavior is very handy with :func:`make_patch` to write more readable + code: + + >>> old = {'foo': 'bar', 'numbers': [1, 3, 4, 8]} + >>> new = {'baz': 'qux', 'numbers': [1, 4, 7]} + >>> patch = make_patch(old, new) + >>> if patch: + ... # document have changed, do something useful + ... patch.apply(old) #doctest: +ELLIPSIS + {...} + """ + def __init__(self, patch, pointer_cls=JsonPointer): + self.patch = patch + self.pointer_cls = pointer_cls + + # Verify that the structure of the patch document + # is correct by retrieving each patch element. + # Much of the validation is done in the initializer + # though some is delayed until the patch is applied. + for op in self.patch: + # We're only checking for basestring in the following check + # for two reasons: + # + # - It should come from JSON, which only allows strings as + # dictionary keys, so having a string here unambiguously means + # someone used: {"op": ..., ...} instead of [{"op": ..., ...}]. + # + # - There's no possible false positive: if someone give a sequence + # of mappings, this won't raise. + if isinstance(op, basestring): + raise InvalidJsonPatch("Document is expected to be sequence of " + "operations, got a sequence of strings.") + + self._get_operation(op) + + def __str__(self): + """str(self) -> self.to_string()""" + return self.to_string() + + def __bool__(self): + return bool(self.patch) + + __nonzero__ = __bool__ + + def __iter__(self): + return iter(self.patch) + + def __hash__(self): + return hash(tuple(self._ops)) + + def __eq__(self, other): + if not isinstance(other, JsonPatch): + return False + return self._ops == other._ops + + def __ne__(self, other): + return not(self == other) + + @classmethod + def from_string(cls, patch_str, loads=None, pointer_cls=JsonPointer): + """Creates JsonPatch instance from string source. + + :param patch_str: JSON patch as raw string. + :type patch_str: str + + :param loads: A function of one argument that loads a serialized + JSON string. + :type loads: function + + :param pointer_cls: JSON pointer class to use. + :type pointer_cls: Type[JsonPointer] + + :return: :class:`JsonPatch` instance. + """ + json_loader = loads or cls.json_loader + patch = json_loader(patch_str) + return cls(patch, pointer_cls=pointer_cls) + + @classmethod + def from_diff( + cls, src, dst, optimization=True, dumps=None, + pointer_cls=JsonPointer, + ): + """Creates JsonPatch instance based on comparison of two document + objects. Json patch would be created for `src` argument against `dst` + one. + + :param src: Data source document object. + :type src: dict + + :param dst: Data source document object. + :type dst: dict + + :param dumps: A function of one argument that produces a serialized + JSON string. + :type dumps: function + + :param pointer_cls: JSON pointer class to use. + :type pointer_cls: Type[JsonPointer] + + :return: :class:`JsonPatch` instance. 
+ + >>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]} + >>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]} + >>> patch = JsonPatch.from_diff(src, dst) + >>> new = patch.apply(src) + >>> new == dst + True + """ + json_dumper = dumps or cls.json_dumper + builder = DiffBuilder(src, dst, json_dumper, pointer_cls=pointer_cls) + builder._compare_values('', None, src, dst) + ops = list(builder.execute()) + return cls(ops, pointer_cls=pointer_cls) + + def to_string(self, dumps=None): + """Returns patch set as JSON string.""" + json_dumper = dumps or self.json_dumper + return json_dumper(self.patch) + + @property + def _ops(self): + return tuple(map(self._get_operation, self.patch)) + + def apply(self, obj, in_place=False): + """Applies the patch to a given object. + + :param obj: Document object. + :type obj: dict + + :param in_place: Tweaks the way how patch would be applied - directly to + specified `obj` or to its copy. + :type in_place: bool + + :return: Modified `obj`. + """ + + if not in_place: + obj = copy.deepcopy(obj) + + for operation in self._ops: + obj = operation.apply(obj) + + return obj + + def _get_operation(self, operation): + if 'op' not in operation: + raise InvalidJsonPatch("Operation does not contain 'op' member") + + op = operation['op'] + + if not isinstance(op, basestring): + raise InvalidJsonPatch("Operation's op must be a string") + + if op not in self.operations: + raise InvalidJsonPatch("Unknown operation {0!r}".format(op)) + + cls = self.operations[op] + return cls(operation, pointer_cls=self.pointer_cls) + + +class DiffBuilder(object): + + def __init__(self, src_doc, dst_doc, dumps=json.dumps, pointer_cls=JsonPointer): + self.dumps = dumps + self.pointer_cls = pointer_cls + self.index_storage = [{}, {}] + self.index_storage2 = [[], []] + self.__root = root = [] + self.src_doc = src_doc + self.dst_doc = dst_doc + root[:] = [root, root, None] + + def store_index(self, value, index, st): + typed_key = (value, type(value)) + try: + storage = self.index_storage[st] + stored = storage.get(typed_key) + if stored is None: + storage[typed_key] = [index] + else: + storage[typed_key].append(index) + + except TypeError: + self.index_storage2[st].append((typed_key, index)) + + def take_index(self, value, st): + typed_key = (value, type(value)) + try: + stored = self.index_storage[st].get(typed_key) + if stored: + return stored.pop() + + except TypeError: + storage = self.index_storage2[st] + for i in range(len(storage)-1, -1, -1): + if storage[i][0] == typed_key: + return storage.pop(i)[1] + + def insert(self, op): + root = self.__root + last = root[0] + last[1] = root[0] = [last, root, op] + return root[0] + + def remove(self, index): + link_prev, link_next, _ = index + link_prev[1] = link_next + link_next[0] = link_prev + index[:] = [] + + def iter_from(self, start): + root = self.__root + curr = start[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def __iter__(self): + root = self.__root + curr = root[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def execute(self): + root = self.__root + curr = root[1] + while curr is not root: + if curr[1] is not root: + op_first, op_second = curr[2], curr[1][2] + if op_first.location == op_second.location and \ + type(op_first) == RemoveOperation and \ + type(op_second) == AddOperation: + yield ReplaceOperation({ + 'op': 'replace', + 'path': op_second.location, + 'value': op_second.operation['value'], + }, pointer_cls=self.pointer_cls).operation + curr = curr[1][1] + continue + + yield curr[2].operation + 
curr = curr[1] + + def _item_added(self, path, key, item): + index = self.take_index(item, _ST_REMOVE) + if index is not None: + op = index[2] + if type(op.key) == int and type(key) == int: + for v in self.iter_from(index): + op.key = v._on_undo_remove(op.path, op.key) + + self.remove(index) + if op.location != _path_join(path, key): + new_op = MoveOperation({ + 'op': 'move', + 'from': op.location, + 'path': _path_join(path, key), + }, pointer_cls=self.pointer_cls) + self.insert(new_op) + else: + new_op = AddOperation({ + 'op': 'add', + 'path': _path_join(path, key), + 'value': item, + }, pointer_cls=self.pointer_cls) + new_index = self.insert(new_op) + self.store_index(item, new_index, _ST_ADD) + + def _item_removed(self, path, key, item): + new_op = RemoveOperation({ + 'op': 'remove', + 'path': _path_join(path, key), + }, pointer_cls=self.pointer_cls) + index = self.take_index(item, _ST_ADD) + new_index = self.insert(new_op) + if index is not None: + op = index[2] + # We can't rely on the op.key type since PatchOperation casts + # the .key property to int and this path wrongly ends up being taken + # for numeric string dict keys while the intention is to only handle lists. + # So we do an explicit check on the item affected by the op instead. + added_item = op.pointer.to_last(self.dst_doc)[0] + if type(added_item) == list: + for v in self.iter_from(index): + op.key = v._on_undo_add(op.path, op.key) + + self.remove(index) + if new_op.location != op.location: + new_op = MoveOperation({ + 'op': 'move', + 'from': new_op.location, + 'path': op.location, + }, pointer_cls=self.pointer_cls) + new_index[2] = new_op + + else: + self.remove(new_index) + + else: + self.store_index(item, new_index, _ST_REMOVE) + + def _item_replaced(self, path, key, item): + self.insert(ReplaceOperation({ + 'op': 'replace', + 'path': _path_join(path, key), + 'value': item, + }, pointer_cls=self.pointer_cls)) + + def _compare_dicts(self, path, src, dst): + src_keys = set(src.keys()) + dst_keys = set(dst.keys()) + added_keys = dst_keys - src_keys + removed_keys = src_keys - dst_keys + + for key in removed_keys: + self._item_removed(path, str(key), src[key]) + + for key in added_keys: + self._item_added(path, str(key), dst[key]) + + for key in src_keys & dst_keys: + self._compare_values(path, key, src[key], dst[key]) + + def _compare_lists(self, path, src, dst): + len_src, len_dst = len(src), len(dst) + max_len = max(len_src, len_dst) + min_len = min(len_src, len_dst) + for key in range(max_len): + if key < min_len: + old, new = src[key], dst[key] + if old == new: + continue + + elif isinstance(old, MutableMapping) and \ + isinstance(new, MutableMapping): + self._compare_dicts(_path_join(path, key), old, new) + + elif isinstance(old, MutableSequence) and \ + isinstance(new, MutableSequence): + self._compare_lists(_path_join(path, key), old, new) + + else: + self._item_removed(path, key, old) + self._item_added(path, key, new) + + elif len_src > len_dst: + self._item_removed(path, len_dst, src[key]) + + else: + self._item_added(path, key, dst[key]) + + def _compare_values(self, path, key, src, dst): + if isinstance(src, MutableMapping) and \ + isinstance(dst, MutableMapping): + self._compare_dicts(_path_join(path, key), src, dst) + + elif isinstance(src, MutableSequence) and \ + isinstance(dst, MutableSequence): + self._compare_lists(_path_join(path, key), src, dst) + + # To ensure we catch changes to JSON, we can't rely on a simple + # src == dst, because it would not recognize the difference between + # 1 and True, 
among other things. Using json.dumps is the most + # fool-proof way to ensure we catch type changes that matter to JSON + # and ignore those that don't. The performance of this could be + # improved by doing more direct type checks, but we'd need to be + # careful to accept type changes that don't matter when JSONified. + elif self.dumps(src) == self.dumps(dst): + return + + else: + self._item_replaced(path, key, dst) + + +def _path_join(path, key): + if key is None: + return path + + return path + '/' + str(key).replace('~', '~0').replace('/', '~1') diff --git a/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/AUTHORS b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/AUTHORS new file mode 100644 index 00000000..8319fec4 --- /dev/null +++ b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/AUTHORS @@ -0,0 +1,3 @@ +Stefan Kögl +Alexander Shorin +Christopher J. White diff --git a/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/INSTALLER b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/LICENSE.txt b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/LICENSE.txt new file mode 100644 index 00000000..491196d7 --- /dev/null +++ b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/LICENSE.txt @@ -0,0 +1,26 @@ +Copyright (c) 2011 Stefan Kögl +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
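For orientation, the vendored `jsonpatch` module above, together with the `jsonpointer` dependency whose packaging metadata follows, covers the RFC 6902 / RFC 6901 surface being added here. A minimal sketch of the documented entry points (`apply_patch`, `make_patch`, `resolve_pointer`), using throwaway sample documents; the behaviour shown matches the doctests embedded in the modules themselves:

```python
import jsonpatch
import jsonpointer

doc = {'foo': 'bar', 'numbers': [1, 3, 4, 8]}

# apply_patch operates on a copy by default; pass in_place=True to mutate doc.
patched = jsonpatch.apply_patch(doc, [{'op': 'add', 'path': '/baz', 'value': 'qux'}])
assert patched == {'foo': 'bar', 'numbers': [1, 3, 4, 8], 'baz': 'qux'}

# make_patch diffs two documents; applying the result round-trips src to dst.
dst = {'baz': 'qux', 'numbers': [1, 4, 7]}
patch = jsonpatch.make_patch(doc, dst)
assert patch.apply(doc) == dst

# resolve_pointer walks an RFC 6901 pointer, with an optional default for
# paths that do not exist in the document.
assert jsonpointer.resolve_pointer(doc, '/numbers/0') == 1
assert jsonpointer.resolve_pointer(doc, '/missing', None) is None
```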
+ diff --git a/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/METADATA b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/METADATA new file mode 100644 index 00000000..ed342b77 --- /dev/null +++ b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/METADATA @@ -0,0 +1,51 @@ +Metadata-Version: 2.1 +Name: jsonpointer +Version: 3.0.0 +Summary: Identify specific nodes in a JSON document (RFC 6901) +Home-page: https://github.com/stefankoegl/python-json-pointer +Author: Stefan Kögl +Author-email: stefan@skoegl.net +License: Modified BSD License +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE.txt +License-File: AUTHORS + +python-json-pointer +=================== + +[![PyPI version](https://img.shields.io/pypi/v/jsonpointer.svg)](https://pypi.python.org/pypi/jsonpointer/) +[![Supported Python versions](https://img.shields.io/pypi/pyversions/jsonpointer.svg)](https://pypi.python.org/pypi/jsonpointer/) +[![Coverage Status](https://coveralls.io/repos/stefankoegl/python-json-pointer/badge.svg?branch=master)](https://coveralls.io/r/stefankoegl/python-json-pointer?branch=master) + + +Resolve JSON Pointers in Python +------------------------------- + +Library to resolve JSON Pointers according to +[RFC 6901](http://tools.ietf.org/html/rfc6901) + +See source code for examples +* Website: https://github.com/stefankoegl/python-json-pointer +* Repository: https://github.com/stefankoegl/python-json-pointer.git +* Documentation: https://python-json-pointer.readthedocs.org/ +* PyPI: https://pypi.python.org/pypi/jsonpointer +* Travis CI: https://travis-ci.org/stefankoegl/python-json-pointer +* Coveralls: https://coveralls.io/r/stefankoegl/python-json-pointer diff --git a/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/RECORD b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/RECORD new file mode 100644 index 00000000..2052e270 --- /dev/null +++ b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/RECORD @@ -0,0 +1,10 @@ +../../Scripts/jsonpointer,sha256=B24Q8Ag-NtWwA-Dv7ygtpuZf5vUw7WT3M7iBAqMqhWI,1818 +__pycache__/jsonpointer.cpython-312.pyc,, +jsonpointer-3.0.0.dist-info/AUTHORS,sha256=TVgxnQ9ZyHvvWwez_k2w8ZwtfVVFsDTGv3tXyJu-9X8,113 +jsonpointer-3.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jsonpointer-3.0.0.dist-info/LICENSE.txt,sha256=2LJPFdRyiF94ii1umFhQ8mRie4YBKhe7JCyD8xDZB-U,1413 +jsonpointer-3.0.0.dist-info/METADATA,sha256=m8E1xhbkqAdvsZMBI6vy061wLV4wqcmbS6iryFCWgLI,2251 +jsonpointer-3.0.0.dist-info/RECORD,, +jsonpointer-3.0.0.dist-info/WHEEL,sha256=k3vXr0c0OitO0k9eCWBlI2yTYnpb_n_I2SGzrrfY7HY,110 
+jsonpointer-3.0.0.dist-info/top_level.txt,sha256=BsUcar_C0nZzPGV2ackrJ9CpVU8_0W_pHYKwpdnWddM,12 +jsonpointer.py,sha256=kXEcNnnUkS8NdSmqSiFJjczJl2-dSZksILgKL0SsABU,10601 diff --git a/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/WHEEL b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/WHEEL new file mode 100644 index 00000000..09b796ed --- /dev/null +++ b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/top_level.txt b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/top_level.txt new file mode 100644 index 00000000..5d437553 --- /dev/null +++ b/venv/Lib/site-packages/jsonpointer-3.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +jsonpointer diff --git a/venv/Lib/site-packages/jsonpointer.py b/venv/Lib/site-packages/jsonpointer.py new file mode 100644 index 00000000..3e97adda --- /dev/null +++ b/venv/Lib/site-packages/jsonpointer.py @@ -0,0 +1,348 @@ +# -*- coding: utf-8 -*- +# +# python-json-pointer - An implementation of the JSON Pointer syntax +# https://github.com/stefankoegl/python-json-pointer +# +# Copyright (c) 2011 Stefan Kögl +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. The name of the author may not be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +""" Identify specific nodes in a JSON document (RFC 6901) """ + +# Will be parsed by setup.py to determine package metadata +__author__ = 'Stefan Kögl ' +__version__ = '3.0.0' +__website__ = 'https://github.com/stefankoegl/python-json-pointer' +__license__ = 'Modified BSD License' + +import copy +import re +from collections.abc import Mapping, Sequence +from itertools import tee, chain + +_nothing = object() + + +def set_pointer(doc, pointer, value, inplace=True): + """Resolves a pointer against doc and sets the value of the target within doc. + + With inplace set to true, doc is modified as long as pointer is not the + root. 
+ + >>> obj = {'foo': {'anArray': [ {'prop': 44}], 'another prop': {'baz': 'A string' }}} + + >>> set_pointer(obj, '/foo/anArray/0/prop', 55) == \ + {'foo': {'another prop': {'baz': 'A string'}, 'anArray': [{'prop': 55}]}} + True + + >>> set_pointer(obj, '/foo/yet another prop', 'added prop') == \ + {'foo': {'another prop': {'baz': 'A string'}, 'yet another prop': 'added prop', 'anArray': [{'prop': 55}]}} + True + + >>> obj = {'foo': {}} + >>> set_pointer(obj, '/foo/a%20b', 'x') == \ + {'foo': {'a%20b': 'x' }} + True + """ + + pointer = JsonPointer(pointer) + return pointer.set(doc, value, inplace) + + +def resolve_pointer(doc, pointer, default=_nothing): + """ Resolves pointer against doc and returns the referenced object + + >>> obj = {'foo': {'anArray': [ {'prop': 44}], 'another prop': {'baz': 'A string' }}, 'a%20b': 1, 'c d': 2} + + >>> resolve_pointer(obj, '') == obj + True + + >>> resolve_pointer(obj, '/foo') == obj['foo'] + True + + >>> resolve_pointer(obj, '/foo/another prop') == obj['foo']['another prop'] + True + + >>> resolve_pointer(obj, '/foo/another prop/baz') == obj['foo']['another prop']['baz'] + True + + >>> resolve_pointer(obj, '/foo/anArray/0') == obj['foo']['anArray'][0] + True + + >>> resolve_pointer(obj, '/some/path', None) == None + True + + >>> resolve_pointer(obj, '/a b', None) == None + True + + >>> resolve_pointer(obj, '/a%20b') == 1 + True + + >>> resolve_pointer(obj, '/c d') == 2 + True + + >>> resolve_pointer(obj, '/c%20d', None) == None + True + """ + + pointer = JsonPointer(pointer) + return pointer.resolve(doc, default) + + +def pairwise(iterable): + """ Transforms a list to a list of tuples of adjacent items + + s -> (s0,s1), (s1,s2), (s2, s3), ... + + >>> list(pairwise([])) + [] + + >>> list(pairwise([1])) + [] + + >>> list(pairwise([1, 2, 3, 4])) + [(1, 2), (2, 3), (3, 4)] + """ + a, b = tee(iterable) + for _ in b: + break + return zip(a, b) + + +class JsonPointerException(Exception): + pass + + +class EndOfList(object): + """Result of accessing element "-" of a list""" + + def __init__(self, list_): + self.list_ = list_ + + def __repr__(self): + return '{cls}({lst})'.format(cls=self.__class__.__name__, + lst=repr(self.list_)) + + +class JsonPointer(object): + """A JSON Pointer that can reference parts of a JSON document""" + + # Array indices must not contain: + # leading zeros, signs, spaces, decimals, etc + _RE_ARRAY_INDEX = re.compile('0|[1-9][0-9]*$') + _RE_INVALID_ESCAPE = re.compile('(~[^01]|~$)') + + def __init__(self, pointer): + + # validate escapes + invalid_escape = self._RE_INVALID_ESCAPE.search(pointer) + if invalid_escape: + raise JsonPointerException('Found invalid escape {}'.format( + invalid_escape.group())) + + parts = pointer.split('/') + if parts.pop(0) != '': + raise JsonPointerException('Location must start with /') + + parts = [unescape(part) for part in parts] + self.parts = parts + + def to_last(self, doc): + """Resolves ptr until the last step, returns (sub-doc, last-step)""" + + if not self.parts: + return doc, None + + for part in self.parts[:-1]: + doc = self.walk(doc, part) + + return doc, JsonPointer.get_part(doc, self.parts[-1]) + + def resolve(self, doc, default=_nothing): + """Resolves the pointer against doc and returns the referenced object""" + + for part in self.parts: + + try: + doc = self.walk(doc, part) + except JsonPointerException: + if default is _nothing: + raise + else: + return default + + return doc + + get = resolve + + def set(self, doc, value, inplace=True): + """Resolve the pointer against the doc and 
replace the target with value.""" + + if len(self.parts) == 0: + if inplace: + raise JsonPointerException('Cannot set root in place') + return value + + if not inplace: + doc = copy.deepcopy(doc) + + (parent, part) = self.to_last(doc) + + if isinstance(parent, Sequence) and part == '-': + parent.append(value) + else: + parent[part] = value + + return doc + + @classmethod + def get_part(cls, doc, part): + """Returns the next step in the correct type""" + + if isinstance(doc, Mapping): + return part + + elif isinstance(doc, Sequence): + + if part == '-': + return part + + if not JsonPointer._RE_ARRAY_INDEX.match(str(part)): + raise JsonPointerException("'%s' is not a valid sequence index" % part) + + return int(part) + + elif hasattr(doc, '__getitem__'): + # Allow indexing via ducktyping + # if the target has defined __getitem__ + return part + + else: + raise JsonPointerException("Document '%s' does not support indexing, " + "must be mapping/sequence or support __getitem__" % type(doc)) + + def get_parts(self): + """Returns the list of the parts. For example, JsonPointer('/a/b').get_parts() == ['a', 'b']""" + + return self.parts + + def walk(self, doc, part): + """ Walks one step in doc and returns the referenced part """ + + part = JsonPointer.get_part(doc, part) + + assert hasattr(doc, '__getitem__'), "invalid document type %s" % (type(doc),) + + if isinstance(doc, Sequence): + if part == '-': + return EndOfList(doc) + + try: + return doc[part] + + except IndexError: + raise JsonPointerException("index '%s' is out of bounds" % (part,)) + + # Else the object is a mapping or supports __getitem__(so assume custom indexing) + try: + return doc[part] + + except KeyError: + raise JsonPointerException("member '%s' not found in %s" % (part, doc)) + + def contains(self, ptr): + """ Returns True if self contains the given ptr """ + return self.parts[:len(ptr.parts)] == ptr.parts + + def __contains__(self, item): + """ Returns True if self contains the given ptr """ + return self.contains(item) + + def join(self, suffix): + """ Returns a new JsonPointer with the given suffix append to this ptr """ + if isinstance(suffix, JsonPointer): + suffix_parts = suffix.parts + elif isinstance(suffix, str): + suffix_parts = JsonPointer(suffix).parts + else: + suffix_parts = suffix + try: + return JsonPointer.from_parts(chain(self.parts, suffix_parts)) + except: # noqa E722 + raise JsonPointerException("Invalid suffix") + + def __truediv__(self, suffix): # Python 3 + return self.join(suffix) + + @property + def path(self): + """Returns the string representation of the pointer + + >>> ptr = JsonPointer('/~0/0/~1').path == '/~0/0/~1' + """ + parts = [escape(part) for part in self.parts] + return ''.join('/' + part for part in parts) + + def __eq__(self, other): + """Compares a pointer to another object + + Pointers can be compared by comparing their strings (or splitted + strings), because no two different parts can point to the same + structure in an object (eg no different number representations) + """ + + if not isinstance(other, JsonPointer): + return False + + return self.parts == other.parts + + def __hash__(self): + return hash(tuple(self.parts)) + + def __str__(self): + return self.path + + def __repr__(self): + return type(self).__name__ + "(" + repr(self.path) + ")" + + @classmethod + def from_parts(cls, parts): + """Constructs a JsonPointer from a list of (unescaped) paths + + >>> JsonPointer.from_parts(['a', '~', '/', 0]).path == '/a/~0/~1/0' + True + """ + parts = [escape(str(part)) for part in parts] 
+ ptr = cls(''.join('/' + part for part in parts)) + return ptr + + +def escape(s): + return s.replace('~', '~0').replace('/', '~1') + + +def unescape(s): + return s.replace('~1', '/').replace('~0', '~') diff --git a/venv/Lib/site-packages/langchain-0.3.25.dist-info/INSTALLER b/venv/Lib/site-packages/langchain-0.3.25.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/langchain-0.3.25.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/langchain-0.3.25.dist-info/METADATA b/venv/Lib/site-packages/langchain-0.3.25.dist-info/METADATA new file mode 100644 index 00000000..7acbffa5 --- /dev/null +++ b/venv/Lib/site-packages/langchain-0.3.25.dist-info/METADATA @@ -0,0 +1,144 @@ +Metadata-Version: 2.1 +Name: langchain +Version: 0.3.25 +Summary: Building applications with LLMs through composability +License: MIT +Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain +Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true +Project-URL: repository, https://github.com/langchain-ai/langchain +Requires-Python: >=3.9 +Requires-Dist: langchain-core<1.0.0,>=0.3.58 +Requires-Dist: langchain-text-splitters<1.0.0,>=0.3.8 +Requires-Dist: langsmith<0.4,>=0.1.17 +Requires-Dist: pydantic<3.0.0,>=2.7.4 +Requires-Dist: SQLAlchemy<3,>=1.4 +Requires-Dist: requests<3,>=2 +Requires-Dist: PyYAML>=5.3 +Requires-Dist: async-timeout<5.0.0,>=4.0.0; python_version < "3.11" +Provides-Extra: community +Requires-Dist: langchain-community; extra == "community" +Provides-Extra: anthropic +Requires-Dist: langchain-anthropic; extra == "anthropic" +Provides-Extra: openai +Requires-Dist: langchain-openai; extra == "openai" +Provides-Extra: azure-ai +Requires-Dist: langchain-azure-ai; extra == "azure-ai" +Provides-Extra: cohere +Requires-Dist: langchain-cohere; extra == "cohere" +Provides-Extra: google-vertexai +Requires-Dist: langchain-google-vertexai; extra == "google-vertexai" +Provides-Extra: google-genai +Requires-Dist: langchain-google-genai; extra == "google-genai" +Provides-Extra: fireworks +Requires-Dist: langchain-fireworks; extra == "fireworks" +Provides-Extra: ollama +Requires-Dist: langchain-ollama; extra == "ollama" +Provides-Extra: together +Requires-Dist: langchain-together; extra == "together" +Provides-Extra: mistralai +Requires-Dist: langchain-mistralai; extra == "mistralai" +Provides-Extra: huggingface +Requires-Dist: langchain-huggingface; extra == "huggingface" +Provides-Extra: groq +Requires-Dist: langchain-groq; extra == "groq" +Provides-Extra: aws +Requires-Dist: langchain-aws; extra == "aws" +Provides-Extra: deepseek +Requires-Dist: langchain-deepseek; extra == "deepseek" +Provides-Extra: xai +Requires-Dist: langchain-xai; extra == "xai" +Provides-Extra: perplexity +Requires-Dist: langchain-perplexity; extra == "perplexity" +Description-Content-Type: text/markdown + +# 🦜️🔗 LangChain + +⚡ Building applications with LLMs through composability ⚡ + +[![Release Notes](https://img.shields.io/github/release/langchain-ai/langchain)](https://github.com/langchain-ai/langchain/releases) +[![lint](https://github.com/langchain-ai/langchain/actions/workflows/lint.yml/badge.svg)](https://github.com/langchain-ai/langchain/actions/workflows/lint.yml) +[![test](https://github.com/langchain-ai/langchain/actions/workflows/test.yml/badge.svg)](https://github.com/langchain-ai/langchain/actions/workflows/test.yml) 
+[![Downloads](https://static.pepy.tech/badge/langchain/month)](https://pepy.tech/project/langchain) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) +[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) +[![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain) +[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/langchain-ai/langchain) +[![GitHub star chart](https://img.shields.io/github/stars/langchain-ai/langchain?style=social)](https://star-history.com/#langchain-ai/langchain) +[![Dependency Status](https://img.shields.io/librariesio/github/langchain-ai/langchain)](https://libraries.io/github/langchain-ai/langchain) +[![Open Issues](https://img.shields.io/github/issues-raw/langchain-ai/langchain)](https://github.com/langchain-ai/langchain/issues) + + +Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs). + +To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com). +[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications. +Fill out [this form](https://www.langchain.com/contact-sales) to speak with our sales team. + +## Quick Install + +`pip install langchain` +or +`pip install langsmith && conda install langchain -c conda-forge` + +## 🤔 What is this? + +Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. However, using these LLMs in isolation is often insufficient for creating a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge. + +This library aims to assist in the development of those types of applications. Common examples of these applications include: + +**❓ Question answering with RAG** + +- [Documentation](https://python.langchain.com/docs/use_cases/question_answering/) +- End-to-end Example: [Chat LangChain](https://chat.langchain.com) and [repo](https://github.com/langchain-ai/chat-langchain) + +**🧱 Extracting structured output** + +- [Documentation](https://python.langchain.com/docs/use_cases/extraction/) +- End-to-end Example: [SQL Llama2 Template](https://github.com/langchain-ai/langchain-extract/) + +**🤖 Chatbots** + +- [Documentation](https://python.langchain.com/docs/use_cases/chatbots) +- End-to-end Example: [Web LangChain (web researcher chatbot)](https://weblangchain.vercel.app) and [repo](https://github.com/langchain-ai/weblangchain) + +## 📖 Documentation + +Please see [here](https://python.langchain.com) for full documentation on: + +- Getting started (installation, setting up the environment, simple examples) +- How-To examples (demos, integrations, helper functions) +- Reference (full API docs) +- Resources (high-level explanation of core concepts) + +## 🚀 What can this help with? + +There are five main areas that LangChain is designed to help with. 
+These are, in increasing order of complexity: + +**📃 Models and Prompts:** + +This includes prompt management, prompt optimization, a generic interface for all LLMs, and common utilities for working with chat models and LLMs. + +**🔗 Chains:** + +Chains go beyond a single LLM call and involve sequences of calls (whether to an LLM or a different utility). LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications. + +**📚 Retrieval Augmented Generation:** + +Retrieval Augmented Generation involves specific types of chains that first interact with an external data source to fetch data for use in the generation step. Examples include summarization of long pieces of text and question/answering over specific data sources. + +**🤖 Agents:** + +Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end-to-end agents. + +**🧐 Evaluation:** + +[BETA] Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this. + +For more information on these concepts, please see our [full documentation](https://python.langchain.com). + +## 💁 Contributing + +As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation. + +For detailed information on how to contribute, see the [Contributing Guide](https://python.langchain.com/docs/contributing/). 
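As a minimal, model-free illustration of the composability this README describes, the sketch below uses only the required `langchain-core` dependency pinned above; the piped chat model in the final comment would come from one of the optional provider extras (e.g. `langchain-openai`) and is illustrative only:

```python
from langchain_core.prompts import ChatPromptTemplate

# Prompt templates are Runnables, so they can be invoked directly.
prompt = ChatPromptTemplate.from_template("Summarize {topic} in one sentence.")
value = prompt.invoke({"topic": "JSON Patch"})
print(value.to_messages())  # a single HumanMessage with the filled-in template

# Or composed into a chain with the | operator once a chat model is installed:
# chain = prompt | chat_model | StrOutputParser()   # StrOutputParser is in
# langchain_core.output_parsers; chat_model comes from an optional extra.
```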
diff --git a/venv/Lib/site-packages/langchain-0.3.25.dist-info/RECORD b/venv/Lib/site-packages/langchain-0.3.25.dist-info/RECORD new file mode 100644 index 00000000..3314cead --- /dev/null +++ b/venv/Lib/site-packages/langchain-0.3.25.dist-info/RECORD @@ -0,0 +1,2673 @@ [auto-generated pip RECORD manifest for the committed virtual environment: 2,673 installed-file entries (package paths, sha256 digests, and __pycache__/*.pyc artifacts) omitted]
+langchain/document_loaders/__pycache__/mongodb.cpython-312.pyc,, +langchain/document_loaders/__pycache__/news.cpython-312.pyc,, +langchain/document_loaders/__pycache__/notebook.cpython-312.pyc,, +langchain/document_loaders/__pycache__/notion.cpython-312.pyc,, +langchain/document_loaders/__pycache__/notiondb.cpython-312.pyc,, +langchain/document_loaders/__pycache__/nuclia.cpython-312.pyc,, +langchain/document_loaders/__pycache__/obs_directory.cpython-312.pyc,, +langchain/document_loaders/__pycache__/obs_file.cpython-312.pyc,, +langchain/document_loaders/__pycache__/obsidian.cpython-312.pyc,, +langchain/document_loaders/__pycache__/odt.cpython-312.pyc,, +langchain/document_loaders/__pycache__/onedrive.cpython-312.pyc,, +langchain/document_loaders/__pycache__/onedrive_file.cpython-312.pyc,, +langchain/document_loaders/__pycache__/onenote.cpython-312.pyc,, +langchain/document_loaders/__pycache__/open_city_data.cpython-312.pyc,, +langchain/document_loaders/__pycache__/org_mode.cpython-312.pyc,, +langchain/document_loaders/__pycache__/pdf.cpython-312.pyc,, +langchain/document_loaders/__pycache__/polars_dataframe.cpython-312.pyc,, +langchain/document_loaders/__pycache__/powerpoint.cpython-312.pyc,, +langchain/document_loaders/__pycache__/psychic.cpython-312.pyc,, +langchain/document_loaders/__pycache__/pubmed.cpython-312.pyc,, +langchain/document_loaders/__pycache__/pyspark_dataframe.cpython-312.pyc,, +langchain/document_loaders/__pycache__/python.cpython-312.pyc,, +langchain/document_loaders/__pycache__/quip.cpython-312.pyc,, +langchain/document_loaders/__pycache__/readthedocs.cpython-312.pyc,, +langchain/document_loaders/__pycache__/recursive_url_loader.cpython-312.pyc,, +langchain/document_loaders/__pycache__/reddit.cpython-312.pyc,, +langchain/document_loaders/__pycache__/roam.cpython-312.pyc,, +langchain/document_loaders/__pycache__/rocksetdb.cpython-312.pyc,, +langchain/document_loaders/__pycache__/rspace.cpython-312.pyc,, +langchain/document_loaders/__pycache__/rss.cpython-312.pyc,, +langchain/document_loaders/__pycache__/rst.cpython-312.pyc,, +langchain/document_loaders/__pycache__/rtf.cpython-312.pyc,, +langchain/document_loaders/__pycache__/s3_directory.cpython-312.pyc,, +langchain/document_loaders/__pycache__/s3_file.cpython-312.pyc,, +langchain/document_loaders/__pycache__/sharepoint.cpython-312.pyc,, +langchain/document_loaders/__pycache__/sitemap.cpython-312.pyc,, +langchain/document_loaders/__pycache__/slack_directory.cpython-312.pyc,, +langchain/document_loaders/__pycache__/snowflake_loader.cpython-312.pyc,, +langchain/document_loaders/__pycache__/spreedly.cpython-312.pyc,, +langchain/document_loaders/__pycache__/srt.cpython-312.pyc,, +langchain/document_loaders/__pycache__/stripe.cpython-312.pyc,, +langchain/document_loaders/__pycache__/telegram.cpython-312.pyc,, +langchain/document_loaders/__pycache__/tencent_cos_directory.cpython-312.pyc,, +langchain/document_loaders/__pycache__/tencent_cos_file.cpython-312.pyc,, +langchain/document_loaders/__pycache__/tensorflow_datasets.cpython-312.pyc,, +langchain/document_loaders/__pycache__/text.cpython-312.pyc,, +langchain/document_loaders/__pycache__/tomarkdown.cpython-312.pyc,, +langchain/document_loaders/__pycache__/toml.cpython-312.pyc,, +langchain/document_loaders/__pycache__/trello.cpython-312.pyc,, +langchain/document_loaders/__pycache__/tsv.cpython-312.pyc,, +langchain/document_loaders/__pycache__/twitter.cpython-312.pyc,, +langchain/document_loaders/__pycache__/unstructured.cpython-312.pyc,, 
+langchain/document_loaders/__pycache__/url.cpython-312.pyc,, +langchain/document_loaders/__pycache__/url_playwright.cpython-312.pyc,, +langchain/document_loaders/__pycache__/url_selenium.cpython-312.pyc,, +langchain/document_loaders/__pycache__/weather.cpython-312.pyc,, +langchain/document_loaders/__pycache__/web_base.cpython-312.pyc,, +langchain/document_loaders/__pycache__/whatsapp_chat.cpython-312.pyc,, +langchain/document_loaders/__pycache__/wikipedia.cpython-312.pyc,, +langchain/document_loaders/__pycache__/word_document.cpython-312.pyc,, +langchain/document_loaders/__pycache__/xml.cpython-312.pyc,, +langchain/document_loaders/__pycache__/xorbits.cpython-312.pyc,, +langchain/document_loaders/__pycache__/youtube.cpython-312.pyc,, +langchain/document_loaders/acreom.py,sha256=MJiQAejNElcXlCL49hpmGtTw32W2oTsel6D0KuQ7dDo,635 +langchain/document_loaders/airbyte.py,sha256=4tAuXsgNGChPIHxDVTV0FvdAVrc1-7eviIEIqlxRoQM,1574 +langchain/document_loaders/airbyte_json.py,sha256=304KsE1Bzsc55PswaRR8Pyasg8exowo-2K9NnE5mOOs,650 +langchain/document_loaders/airtable.py,sha256=wJA5LNeowsJ9-OwTA-P1tz1agwNl8_04EIkPWg8ph9U,641 +langchain/document_loaders/apify_dataset.py,sha256=r2pXZSnhsOMA4JI_psA6ElXUhDaUu8u2SGe2w3AxEaE,653 +langchain/document_loaders/arcgis_loader.py,sha256=1uqSkLFOeT9Ewv-JswNZkq_bdtomQIJ_tXKGrIzjioE,635 +langchain/document_loaders/arxiv.py,sha256=FBF-LCBgKRH5e8uOfdN3rztkFnhvSBlk9_2HfqpmmXI,632 +langchain/document_loaders/assemblyai.py,sha256=54IaUHNOcC3YFl3J13Z11P-7q1CTcGTWVTFAgRFa3nY,879 +langchain/document_loaders/async_html.py,sha256=bViLHkTEFYIj6lvdzcfwXdf8lxiOFu3gQSCsJoxcohE,644 +langchain/document_loaders/azlyrics.py,sha256=ecFRc0Q9LWXjl1MYnzyBeWIC-UkyyExVQQP31kunDqs,641 +langchain/document_loaders/azure_ai_data.py,sha256=_U8H9KqSpvP7lZfySe2NMU05KLh6HFsGrC9TKfYg_NE,650 +langchain/document_loaders/azure_blob_storage_container.py,sha256=bpR-_QDoKK10ds6XZKYUa2j3nrxhY4BYgscyINIc01k,698 +langchain/document_loaders/azure_blob_storage_file.py,sha256=s1HQCeCYLqQWW36BZSDXEGuzW4X_E024QhrO7UWtdQY,683 +langchain/document_loaders/baiducloud_bos_directory.py,sha256=TNIrTPq3ixKlT1_yZM4BYv4-Z9fxuffYp8877nRMzLU,758 +langchain/document_loaders/baiducloud_bos_file.py,sha256=ZB2iXx7Z-2inGFj6L7-HQ9xWCFHogMSU6_CzBRpLEGA,716 +langchain/document_loaders/base.py,sha256=UplkbYj76vF8ty3jffZU1VreVFvDb8iJfo2MSYQ8Z-c,115 +langchain/document_loaders/base_o365.py,sha256=TIAIkZJnYYKg1FsqL1lGG9-VuRTF_soj3ozV3frUKAc,661 +langchain/document_loaders/bibtex.py,sha256=Ttru2LA4Y6M-CIMVQOgnJg3nqeimYT2_yZmyVD70ywY,635 +langchain/document_loaders/bigquery.py,sha256=Qr-EkCbTpd2LqcMXUhjDsgM99djb9YKwB18QlZKvti0,641 +langchain/document_loaders/bilibili.py,sha256=-B993m2dGBih9NouNbzeilvJae_9phR-yyIa-v0zJ0s,641 +langchain/document_loaders/blackboard.py,sha256=UJbNuY5BHRPCY07QNAjhRwPfsSjqooF8L4Rp1hyHqPo,647 +langchain/document_loaders/blob_loaders/__init__.py,sha256=u0C9XTX5BcOazQ2FVxkUpvZ_yaX84hJuOVOffABKwvE,1005 +langchain/document_loaders/blob_loaders/__pycache__/__init__.cpython-312.pyc,, +langchain/document_loaders/blob_loaders/__pycache__/file_system.cpython-312.pyc,, +langchain/document_loaders/blob_loaders/__pycache__/schema.cpython-312.pyc,, +langchain/document_loaders/blob_loaders/__pycache__/youtube_audio.cpython-312.pyc,, +langchain/document_loaders/blob_loaders/file_system.py,sha256=FoR9_Q_eEH9cV8_0zG8oacY2pgv22RmDIMAxNgVpmGE,659 +langchain/document_loaders/blob_loaders/schema.py,sha256=IldzjYjpYSpFjYG4UCBi49m0vgn-GYjwDsowwMvPUJE,707 
+langchain/document_loaders/blob_loaders/youtube_audio.py,sha256=pzEvg1GnuX1SkI8PjkR4j45bmUxppzZmA_hXZQvhjL0,653 +langchain/document_loaders/blockchain.py,sha256=5QVWe7Lix41E0assUaxalyWh39AonWiTNNPChajHWKk,852 +langchain/document_loaders/brave_search.py,sha256=ApgmZ4eVEdzEeqWV4WRMt57ABBL2DuKeYC7HaaN9CxE,650 +langchain/document_loaders/browserless.py,sha256=elmryFAG7KzvwzqSDIptWX_FpqnWd9uTDX9fKBm-x7E,650 +langchain/document_loaders/chatgpt.py,sha256=1Pl0QrofVQZQ6QmHQcgId0UBxS5gUHJozwflEWYRkUE,819 +langchain/document_loaders/chromium.py,sha256=0DL70hrFIHYgLN8dDdmPgWVTnDVdh38s-gmHuDzhdI0,656 +langchain/document_loaders/college_confidential.py,sha256=QjXyvftBmNSoZwNcJPh_KU4gP26T64XqgTxh919G2cE,680 +langchain/document_loaders/concurrent.py,sha256=UJOwxeawlJSj5hRE3wK_hyKa9EylTDjqIdY0l1PTGFM,647 +langchain/document_loaders/confluence.py,sha256=mjX-iXsvn-kimHzgM9QAtOfwtkRv5fE76YyGYMAr1ug,825 +langchain/document_loaders/conllu.py,sha256=VXgi0YOFZrmSlFY-cQwhIedePfR2m3BMKmQ-Sx7FaqQ,635 +langchain/document_loaders/couchbase.py,sha256=YYDri8jCSOiNHRMZcdFYSzKnq2ILOVagsvTn3ydSzfg,644 +langchain/document_loaders/csv_loader.py,sha256=dcLO5NY98QXpxd-yClEL8puE12u0WMOqk1xTeqGySWY,754 +langchain/document_loaders/cube_semantic.py,sha256=Ia-beDUhm9Qo0Ny1mLjKN9z36xfrb_1w254SxlMx2ig,653 +langchain/document_loaders/datadog_logs.py,sha256=tXW_FsxxflsKLSWnDYu5H2v5QOMVAfvAkTSB6rL3A3s,650 +langchain/document_loaders/dataframe.py,sha256=sXor-cQBxJv4VkgiQzviymM17MNZvpGV9bCFhnJjJYg,838 +langchain/document_loaders/diffbot.py,sha256=btnwIISycdJmh8kegnWdNaD7ZHIpklH7y0o6HwVmpoE,638 +langchain/document_loaders/directory.py,sha256=oCj8Yxh8kA_AKwTCnAbSMRa5_VSbORYm7XBzBiuceV0,644 +langchain/document_loaders/discord.py,sha256=OGlnX79cQmROx874rsY92ESyTvTRdN9tgW3zxaSW-vk,650 +langchain/document_loaders/docugami.py,sha256=9ilP5sYO_gMEkg-8EQGRNSHnV9cCOJ0T2yQqmKkvDLw,641 +langchain/document_loaders/docusaurus.py,sha256=qcW6QU6t2zPcihc3LrHdlWYrTV0TMXcT-PIuPlsBKk8,647 +langchain/document_loaders/dropbox.py,sha256=cW-3S9yodFXrLyZBkT1pzwXdc_dB2xyuSXr3-t5lRwE,638 +langchain/document_loaders/duckdb_loader.py,sha256=nY02sibcCrjzS5hyGOMT7G3ayKxysRrQ4f61VDDEgDg,635 +langchain/document_loaders/email.py,sha256=zrS0qEIZf68KekCy2GoLqd53UDPu0QZLehC13-WG4UI,818 +langchain/document_loaders/epub.py,sha256=BKBn_0n58hqcs-b4Gif86X5BS1C4-e-j_i6rt8QLuwU,665 +langchain/document_loaders/etherscan.py,sha256=CDooBI_vYmT8eOynNUPWddxB1412H7HWkF6bJeqABow,644 +langchain/document_loaders/evernote.py,sha256=BvI4xwsDiOI6sGcTnu0zrumntIpuwWE8VspCWSJ703g,641 +langchain/document_loaders/excel.py,sha256=5ZFKxgw6Au1qsmn-d-u2iyhPib2ga8ZvRyWVrrL_m1Q,668 +langchain/document_loaders/facebook_chat.py,sha256=vHmbVgnqVOkDj2J-F8lQ4EgReZ87xMIokDbsnlWAlzQ,846 +langchain/document_loaders/fauna.py,sha256=zm7qyP-TSaTtWN1SYaNHRDd0y3R8n-97-HYYCHgq3us,632 +langchain/document_loaders/figma.py,sha256=yPmSaf-eyfyOn0RdSIdasqsUw5NO40T2_9RYP-3Kgj8,644 +langchain/document_loaders/gcs_directory.py,sha256=JitWVTjyZu2cCZ-7uyqGK8_lDLtMhw03QfCczbjr2lo,653 +langchain/document_loaders/gcs_file.py,sha256=KLnxYe7a53SiDLoRgGwP8uaL2FryjxH4T3gq6Ro_qj4,638 +langchain/document_loaders/generic.py,sha256=TDOjFW_VeioiMe3v9X9Ey8xFFdR0J1JsDiQEfeptgc4,654 +langchain/document_loaders/geodataframe.py,sha256=CHa_eFB3-BpbqCq3Ol6ObEMv8DEBJMuLRgB9RMQHopM,653 +langchain/document_loaders/git.py,sha256=kkz-hOrXQhpm4EUfJR3QMRQLcL3nfYRCOwRXdjPimo0,626 +langchain/document_loaders/gitbook.py,sha256=VEvJU_oqtsZLAjIjSyrFZTLVLeB1CeAkd6Jh-_ehS34,638 
+langchain/document_loaders/github.py,sha256=pjmIoY7XiX0I9SKVUzXuUt-J8k4SqzIn1mFLhSciwM0,832 +langchain/document_loaders/google_speech_to_text.py,sha256=78OBvJYbqn6GizGaOv-IEqK79IBydXn7-zSWzc8Z8a4,671 +langchain/document_loaders/googledrive.py,sha256=PSQVGcv20v70gtrfYJgEd7BLtVrJ5wrlxD15pcQqp98,650 +langchain/document_loaders/gutenberg.py,sha256=RpdZ5EZTizeS-3Rzj1dSkwLq6yUu72-KmRGBtGSI70M,644 +langchain/document_loaders/helpers.py,sha256=fyAPnuQmFbTc1EwXK2co9YOQ-F3uBTPM-NhdN97NyHk,812 +langchain/document_loaders/hn.py,sha256=JAXr-BrmhiOMgfNAiz_iE6y7in94Zq9gT0nGWnYeOhk,623 +langchain/document_loaders/html.py,sha256=dAYiSW6HjenWcJLF88qRBxmj3yKTCH-ULVi5FweHG8Y,665 +langchain/document_loaders/html_bs.py,sha256=yP6RYV19i-N-rcStCtildKdyxysYkTXweYmy4neo6_o,635 +langchain/document_loaders/hugging_face_dataset.py,sha256=u1UB5o9KDOzYlLWmwgOB7VTT3uEvcpxk42QEXDESYY8,671 +langchain/document_loaders/ifixit.py,sha256=lwc7Ep_m4caRCXMWRjDJaspWUh0Hr96kdR05tsPpknM,635 +langchain/document_loaders/image.py,sha256=7PUyNXmvsMxCfOIZb7uw4CPFmSif8_Jfg65ibmUbSyw,668 +langchain/document_loaders/image_captions.py,sha256=xA2N80S184Xy9cbM9aeOO99VMa8t6PyxujerYTx9lNo,653 +langchain/document_loaders/imsdb.py,sha256=BSbHIarbtGxD5CLMsGRY-sx8loOa36XJKjmWZQQ_QPk,632 +langchain/document_loaders/iugu.py,sha256=3qZ4dniXALcddL2bGmnZpGfKCbWT9xq0SAimnUkjzAE,629 +langchain/document_loaders/joplin.py,sha256=TPdRs06BMrfc_61zTHpqn_NSlVFsxWCCd4cU1Kmh6gg,635 +langchain/document_loaders/json_loader.py,sha256=B4VZdgEr_NLPoio9OZOEGKP-YsfeLOeL8QqwhEAGolY,629 +langchain/document_loaders/lakefs.py,sha256=VQgNwhfNHS_dwZmbrIFvm6ypQC2yEc2vlipITNnTrcY,964 +langchain/document_loaders/larksuite.py,sha256=7uOfSHK5wvpixWI6ZQjffkmqtz2K0ArO36aV8T--Wyg,653 +langchain/document_loaders/markdown.py,sha256=yuIEpKAnUMpaH2pNQ4Jn0iSX_dIaSFDyy9ZINhPXQrE,683 +langchain/document_loaders/mastodon.py,sha256=2YMMO8t6tC1ewun6lvdLnggaJw9Vdeq70t2WPa1hx-Y,656 +langchain/document_loaders/max_compute.py,sha256=uGYvNHhQqAA6A9-MmVUBtJZPN9M_W66fjMPUzThvXfI,647 +langchain/document_loaders/mediawikidump.py,sha256=3Xwvz0ZluRaKX1owjGdjd_fAK6sDNfum3PnTsSO2IBE,635 +langchain/document_loaders/merge.py,sha256=s7unaxp46fxeSu-qK3F6hLUR3CyCUrE3BCH-Hj6uB3w,647 +langchain/document_loaders/mhtml.py,sha256=Czuo74rU8JYzTFpe5Hu25qFiN6-AhUiyl0Ev7KHHBkE,632 +langchain/document_loaders/modern_treasury.py,sha256=BXF7usqo7oJp2mkr9uHsPVyTqVJmssnJbFktks3ABEA,659 +langchain/document_loaders/mongodb.py,sha256=C6ml4W2SvqzGGP-pEpkbsbpVVuhysPCwl244OINBlKU,638 +langchain/document_loaders/news.py,sha256=i4xjUJQ6tfbdwPrChEFQ_Z0Nn4mHWe8MML-lWk8P_f0,638 +langchain/document_loaders/notebook.py,sha256=6aPbgeyzGvNy9R-fw6F4zny3Nc7FWLiBbb_4O2kBbPo,964 +langchain/document_loaders/notion.py,sha256=KHlUpZCrtM0wVsoJydvple3oQL1Wcwr7qEnqtZhYU5M,662 +langchain/document_loaders/notiondb.py,sha256=AkTysUciCzk8m84UBN06aKqsT28ZNShUjQHdGaU8UZc,641 +langchain/document_loaders/nuclia.py,sha256=4EgdLws1XO9Dba8AN6fogW3XQrMx9qa8HPTDz9l1Bsg,649 +langchain/document_loaders/obs_directory.py,sha256=OJA4XR8vUgN2Gn0ctDv0S-dhsVdOvLp3YxY-Cim3X1I,653 +langchain/document_loaders/obs_file.py,sha256=WS0Dp0Q2Ardm522ymQbHQjAd6WqiXVWScBNb8jeFzTA,638 +langchain/document_loaders/obsidian.py,sha256=SZmp8nJv25hVGW8zKMXDMiYYvSrXUyPafXRHhi1lnUY,641 +langchain/document_loaders/odt.py,sha256=gvlcKwB7D5sUIXwMYhsPzPAIgfZCXEf2Pvjh6DSo4Nk,662 +langchain/document_loaders/onedrive.py,sha256=DFUG6Fe2Ib5U54mKBn0aSb7cLlQEKoEonEubfloyiDo,641 +langchain/document_loaders/onedrive_file.py,sha256=qWRP8tPoaz_W8IgAEABiReYCTofxJ4I7prGxpp2k4tY,653 
+langchain/document_loaders/onenote.py,sha256=Htw5L6NUe-_KfjQ3MgKyLgvvfwaU5Q8rl-0NA4uSLEQ,654 +langchain/document_loaders/open_city_data.py,sha256=TFdCvs05zXVw0i_tynaxETpOR-EpMUkJxuwCrThE0f8,653 +langchain/document_loaders/org_mode.py,sha256=6dQCFZdIlSXjtcdMYEz6LLo2dXDfZyBXKEoeBPtB-BA,680 +langchain/document_loaders/parsers/__init__.py,sha256=UQxVk3JvIFdKK8FIx6zbwVJ-StGH89Q8wE0Gr5ufapY,2142 +langchain/document_loaders/parsers/__pycache__/__init__.cpython-312.pyc,, +langchain/document_loaders/parsers/__pycache__/audio.cpython-312.pyc,, +langchain/document_loaders/parsers/__pycache__/docai.cpython-312.pyc,, +langchain/document_loaders/parsers/__pycache__/generic.cpython-312.pyc,, +langchain/document_loaders/parsers/__pycache__/grobid.cpython-312.pyc,, +langchain/document_loaders/parsers/__pycache__/msword.cpython-312.pyc,, +langchain/document_loaders/parsers/__pycache__/pdf.cpython-312.pyc,, +langchain/document_loaders/parsers/__pycache__/registry.cpython-312.pyc,, +langchain/document_loaders/parsers/__pycache__/txt.cpython-312.pyc,, +langchain/document_loaders/parsers/audio.py,sha256=uLXXAKEPEKHxz8fpqzGZue3tCkpy8RLedPI917WiB6k,985 +langchain/document_loaders/parsers/docai.py,sha256=EjPafMyWMG_AN5vw7A26eD3THSh8A4lwppo7l7S_oeo,821 +langchain/document_loaders/parsers/generic.py,sha256=Sc7hKMFU_DynQPf9L0JF9ib0bpIlPRo5r7BDAd19tuo,694 +langchain/document_loaders/parsers/grobid.py,sha256=sZNpWZ8sEHAt27ObiHCjxTslXj48mN-GWAANs6bz9ik,848 +langchain/document_loaders/parsers/html/__init__.py,sha256=vYMXQOtOLlLbXNcKP3_rJfMUBvk9h81rrYhX090YV84,678 +langchain/document_loaders/parsers/html/__pycache__/__init__.cpython-312.pyc,, +langchain/document_loaders/parsers/html/__pycache__/bs4.cpython-312.pyc,, +langchain/document_loaders/parsers/html/bs4.py,sha256=vYMXQOtOLlLbXNcKP3_rJfMUBvk9h81rrYhX090YV84,678 +langchain/document_loaders/parsers/language/__init__.py,sha256=I5SVEOfureuy4Wkk2Av-8zTJSJHHYamfC01-yLIxlPg,747 +langchain/document_loaders/parsers/language/__pycache__/__init__.cpython-312.pyc,, +langchain/document_loaders/parsers/language/__pycache__/cobol.cpython-312.pyc,, +langchain/document_loaders/parsers/language/__pycache__/code_segmenter.cpython-312.pyc,, +langchain/document_loaders/parsers/language/__pycache__/javascript.cpython-312.pyc,, +langchain/document_loaders/parsers/language/__pycache__/language_parser.cpython-312.pyc,, +langchain/document_loaders/parsers/language/__pycache__/python.cpython-312.pyc,, +langchain/document_loaders/parsers/language/cobol.py,sha256=6B5UX998ufOM9eSfmCGztWMpMdZL-td3_iHSkoygQTA,710 +langchain/document_loaders/parsers/language/code_segmenter.py,sha256=1AUipt6ipTb9IX2hnce5zqbtME6ZeQ_cneZ9T-Kum5o,742 +langchain/document_loaders/parsers/language/javascript.py,sha256=GF16AjxWVU3lk6_EesXrq0rrmOUVzVITAdbzMEvS33E,752 +langchain/document_loaders/parsers/language/language_parser.py,sha256=I5SVEOfureuy4Wkk2Av-8zTJSJHHYamfC01-yLIxlPg,747 +langchain/document_loaders/parsers/language/python.py,sha256=B5oapfGwlb5qYkVlt0YNizNtEMWYspBCZ0nCmETwvTA,715 +langchain/document_loaders/parsers/msword.py,sha256=USQSlbAOnwZ9mfLDSdsmAoRC5mt5PaeKJ9aZ1Y3uMTw,671 +langchain/document_loaders/parsers/pdf.py,sha256=kA7lAqhs6V8VH6KYDXKszLW0WOuC_DfHrH0DTR_xK7M,1662 +langchain/document_loaders/parsers/registry.py,sha256=66GZkyh-5S3J5rCV-Y1OyryPoSr8Uyf5FAaJeqwXUSs,669 +langchain/document_loaders/parsers/txt.py,sha256=jdTKEX-OL9oEi5oQkJSpKTaf87GWdwMGpdXoH70iP5A,653 +langchain/document_loaders/pdf.py,sha256=9U1bH4qmaCqigNLGkeEeiSV_Ua_9xu1Iudj_CYAJvGg,2181 
+langchain/document_loaders/polars_dataframe.py,sha256=XF91YuWRYU9JBiMdRivPAJQaJYjaw3WZs-HyxNmvLH0,662 +langchain/document_loaders/powerpoint.py,sha256=6vvCmw0xwWevoBZ69dYJ_Wi767Cg6e_WP4RPMUqOuT4,689 +langchain/document_loaders/psychic.py,sha256=bcs9dUIeJQn-9Q68TJcI7LRhvHVq805vZUxt0fs38Ac,638 +langchain/document_loaders/pubmed.py,sha256=MhRrnNIvblZX75pRU_R8nafiSfkrXOW-0kR7yJwS4yw,635 +langchain/document_loaders/pyspark_dataframe.py,sha256=mGc3Ixi1hkINcvNNefpw_zTJpQIZLPsUzvE_tocl4-A,718 +langchain/document_loaders/python.py,sha256=YtUmW9vzKmX5s0HkJzZal57tdRJXgDr8gX6t32YTu6M,643 +langchain/document_loaders/quip.py,sha256=jImYfbfQ3DnOtlBLkrFh4GEF9kacliQ209BaSyDVur8,639 +langchain/document_loaders/readthedocs.py,sha256=aCWqx8Qy3xlCMYBIxsLXuBaYim3t2-dE2VyG4lZ3hoc,650 +langchain/document_loaders/recursive_url_loader.py,sha256=hg7z0Rre51Hj_SI3vxL8vd1tJksCCbLsi8EofMV7nWc,653 +langchain/document_loaders/reddit.py,sha256=LyoVr0xjTAF5SlFzBqUIxJrSr9P85MeYOHzHb99CR3w,650 +langchain/document_loaders/roam.py,sha256=r168_ppWrsKxfh-aRT-O17WKsBNa-P-IlTdgfJUX4Bc,629 +langchain/document_loaders/rocksetdb.py,sha256=89xskRnFin1B0wKtDvu_cMS_0KM0gHau8gaDSX8QHbs,638 +langchain/document_loaders/rspace.py,sha256=dN-33eb69j4YZwtdbYVFUYq4p9hTDE4a0aC2MpBil6s,649 +langchain/document_loaders/rss.py,sha256=uzbQl3Xn95ILkd2iw_QAnttGbFeXyUZP4M2lynvTyQI,638 +langchain/document_loaders/rst.py,sha256=l785O6JnnaftbjN7dax4-wytAPjFyK5g2BpfFhy8T8s,662 +langchain/document_loaders/rtf.py,sha256=VenhF2Asu-2gGXvjUykucON3pkQlV2fUZn1BAW1OwpA,662 +langchain/document_loaders/s3_directory.py,sha256=iRGYKZPbjAzmKIQ1qEMiIYt7fuXNg1gazDulJlBuKRw,650 +langchain/document_loaders/s3_file.py,sha256=SeIvDpsBnqfPgwoR3UcIUu6J3h-KyAWFtqouzsbU2l4,635 +langchain/document_loaders/sharepoint.py,sha256=RnoaOnkHyCJTkNfQsFI7Z0IqWiIk_F_xRZDxfEky7uY,647 +langchain/document_loaders/sitemap.py,sha256=8O-rIEuZg9Bg67Pcf0Ab4NhBWHC8tD30iYCzkaih5g4,638 +langchain/document_loaders/slack_directory.py,sha256=bnP1Ei4hZhZIeklwUXHRm2_eEAz3TICkeBdW3MK1-Ao,659 +langchain/document_loaders/snowflake_loader.py,sha256=jH95z1MCFLuS9ZsMiPig6ZatIxyoR3CbwnEqAGjHz88,644 +langchain/document_loaders/spreedly.py,sha256=tW2s_Eh5QPb3Z_OpwW_KWHbkzTO-lvDyeonMJQmkQXo,641 +langchain/document_loaders/srt.py,sha256=7bJN0U9_lC_cc7MoRrV2e18-ym7f5_AKf9908BRT17g,626 +langchain/document_loaders/stripe.py,sha256=ZoT76YB1IfuZaNgTNNx2k5ua_b04liRCCzBMg2tj8M4,635 +langchain/document_loaders/telegram.py,sha256=qQncqC9p28zXzU2V9aK4BLDbdv5IzPXzmHqLaRX4VOg,1122 +langchain/document_loaders/tencent_cos_directory.py,sha256=uui0vE4K2wDKGFvPFP3meMcS32UD_NVnDGhBxand71k,680 +langchain/document_loaders/tencent_cos_file.py,sha256=yeprmyjzEenvgK8anFTbUUPSHVsnVTA0X6uXKsGK9MA,659 +langchain/document_loaders/tensorflow_datasets.py,sha256=425XEsoGlgPh-dAt1hCbuXGE5d_dv6ZSFLECv8fFUhM,668 +langchain/document_loaders/text.py,sha256=1uOaX81yfsq1RXslMfXvZ3ATfQadAxHOU6JiujbNPQM,629 +langchain/document_loaders/tomarkdown.py,sha256=4Z_NoIUxdmfiw82No-oegZ0MWUg7UznI6wh6tro2LRo,647 +langchain/document_loaders/toml.py,sha256=b9fpodDg5-aLzQye1gta_NLdwQWq1Eed6m0vP6F8Mfw,629 +langchain/document_loaders/trello.py,sha256=EEnM7yrm502Njy7Wy5SQm7KCySZahVTxyau4DeuD8iE,635 +langchain/document_loaders/tsv.py,sha256=TOmceUxU5-Dn5cHiMfab9R955ZzPtb8xNBxlaXzDL5Q,662 +langchain/document_loaders/twitter.py,sha256=OZajzVtHncbgFT9EdUGAwTgVa7M6yHQa9TKnk3U9ePg,653 +langchain/document_loaders/unstructured.py,sha256=sYIi-oh8d5dV7YTkQKSL2xDXyIrkQGDc3bi0Hux6KBU,1855 +langchain/document_loaders/url.py,sha256=8yJpAlwgDO_yOzOQMRfLoFaKgLV2POli8AoIqfO7afo,662 
+langchain/document_loaders/url_playwright.py,sha256=nchMdDn5UxeOZ7Wl6dYz2fPlUaxEioyG2SgVzfuDbk0,1033 +langchain/document_loaders/url_selenium.py,sha256=-XkAmejxtYpG6Ococ6Hb3ewt5VLX8lCaBptCbFosqio,650 +langchain/document_loaders/weather.py,sha256=gfuBeeYNndWgOotRpev6OZOu2NR6DwY9ys1PiedHkRw,650 +langchain/document_loaders/web_base.py,sha256=eqEIPEemj9xaY9-ujCZEjyr6S8EPb_Z5YAyzIGlpHcQ,638 +langchain/document_loaders/whatsapp_chat.py,sha256=TbrSX0v72H-p_zdzbedN73HO5rU09f82Dd2aRxoZcz4,846 +langchain/document_loaders/wikipedia.py,sha256=4ZrR2-1BuHxv1jg8GmQVZuz5tuCLl50Wt1kAfEn6oow,644 +langchain/document_loaders/word_document.py,sha256=BiTJD_qcKJK36IDU2Anq9-H8pWZkHNNWBZ9ItGHJbqM,821 +langchain/document_loaders/xml.py,sha256=FETIA68PL6XgcN1vxtfEHLglfKFThRj4wwkUqTmhCCA,662 +langchain/document_loaders/xorbits.py,sha256=ovW_5H5_Hpo2SsuDYw_4jSDKmRW_AEPQgnO9LX0AIjI,638 +langchain/document_loaders/youtube.py,sha256=5rHe0vJm_Ge4AIZJ-g9hrjdYmoDJpCIvcWejwnhPkkA,905 +langchain/document_transformers/__init__.py,sha256=R0yTuMtZosmyBKEU01OPhx9AG10LALdNY2SSoOyfFDw,2574 +langchain/document_transformers/__pycache__/__init__.cpython-312.pyc,, +langchain/document_transformers/__pycache__/beautiful_soup_transformer.cpython-312.pyc,, +langchain/document_transformers/__pycache__/doctran_text_extract.cpython-312.pyc,, +langchain/document_transformers/__pycache__/doctran_text_qa.cpython-312.pyc,, +langchain/document_transformers/__pycache__/doctran_text_translate.cpython-312.pyc,, +langchain/document_transformers/__pycache__/embeddings_redundant_filter.cpython-312.pyc,, +langchain/document_transformers/__pycache__/google_translate.cpython-312.pyc,, +langchain/document_transformers/__pycache__/html2text.cpython-312.pyc,, +langchain/document_transformers/__pycache__/long_context_reorder.cpython-312.pyc,, +langchain/document_transformers/__pycache__/nuclia_text_transform.cpython-312.pyc,, +langchain/document_transformers/__pycache__/openai_functions.cpython-312.pyc,, +langchain/document_transformers/beautiful_soup_transformer.py,sha256=hpyrvNEVyDeFHTgpSeLE20v9fN3K9T5f8a4Q-92pnjU,687 +langchain/document_transformers/doctran_text_extract.py,sha256=BZyJjEzSc2dkKenskpB6iWMiOucTYllpiKCoErGTd4g,687 +langchain/document_transformers/doctran_text_qa.py,sha256=5Cwgks1izZx_pYRBWxdAYaSDfOqRvUGCpiSW_TE_naI,675 +langchain/document_transformers/doctran_text_translate.py,sha256=AB8cVO-Eg6X6hmepJ162tCPPjxAOW6HeJJQ8Ew17O4Q,678 +langchain/document_transformers/embeddings_redundant_filter.py,sha256=hlgQi7I0WO9Jj7ZhrKt0TkisLSB07zU1QawsMxc5I7c,1667 +langchain/document_transformers/google_translate.py,sha256=BAZhU7mJY20zTXP_CQednt6mqPw7R7GBvrp2OUoJ25w,693 +langchain/document_transformers/html2text.py,sha256=RWyRbp2QV5XXfklOg4-ELRCjziozrmyVdahJ3baX-3c,675 +langchain/document_transformers/long_context_reorder.py,sha256=uRPWoCzkRvS-rp6L3qClFs8fqgxPE2mb82kztThKDRs,663 +langchain/document_transformers/nuclia_text_transform.py,sha256=IIg8LuX116M_PrnqQE7RwN_gy45BBs1UTONtujFEMeU,678 +langchain/document_transformers/openai_functions.py,sha256=UfhBLrya4MRRNWofT87qRKcZI27J8UjZTX9gn005jEA,929 +langchain/document_transformers/xsl/html_chunks_with_headers.xslt,sha256=ti9sT_zWqZQf0aaeX5zT6tfHT1CuUpAVCvzoZWutE0o,6033 +langchain/embeddings/__init__.py,sha256=TKSqom8OGm1s_0AO_i3jX3ZbGecVUlE3MOJ6-NYvfmw,8404 +langchain/embeddings/__pycache__/__init__.cpython-312.pyc,, +langchain/embeddings/__pycache__/aleph_alpha.cpython-312.pyc,, +langchain/embeddings/__pycache__/awa.cpython-312.pyc,, +langchain/embeddings/__pycache__/azure_openai.cpython-312.pyc,, 
+langchain/embeddings/__pycache__/baidu_qianfan_endpoint.cpython-312.pyc,, +langchain/embeddings/__pycache__/base.cpython-312.pyc,, +langchain/embeddings/__pycache__/bedrock.cpython-312.pyc,, +langchain/embeddings/__pycache__/bookend.cpython-312.pyc,, +langchain/embeddings/__pycache__/cache.cpython-312.pyc,, +langchain/embeddings/__pycache__/clarifai.cpython-312.pyc,, +langchain/embeddings/__pycache__/cloudflare_workersai.cpython-312.pyc,, +langchain/embeddings/__pycache__/cohere.cpython-312.pyc,, +langchain/embeddings/__pycache__/dashscope.cpython-312.pyc,, +langchain/embeddings/__pycache__/databricks.cpython-312.pyc,, +langchain/embeddings/__pycache__/deepinfra.cpython-312.pyc,, +langchain/embeddings/__pycache__/edenai.cpython-312.pyc,, +langchain/embeddings/__pycache__/elasticsearch.cpython-312.pyc,, +langchain/embeddings/__pycache__/embaas.cpython-312.pyc,, +langchain/embeddings/__pycache__/ernie.cpython-312.pyc,, +langchain/embeddings/__pycache__/fake.cpython-312.pyc,, +langchain/embeddings/__pycache__/fastembed.cpython-312.pyc,, +langchain/embeddings/__pycache__/google_palm.cpython-312.pyc,, +langchain/embeddings/__pycache__/gpt4all.cpython-312.pyc,, +langchain/embeddings/__pycache__/gradient_ai.cpython-312.pyc,, +langchain/embeddings/__pycache__/huggingface.cpython-312.pyc,, +langchain/embeddings/__pycache__/huggingface_hub.cpython-312.pyc,, +langchain/embeddings/__pycache__/infinity.cpython-312.pyc,, +langchain/embeddings/__pycache__/javelin_ai_gateway.cpython-312.pyc,, +langchain/embeddings/__pycache__/jina.cpython-312.pyc,, +langchain/embeddings/__pycache__/johnsnowlabs.cpython-312.pyc,, +langchain/embeddings/__pycache__/llamacpp.cpython-312.pyc,, +langchain/embeddings/__pycache__/llm_rails.cpython-312.pyc,, +langchain/embeddings/__pycache__/localai.cpython-312.pyc,, +langchain/embeddings/__pycache__/minimax.cpython-312.pyc,, +langchain/embeddings/__pycache__/mlflow.cpython-312.pyc,, +langchain/embeddings/__pycache__/mlflow_gateway.cpython-312.pyc,, +langchain/embeddings/__pycache__/modelscope_hub.cpython-312.pyc,, +langchain/embeddings/__pycache__/mosaicml.cpython-312.pyc,, +langchain/embeddings/__pycache__/nlpcloud.cpython-312.pyc,, +langchain/embeddings/__pycache__/octoai_embeddings.cpython-312.pyc,, +langchain/embeddings/__pycache__/ollama.cpython-312.pyc,, +langchain/embeddings/__pycache__/openai.cpython-312.pyc,, +langchain/embeddings/__pycache__/sagemaker_endpoint.cpython-312.pyc,, +langchain/embeddings/__pycache__/self_hosted.cpython-312.pyc,, +langchain/embeddings/__pycache__/self_hosted_hugging_face.cpython-312.pyc,, +langchain/embeddings/__pycache__/sentence_transformer.cpython-312.pyc,, +langchain/embeddings/__pycache__/spacy_embeddings.cpython-312.pyc,, +langchain/embeddings/__pycache__/tensorflow_hub.cpython-312.pyc,, +langchain/embeddings/__pycache__/vertexai.cpython-312.pyc,, +langchain/embeddings/__pycache__/voyageai.cpython-312.pyc,, +langchain/embeddings/__pycache__/xinference.cpython-312.pyc,, +langchain/embeddings/aleph_alpha.py,sha256=_yTqGDHsHbh83Zp0MjJ497ilIxkEJm5ccmxOWbJJay4,890 +langchain/embeddings/awa.py,sha256=1cnMiwKKU3ml3Zz5s5WIpcZSlYNVFFGCaeJilrxN8HE,626 +langchain/embeddings/azure_openai.py,sha256=tmICp-NOrxoVFENBy4F_0-c0l3znf8bOtBBo-UZhajg,650 +langchain/embeddings/baidu_qianfan_endpoint.py,sha256=w7BeE53d7o9Y8Xf0cZntmmziih7oBJcmF-jBW70KJlc,662 +langchain/embeddings/base.py,sha256=g1TgfcjKScPiWXWVUuoYS7U5uAudp5eGdS38cUVL9e0,7492 +langchain/embeddings/bedrock.py,sha256=tCBm3vcN0B21Ga6KvNwhgJpgjobC2VEcmPApUmwXO4E,638 
+langchain/embeddings/bookend.py,sha256=qWaQXZw9Gq11kEdfIO71h1H0NaXqVKm45TiStxd2xaM,638 +langchain/embeddings/cache.py,sha256=m_ftj2XV7WzeYjjT4mrubIJ95Nd3KR5dN53BDhXAC6w,10208 +langchain/embeddings/clarifai.py,sha256=rKRbBFFCNFBkIFhH6vwvZleEvMDPOXfERXmcBzISQLg,641 +langchain/embeddings/cloudflare_workersai.py,sha256=VFbhKreyN4ACAULhzL17N1GpSUADPiNNdOyLf57J4d4,756 +langchain/embeddings/cohere.py,sha256=d9lGFQsv52mwqZ_hgyL2B-SgjZtx1xCVJwAMXCN9LU4,635 +langchain/embeddings/dashscope.py,sha256=U5SZeSfYaCeouPgQjJCZJOAwRtwStA2CZEXbqlWTPVI,644 +langchain/embeddings/databricks.py,sha256=CvahiTy5hfyXJoDrHxCwJTj9K9lGNzyc_QqKiyd7Su4,647 +langchain/embeddings/deepinfra.py,sha256=M4WSMhhzFGFPERmRcre_BlDukY01A5dOCqJvtXcMcvk,644 +langchain/embeddings/edenai.py,sha256=TPLfYUEFIeW4PTgIAUM5fnjr-FoUQDROJa7bzYaZV94,635 +langchain/embeddings/elasticsearch.py,sha256=K-8eJoBWMFkWvysBrY-uxztzJ6AtgC4fgG5_SyvsuGo,656 +langchain/embeddings/embaas.py,sha256=-xKih3yK9wl26Gy72TJSRb8c34kvEfFAQeFElRMyiIA,635 +langchain/embeddings/ernie.py,sha256=ancOw9i4hSlyq6DvDWre48UNhuatSLX0qLhe_XL_ccg,632 +langchain/embeddings/fake.py,sha256=_4RKczjWuzJzauJnW45-B0qGonQ9ExGipXAh1gurbUA,791 +langchain/embeddings/fastembed.py,sha256=BoYjzDZclNcjGJjPRmM1XwMkWjVMHkP_hm4vn9Oir3o,644 +langchain/embeddings/google_palm.py,sha256=qhkXsKRoq51vj4R91vl2HHcdY7vsnLnwubsNclZHS98,647 +langchain/embeddings/gpt4all.py,sha256=K8uJ5b6Mp7sp_OlVJlGKaKpD98YnMnS0bbmpaPCY5zs,638 +langchain/embeddings/gradient_ai.py,sha256=VNx3VxBAAF-NMR7XBqbRk_E-b-O8_iNr3exBTQGknqs,641 +langchain/embeddings/huggingface.py,sha256=bOn4tsfPh8qpbcPm9YjYW38NFMaPlMDiJ-S-ETmBE3A,1112 +langchain/embeddings/huggingface_hub.py,sha256=_Wrl6CcqvkHdrUqKCekVcdSxETvfG5EPpY9GP3GJHzg,659 +langchain/embeddings/infinity.py,sha256=77Z7lw_blqFd762tJTBO9jZ3Y0oqAqt_QaKvcy7lywA,895 +langchain/embeddings/javelin_ai_gateway.py,sha256=fd4S085ihpmNIFm-JtGOETwii0Ny2AJH4Q7brJm2rjA,665 +langchain/embeddings/jina.py,sha256=LVyrtdHcGTeyxO4GRmuhGsP9WWxMZr4Op0fajb4HbVo,629 +langchain/embeddings/johnsnowlabs.py,sha256=xM7NXUDScVinwiA_xvnebCQEm00fcJJhike1qEkn8qY,653 +langchain/embeddings/llamacpp.py,sha256=Izw87kqiofsMKRrSGU0I4IBJvDcKvGXeGt_dbTBh7Nk,641 +langchain/embeddings/llm_rails.py,sha256=L-2-dTLrg35_lx1jUeDbzCdo-r5HiipXx34G4KrdFkU,641 +langchain/embeddings/localai.py,sha256=v3TZpC5U9R5rvEbRRRlcEQgCcRP5AEiZMk1vgkSDk2w,638 +langchain/embeddings/minimax.py,sha256=CvF1ooy7qLtxcAmIGt5dtZoqoSC2Yqkc6TrA8rjpjbI,638 +langchain/embeddings/mlflow.py,sha256=Xs7-lXswxqO1mXRTjwwsCs7oAbrSx1wd_LHTlR6qJtE,635 +langchain/embeddings/mlflow_gateway.py,sha256=X1_SDRU5zJAn98_lnoNkkfY5dDbzILD72ms5MuKf1pU,662 +langchain/embeddings/modelscope_hub.py,sha256=6pTO1egt6BSGCGc4Gs9DO_oLUvcDhDO3WzE3h4fUe0E,647 +langchain/embeddings/mosaicml.py,sha256=XQODXcvN0lvM6qq4N3l7DPAmsMhbw5rfXeNgBTZ-NQ0,671 +langchain/embeddings/nlpcloud.py,sha256=jNKAs21rfuiH2HylZu5-N2brd-29j86lOaWO71HDsxM,641 +langchain/embeddings/octoai_embeddings.py,sha256=1weQtVW7WH2hX0mrs7P2z-6WgBEYHMGeHm2DLNWL9Sc,635 +langchain/embeddings/ollama.py,sha256=noQXKgwud-SuT1PRBY6zLvnKo1hy_dqNVT_-QG3pK3o,635 +langchain/embeddings/openai.py,sha256=4J7S5dzrNuzXQ4f7J_mIRER9NxjHuxBR2iry-wG_BL4,635 +langchain/embeddings/sagemaker_endpoint.py,sha256=NIDKufcL2OLxw8U8HJahpu5r6lx_s6Qgg0ugzE-anpU,900 +langchain/embeddings/self_hosted.py,sha256=AQ7gEnjUmI7kn-_dwrM0FqOlbGwWaaUSmumXUF7dAg0,647 +langchain/embeddings/self_hosted_hugging_face.py,sha256=mdK7Ae13etSmiNCZ0VYvHvTL58cRjIMHI2j355NssNE,881 
+langchain/embeddings/sentence_transformer.py,sha256=fT7so5eHwf_0PFFfxKhnazYQBU9FFtlMqWyB0ehWf9k,667 +langchain/embeddings/spacy_embeddings.py,sha256=EOpAcMnK5_FfTbP-1SU1_lhjZzUe68vBDpRLaO82w6w,632 +langchain/embeddings/tensorflow_hub.py,sha256=nzcuZgetbFbwm0Pkp_0F_Ss2Dr8MCTJIXctnKyWgDYU,656 +langchain/embeddings/vertexai.py,sha256=ukb9baNzejf70bkrDXu-9PDYF-30wI1Tj8AI0w0mh0s,641 +langchain/embeddings/voyageai.py,sha256=p_GXLqyJXXDdP_aJBSGBS0aJFA4ccUaJhp0JVoYc3Eo,635 +langchain/embeddings/xinference.py,sha256=nehpiy79abQ78Bm-Y9DA8FDvpACXROSIats0S6KVT0M,647 +langchain/env.py,sha256=fucAbfcmwiN1CjKSg5l2lzquRVoE7wqfuMMlaByuyEk,476 +langchain/evaluation/__init__.py,sha256=1iX4-CeK-YkKtQh8npkJ5fhtbRPM668pPCz6SZ6WdJs,5803 +langchain/evaluation/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/__pycache__/loading.cpython-312.pyc,, +langchain/evaluation/__pycache__/schema.cpython-312.pyc,, +langchain/evaluation/agents/__init__.py,sha256=Z3RFhkBgSauIRNp5dEUgkzY1Tr3kSeUwuotd0nrQViQ,166 +langchain/evaluation/agents/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/agents/__pycache__/trajectory_eval_chain.cpython-312.pyc,, +langchain/evaluation/agents/__pycache__/trajectory_eval_prompt.cpython-312.pyc,, +langchain/evaluation/agents/trajectory_eval_chain.py,sha256=CB9kCkgGvj4QfPaQSN7Sx42NuGOu33W2ZG1S1eb89OQ,13925 +langchain/evaluation/agents/trajectory_eval_prompt.py,sha256=NY-kAJqoXfPP9zI9WsvEHEDp00ImG1Po9vBZm3U684M,5939 +langchain/evaluation/comparison/__init__.py,sha256=1nxR3mXQ8eimpDjfarJgDRe30YjL2yeOYkFaNj09fRY,1401 +langchain/evaluation/comparison/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/comparison/__pycache__/eval_chain.cpython-312.pyc,, +langchain/evaluation/comparison/__pycache__/prompt.cpython-312.pyc,, +langchain/evaluation/comparison/eval_chain.py,sha256=uJSzNI8x5X_dacHkgPO_RzIjtO7b6v05Z25U_66Ej5U,15862 +langchain/evaluation/comparison/prompt.py,sha256=_mvS1BsSm4aBHUQjUWtNAYGwtLj9sYOIvPi4jZRWs6M,2359 +langchain/evaluation/criteria/__init__.py,sha256=FE5qrrz5JwWXJWXCzdyNRevEPfmmfBfjfHx-hR3pCWg,1647 +langchain/evaluation/criteria/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/criteria/__pycache__/eval_chain.cpython-312.pyc,, +langchain/evaluation/criteria/__pycache__/prompt.cpython-312.pyc,, +langchain/evaluation/criteria/eval_chain.py,sha256=h-BsDtdjU8qXcZf3NSLCGHiGXCzLsXl77yvLGNLya6k,21242 +langchain/evaluation/criteria/prompt.py,sha256=6OgXmdvlYVzRMeAxa1fYGIxqeNAz1NkFCZ6ezLgUnZM,1756 +langchain/evaluation/embedding_distance/__init__.py,sha256=YLtGUI4ZMxjsn2Q0dGZ-R9YMFgZsarfJv9qzNEnrLQs,324 +langchain/evaluation/embedding_distance/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/embedding_distance/__pycache__/base.cpython-312.pyc,, +langchain/evaluation/embedding_distance/base.py,sha256=eundHJyBPWnb-zcqY8gBDzJkRGAYrk15bWsHetVwbGo,18932 +langchain/evaluation/exact_match/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +langchain/evaluation/exact_match/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/exact_match/__pycache__/base.cpython-312.pyc,, +langchain/evaluation/exact_match/base.py,sha256=9zhRWHyeyBlM2X_I34cnpnWGOiiCzGVpdP9zBlGpBX0,2736 +langchain/evaluation/loading.py,sha256=Fbiv3f9j_UmFCANo_Kl0qSscVr8QjuEaKLH9rwnPKG8,7324 +langchain/evaluation/parsing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +langchain/evaluation/parsing/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/parsing/__pycache__/base.cpython-312.pyc,, 
+langchain/evaluation/parsing/__pycache__/json_distance.cpython-312.pyc,, +langchain/evaluation/parsing/__pycache__/json_schema.cpython-312.pyc,, +langchain/evaluation/parsing/base.py,sha256=oshaVFsY9ggIgOZX_3Xe-x7LPSRaQejmqLRT-nUvSVI,5242 +langchain/evaluation/parsing/json_distance.py,sha256=00h1wUNQyvjQiXi2OWlKb50Hcn_X55w4kndM1L38cAM,3662 +langchain/evaluation/parsing/json_schema.py,sha256=KaayfLXQAYwQlCbiF06oSJjT624IiwSb1QjbXig75Cs,3178 +langchain/evaluation/qa/__init__.py,sha256=_uUrc6UBe5Bcy5qZKhumLbKzLCKES0bioUylyJ0SB8c,345 +langchain/evaluation/qa/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/qa/__pycache__/eval_chain.cpython-312.pyc,, +langchain/evaluation/qa/__pycache__/eval_prompt.cpython-312.pyc,, +langchain/evaluation/qa/__pycache__/generate_chain.cpython-312.pyc,, +langchain/evaluation/qa/__pycache__/generate_prompt.cpython-312.pyc,, +langchain/evaluation/qa/eval_chain.py,sha256=DAnsmiaOhTuvBRYyPYhrNQQtfWWu4E9e9TpOCSqwuHk,10821 +langchain/evaluation/qa/eval_prompt.py,sha256=zfJxS2-SI_SOXBDFp0xRpNAOgeELV3ti9EhcV2DFO_Y,3911 +langchain/evaluation/qa/generate_chain.py,sha256=16mh7KMQvwBnYRNfTuUHcr2nKDxo2mAmiIWxQETADk0,1035 +langchain/evaluation/qa/generate_prompt.py,sha256=g6U9K8-eq7JXOjFJokFEfBtLnHp-fpK1rgIwWYZ9Odc,606 +langchain/evaluation/regex_match/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +langchain/evaluation/regex_match/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/regex_match/__pycache__/base.cpython-312.pyc,, +langchain/evaluation/regex_match/base.py,sha256=aixGwYPJu7Cac-08-98P50a3QlXsMhyHq0KKaFYZTwY,2392 +langchain/evaluation/schema.py,sha256=yYnf-zDWTb7IaleQhtHQeA1-u45u9Dx9z71GuiY317s,18202 +langchain/evaluation/scoring/__init__.py,sha256=D5zPsGRGCpg3KJkfAu2SN096jZi9FRlDlG4fiYV1Ko8,1113 +langchain/evaluation/scoring/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/scoring/__pycache__/eval_chain.cpython-312.pyc,, +langchain/evaluation/scoring/__pycache__/prompt.cpython-312.pyc,, +langchain/evaluation/scoring/eval_chain.py,sha256=y8IuhK3SufKov2_vSZ43SPFYaMh5G7D22bD-i651D-s,15393 +langchain/evaluation/scoring/prompt.py,sha256=WqNq8bktJUjU8tcHWVuPJFWgsOIc-G7fYMDiejHhWIY,2130 +langchain/evaluation/string_distance/__init__.py,sha256=qAz9Z709ocAi_Yd9nbkKnFt16nc9d_gTT55N7okXWmE,286 +langchain/evaluation/string_distance/__pycache__/__init__.cpython-312.pyc,, +langchain/evaluation/string_distance/__pycache__/base.cpython-312.pyc,, +langchain/evaluation/string_distance/base.py,sha256=dGXmZyp-FoeCv-mKwdH3LFuH1JeONIzj7CI_Pe8TLkI,14001 +langchain/example_generator.py,sha256=q_JvQKn2pgJOHcBeFc851GpaR4seOZXDe9TISAJheEY,142 +langchain/formatting.py,sha256=4s5AwApo_6t2pVfoFXOgFU9sNNdpVDD44B4ryOwJMJo,168 +langchain/globals.py,sha256=SUMrEo_KlpODNBDj4JZDILhbxTK_GGDEYmUQVQ-Hzus,7436 +langchain/graphs/__init__.py,sha256=l12tO5owB32RcKbu5O8rtOK0qLVjGee9JjX3RUVT54Q,1528 +langchain/graphs/__pycache__/__init__.cpython-312.pyc,, +langchain/graphs/__pycache__/arangodb_graph.cpython-312.pyc,, +langchain/graphs/__pycache__/falkordb_graph.cpython-312.pyc,, +langchain/graphs/__pycache__/graph_document.cpython-312.pyc,, +langchain/graphs/__pycache__/graph_store.cpython-312.pyc,, +langchain/graphs/__pycache__/hugegraph.cpython-312.pyc,, +langchain/graphs/__pycache__/kuzu_graph.cpython-312.pyc,, +langchain/graphs/__pycache__/memgraph_graph.cpython-312.pyc,, +langchain/graphs/__pycache__/nebula_graph.cpython-312.pyc,, +langchain/graphs/__pycache__/neo4j_graph.cpython-312.pyc,, +langchain/graphs/__pycache__/neptune_graph.cpython-312.pyc,, 
+langchain/graphs/__pycache__/networkx_graph.cpython-312.pyc,, +langchain/graphs/__pycache__/rdf_graph.cpython-312.pyc,, +langchain/graphs/arangodb_graph.py,sha256=3Gu4bnS0q27AUEuUnoK2asz67iU8KpJktQ2uJvJ-iy0,796 +langchain/graphs/falkordb_graph.py,sha256=PdrxQC9Tl0txQtDTFNk2qR9m5L0apWPwq-SWq3lxGMc,618 +langchain/graphs/graph_document.py,sha256=ypGx35eUsMWcxBdbBaLgVucQ4s7ocZHe5hVnHZirNgU,862 +langchain/graphs/graph_store.py,sha256=Sy9mFUdMk0f8tPtls4XtuLA4Npk9pOaPla-kOYeygJs,633 +langchain/graphs/hugegraph.py,sha256=bJnfhi_2M9jcHAFMggWsl57vnOXwI2ltIPcC-IYGo1w,606 +langchain/graphs/kuzu_graph.py,sha256=_1bX5hbJLXQ87D-IvMAqKKZ4Lvb-jsvqFfXJU70XhW4,606 +langchain/graphs/memgraph_graph.py,sha256=Wtu9U3-LZU5Qq1cOsPkAFxbhVJCh01GRvSg8LQ0aevo,618 +langchain/graphs/nebula_graph.py,sha256=rTxBEADv2d1aAqugHXGI1iVfR4ypQ2-E2KzsUmqLVgw,612 +langchain/graphs/neo4j_graph.py,sha256=5K95GM5qQ9vPiiXSkWfHiThA570WopxwO3MWROHrOUE,609 +langchain/graphs/neptune_graph.py,sha256=x_3tOosO6R6AxR4dDA2zZUya1Bqxhur3vTONbLYmjBU,615 +langchain/graphs/networkx_graph.py,sha256=zpFqmBMpNM19kJxXObZGaQwgS6cuwuU-4mqr_xeJPhk,1042 +langchain/graphs/rdf_graph.py,sha256=i42Srtc325zzRDq-0hqpQu2NVltvsaP7tkaWSRpgvfc,603 +langchain/hub.py,sha256=GY0GL9SPz9kNuLX5_gID9S5M8MSGSMDsUIYz1iMqefk,4707 +langchain/indexes/__init__.py,sha256=U8yzfwdOGiN21XgAs3EctGr8k8nEQjOC22Tc8fbbnK8,1481 +langchain/indexes/__pycache__/__init__.cpython-312.pyc,, +langchain/indexes/__pycache__/_api.cpython-312.pyc,, +langchain/indexes/__pycache__/_sql_record_manager.cpython-312.pyc,, +langchain/indexes/__pycache__/graph.cpython-312.pyc,, +langchain/indexes/__pycache__/vectorstore.cpython-312.pyc,, +langchain/indexes/_api.py,sha256=93hOcQ5gNxwmgjV0hqcYHJQ1WkE8tc8JrAQuwKf2X80,252 +langchain/indexes/_sql_record_manager.py,sha256=ojE2-VX4s5AuzujVhZOtweC_vL3ljS48l7qAo_IdOtU,20828 +langchain/indexes/graph.py,sha256=gR1ZiM_8YfJcWmPy_sppX4fAo6slkO1F38unYnn6Z7g,907 +langchain/indexes/prompts/__init__.py,sha256=5ohFoTxhpsRyltYRwAlmdaShczCPPkyvxbc0SQ5bTCE,358 +langchain/indexes/prompts/__pycache__/__init__.cpython-312.pyc,, +langchain/indexes/prompts/__pycache__/entity_extraction.cpython-312.pyc,, +langchain/indexes/prompts/__pycache__/entity_summarization.cpython-312.pyc,, +langchain/indexes/prompts/__pycache__/knowledge_triplet_extraction.cpython-312.pyc,, +langchain/indexes/prompts/entity_extraction.py,sha256=gTKrAXGbbR3OKdtkgaq8UgigvNp8Q4oICcBHbaeVhOg,1952 +langchain/indexes/prompts/entity_summarization.py,sha256=fqL7-zIdas0H1SXXUS94otZSxeGpg-7o-Ppc_rxKeSk,1157 +langchain/indexes/prompts/knowledge_triplet_extraction.py,sha256=ZbFrUM14ZcbhiXFfbF9k8Ef7nEw1n1IB1GYD3sPTtps,1554 +langchain/indexes/vectorstore.py,sha256=UjqfWzrUcg7uqbnWV2yuUbH5ls-aAGA6flhRyitIezg,9501 +langchain/input.py,sha256=9OczJo7x4KQPqxSxihmP8hDsl7j14xosDrid-6hrjRY,283 +langchain/llms/__init__.py,sha256=qPlODTyZShYRG1Vnnku0wu_85uMpGXjaWsHouhYMs8s,17089 +langchain/llms/__pycache__/__init__.cpython-312.pyc,, +langchain/llms/__pycache__/ai21.cpython-312.pyc,, +langchain/llms/__pycache__/aleph_alpha.cpython-312.pyc,, +langchain/llms/__pycache__/amazon_api_gateway.cpython-312.pyc,, +langchain/llms/__pycache__/anthropic.cpython-312.pyc,, +langchain/llms/__pycache__/anyscale.cpython-312.pyc,, +langchain/llms/__pycache__/arcee.cpython-312.pyc,, +langchain/llms/__pycache__/aviary.cpython-312.pyc,, +langchain/llms/__pycache__/azureml_endpoint.cpython-312.pyc,, +langchain/llms/__pycache__/baidu_qianfan_endpoint.cpython-312.pyc,, +langchain/llms/__pycache__/bananadev.cpython-312.pyc,, 
+langchain/llms/__pycache__/base.cpython-312.pyc,, +langchain/llms/__pycache__/baseten.cpython-312.pyc,, +langchain/llms/__pycache__/beam.cpython-312.pyc,, +langchain/llms/__pycache__/bedrock.cpython-312.pyc,, +langchain/llms/__pycache__/bittensor.cpython-312.pyc,, +langchain/llms/__pycache__/cerebriumai.cpython-312.pyc,, +langchain/llms/__pycache__/chatglm.cpython-312.pyc,, +langchain/llms/__pycache__/clarifai.cpython-312.pyc,, +langchain/llms/__pycache__/cloudflare_workersai.cpython-312.pyc,, +langchain/llms/__pycache__/cohere.cpython-312.pyc,, +langchain/llms/__pycache__/ctransformers.cpython-312.pyc,, +langchain/llms/__pycache__/ctranslate2.cpython-312.pyc,, +langchain/llms/__pycache__/databricks.cpython-312.pyc,, +langchain/llms/__pycache__/deepinfra.cpython-312.pyc,, +langchain/llms/__pycache__/deepsparse.cpython-312.pyc,, +langchain/llms/__pycache__/edenai.cpython-312.pyc,, +langchain/llms/__pycache__/fake.cpython-312.pyc,, +langchain/llms/__pycache__/fireworks.cpython-312.pyc,, +langchain/llms/__pycache__/forefrontai.cpython-312.pyc,, +langchain/llms/__pycache__/gigachat.cpython-312.pyc,, +langchain/llms/__pycache__/google_palm.cpython-312.pyc,, +langchain/llms/__pycache__/gooseai.cpython-312.pyc,, +langchain/llms/__pycache__/gpt4all.cpython-312.pyc,, +langchain/llms/__pycache__/gradient_ai.cpython-312.pyc,, +langchain/llms/__pycache__/huggingface_endpoint.cpython-312.pyc,, +langchain/llms/__pycache__/huggingface_hub.cpython-312.pyc,, +langchain/llms/__pycache__/huggingface_pipeline.cpython-312.pyc,, +langchain/llms/__pycache__/huggingface_text_gen_inference.cpython-312.pyc,, +langchain/llms/__pycache__/human.cpython-312.pyc,, +langchain/llms/__pycache__/javelin_ai_gateway.cpython-312.pyc,, +langchain/llms/__pycache__/koboldai.cpython-312.pyc,, +langchain/llms/__pycache__/llamacpp.cpython-312.pyc,, +langchain/llms/__pycache__/loading.cpython-312.pyc,, +langchain/llms/__pycache__/manifest.cpython-312.pyc,, +langchain/llms/__pycache__/minimax.cpython-312.pyc,, +langchain/llms/__pycache__/mlflow.cpython-312.pyc,, +langchain/llms/__pycache__/mlflow_ai_gateway.cpython-312.pyc,, +langchain/llms/__pycache__/modal.cpython-312.pyc,, +langchain/llms/__pycache__/mosaicml.cpython-312.pyc,, +langchain/llms/__pycache__/nlpcloud.cpython-312.pyc,, +langchain/llms/__pycache__/octoai_endpoint.cpython-312.pyc,, +langchain/llms/__pycache__/ollama.cpython-312.pyc,, +langchain/llms/__pycache__/opaqueprompts.cpython-312.pyc,, +langchain/llms/__pycache__/openai.cpython-312.pyc,, +langchain/llms/__pycache__/openllm.cpython-312.pyc,, +langchain/llms/__pycache__/openlm.cpython-312.pyc,, +langchain/llms/__pycache__/pai_eas_endpoint.cpython-312.pyc,, +langchain/llms/__pycache__/petals.cpython-312.pyc,, +langchain/llms/__pycache__/pipelineai.cpython-312.pyc,, +langchain/llms/__pycache__/predibase.cpython-312.pyc,, +langchain/llms/__pycache__/predictionguard.cpython-312.pyc,, +langchain/llms/__pycache__/promptlayer_openai.cpython-312.pyc,, +langchain/llms/__pycache__/replicate.cpython-312.pyc,, +langchain/llms/__pycache__/rwkv.cpython-312.pyc,, +langchain/llms/__pycache__/sagemaker_endpoint.cpython-312.pyc,, +langchain/llms/__pycache__/self_hosted.cpython-312.pyc,, +langchain/llms/__pycache__/self_hosted_hugging_face.cpython-312.pyc,, +langchain/llms/__pycache__/stochasticai.cpython-312.pyc,, +langchain/llms/__pycache__/symblai_nebula.cpython-312.pyc,, +langchain/llms/__pycache__/textgen.cpython-312.pyc,, +langchain/llms/__pycache__/titan_takeoff.cpython-312.pyc,, 
[… remainder of the committed langchain site-packages RECORD manifest omitted: roughly a thousand more `+langchain/…` entries listing module sources with their sha256 hashes, `__pycache__/*.cpython-312.pyc` bytecode, and dist-info records across the llms, load, memory, output_parsers, prompts, retrievers, runnables, schema, smith, storage, and tools subpackages — vendored dependency files, not project code …]
+langchain/tools/wikipedia/tool.py,sha256=T9_0ygb86a3ptl9NSsZ6hckuHpXlBNCdV8Ht86oedKM,628 +langchain/tools/wolfram_alpha/__init__.py,sha256=TqUr2bSth2XmYREgYmKX-nv21pm1KaclXfN3n6zsEEY,671 +langchain/tools/wolfram_alpha/__pycache__/__init__.cpython-312.pyc,, +langchain/tools/wolfram_alpha/__pycache__/tool.cpython-312.pyc,, +langchain/tools/wolfram_alpha/tool.py,sha256=u4n2h5Mif3cSN9-lxn4Zb5--Pbaq4PCT6UMWfHorUCs,637 +langchain/tools/yahoo_finance_news.py,sha256=Oux9SFN0OrhR-Thz47jhIcLFwjMF018bYAWoViDZD3o,637 +langchain/tools/youtube/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +langchain/tools/youtube/__pycache__/__init__.cpython-312.pyc,, +langchain/tools/youtube/__pycache__/search.cpython-312.pyc,, +langchain/tools/youtube/search.py,sha256=EpZPKchS1q9KFJeffMjAE2dfThbFwefWUZJTv6gCx8U,628 +langchain/tools/zapier/__init__.py,sha256=9PNtXtEMHK74BrNQ3l5DVTxnGNp6nhQPr31RCuFE6hM,765 +langchain/tools/zapier/__pycache__/__init__.cpython-312.pyc,, +langchain/tools/zapier/__pycache__/tool.cpython-312.pyc,, +langchain/tools/zapier/tool.py,sha256=GGyvSMzNG57HS0PQ1x7F-1a1qQWztAd7qb9WsuXJgSw,745 +langchain/utilities/__init__.py,sha256=H3_sc8ePWIW8HSLpeRkeITUame9nsR5CXc-1mTHw2pk,6023 +langchain/utilities/__pycache__/__init__.cpython-312.pyc,, +langchain/utilities/__pycache__/alpha_vantage.cpython-312.pyc,, +langchain/utilities/__pycache__/anthropic.cpython-312.pyc,, +langchain/utilities/__pycache__/apify.cpython-312.pyc,, +langchain/utilities/__pycache__/arcee.cpython-312.pyc,, +langchain/utilities/__pycache__/arxiv.cpython-312.pyc,, +langchain/utilities/__pycache__/asyncio.cpython-312.pyc,, +langchain/utilities/__pycache__/awslambda.cpython-312.pyc,, +langchain/utilities/__pycache__/bibtex.cpython-312.pyc,, +langchain/utilities/__pycache__/bing_search.cpython-312.pyc,, +langchain/utilities/__pycache__/brave_search.cpython-312.pyc,, +langchain/utilities/__pycache__/clickup.cpython-312.pyc,, +langchain/utilities/__pycache__/dalle_image_generator.cpython-312.pyc,, +langchain/utilities/__pycache__/dataforseo_api_search.cpython-312.pyc,, +langchain/utilities/__pycache__/duckduckgo_search.cpython-312.pyc,, +langchain/utilities/__pycache__/github.cpython-312.pyc,, +langchain/utilities/__pycache__/gitlab.cpython-312.pyc,, +langchain/utilities/__pycache__/golden_query.cpython-312.pyc,, +langchain/utilities/__pycache__/google_finance.cpython-312.pyc,, +langchain/utilities/__pycache__/google_jobs.cpython-312.pyc,, +langchain/utilities/__pycache__/google_lens.cpython-312.pyc,, +langchain/utilities/__pycache__/google_places_api.cpython-312.pyc,, +langchain/utilities/__pycache__/google_scholar.cpython-312.pyc,, +langchain/utilities/__pycache__/google_search.cpython-312.pyc,, +langchain/utilities/__pycache__/google_serper.cpython-312.pyc,, +langchain/utilities/__pycache__/google_trends.cpython-312.pyc,, +langchain/utilities/__pycache__/graphql.cpython-312.pyc,, +langchain/utilities/__pycache__/jira.cpython-312.pyc,, +langchain/utilities/__pycache__/loading.cpython-312.pyc,, +langchain/utilities/__pycache__/max_compute.cpython-312.pyc,, +langchain/utilities/__pycache__/merriam_webster.cpython-312.pyc,, +langchain/utilities/__pycache__/metaphor_search.cpython-312.pyc,, +langchain/utilities/__pycache__/nasa.cpython-312.pyc,, +langchain/utilities/__pycache__/opaqueprompts.cpython-312.pyc,, +langchain/utilities/__pycache__/openapi.cpython-312.pyc,, +langchain/utilities/__pycache__/openweathermap.cpython-312.pyc,, +langchain/utilities/__pycache__/outline.cpython-312.pyc,, 
+langchain/utilities/__pycache__/portkey.cpython-312.pyc,, +langchain/utilities/__pycache__/powerbi.cpython-312.pyc,, +langchain/utilities/__pycache__/pubmed.cpython-312.pyc,, +langchain/utilities/__pycache__/python.cpython-312.pyc,, +langchain/utilities/__pycache__/reddit_search.cpython-312.pyc,, +langchain/utilities/__pycache__/redis.cpython-312.pyc,, +langchain/utilities/__pycache__/requests.cpython-312.pyc,, +langchain/utilities/__pycache__/scenexplain.cpython-312.pyc,, +langchain/utilities/__pycache__/searchapi.cpython-312.pyc,, +langchain/utilities/__pycache__/searx_search.cpython-312.pyc,, +langchain/utilities/__pycache__/serpapi.cpython-312.pyc,, +langchain/utilities/__pycache__/spark_sql.cpython-312.pyc,, +langchain/utilities/__pycache__/sql_database.cpython-312.pyc,, +langchain/utilities/__pycache__/stackexchange.cpython-312.pyc,, +langchain/utilities/__pycache__/steam.cpython-312.pyc,, +langchain/utilities/__pycache__/tavily_search.cpython-312.pyc,, +langchain/utilities/__pycache__/tensorflow_datasets.cpython-312.pyc,, +langchain/utilities/__pycache__/twilio.cpython-312.pyc,, +langchain/utilities/__pycache__/vertexai.cpython-312.pyc,, +langchain/utilities/__pycache__/wikipedia.cpython-312.pyc,, +langchain/utilities/__pycache__/wolfram_alpha.cpython-312.pyc,, +langchain/utilities/__pycache__/zapier.cpython-312.pyc,, +langchain/utilities/alpha_vantage.py,sha256=OgO-VWunWKArZQxq7r4awzxf8-wCbQZmIpb9gvVuH0s,651 +langchain/utilities/anthropic.py,sha256=mBrkZ5DaeZepzdJaUkkGziD7m2fsMjx5paytoruLwOo,839 +langchain/utilities/apify.py,sha256=sFbnvcDDczN5o1xjU_k296GuwwZCtbPCfEKHbdzvlQw,621 +langchain/utilities/arcee.py,sha256=9IdAVeprrQVuLJ9Hpgs6sYo-HUxna_7jJgBZaiQ8GeU,1336 +langchain/utilities/arxiv.py,sha256=FY639OMmPddw2wF6nNiDpYm5JBU6jJxdmU94pN4khBY,630 +langchain/utilities/asyncio.py,sha256=1V67tgNokxxNYRrsHgog1YPFq4hHubMrZF3u7WcxiIc,274 +langchain/utilities/awslambda.py,sha256=GLfqh3q4hvLdD86RkFrr2Kf9KyLDaUgqDIPV4W7cbXA,624 +langchain/utilities/bibtex.py,sha256=FVmrpIc9oIstVTLpzQXsTHAwstJd1aHH2FTmNaAaqWI,642 +langchain/utilities/bing_search.py,sha256=w-wTP-Z6Fv7bRSZiMiCqcYrAnCiuzcypz4WhpboECfI,645 +langchain/utilities/brave_search.py,sha256=rQn2F4Wsw4tcvR-UiPySWkOgdbW-gBry5NjyVih5lfM,639 +langchain/utilities/clickup.py,sha256=lG-bicw30IV37K8BylJpZ_U13Fwm0TgI3P_8apPeas4,1180 +langchain/utilities/dalle_image_generator.py,sha256=5Sk-_aNzkVgsW3y6V96W9HiqnPeRj7xbBLUmbBWnZg4,680 +langchain/utilities/dataforseo_api_search.py,sha256=5HkMcCjr9TWEtp_O2p11gxZP_N4svKi6ZdlA_nwf_X8,695 +langchain/utilities/duckduckgo_search.py,sha256=fVBlNEcDMVsYScfEu_hEno_2aL5B96KWhvpxoPgh7yU,663 +langchain/utilities/github.py,sha256=6NYd9Qv0goandSBNwCjEYvVP0m-4RpkqNO6OlLY-0hk,647 +langchain/utilities/gitlab.py,sha256=QMi7rqk7xxq7OpY6y0WpqJWKV86IvGARWk6ZwinVNkI,647 +langchain/utilities/golden_query.py,sha256=sWKQjioKLvzLiYZKsFZUifQAmbaTpxMydF8m-A02K6o,648 +langchain/utilities/google_finance.py,sha256=DAsA36vrRlVJPwa1yZI0k2filJHe7qr-3bhet4lasEM,654 +langchain/utilities/google_jobs.py,sha256=3NgW2uwx3tFXQ4bkaPxxPgy1ZygG7-YlcZz36l2DhO4,645 +langchain/utilities/google_lens.py,sha256=ugXkJAFA_XzGo6jtlJ28gqvn8bIXvJ0pnDY4Vx8qeBk,645 +langchain/utilities/google_places_api.py,sha256=Y9-FxGhxep4FH26t87Wd73HuAMbb_nrPcDwRG9f0BRU,651 +langchain/utilities/google_scholar.py,sha256=eRMBnbPUI2dbDcSNsliXQrYaKLMD7EJEMGdSVOSVZng,654 +langchain/utilities/google_search.py,sha256=xvMHWwSNXtKUjR_lK_IbilESp6M9wkdnbd1jEK7o8Ag,651 +langchain/utilities/google_serper.py,sha256=HsxyJnaDT6S8ZOrjnK7rKfyrLy4Tx3UbiZN7cu20Jb0,651 
+langchain/utilities/google_trends.py,sha256=jue7s_nthiN_2whG1p4pIdaRluYOB7w9izfhHn22cxs,651 +langchain/utilities/graphql.py,sha256=Y_6s4GNcqMttL24pFIVTKcqu2K3nM5Gcq-F25iry5_Q,636 +langchain/utilities/jira.py,sha256=HQImW_p4B01Tp9Pm8YcvcnqVMWe6yCYVb4WPE3UJca0,627 +langchain/utilities/loading.py,sha256=4QtpLz_q9F_fijDtgcez3m8N0AQb2pcnVF8N4lfUWTA,122 +langchain/utilities/max_compute.py,sha256=eN_ZNfNV7BJZyabY2seOu4sZ0f7K4rIDkfrBZt9NH5c,645 +langchain/utilities/merriam_webster.py,sha256=ED6c9ghEFLzjR1TEyrZGFi4GhF0wbXDUvRwy0yHnDJo,657 +langchain/utilities/metaphor_search.py,sha256=lu6rOuqgiaz5B7T8dyFOkt9-iS0FDrAFUW3WSCF50wg,657 +langchain/utilities/nasa.py,sha256=tLc0ls3mDtRFcMxdy_JZJIbAo1liW8a1JlpwGsby9wc,627 +langchain/utilities/opaqueprompts.py,sha256=WXNgdsKgpC1cQwnWMZgGJvKCSSYc-um6Ug9_rAlyiWc,739 +langchain/utilities/openapi.py,sha256=JhltqdAXi8R_bgO_pNG0aVBBLv8YM3GT2kJY4htwBns,753 +langchain/utilities/openweathermap.py,sha256=C-7qv5Ew5PPQsb9lPzVvDvTKyB5175_J4AytqE-NWmI,657 +langchain/utilities/outline.py,sha256=xlT8l23BPPTgeBirZ6PTRGuaq0FkOh6xByRQIH7EQpw,636 +langchain/utilities/portkey.py,sha256=y-R0mWJeJL3Fk4AWMt79jW1BSQF0J4YWcYAXAqytECk,606 +langchain/utilities/powerbi.py,sha256=GZHYSwSmx6ONOIu0dOeucT8fedWAQBIqPkHkvIVGI84,627 +langchain/utilities/pubmed.py,sha256=O32mmI3xN0SQIFV38-wDh9WmhgWe3yDwIIAkkwZJqMA,633 +langchain/utilities/python.py,sha256=TxVqzUU1IjM8WSmM73FEw5KxpEWhXG4OKq8sAJ9yJnU,555 +langchain/utilities/reddit_search.py,sha256=ifhYqW0ocDJtj6wR39bBEdg0sm6M3j9b5boN2KfaK8E,685 +langchain/utilities/redis.py,sha256=ReyZTSsaJCqt5sagnDyvz8uwp_9HSU_QrDO55REDhz8,889 +langchain/utilities/requests.py,sha256=oQOanvlxXdm0t3d3WvpOpb3xa0Dhas7ozC9I0fbYlsw,712 +langchain/utilities/scenexplain.py,sha256=lb5B72iBzf72MYBvC95WHwsErvuJLolSUAgCQhZ3os8,648 +langchain/utilities/searchapi.py,sha256=2iuhnJfjfk1mm78oYKeDts2_fbR2txIkqr0w8DTlxro,642 +langchain/utilities/searx_search.py,sha256=_bcsn1n1kWN8FGiO6eg8uRGCuaAmeV0KgJz3qnJNEDg,804 +langchain/utilities/serpapi.py,sha256=ayBvZ9iTZVMQx3vZbB0QYvtKgsjiiTN3aGxDVNlpeC4,782 +langchain/utilities/spark_sql.py,sha256=cZFDTsdxbvGuTM7mPZjiVPGlhE62PEluViz81poJVI4,609 +langchain/utilities/sql_database.py,sha256=8o7NtgNTHNC8_kqvicuw5KtLeRaoUJxEBVaC8HosCII,786 +langchain/utilities/stackexchange.py,sha256=Nw0RQ6ipvJXTTbCnaT3-m4S86TiE05_4NfU_zcOPCk0,654 +langchain/utilities/steam.py,sha256=2OocGP3N5Px3n38x24uRCyIN0_Wg6mJqTaeWF6YVrr8,639 +langchain/utilities/tavily_search.py,sha256=D4QK6J-5v5mamB0yhY8NPqlGAQXYqLz2NrTvPRvhiYg,685 +langchain/utilities/tensorflow_datasets.py,sha256=opQlDnCFnDCHK2bEJFUKKZeRneg6QBaqNzmvnHU7Gug,639 +langchain/utilities/twilio.py,sha256=vA9M8ce5K8KGqnn-O9K1bL6IUc8gxDOuKspwxAGs4ys,633 +langchain/utilities/vertexai.py,sha256=JwDLIvEzfT17DkQP4uhC7tv-hvn48FcTdJggtKIAdQg,1056 +langchain/utilities/wikipedia.py,sha256=Nepa7g64k-xiyQWna_Xq7stlR70bfRNtkeoJDBtJfo4,642 +langchain/utilities/wolfram_alpha.py,sha256=r9lRCFIe-DVxYd_YaTv1Xcg9jiumMTHPzKDqXc345Z4,651 +langchain/utilities/zapier.py,sha256=n6pmcpbd6Q0uX1Mn2EyI3QqsXn6FsnghbeEku7m1tYA,633 +langchain/utils/__init__.py,sha256=RX9EaD9hiuKy7i_RDNpaQwTfF7DD-QDcjv-a5XJ7tg8,1846 +langchain/utils/__pycache__/__init__.cpython-312.pyc,, +langchain/utils/__pycache__/aiter.cpython-312.pyc,, +langchain/utils/__pycache__/env.cpython-312.pyc,, +langchain/utils/__pycache__/ernie_functions.cpython-312.pyc,, +langchain/utils/__pycache__/formatting.cpython-312.pyc,, +langchain/utils/__pycache__/html.cpython-312.pyc,, +langchain/utils/__pycache__/input.cpython-312.pyc,, 
+langchain/utils/__pycache__/iter.cpython-312.pyc,, +langchain/utils/__pycache__/json_schema.cpython-312.pyc,, +langchain/utils/__pycache__/loading.cpython-312.pyc,, +langchain/utils/__pycache__/math.cpython-312.pyc,, +langchain/utils/__pycache__/openai.cpython-312.pyc,, +langchain/utils/__pycache__/openai_functions.cpython-312.pyc,, +langchain/utils/__pycache__/pydantic.cpython-312.pyc,, +langchain/utils/__pycache__/strings.cpython-312.pyc,, +langchain/utils/__pycache__/utils.cpython-312.pyc,, +langchain/utils/aiter.py,sha256=7Ut0ojyQxDkgcPstziVZDlfmTlXSUYmQgDGBvDh83R0,102 +langchain/utils/env.py,sha256=KfFYCkcpxbeKl6JfpNWZmkxR1suHcpQuapIm-Dv2PlM,124 +langchain/utils/ernie_functions.py,sha256=LI1VMc4xhdv-gyNvXdkRAbNoaPj5y-AeyXkUmHqH0Wg,1140 +langchain/utils/formatting.py,sha256=zrQEAw_328CgHtFLC1GKnpdobUzQtJ6jHR7ZUp1cBSA,91 +langchain/utils/html.py,sha256=YbIEQdxD5ud0TYyGxWDoIp_10vGUExubF4XSdMjx6gE,421 +langchain/utils/input.py,sha256=eMvdGirCwY78ecqpqNyqEvOYeRSXTAufxqvEyd9MOx0,211 +langchain/utils/iter.py,sha256=w_FxBEiZ6SHbVrk6aRcNbCxrZtEqQ7Lf7_IeQBr6Yeo,133 +langchain/utils/json_schema.py,sha256=nSw7j5ZV6abc6wDtUQMfiJhMmwnVrGfIQA64KGuyTCc,258 +langchain/utils/loading.py,sha256=zr5W8pnZXlMysIQQtuYphOJxDBcrfeXoz632Tw0seLg,92 +langchain/utils/math.py,sha256=gNEYP8--I5z2lSBXN-kLtw7A47JPpgNPPRJKVqWRCng,918 +langchain/utils/openai.py,sha256=dK6irHyqdXLZHPAVn-CROxhhEOoZ_k4DU4fntuxQcd4,627 +langchain/utils/openai_functions.py,sha256=IfqVZGBW_iuJNCQ3w5ajDT791eJoYI2-UfauKWQ0kiU,325 +langchain/utils/pydantic.py,sha256=rg6ren6e7cJVyJKXyUNVdxS6ZsGeshV6e8iBEzfV_UU,111 +langchain/utils/strings.py,sha256=01zPdb-3EyRBfLMw8tFGJPmhZvW1O80gx_u_VmRjb38,148 +langchain/utils/utils.py,sha256=3HIZ2CfO10fr_OWAtWpwNyFDf18oRW12IZJOEJdgPsM,446 +langchain/vectorstores/__init__.py,sha256=87uYwwyr6BAW7XZyLrAFsu9jM0G_k-JdNHWLdfEeuoo,8071 +langchain/vectorstores/__pycache__/__init__.cpython-312.pyc,, +langchain/vectorstores/__pycache__/alibabacloud_opensearch.cpython-312.pyc,, +langchain/vectorstores/__pycache__/analyticdb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/annoy.cpython-312.pyc,, +langchain/vectorstores/__pycache__/astradb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/atlas.cpython-312.pyc,, +langchain/vectorstores/__pycache__/awadb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/azure_cosmos_db.cpython-312.pyc,, +langchain/vectorstores/__pycache__/azuresearch.cpython-312.pyc,, +langchain/vectorstores/__pycache__/bageldb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/baiducloud_vector_search.cpython-312.pyc,, +langchain/vectorstores/__pycache__/base.cpython-312.pyc,, +langchain/vectorstores/__pycache__/cassandra.cpython-312.pyc,, +langchain/vectorstores/__pycache__/chroma.cpython-312.pyc,, +langchain/vectorstores/__pycache__/clarifai.cpython-312.pyc,, +langchain/vectorstores/__pycache__/clickhouse.cpython-312.pyc,, +langchain/vectorstores/__pycache__/dashvector.cpython-312.pyc,, +langchain/vectorstores/__pycache__/databricks_vector_search.cpython-312.pyc,, +langchain/vectorstores/__pycache__/deeplake.cpython-312.pyc,, +langchain/vectorstores/__pycache__/dingo.cpython-312.pyc,, +langchain/vectorstores/__pycache__/elastic_vector_search.cpython-312.pyc,, +langchain/vectorstores/__pycache__/elasticsearch.cpython-312.pyc,, +langchain/vectorstores/__pycache__/epsilla.cpython-312.pyc,, +langchain/vectorstores/__pycache__/faiss.cpython-312.pyc,, +langchain/vectorstores/__pycache__/hippo.cpython-312.pyc,, +langchain/vectorstores/__pycache__/hologres.cpython-312.pyc,, 
+langchain/vectorstores/__pycache__/lancedb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/llm_rails.cpython-312.pyc,, +langchain/vectorstores/__pycache__/marqo.cpython-312.pyc,, +langchain/vectorstores/__pycache__/matching_engine.cpython-312.pyc,, +langchain/vectorstores/__pycache__/meilisearch.cpython-312.pyc,, +langchain/vectorstores/__pycache__/milvus.cpython-312.pyc,, +langchain/vectorstores/__pycache__/momento_vector_index.cpython-312.pyc,, +langchain/vectorstores/__pycache__/mongodb_atlas.cpython-312.pyc,, +langchain/vectorstores/__pycache__/myscale.cpython-312.pyc,, +langchain/vectorstores/__pycache__/neo4j_vector.cpython-312.pyc,, +langchain/vectorstores/__pycache__/nucliadb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/opensearch_vector_search.cpython-312.pyc,, +langchain/vectorstores/__pycache__/pgembedding.cpython-312.pyc,, +langchain/vectorstores/__pycache__/pgvecto_rs.cpython-312.pyc,, +langchain/vectorstores/__pycache__/pgvector.cpython-312.pyc,, +langchain/vectorstores/__pycache__/pinecone.cpython-312.pyc,, +langchain/vectorstores/__pycache__/qdrant.cpython-312.pyc,, +langchain/vectorstores/__pycache__/rocksetdb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/scann.cpython-312.pyc,, +langchain/vectorstores/__pycache__/semadb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/singlestoredb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/sklearn.cpython-312.pyc,, +langchain/vectorstores/__pycache__/sqlitevss.cpython-312.pyc,, +langchain/vectorstores/__pycache__/starrocks.cpython-312.pyc,, +langchain/vectorstores/__pycache__/supabase.cpython-312.pyc,, +langchain/vectorstores/__pycache__/tair.cpython-312.pyc,, +langchain/vectorstores/__pycache__/tencentvectordb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/tigris.cpython-312.pyc,, +langchain/vectorstores/__pycache__/tiledb.cpython-312.pyc,, +langchain/vectorstores/__pycache__/timescalevector.cpython-312.pyc,, +langchain/vectorstores/__pycache__/typesense.cpython-312.pyc,, +langchain/vectorstores/__pycache__/usearch.cpython-312.pyc,, +langchain/vectorstores/__pycache__/utils.cpython-312.pyc,, +langchain/vectorstores/__pycache__/vald.cpython-312.pyc,, +langchain/vectorstores/__pycache__/vearch.cpython-312.pyc,, +langchain/vectorstores/__pycache__/vectara.cpython-312.pyc,, +langchain/vectorstores/__pycache__/vespa.cpython-312.pyc,, +langchain/vectorstores/__pycache__/weaviate.cpython-312.pyc,, +langchain/vectorstores/__pycache__/xata.cpython-312.pyc,, +langchain/vectorstores/__pycache__/yellowbrick.cpython-312.pyc,, +langchain/vectorstores/__pycache__/zep.cpython-312.pyc,, +langchain/vectorstores/__pycache__/zilliz.cpython-312.pyc,, +langchain/vectorstores/alibabacloud_opensearch.py,sha256=mRYNGcHigNbm-t2jBFn_bW_O7PEmT5Xw7YdE9QxGeWg,833 +langchain/vectorstores/analyticdb.py,sha256=bSbxDEAb4AFemCypaGYde7IEQtIsPjXvX4w7zxtqwTI,621 +langchain/vectorstores/annoy.py,sha256=LwMPWBPffirmM3fVL4maK5geerBZj9AuBGI_vQEra4g,606 +langchain/vectorstores/astradb.py,sha256=7mQylKwD5qkbGwGn5kZZtqHB7Jlz0fKjiTiRSucBvFQ,612 +langchain/vectorstores/atlas.py,sha256=TPo_5trgivqahd38pH8dgT_8g6M80hxxqLy0mywZ524,612 +langchain/vectorstores/awadb.py,sha256=sTvFw-twdQ8T9S43vR2GT843zNl_IXEaTTYG-JMi7eI,606 +langchain/vectorstores/azure_cosmos_db.py,sha256=ZEsZ5-E6NtNWo-obrEH6aKx05DXoGvVDhDJplTcm19M,873 +langchain/vectorstores/azuresearch.py,sha256=qF2cM1Sgj53dpxg4ltrzklq3N_OPr38fWQwn1P5YLTs,867 +langchain/vectorstores/bageldb.py,sha256=FuAzV5FUM-ELXvaAzCIL44fDiG8Tk2RXhG5e4_yldjU,606 
+langchain/vectorstores/baiducloud_vector_search.py,sha256=Zi6OIvLHNVKTCgR11ps_MZYC1dL0UaHu7cPqnVWLUZM,633 +langchain/vectorstores/base.py,sha256=264EWH9pnWThSFqVQJi_ySfBbtViGV4d496rcyL96DY,125 +langchain/vectorstores/cassandra.py,sha256=9Op9UfW-gsmF0B-B1CiYBcJqBsihLlm8bTL51AWpwXQ,618 +langchain/vectorstores/chroma.py,sha256=OCgNAdo8oMIWXEoGKZZCKEa9fnoNx3QhxpIFG283Mkk,609 +langchain/vectorstores/clarifai.py,sha256=QOa46y4DqCx8-OXayjQcgT-3TsMBD3_p716tbUxABS8,615 +langchain/vectorstores/clickhouse.py,sha256=YSXRC0grD1RGVknOiLZy1Gi7Dt-3LNWXBPrYAtPh2fg,736 +langchain/vectorstores/dashvector.py,sha256=fh1WywOykPFmQ1bTSJR4DXig6a9_W-thO2Kbcf-CZl4,621 +langchain/vectorstores/databricks_vector_search.py,sha256=nmqb9S1YaYUcslOnVV8d1-WLjx-yAdFBI3Nz-i-w2r0,657 +langchain/vectorstores/deeplake.py,sha256=pNFBZQa9M57MnO5RyFD3fahXS5_mDcqfesUZ5hg0KwE,615 +langchain/vectorstores/dingo.py,sha256=P3Lhx_wbxFl7T-d-5pADC9i2wOQPP_iNb731OEe3LoE,606 +langchain/vectorstores/docarray/__init__.py,sha256=ExKCVCyD5c1N1A8GV9tuyhdYNcK1hHmDwNKcJHVaotA,797 +langchain/vectorstores/docarray/__pycache__/__init__.cpython-312.pyc,, +langchain/vectorstores/docarray/__pycache__/base.cpython-312.pyc,, +langchain/vectorstores/docarray/__pycache__/hnsw.cpython-312.pyc,, +langchain/vectorstores/docarray/__pycache__/in_memory.cpython-312.pyc,, +langchain/vectorstores/docarray/base.py,sha256=Q0ix9zRWvVQJvuajOiFTzCrzLNwhRRrQMhUNhLMIYQY,658 +langchain/vectorstores/docarray/hnsw.py,sha256=BLShuKefoJlwhOuK7rG_WQfPHMcDax2QAhqbFFq6CgQ,645 +langchain/vectorstores/docarray/in_memory.py,sha256=G6s9O1EuPLArYRUwMZ-gF81GLnGMqkbSubC4WsnSIFU,657 +langchain/vectorstores/elastic_vector_search.py,sha256=i28DVcFkmbwuaRU79_FOdRzFCzHGZycWSQl3LGA7fxY,757 +langchain/vectorstores/elasticsearch.py,sha256=xPhl4IBnpdNKEDIxEp6nAhtt7pvxANQ77Vjx-qC_aXo,1294 +langchain/vectorstores/epsilla.py,sha256=6knfuzq-AJqHV1ekA_fcv-qErQo-EuHLqrsRdAQAxp0,612 +langchain/vectorstores/faiss.py,sha256=23szhtKYXIHjRVH9NZ8mU5Jry0Z6-ABb1xUVz60z5zg,606 +langchain/vectorstores/hippo.py,sha256=57WuM7mmEIk71Hae-jv6_bGmIOyc85fBklDzct9l5cc,618 +langchain/vectorstores/hologres.py,sha256=JALbhutok-Afx1Nenq6QrR4nglKF01AkiWlBZ6Sv9nk,615 +langchain/vectorstores/lancedb.py,sha256=Mp_IT1XcHAsxCannErHtB_V8Di2fG9zBXDo-ymohi0s,612 +langchain/vectorstores/llm_rails.py,sha256=Sl1RnLi0FZXrKgNlcbizFz9-RytyZFi4rypL0Gd8lY0,795 +langchain/vectorstores/marqo.py,sha256=DNICVFDqLxlKrTFUQ2WzbDS6oh-ZPGwX09W-46J7Y7M,606 +langchain/vectorstores/matching_engine.py,sha256=8Yri51XJI8c2BUXse7NInw3FMBOtlNANAjb2NF5eO5A,633 +langchain/vectorstores/meilisearch.py,sha256=rn7QYsY1H1LFZWalgfPCL11TboUUUM_WHX9w46XjjG4,624 +langchain/vectorstores/milvus.py,sha256=IvOhILzgaI2l-3yxd3BsuSV3IQULNWEQ8bVHspkblh4,609 +langchain/vectorstores/momento_vector_index.py,sha256=z7C357JDmpuT-dIb48mv45AEty62xKGuVFBOGsOm56Y,645 +langchain/vectorstores/mongodb_atlas.py,sha256=BM-EVrqZa7mZYVd8IQRMO64hixkBAhi-ogMj27j94o4,663 +langchain/vectorstores/myscale.py,sha256=DXp9zCLWkhOdnTAe2K57yZgaztq49XSj9bnFy5Zq5XI,890 +langchain/vectorstores/neo4j_vector.py,sha256=i8TcDpIxkhy9MFOzyw18tkc0qSqEZrs0Ns6aXxmldb4,789 +langchain/vectorstores/nucliadb.py,sha256=4qqNe50Z6fa021jZ3h7KZLBWg3SjIndq-X8XQ8R1wm4,633 +langchain/vectorstores/opensearch_vector_search.py,sha256=ThgEuH2jgz85JND98r65_O1kTn7zyvoVEE1JHpJPkJ0,657 +langchain/vectorstores/pgembedding.py,sha256=nQnbAQD4_4c0FA567SHKNPXv-fuOIs5vVnoohfS_o4A,1042 +langchain/vectorstores/pgvecto_rs.py,sha256=_mb6FCyBf-VykDhRq2FrrKJqkOR559R5TNflQkd72W0,643 
+langchain/vectorstores/pgvector.py,sha256=JzB8ZKUbZlQpjRQUU0rdbOloldAu0E7v1yRATHNrOKo,790 +langchain/vectorstores/pinecone.py,sha256=4s3yUWyKiCQ_aQENJYgOcqO7RpdUC4ZQEF7W6JzPNog,615 +langchain/vectorstores/qdrant.py,sha256=NrdadRir4YdSDi6btJxR-NIOmXKhwGyy9UWKmYuwJZA,777 +langchain/vectorstores/redis/__init__.py,sha256=J5v4HV4JkpATWRPNerDJ8UoV5wRt1Eg02r4gOGPU1v0,1295 +langchain/vectorstores/redis/__pycache__/__init__.cpython-312.pyc,, +langchain/vectorstores/redis/__pycache__/base.cpython-312.pyc,, +langchain/vectorstores/redis/__pycache__/filters.cpython-312.pyc,, +langchain/vectorstores/redis/__pycache__/schema.cpython-312.pyc,, +langchain/vectorstores/redis/base.py,sha256=N3h1hGZWyn_lF275pniyh-HYFsJ5hbRmSi9EyVgrOUo,956 +langchain/vectorstores/redis/filters.py,sha256=iaMr3l7tF9OpBqV2c97iBNvlYZLx9m21FsAfJNeAz9o,1514 +langchain/vectorstores/redis/schema.py,sha256=s_YxAFOxFjYI-owCkJyjYwZMqpsxGmWYj2UVslVEl04,1745 +langchain/vectorstores/rocksetdb.py,sha256=2E-IN2qLRlR1jJQ6E3sKfXDbOYKEhGn-uer0VDSL0eo,612 +langchain/vectorstores/scann.py,sha256=5-C-SnX76rfXjKYLaHs7IFzkA-aHahFIka8D0LE1cX4,606 +langchain/vectorstores/semadb.py,sha256=OZy0xwmU9h6Z1zdVwnHHT-FojXQ-nK_xeTtBizSFqOs,609 +langchain/vectorstores/singlestoredb.py,sha256=FTG_ShGAj5VR5eH8Mnf4GYZNn2VG0VmEh4ShiVnAUAQ,630 +langchain/vectorstores/sklearn.py,sha256=3hS6Jg6xtn-eS7RdtSOeGv-sxU5YAVUozNvmZbqmPEo,1325 +langchain/vectorstores/sqlitevss.py,sha256=usGTkUhpPjz5_SJhME3pQnd-vnGKOqtH_xo4TTfzH8c,618 +langchain/vectorstores/starrocks.py,sha256=N8o8O9JjRDr3YfcgSzaG6DtimVPteY-HgybCpSH6J_M,798 +langchain/vectorstores/supabase.py,sha256=nppWkC8idP7ZYbtYmIZTiQhRun6Rvte01NQB7IbOBVQ,648 +langchain/vectorstores/tair.py,sha256=TKJoDNjRAJ3NGLwOURC4dR3t1Q-2u7r6-D0or6XJXC4,603 +langchain/vectorstores/tencentvectordb.py,sha256=mE-7OVi6OXxDiiLpbJ-tWNBrwKvnp1Id8TXHsLLgDFY,953 +langchain/vectorstores/tigris.py,sha256=oJQikaGRtHbFxzCT-2t3r1XbTMo7kJ4fZRnWpTHwgHU,609 +langchain/vectorstores/tiledb.py,sha256=pKbmHT6pu3jhCZgl7ZRxTLJFCwZRfbbZehFuL6Tk9t0,609 +langchain/vectorstores/timescalevector.py,sha256=pVhqLoYyj_tIErJ6Jk-eN2vPW2dqGWfGna8tMmdWuJc,636 +langchain/vectorstores/typesense.py,sha256=QooHCAv93k7xFBpTkqciayfPlbuUphPlhqd-68PfW7I,618 +langchain/vectorstores/usearch.py,sha256=k19BUdYJoMf_aFNpzZejm_6ae5Ux5Fwq1GXU832WhKE,612 +langchain/vectorstores/utils.py,sha256=utnedZtUpsqBfucBXdQ9mOnNW8gr15iXnZzKAdx7IrQ,958 +langchain/vectorstores/vald.py,sha256=DiFA1mIX8rb-2NTeZdmT0-BNodccOfqkG1AfzuwExdQ,603 +langchain/vectorstores/vearch.py,sha256=kcvwnzo34U73ODN2_-8KmCOsFNZrd-R-E2oOlHQXrSg,609 +langchain/vectorstores/vectara.py,sha256=7xM1RGpIt_c28djf3_8730gVXuUScuNIz549geAY0MA,785 +langchain/vectorstores/vespa.py,sha256=sOvSd7lggraOQvOHgO8kwPV0le0bZanc58PhzSMT0K0,621 +langchain/vectorstores/weaviate.py,sha256=xe33U-lhmfHEAlp9E9HjKLMQjkv0EvZK04TTwg8Fu6Q,615 +langchain/vectorstores/xata.py,sha256=HW_Oi5Hz8rH2JaUhRNWQ-3hLYmNzD8eAz6K5YqPArmI,646 +langchain/vectorstores/yellowbrick.py,sha256=-lnjGcRE8Q1nEPOTdbKYTw5noS2cy2ce1ePOU804-_o,624 +langchain/vectorstores/zep.py,sha256=RJ2auxoA6uHHLEZknw3_jeFmYJYVt-PWKMBcNMGV6TM,798 +langchain/vectorstores/zilliz.py,sha256=XhPPIUfKPFJw0_svCoBgCnNkkBLoRVVcyuMfOnE5IxU,609 diff --git a/venv/Lib/site-packages/langchain-0.3.25.dist-info/REQUESTED b/venv/Lib/site-packages/langchain-0.3.25.dist-info/REQUESTED new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain-0.3.25.dist-info/WHEEL b/venv/Lib/site-packages/langchain-0.3.25.dist-info/WHEEL new file mode 100644 index 00000000..45ec8c4e --- /dev/null 
+++ b/venv/Lib/site-packages/langchain-0.3.25.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: pdm-backend (2.4.4) +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/Lib/site-packages/langchain-0.3.25.dist-info/entry_points.txt b/venv/Lib/site-packages/langchain-0.3.25.dist-info/entry_points.txt new file mode 100644 index 00000000..c3ad4726 --- /dev/null +++ b/venv/Lib/site-packages/langchain-0.3.25.dist-info/entry_points.txt @@ -0,0 +1,4 @@ +[console_scripts] + +[gui_scripts] + diff --git a/venv/Lib/site-packages/langchain-0.3.25.dist-info/licenses/LICENSE b/venv/Lib/site-packages/langchain-0.3.25.dist-info/licenses/LICENSE new file mode 100644 index 00000000..39577386 --- /dev/null +++ b/venv/Lib/site-packages/langchain-0.3.25.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) LangChain, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/venv/Lib/site-packages/langchain/__init__.py b/venv/Lib/site-packages/langchain/__init__.py new file mode 100644 index 00000000..528f773d --- /dev/null +++ b/venv/Lib/site-packages/langchain/__init__.py @@ -0,0 +1,439 @@ +# ruff: noqa: E402 +"""Main entrypoint into package.""" + +import warnings +from importlib import metadata +from typing import Any, Optional + +from langchain_core._api.deprecation import surface_langchain_deprecation_warnings + +try: + __version__ = metadata.version(__package__) +except metadata.PackageNotFoundError: + # Case where package metadata is not available. + __version__ = "" +del metadata # optional, avoids polluting the results of dir(__package__) + + +def _warn_on_import(name: str, replacement: Optional[str] = None) -> None: + """Warn on import of deprecated module.""" + from langchain._api.interactive_env import is_interactive_env + + if is_interactive_env(): + # No warnings for interactive environments. + # This is done to avoid polluting the output of interactive environments + # where users rely on auto-complete and may trigger this warning + # even if they are not using any deprecated modules + return + + if replacement: + warnings.warn( + f"Importing {name} from langchain root module is no longer supported. " + f"Please use {replacement} instead." + ) + else: + warnings.warn( + f"Importing {name} from langchain root module is no longer supported." + ) + + +# Surfaces Deprecation and Pending Deprecation warnings from langchain. 
+surface_langchain_deprecation_warnings() + + +def __getattr__(name: str) -> Any: + if name == "MRKLChain": + from langchain.agents import MRKLChain + + _warn_on_import(name, replacement="langchain.agents.MRKLChain") + + return MRKLChain + elif name == "ReActChain": + from langchain.agents import ReActChain + + _warn_on_import(name, replacement="langchain.agents.ReActChain") + + return ReActChain + elif name == "SelfAskWithSearchChain": + from langchain.agents import SelfAskWithSearchChain + + _warn_on_import(name, replacement="langchain.agents.SelfAskWithSearchChain") + + return SelfAskWithSearchChain + elif name == "ConversationChain": + from langchain.chains import ConversationChain + + _warn_on_import(name, replacement="langchain.chains.ConversationChain") + + return ConversationChain + elif name == "LLMBashChain": + raise ImportError( + "This module has been moved to langchain-experimental. " + "For more details: " + "https://github.com/langchain-ai/langchain/discussions/11352." + "To access this code, install it with `pip install langchain-experimental`." + "`from langchain_experimental.llm_bash.base " + "import LLMBashChain`" + ) + + elif name == "LLMChain": + from langchain.chains import LLMChain + + _warn_on_import(name, replacement="langchain.chains.LLMChain") + + return LLMChain + elif name == "LLMCheckerChain": + from langchain.chains import LLMCheckerChain + + _warn_on_import(name, replacement="langchain.chains.LLMCheckerChain") + + return LLMCheckerChain + elif name == "LLMMathChain": + from langchain.chains import LLMMathChain + + _warn_on_import(name, replacement="langchain.chains.LLMMathChain") + + return LLMMathChain + elif name == "QAWithSourcesChain": + from langchain.chains import QAWithSourcesChain + + _warn_on_import(name, replacement="langchain.chains.QAWithSourcesChain") + + return QAWithSourcesChain + elif name == "VectorDBQA": + from langchain.chains import VectorDBQA + + _warn_on_import(name, replacement="langchain.chains.VectorDBQA") + + return VectorDBQA + elif name == "VectorDBQAWithSourcesChain": + from langchain.chains import VectorDBQAWithSourcesChain + + _warn_on_import(name, replacement="langchain.chains.VectorDBQAWithSourcesChain") + + return VectorDBQAWithSourcesChain + elif name == "InMemoryDocstore": + from langchain_community.docstore import InMemoryDocstore + + _warn_on_import(name, replacement="langchain.docstore.InMemoryDocstore") + + return InMemoryDocstore + elif name == "Wikipedia": + from langchain_community.docstore import Wikipedia + + _warn_on_import(name, replacement="langchain.docstore.Wikipedia") + + return Wikipedia + elif name == "Anthropic": + from langchain_community.llms import Anthropic + + _warn_on_import(name, replacement="langchain_community.llms.Anthropic") + + return Anthropic + elif name == "Banana": + from langchain_community.llms import Banana + + _warn_on_import(name, replacement="langchain_community.llms.Banana") + + return Banana + elif name == "CerebriumAI": + from langchain_community.llms import CerebriumAI + + _warn_on_import(name, replacement="langchain_community.llms.CerebriumAI") + + return CerebriumAI + elif name == "Cohere": + from langchain_community.llms import Cohere + + _warn_on_import(name, replacement="langchain_community.llms.Cohere") + + return Cohere + elif name == "ForefrontAI": + from langchain_community.llms import ForefrontAI + + _warn_on_import(name, replacement="langchain_community.llms.ForefrontAI") + + return ForefrontAI + elif name == "GooseAI": + from langchain_community.llms import GooseAI + 
+ _warn_on_import(name, replacement="langchain_community.llms.GooseAI") + + return GooseAI + elif name == "HuggingFaceHub": + from langchain_community.llms import HuggingFaceHub + + _warn_on_import(name, replacement="langchain_community.llms.HuggingFaceHub") + + return HuggingFaceHub + elif name == "HuggingFaceTextGenInference": + from langchain_community.llms import HuggingFaceTextGenInference + + _warn_on_import( + name, replacement="langchain_community.llms.HuggingFaceTextGenInference" + ) + + return HuggingFaceTextGenInference + elif name == "LlamaCpp": + from langchain_community.llms import LlamaCpp + + _warn_on_import(name, replacement="langchain_community.llms.LlamaCpp") + + return LlamaCpp + elif name == "Modal": + from langchain_community.llms import Modal + + _warn_on_import(name, replacement="langchain_community.llms.Modal") + + return Modal + elif name == "OpenAI": + from langchain_community.llms import OpenAI + + _warn_on_import(name, replacement="langchain_community.llms.OpenAI") + + return OpenAI + elif name == "Petals": + from langchain_community.llms import Petals + + _warn_on_import(name, replacement="langchain_community.llms.Petals") + + return Petals + elif name == "PipelineAI": + from langchain_community.llms import PipelineAI + + _warn_on_import(name, replacement="langchain_community.llms.PipelineAI") + + return PipelineAI + elif name == "SagemakerEndpoint": + from langchain_community.llms import SagemakerEndpoint + + _warn_on_import(name, replacement="langchain_community.llms.SagemakerEndpoint") + + return SagemakerEndpoint + elif name == "StochasticAI": + from langchain_community.llms import StochasticAI + + _warn_on_import(name, replacement="langchain_community.llms.StochasticAI") + + return StochasticAI + elif name == "Writer": + from langchain_community.llms import Writer + + _warn_on_import(name, replacement="langchain_community.llms.Writer") + + return Writer + elif name == "HuggingFacePipeline": + from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline + + _warn_on_import( + name, + replacement="langchain_community.llms.huggingface_pipeline.HuggingFacePipeline", + ) + + return HuggingFacePipeline + elif name == "FewShotPromptTemplate": + from langchain_core.prompts import FewShotPromptTemplate + + _warn_on_import( + name, replacement="langchain_core.prompts.FewShotPromptTemplate" + ) + + return FewShotPromptTemplate + elif name == "Prompt": + from langchain_core.prompts import PromptTemplate + + _warn_on_import(name, replacement="langchain_core.prompts.PromptTemplate") + + # it's renamed as prompt template anyways + # this is just for backwards compat + return PromptTemplate + elif name == "PromptTemplate": + from langchain_core.prompts import PromptTemplate + + _warn_on_import(name, replacement="langchain_core.prompts.PromptTemplate") + + return PromptTemplate + elif name == "BasePromptTemplate": + from langchain_core.prompts import BasePromptTemplate + + _warn_on_import(name, replacement="langchain_core.prompts.BasePromptTemplate") + + return BasePromptTemplate + elif name == "ArxivAPIWrapper": + from langchain_community.utilities import ArxivAPIWrapper + + _warn_on_import( + name, replacement="langchain_community.utilities.ArxivAPIWrapper" + ) + + return ArxivAPIWrapper + elif name == "GoldenQueryAPIWrapper": + from langchain_community.utilities import GoldenQueryAPIWrapper + + _warn_on_import( + name, replacement="langchain_community.utilities.GoldenQueryAPIWrapper" + ) + + return GoldenQueryAPIWrapper + elif name == 
"GoogleSearchAPIWrapper": + from langchain_community.utilities import GoogleSearchAPIWrapper + + _warn_on_import( + name, replacement="langchain_community.utilities.GoogleSearchAPIWrapper" + ) + + return GoogleSearchAPIWrapper + elif name == "GoogleSerperAPIWrapper": + from langchain_community.utilities import GoogleSerperAPIWrapper + + _warn_on_import( + name, replacement="langchain_community.utilities.GoogleSerperAPIWrapper" + ) + + return GoogleSerperAPIWrapper + elif name == "PowerBIDataset": + from langchain_community.utilities import PowerBIDataset + + _warn_on_import( + name, replacement="langchain_community.utilities.PowerBIDataset" + ) + + return PowerBIDataset + elif name == "SearxSearchWrapper": + from langchain_community.utilities import SearxSearchWrapper + + _warn_on_import( + name, replacement="langchain_community.utilities.SearxSearchWrapper" + ) + + return SearxSearchWrapper + elif name == "WikipediaAPIWrapper": + from langchain_community.utilities import WikipediaAPIWrapper + + _warn_on_import( + name, replacement="langchain_community.utilities.WikipediaAPIWrapper" + ) + + return WikipediaAPIWrapper + elif name == "WolframAlphaAPIWrapper": + from langchain_community.utilities import WolframAlphaAPIWrapper + + _warn_on_import( + name, replacement="langchain_community.utilities.WolframAlphaAPIWrapper" + ) + + return WolframAlphaAPIWrapper + elif name == "SQLDatabase": + from langchain_community.utilities import SQLDatabase + + _warn_on_import(name, replacement="langchain_community.utilities.SQLDatabase") + + return SQLDatabase + elif name == "FAISS": + from langchain_community.vectorstores import FAISS + + _warn_on_import(name, replacement="langchain_community.vectorstores.FAISS") + + return FAISS + elif name == "ElasticVectorSearch": + from langchain_community.vectorstores import ElasticVectorSearch + + _warn_on_import( + name, replacement="langchain_community.vectorstores.ElasticVectorSearch" + ) + + return ElasticVectorSearch + # For backwards compatibility + elif name == "SerpAPIChain" or name == "SerpAPIWrapper": + from langchain_community.utilities import SerpAPIWrapper + + _warn_on_import( + name, replacement="langchain_community.utilities.SerpAPIWrapper" + ) + + return SerpAPIWrapper + elif name == "verbose": + from langchain.globals import _verbose + + _warn_on_import( + name, + replacement=( + "langchain.globals.set_verbose() / langchain.globals.get_verbose()" + ), + ) + + return _verbose + elif name == "debug": + from langchain.globals import _debug + + _warn_on_import( + name, + replacement=( + "langchain.globals.set_debug() / langchain.globals.get_debug()" + ), + ) + + return _debug + elif name == "llm_cache": + from langchain.globals import _llm_cache + + _warn_on_import( + name, + replacement=( + "langchain.globals.set_llm_cache() / langchain.globals.get_llm_cache()" + ), + ) + + return _llm_cache + else: + raise AttributeError(f"Could not find: {name}") + + +__all__ = [ + "LLMChain", + "LLMCheckerChain", + "LLMMathChain", + "ArxivAPIWrapper", + "GoldenQueryAPIWrapper", + "SelfAskWithSearchChain", + "SerpAPIWrapper", + "SerpAPIChain", + "SearxSearchWrapper", + "GoogleSearchAPIWrapper", + "GoogleSerperAPIWrapper", + "WolframAlphaAPIWrapper", + "WikipediaAPIWrapper", + "Anthropic", + "Banana", + "CerebriumAI", + "Cohere", + "ForefrontAI", + "GooseAI", + "Modal", + "OpenAI", + "Petals", + "PipelineAI", + "StochasticAI", + "Writer", + "BasePromptTemplate", + "Prompt", + "FewShotPromptTemplate", + "PromptTemplate", + "ReActChain", + "Wikipedia", + 
"HuggingFaceHub", + "SagemakerEndpoint", + "HuggingFacePipeline", + "SQLDatabase", + "PowerBIDataset", + "FAISS", + "MRKLChain", + "VectorDBQA", + "ElasticVectorSearch", + "InMemoryDocstore", + "ConversationChain", + "VectorDBQAWithSourcesChain", + "QAWithSourcesChain", + "LlamaCpp", + "HuggingFaceTextGenInference", +] diff --git a/venv/Lib/site-packages/langchain/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..5963c0e5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/base_language.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/base_language.cpython-312.pyc new file mode 100644 index 00000000..446e4c46 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/base_language.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/cache.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/cache.cpython-312.pyc new file mode 100644 index 00000000..995eac88 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/cache.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/env.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/env.cpython-312.pyc new file mode 100644 index 00000000..2b6888e0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/env.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/example_generator.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/example_generator.cpython-312.pyc new file mode 100644 index 00000000..7b45692c Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/example_generator.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/formatting.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/formatting.cpython-312.pyc new file mode 100644 index 00000000..acdf63d1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/formatting.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/globals.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/globals.cpython-312.pyc new file mode 100644 index 00000000..188362f7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/globals.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/hub.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/hub.cpython-312.pyc new file mode 100644 index 00000000..1fa53886 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/hub.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/input.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/input.cpython-312.pyc new file mode 100644 index 00000000..5ca42020 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/input.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/model_laboratory.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/model_laboratory.cpython-312.pyc new file mode 100644 index 00000000..a106f652 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/model_laboratory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/python.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/__pycache__/python.cpython-312.pyc new file mode 100644 index 00000000..adf3de65 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/python.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/requests.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/requests.cpython-312.pyc new file mode 100644 index 00000000..5c7238fe Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/requests.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/serpapi.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/serpapi.cpython-312.pyc new file mode 100644 index 00000000..212b0f9d Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/serpapi.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/sql_database.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/sql_database.cpython-312.pyc new file mode 100644 index 00000000..4c725303 Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/sql_database.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/__pycache__/text_splitter.cpython-312.pyc b/venv/Lib/site-packages/langchain/__pycache__/text_splitter.cpython-312.pyc new file mode 100644 index 00000000..2d3fe02f Binary files /dev/null and b/venv/Lib/site-packages/langchain/__pycache__/text_splitter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/_api/__init__.py b/venv/Lib/site-packages/langchain/_api/__init__.py new file mode 100644 index 00000000..1f746db8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/_api/__init__.py @@ -0,0 +1,28 @@ +"""Helper functions for managing the LangChain API. + +This module is only relevant for LangChain developers, not for users. + +.. warning:: + + This module and its submodules are for internal use only. Do not use them + in your own code. We may change the API at any time with no warning. 
+ +""" + +from .deprecation import ( + LangChainDeprecationWarning, + deprecated, + suppress_langchain_deprecation_warning, + surface_langchain_deprecation_warnings, + warn_deprecated, +) +from .module_import import create_importer + +__all__ = [ + "deprecated", + "LangChainDeprecationWarning", + "suppress_langchain_deprecation_warning", + "surface_langchain_deprecation_warnings", + "warn_deprecated", + "create_importer", +] diff --git a/venv/Lib/site-packages/langchain/_api/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/_api/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..25b68624 Binary files /dev/null and b/venv/Lib/site-packages/langchain/_api/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/_api/__pycache__/deprecation.cpython-312.pyc b/venv/Lib/site-packages/langchain/_api/__pycache__/deprecation.cpython-312.pyc new file mode 100644 index 00000000..f6065585 Binary files /dev/null and b/venv/Lib/site-packages/langchain/_api/__pycache__/deprecation.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/_api/__pycache__/interactive_env.cpython-312.pyc b/venv/Lib/site-packages/langchain/_api/__pycache__/interactive_env.cpython-312.pyc new file mode 100644 index 00000000..5e78e0f1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/_api/__pycache__/interactive_env.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/_api/__pycache__/module_import.cpython-312.pyc b/venv/Lib/site-packages/langchain/_api/__pycache__/module_import.cpython-312.pyc new file mode 100644 index 00000000..e3dc7bfe Binary files /dev/null and b/venv/Lib/site-packages/langchain/_api/__pycache__/module_import.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/_api/__pycache__/path.cpython-312.pyc b/venv/Lib/site-packages/langchain/_api/__pycache__/path.cpython-312.pyc new file mode 100644 index 00000000..7ea6fc0a Binary files /dev/null and b/venv/Lib/site-packages/langchain/_api/__pycache__/path.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/_api/deprecation.py b/venv/Lib/site-packages/langchain/_api/deprecation.py new file mode 100644 index 00000000..6d5f2cf6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/_api/deprecation.py @@ -0,0 +1,32 @@ +from langchain_core._api.deprecation import ( + LangChainDeprecationWarning, + LangChainPendingDeprecationWarning, + deprecated, + suppress_langchain_deprecation_warning, + surface_langchain_deprecation_warnings, + warn_deprecated, +) + +AGENT_DEPRECATION_WARNING = ( + "LangChain agents will continue to be supported, but it is recommended for new " + "use cases to be built with LangGraph. LangGraph offers a more flexible and " + "full-featured framework for building agents, including support for " + "tool-calling, persistence of state, and human-in-the-loop workflows. For " + "details, refer to the " + "`LangGraph documentation `_" + " as well as guides for " + "`Migrating from AgentExecutor `_" # noqa: E501 + " and LangGraph's " + "`Pre-built ReAct agent `_." 
# noqa: E501 +) + + +__all__ = [ + "AGENT_DEPRECATION_WARNING", + "LangChainDeprecationWarning", + "LangChainPendingDeprecationWarning", + "deprecated", + "suppress_langchain_deprecation_warning", + "warn_deprecated", + "surface_langchain_deprecation_warnings", +] diff --git a/venv/Lib/site-packages/langchain/_api/interactive_env.py b/venv/Lib/site-packages/langchain/_api/interactive_env.py new file mode 100644 index 00000000..7752b6b4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/_api/interactive_env.py @@ -0,0 +1,5 @@ +def is_interactive_env() -> bool: + """Determine if running within IPython or Jupyter.""" + import sys + + return hasattr(sys, "ps2") diff --git a/venv/Lib/site-packages/langchain/_api/module_import.py b/venv/Lib/site-packages/langchain/_api/module_import.py new file mode 100644 index 00000000..83fa1483 --- /dev/null +++ b/venv/Lib/site-packages/langchain/_api/module_import.py @@ -0,0 +1,148 @@ +import importlib +from typing import Any, Callable, Optional + +from langchain_core._api import internal, warn_deprecated + +from langchain._api.interactive_env import is_interactive_env + +ALLOWED_TOP_LEVEL_PKGS = { + "langchain_community", + "langchain_core", + "langchain", +} + + +def create_importer( + package: str, + *, + module_lookup: Optional[dict[str, str]] = None, + deprecated_lookups: Optional[dict[str, str]] = None, + fallback_module: Optional[str] = None, +) -> Callable[[str], Any]: + """Create a function that helps retrieve objects from their new locations. + + The goal of this function is to help users transition from deprecated + imports to new imports. + + The function will raise deprecation warning on loops using + deprecated_lookups or fallback_module. + + Module lookups will import without deprecation warnings (used to speed + up imports from large namespaces like llms or chat models). + + This function should ideally only be used with deprecated imports not with + existing imports that are valid, as in addition to raising deprecation warnings + the dynamic imports can create other issues for developers (e.g., + loss of type information, IDE support for going to definition etc). + + Args: + package: current package. Use __package__ + module_lookup: maps name of object to the module where it is defined. + e.g., + { + "MyDocumentLoader": ( + "langchain_community.document_loaders.my_document_loader" + ) + } + deprecated_lookups: same as module look up, but will raise + deprecation warnings. + fallback_module: module to import from if the object is not found in + module_lookup or if module_lookup is not provided. + + Returns: + A function that imports objects from the specified modules. + """ + all_module_lookup = {**(deprecated_lookups or {}), **(module_lookup or {})} + + def import_by_name(name: str) -> Any: + """Import stores from langchain_community.""" + # If not in interactive env, raise warning. + if all_module_lookup and name in all_module_lookup: + new_module = all_module_lookup[name] + if new_module.split(".")[0] not in ALLOWED_TOP_LEVEL_PKGS: + raise AssertionError( + f"Importing from {new_module} is not allowed. " + f"Allowed top-level packages are: {ALLOWED_TOP_LEVEL_PKGS}" + ) + + try: + module = importlib.import_module(new_module) + except ModuleNotFoundError as e: + if new_module.startswith("langchain_community"): + raise ModuleNotFoundError( + f"Module {new_module} not found. " + "Please install langchain-community to access this module. 
" + "You can install it using `pip install -U langchain-community`" + ) from e + raise + + try: + result = getattr(module, name) + if ( + not is_interactive_env() + and deprecated_lookups + and name in deprecated_lookups + ): + # Depth 3: + # internal.py + # module_import.py + # Module in langchain that uses this function + # [calling code] whose frame we want to inspect. + if not internal.is_caller_internal(depth=3): + warn_deprecated( + since="0.1", + pending=False, + removal="1.0", + message=( + f"Importing {name} from {package} is deprecated. " + f"Please replace deprecated imports:\n\n" + f">> from {package} import {name}\n\n" + "with new imports of:\n\n" + f">> from {new_module} import {name}\n" + "You can use the langchain cli to **automatically** " + "upgrade many imports. Please see documentation here " + "" + ), + ) + return result + except Exception as e: + raise AttributeError( + f"module {new_module} has no attribute {name}" + ) from e + + if fallback_module: + try: + module = importlib.import_module(fallback_module) + result = getattr(module, name) + if not is_interactive_env(): + # Depth 3: + # internal.py + # module_import.py + # Module in langchain that uses this function + # [calling code] whose frame we want to inspect. + if not internal.is_caller_internal(depth=3): + warn_deprecated( + since="0.1", + pending=False, + removal="1.0", + message=( + f"Importing {name} from {package} is deprecated. " + f"Please replace deprecated imports:\n\n" + f">> from {package} import {name}\n\n" + "with new imports of:\n\n" + f">> from {fallback_module} import {name}\n" + "You can use the langchain cli to **automatically** " + "upgrade many imports. Please see documentation here " + "" + ), + ) + return result + + except Exception as e: + raise AttributeError( + f"module {fallback_module} has no attribute {name}" + ) from e + + raise AttributeError(f"module {package} has no attribute {name}") + + return import_by_name diff --git a/venv/Lib/site-packages/langchain/_api/path.py b/venv/Lib/site-packages/langchain/_api/path.py new file mode 100644 index 00000000..5ee0fe81 --- /dev/null +++ b/venv/Lib/site-packages/langchain/_api/path.py @@ -0,0 +1,3 @@ +from langchain_core._api.path import as_import_path, get_relative_path + +__all__ = ["get_relative_path", "as_import_path"] diff --git a/venv/Lib/site-packages/langchain/adapters/__init__.py b/venv/Lib/site-packages/langchain/adapters/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/adapters/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/adapters/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..4958cf69 Binary files /dev/null and b/venv/Lib/site-packages/langchain/adapters/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/adapters/__pycache__/openai.cpython-312.pyc b/venv/Lib/site-packages/langchain/adapters/__pycache__/openai.cpython-312.pyc new file mode 100644 index 00000000..b02405cb Binary files /dev/null and b/venv/Lib/site-packages/langchain/adapters/__pycache__/openai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/adapters/openai.py b/venv/Lib/site-packages/langchain/adapters/openai.py new file mode 100644 index 00000000..445eccd5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/adapters/openai.py @@ -0,0 +1,63 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.adapters.openai 
import ( + Chat, + ChatCompletion, + ChatCompletionChunk, + ChatCompletions, + Choice, + ChoiceChunk, + Completions, + IndexableBaseModel, + chat, + convert_dict_to_message, + convert_message_to_dict, + convert_messages_for_finetuning, + convert_openai_messages, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +MODULE_LOOKUP = { + "IndexableBaseModel": "langchain_community.adapters.openai", + "Choice": "langchain_community.adapters.openai", + "ChatCompletions": "langchain_community.adapters.openai", + "ChoiceChunk": "langchain_community.adapters.openai", + "ChatCompletionChunk": "langchain_community.adapters.openai", + "convert_dict_to_message": "langchain_community.adapters.openai", + "convert_message_to_dict": "langchain_community.adapters.openai", + "convert_openai_messages": "langchain_community.adapters.openai", + "ChatCompletion": "langchain_community.adapters.openai", + "convert_messages_for_finetuning": "langchain_community.adapters.openai", + "Completions": "langchain_community.adapters.openai", + "Chat": "langchain_community.adapters.openai", + "chat": "langchain_community.adapters.openai", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=MODULE_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "IndexableBaseModel", + "Choice", + "ChatCompletions", + "ChoiceChunk", + "ChatCompletionChunk", + "convert_dict_to_message", + "convert_message_to_dict", + "convert_openai_messages", + "ChatCompletion", + "convert_messages_for_finetuning", + "Completions", + "Chat", + "chat", +] diff --git a/venv/Lib/site-packages/langchain/agents/__init__.py b/venv/Lib/site-packages/langchain/agents/__init__.py new file mode 100644 index 00000000..e0250585 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/__init__.py @@ -0,0 +1,181 @@ +""" +**Agent** is a class that uses an LLM to choose a sequence of actions to take. + +In Chains, a sequence of actions is hardcoded. In Agents, +a language model is used as a reasoning engine to determine which actions +to take and in which order. + +Agents select and use **Tools** and **Toolkits** for actions. + +**Class hierarchy:** + +.. code-block:: + + BaseSingleActionAgent --> LLMSingleActionAgent + OpenAIFunctionsAgent + XMLAgent + Agent --> Agent # Examples: ZeroShotAgent, ChatAgent + + + BaseMultiActionAgent --> OpenAIMultiFunctionsAgent + + +**Main helpers:** + +.. 
code-block:: + + AgentType, AgentExecutor, AgentOutputParser, AgentExecutorIterator, + AgentAction, AgentFinish + +""" # noqa: E501 + +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from langchain_core._api.path import as_import_path +from langchain_core.tools import Tool +from langchain_core.tools.convert import tool + +from langchain._api import create_importer +from langchain.agents.agent import ( + Agent, + AgentExecutor, + AgentOutputParser, + BaseMultiActionAgent, + BaseSingleActionAgent, + LLMSingleActionAgent, +) +from langchain.agents.agent_iterator import AgentExecutorIterator +from langchain.agents.agent_toolkits.vectorstore.base import ( + create_vectorstore_agent, + create_vectorstore_router_agent, +) +from langchain.agents.agent_types import AgentType +from langchain.agents.conversational.base import ConversationalAgent +from langchain.agents.conversational_chat.base import ConversationalChatAgent +from langchain.agents.initialize import initialize_agent +from langchain.agents.json_chat.base import create_json_chat_agent +from langchain.agents.loading import load_agent +from langchain.agents.mrkl.base import MRKLChain, ZeroShotAgent +from langchain.agents.openai_functions_agent.base import ( + OpenAIFunctionsAgent, + create_openai_functions_agent, +) +from langchain.agents.openai_functions_multi_agent.base import OpenAIMultiFunctionsAgent +from langchain.agents.openai_tools.base import create_openai_tools_agent +from langchain.agents.react.agent import create_react_agent +from langchain.agents.react.base import ReActChain, ReActTextWorldAgent +from langchain.agents.self_ask_with_search.base import ( + SelfAskWithSearchChain, + create_self_ask_with_search_agent, +) +from langchain.agents.structured_chat.base import ( + StructuredChatAgent, + create_structured_chat_agent, +) +from langchain.agents.tool_calling_agent.base import create_tool_calling_agent +from langchain.agents.xml.base import XMLAgent, create_xml_agent + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.json.base import create_json_agent + from langchain_community.agent_toolkits.load_tools import ( + get_all_tool_names, + load_huggingface_tool, + load_tools, + ) + from langchain_community.agent_toolkits.openapi.base import create_openapi_agent + from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent + from langchain_community.agent_toolkits.powerbi.chat_base import ( + create_pbi_chat_agent, + ) + from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent + from langchain_community.agent_toolkits.sql.base import create_sql_agent + +DEPRECATED_CODE = [ + "create_csv_agent", + "create_pandas_dataframe_agent", + "create_spark_dataframe_agent", + "create_xorbits_agent", +] + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
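As a hedged illustration of the dynamic-lookup mechanism the comment above describes (and of the `DEPRECATED_LOOKUP` table defined next), importing a relocated name through this package resolves it from `langchain_community` and, outside an interactive session, emits a deprecation warning. A minimal sketch, assuming `langchain-community` is installed:

```python
import warnings

# Hedged sketch: in a plain script (not a REPL/notebook, where
# is_interactive_env() suppresses the warning), resolving a relocated
# name through __getattr__ emits a deprecation warning that names the
# new import path.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from langchain.agents import create_sql_agent  # routed via create_importer

print(create_sql_agent.__module__)  # langchain_community.agent_toolkits.sql.base
if caught:
    print(caught[-1].category.__name__)  # e.g. LangChainDeprecationWarning
```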
+DEPRECATED_LOOKUP = { + "create_json_agent": "langchain_community.agent_toolkits.json.base", + "create_openapi_agent": "langchain_community.agent_toolkits.openapi.base", + "create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base", + "create_pbi_chat_agent": "langchain_community.agent_toolkits.powerbi.chat_base", + "create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base", + "create_sql_agent": "langchain_community.agent_toolkits.sql.base", + "load_tools": "langchain_community.agent_toolkits.load_tools", + "load_huggingface_tool": "langchain_community.agent_toolkits.load_tools", + "get_all_tool_names": "langchain_community.agent_toolkits.load_tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Get attr name.""" + if name in DEPRECATED_CODE: + # Get directory of langchain package + HERE = Path(__file__).parents[1] + relative_path = as_import_path( + Path(__file__).parent, suffix=name, relative_to=HERE + ) + old_path = "langchain." + relative_path + new_path = "langchain_experimental." + relative_path + raise ImportError( + f"{name} has been moved to langchain experimental. " + "See https://github.com/langchain-ai/langchain/discussions/11680" + "for more information.\n" + f"Please update your import statement from: `{old_path}` to `{new_path}`." + ) + return _import_attribute(name) + + +__all__ = [ + "Agent", + "AgentExecutor", + "AgentExecutorIterator", + "AgentOutputParser", + "AgentType", + "BaseMultiActionAgent", + "BaseSingleActionAgent", + "ConversationalAgent", + "ConversationalChatAgent", + "LLMSingleActionAgent", + "MRKLChain", + "OpenAIFunctionsAgent", + "OpenAIMultiFunctionsAgent", + "ReActChain", + "ReActTextWorldAgent", + "SelfAskWithSearchChain", + "StructuredChatAgent", + "ZeroShotAgent", + "create_json_agent", + "create_openapi_agent", + "create_pbi_agent", + "create_pbi_chat_agent", + "create_spark_sql_agent", + "create_sql_agent", + "create_vectorstore_agent", + "create_vectorstore_router_agent", + "get_all_tool_names", + "initialize_agent", + "load_agent", + "load_huggingface_tool", + "load_tools", + "XMLAgent", + "create_openai_functions_agent", + "create_xml_agent", + "create_react_agent", + "create_openai_tools_agent", + "create_self_ask_with_search_agent", + "create_json_chat_agent", + "create_structured_chat_agent", + "create_tool_calling_agent", + "Tool", + "tool", +] diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..ee2d6a32 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/agent.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/__pycache__/agent.cpython-312.pyc new file mode 100644 index 00000000..61e453f3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/agent.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/agent_iterator.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/__pycache__/agent_iterator.cpython-312.pyc new file mode 100644 index 00000000..1911caa6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/agent_iterator.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/agent_types.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/agents/__pycache__/agent_types.cpython-312.pyc new file mode 100644 index 00000000..f341bc45 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/agent_types.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/initialize.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/__pycache__/initialize.cpython-312.pyc new file mode 100644 index 00000000..958959dd Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/initialize.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/load_tools.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/__pycache__/load_tools.cpython-312.pyc new file mode 100644 index 00000000..cd2e2d0e Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/load_tools.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/loading.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/__pycache__/loading.cpython-312.pyc new file mode 100644 index 00000000..8b370026 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/loading.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/schema.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/__pycache__/schema.cpython-312.pyc new file mode 100644 index 00000000..d9cd92cc Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/schema.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/tools.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/__pycache__/tools.cpython-312.pyc new file mode 100644 index 00000000..221282d7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/tools.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/types.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/__pycache__/types.cpython-312.pyc new file mode 100644 index 00000000..a3a720e9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/types.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..3330244a Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent.py b/venv/Lib/site-packages/langchain/agents/agent.py new file mode 100644 index 00000000..f13f668b --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent.py @@ -0,0 +1,1801 @@ +"""Chain that takes in an input and produces an action and action input.""" + +from __future__ import annotations + +import asyncio +import builtins +import json +import logging +import time +from abc import abstractmethod +from collections.abc import AsyncIterator, Iterator, Sequence +from pathlib import Path +from typing import ( + Any, + Callable, + Optional, + Union, + cast, +) + +import yaml +from langchain_core._api import deprecated +from langchain_core.agents import AgentAction, AgentFinish, AgentStep +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + AsyncCallbackManagerForToolRun, + BaseCallbackManager, + CallbackManagerForChainRun, + CallbackManagerForToolRun, + Callbacks, +) +from langchain_core.exceptions import OutputParserException +from 
langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.prompts import BasePromptTemplate +from langchain_core.prompts.few_shot import FewShotPromptTemplate +from langchain_core.prompts.prompt import PromptTemplate +from langchain_core.runnables import Runnable, RunnableConfig, ensure_config +from langchain_core.runnables.utils import AddableDict +from langchain_core.tools import BaseTool +from langchain_core.utils.input import get_color_mapping +from pydantic import BaseModel, ConfigDict, model_validator +from typing_extensions import Self + +from langchain._api.deprecation import AGENT_DEPRECATION_WARNING +from langchain.agents.agent_iterator import AgentExecutorIterator +from langchain.agents.agent_types import AgentType +from langchain.agents.tools import InvalidTool +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.utilities.asyncio import asyncio_timeout + +logger = logging.getLogger(__name__) + + +class BaseSingleActionAgent(BaseModel): + """Base Single Action Agent class.""" + + @property + def return_values(self) -> list[str]: + """Return values of the agent.""" + return ["output"] + + def get_allowed_tools(self) -> Optional[list[str]]: + return None + + @abstractmethod + def plan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[AgentAction, AgentFinish]: + """Given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + + @abstractmethod + async def aplan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[AgentAction, AgentFinish]: + """Async given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + + @property + @abstractmethod + def input_keys(self) -> list[str]: + """Return the input keys. + + :meta private: + """ + + def return_stopped_response( + self, + early_stopping_method: str, + intermediate_steps: list[tuple[AgentAction, str]], + **kwargs: Any, + ) -> AgentFinish: + """Return response when agent has been stopped due to max iterations. + + Args: + early_stopping_method: Method to use for early stopping. + intermediate_steps: Steps the LLM has taken to date, + along with observations. + **kwargs: User inputs. + + Returns: + AgentFinish: Agent finish object. + + Raises: + ValueError: If `early_stopping_method` is not supported. + """ + if early_stopping_method == "force": + # `force` just returns a constant string + return AgentFinish( + {"output": "Agent stopped due to iteration limit or time limit."}, "" + ) + else: + raise ValueError( + f"Got unsupported early_stopping_method `{early_stopping_method}`" + ) + + @classmethod + def from_llm_and_tools( + cls, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + callback_manager: Optional[BaseCallbackManager] = None, + **kwargs: Any, + ) -> BaseSingleActionAgent: + """Construct an agent from an LLM and tools. + + Args: + llm: Language model to use. + tools: Tools to use. + callback_manager: Callback manager to use. + kwargs: Additional arguments. 
+ + Returns: + BaseSingleActionAgent: Agent object. + """ + raise NotImplementedError + + @property + def _agent_type(self) -> str: + """Return Identifier of an agent type.""" + raise NotImplementedError + + def dict(self, **kwargs: Any) -> builtins.dict: + """Return dictionary representation of agent. + + Returns: + Dict: Dictionary representation of agent. + """ + _dict = super().model_dump() + try: + _type = self._agent_type + except NotImplementedError: + _type = None + if isinstance(_type, AgentType): + _dict["_type"] = str(_type.value) + elif _type is not None: + _dict["_type"] = _type + return _dict + + def save(self, file_path: Union[Path, str]) -> None: + """Save the agent. + + Args: + file_path: Path to file to save the agent to. + + Example: + .. code-block:: python + + # If working with agent executor + agent.agent.save(file_path="path/agent.yaml") + """ + # Convert file to Path object. + if isinstance(file_path, str): + save_path = Path(file_path) + else: + save_path = file_path + + directory_path = save_path.parent + directory_path.mkdir(parents=True, exist_ok=True) + + # Fetch dictionary to save + agent_dict = self.dict() + if "_type" not in agent_dict: + raise NotImplementedError(f"Agent {self} does not support saving") + + if save_path.suffix == ".json": + with open(file_path, "w") as f: + json.dump(agent_dict, f, indent=4) + elif save_path.suffix.endswith((".yaml", ".yml")): + with open(file_path, "w") as f: + yaml.dump(agent_dict, f, default_flow_style=False) + else: + raise ValueError(f"{save_path} must be json or yaml") + + def tool_run_logging_kwargs(self) -> builtins.dict: + """Return logging kwargs for tool run.""" + return {} + + +class BaseMultiActionAgent(BaseModel): + """Base Multi Action Agent class.""" + + @property + def return_values(self) -> list[str]: + """Return values of the agent.""" + return ["output"] + + def get_allowed_tools(self) -> Optional[list[str]]: + """Get allowed tools. + + Returns: + Optional[List[str]]: Allowed tools. + """ + return None + + @abstractmethod + def plan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[list[AgentAction], AgentFinish]: + """Given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with the observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Actions specifying what tool to use. + """ + + @abstractmethod + async def aplan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[list[AgentAction], AgentFinish]: + """Async given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with the observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Actions specifying what tool to use. + """ + + @property + @abstractmethod + def input_keys(self) -> list[str]: + """Return the input keys. + + :meta private: + """ + + def return_stopped_response( + self, + early_stopping_method: str, + intermediate_steps: list[tuple[AgentAction, str]], + **kwargs: Any, + ) -> AgentFinish: + """Return response when agent has been stopped due to max iterations. + + Args: + early_stopping_method: Method to use for early stopping. + intermediate_steps: Steps the LLM has taken to date, + along with observations. + **kwargs: User inputs. + + Returns: + AgentFinish: Agent finish object. 
+ + Raises: + ValueError: If `early_stopping_method` is not supported. + """ + if early_stopping_method == "force": + # `force` just returns a constant string + return AgentFinish({"output": "Agent stopped due to max iterations."}, "") + else: + raise ValueError( + f"Got unsupported early_stopping_method `{early_stopping_method}`" + ) + + @property + def _agent_type(self) -> str: + """Return Identifier of an agent type.""" + raise NotImplementedError + + def dict(self, **kwargs: Any) -> builtins.dict: + """Return dictionary representation of agent.""" + _dict = super().model_dump() + try: + _dict["_type"] = str(self._agent_type) + except NotImplementedError: + pass + return _dict + + def save(self, file_path: Union[Path, str]) -> None: + """Save the agent. + + Args: + file_path: Path to file to save the agent to. + + Raises: + NotImplementedError: If agent does not support saving. + ValueError: If file_path is not json or yaml. + + Example: + .. code-block:: python + + # If working with agent executor + agent.agent.save(file_path="path/agent.yaml") + """ + # Convert file to Path object. + if isinstance(file_path, str): + save_path = Path(file_path) + else: + save_path = file_path + + # Fetch dictionary to save + agent_dict = self.dict() + if "_type" not in agent_dict: + raise NotImplementedError(f"Agent {self} does not support saving.") + + directory_path = save_path.parent + directory_path.mkdir(parents=True, exist_ok=True) + + if save_path.suffix == ".json": + with open(file_path, "w") as f: + json.dump(agent_dict, f, indent=4) + elif save_path.suffix.endswith((".yaml", ".yml")): + with open(file_path, "w") as f: + yaml.dump(agent_dict, f, default_flow_style=False) + else: + raise ValueError(f"{save_path} must be json or yaml") + + def tool_run_logging_kwargs(self) -> builtins.dict: + """Return logging kwargs for tool run.""" + + return {} + + +class AgentOutputParser(BaseOutputParser[Union[AgentAction, AgentFinish]]): + """Base class for parsing agent output into agent action/finish.""" + + @abstractmethod + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + """Parse text into agent action/finish.""" + + +class MultiActionAgentOutputParser( + BaseOutputParser[Union[list[AgentAction], AgentFinish]] +): + """Base class for parsing agent output into agent actions/finish. + + This is used for agents that can return multiple actions. + """ + + @abstractmethod + def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]: + """Parse text into agent actions/finish. + + Args: + text: Text to parse. + + Returns: + Union[List[AgentAction], AgentFinish]: + List of agent actions or agent finish. + """ + + +class RunnableAgent(BaseSingleActionAgent): + """Agent powered by Runnables.""" + + runnable: Runnable[dict, Union[AgentAction, AgentFinish]] + """Runnable to call to get agent action.""" + input_keys_arg: list[str] = [] + return_keys_arg: list[str] = [] + stream_runnable: bool = True + """Whether to stream from the runnable or not. + + If True then underlying LLM is invoked in a streaming fashion to make it possible + to get access to the individual LLM tokens when using stream_log with the Agent + Executor. If False then LLM is invoked in a non-streaming fashion and + individual LLM tokens will not be available in stream_log. 
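A self-contained sketch of the chunk-folding this docstring describes: when `stream_runnable` is true, `plan()`/`aplan()` below accumulate streamed chunks with `+`, so the final value matches a plain `invoke()`. `AddableDict` (already imported at the top of this module) makes dict chunks mergeable:

```python
from langchain_core.runnables.utils import AddableDict

# The fold used in plan()/aplan(): start from None, then add each chunk.
# AddableDict merges overlapping keys with "+", so string values concatenate.
chunks = [AddableDict({"output": "Hel"}), AddableDict({"output": "lo"})]
final = None
for chunk in chunks:
    final = chunk if final is None else final + chunk
print(final)  # {'output': 'Hello'}
```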
+ """ + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @property + def return_values(self) -> list[str]: + """Return values of the agent.""" + return self.return_keys_arg + + @property + def input_keys(self) -> list[str]: + """Return the input keys.""" + return self.input_keys_arg + + def plan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[AgentAction, AgentFinish]: + """Based on past history and current inputs, decide what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with the observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} + final_output: Any = None + if self.stream_runnable: + # Use streaming to make sure that the underlying LLM is invoked in a + # streaming + # fashion to make it possible to get access to the individual LLM tokens + # when using stream_log with the Agent Executor. + # Because the response from the plan is not a generator, we need to + # accumulate the output into final output and return that. + for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}): + if final_output is None: + final_output = chunk + else: + final_output += chunk + else: + final_output = self.runnable.invoke(inputs, config={"callbacks": callbacks}) + + return final_output + + async def aplan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[ + AgentAction, + AgentFinish, + ]: + """Async based on past history and current inputs, decide what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} + final_output: Any = None + if self.stream_runnable: + # Use streaming to make sure that the underlying LLM is invoked in a + # streaming + # fashion to make it possible to get access to the individual LLM tokens + # when using stream_log with the Agent Executor. + # Because the response from the plan is not a generator, we need to + # accumulate the output into final output and return that. + async for chunk in self.runnable.astream( + inputs, config={"callbacks": callbacks} + ): + if final_output is None: + final_output = chunk + else: + final_output += chunk + else: + final_output = await self.runnable.ainvoke( + inputs, config={"callbacks": callbacks} + ) + return final_output + + +class RunnableMultiActionAgent(BaseMultiActionAgent): + """Agent powered by Runnables.""" + + runnable: Runnable[dict, Union[list[AgentAction], AgentFinish]] + """Runnable to call to get agent actions.""" + input_keys_arg: list[str] = [] + return_keys_arg: list[str] = [] + stream_runnable: bool = True + """Whether to stream from the runnable or not. + + If True then underlying LLM is invoked in a streaming fashion to make it possible + to get access to the individual LLM tokens when using stream_log with the Agent + Executor. If False then LLM is invoked in a non-streaming fashion and + individual LLM tokens will not be available in stream_log. 
+ """ + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @property + def return_values(self) -> list[str]: + """Return values of the agent.""" + return self.return_keys_arg + + @property + def input_keys(self) -> list[str]: + """Return the input keys. + + Returns: + List of input keys. + """ + return self.input_keys_arg + + def plan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[ + list[AgentAction], + AgentFinish, + ]: + """Based on past history and current inputs, decide what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with the observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} + final_output: Any = None + if self.stream_runnable: + # Use streaming to make sure that the underlying LLM is invoked in a + # streaming + # fashion to make it possible to get access to the individual LLM tokens + # when using stream_log with the Agent Executor. + # Because the response from the plan is not a generator, we need to + # accumulate the output into final output and return that. + for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}): + if final_output is None: + final_output = chunk + else: + final_output += chunk + else: + final_output = self.runnable.invoke(inputs, config={"callbacks": callbacks}) + + return final_output + + async def aplan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[ + list[AgentAction], + AgentFinish, + ]: + """Async based on past history and current inputs, decide what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} + final_output: Any = None + if self.stream_runnable: + # Use streaming to make sure that the underlying LLM is invoked in a + # streaming + # fashion to make it possible to get access to the individual LLM tokens + # when using stream_log with the Agent Executor. + # Because the response from the plan is not a generator, we need to + # accumulate the output into final output and return that. + async for chunk in self.runnable.astream( + inputs, config={"callbacks": callbacks} + ): + if final_output is None: + final_output = chunk + else: + final_output += chunk + else: + final_output = await self.runnable.ainvoke( + inputs, config={"callbacks": callbacks} + ) + + return final_output + + +@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +class LLMSingleActionAgent(BaseSingleActionAgent): + """Base class for single action agents.""" + + llm_chain: LLMChain + """LLMChain to use for agent.""" + output_parser: AgentOutputParser + """Output parser to use for agent.""" + stop: list[str] + """List of strings to stop on.""" + + @property + def input_keys(self) -> list[str]: + """Return the input keys. + + Returns: + List of input keys. 
+ """ + return list(set(self.llm_chain.input_keys) - {"intermediate_steps"}) + + def dict(self, **kwargs: Any) -> builtins.dict: + """Return dictionary representation of agent.""" + _dict = super().dict() + del _dict["output_parser"] + return _dict + + def plan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[AgentAction, AgentFinish]: + """Given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with the observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + output = self.llm_chain.run( + intermediate_steps=intermediate_steps, + stop=self.stop, + callbacks=callbacks, + **kwargs, + ) + return self.output_parser.parse(output) + + async def aplan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[AgentAction, AgentFinish]: + """Async given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + output = await self.llm_chain.arun( + intermediate_steps=intermediate_steps, + stop=self.stop, + callbacks=callbacks, + **kwargs, + ) + return self.output_parser.parse(output) + + def tool_run_logging_kwargs(self) -> builtins.dict: + """Return logging kwargs for tool run.""" + return { + "llm_prefix": "", + "observation_prefix": "" if len(self.stop) == 0 else self.stop[0], + } + + +@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +class Agent(BaseSingleActionAgent): + """Agent that calls the language model and deciding the action. + + This is driven by a LLMChain. The prompt in the LLMChain MUST include + a variable called "agent_scratchpad" where the agent can put its + intermediary work. + """ + + llm_chain: LLMChain + """LLMChain to use for agent.""" + output_parser: AgentOutputParser + """Output parser to use for agent.""" + allowed_tools: Optional[list[str]] = None + """Allowed tools for the agent. If None, all tools are allowed.""" + + def dict(self, **kwargs: Any) -> builtins.dict: + """Return dictionary representation of agent.""" + _dict = super().dict() + del _dict["output_parser"] + return _dict + + def get_allowed_tools(self) -> Optional[list[str]]: + """Get allowed tools.""" + return self.allowed_tools + + @property + def return_values(self) -> list[str]: + """Return values of the agent.""" + return ["output"] + + def _fix_text(self, text: str) -> str: + """Fix the text. + + Args: + text: Text to fix. + + Returns: + str: Fixed text. 
+ """ + raise ValueError("fix_text not implemented for this agent.") + + @property + def _stop(self) -> list[str]: + return [ + f"\n{self.observation_prefix.rstrip()}", + f"\n\t{self.observation_prefix.rstrip()}", + ] + + def _construct_scratchpad( + self, intermediate_steps: list[tuple[AgentAction, str]] + ) -> Union[str, list[BaseMessage]]: + """Construct the scratchpad that lets the agent continue its thought process.""" + thoughts = "" + for action, observation in intermediate_steps: + thoughts += action.log + thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}" + return thoughts + + def plan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[AgentAction, AgentFinish]: + """Given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) + full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs) + return self.output_parser.parse(full_output) + + async def aplan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[AgentAction, AgentFinish]: + """Async given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) + full_output = await self.llm_chain.apredict(callbacks=callbacks, **full_inputs) + agent_output = await self.output_parser.aparse(full_output) + return agent_output + + def get_full_inputs( + self, intermediate_steps: list[tuple[AgentAction, str]], **kwargs: Any + ) -> builtins.dict[str, Any]: + """Create the full inputs for the LLMChain from intermediate steps. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + **kwargs: User inputs. + + Returns: + Dict[str, Any]: Full inputs for the LLMChain. + """ + thoughts = self._construct_scratchpad(intermediate_steps) + new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop} + full_inputs = {**kwargs, **new_inputs} + return full_inputs + + @property + def input_keys(self) -> list[str]: + """Return the input keys. + + :meta private: + """ + return list(set(self.llm_chain.input_keys) - {"agent_scratchpad"}) + + @model_validator(mode="after") + def validate_prompt(self) -> Self: + """Validate that prompt matches format. + + Args: + values: Values to validate. + + Returns: + Dict: Validated values. + + Raises: + ValueError: If `agent_scratchpad` is not in prompt.input_variables + and prompt is not a FewShotPromptTemplate or a PromptTemplate. + """ + prompt = self.llm_chain.prompt + if "agent_scratchpad" not in prompt.input_variables: + logger.warning( + "`agent_scratchpad` should be a variable in prompt.input_variables." + " Did not find it, so adding it at the end." 
+ ) + prompt.input_variables.append("agent_scratchpad") + if isinstance(prompt, PromptTemplate): + prompt.template += "\n{agent_scratchpad}" + elif isinstance(prompt, FewShotPromptTemplate): + prompt.suffix += "\n{agent_scratchpad}" + else: + raise ValueError(f"Got unexpected prompt type {type(prompt)}") + return self + + @property + @abstractmethod + def observation_prefix(self) -> str: + """Prefix to append the observation with.""" + + @property + @abstractmethod + def llm_prefix(self) -> str: + """Prefix to append the LLM call with.""" + + @classmethod + @abstractmethod + def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: + """Create a prompt for this class. + + Args: + tools: Tools to use. + + Returns: + BasePromptTemplate: Prompt template. + """ + + @classmethod + def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: + """Validate that appropriate tools are passed in. + + Args: + tools: Tools to use. + """ + + pass + + @classmethod + @abstractmethod + def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: + """Get default output parser for this class.""" + + @classmethod + def from_llm_and_tools( + cls, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + callback_manager: Optional[BaseCallbackManager] = None, + output_parser: Optional[AgentOutputParser] = None, + **kwargs: Any, + ) -> Agent: + """Construct an agent from an LLM and tools. + + Args: + llm: Language model to use. + tools: Tools to use. + callback_manager: Callback manager to use. + output_parser: Output parser to use. + kwargs: Additional arguments. + + Returns: + Agent: Agent object. + """ + cls._validate_tools(tools) + llm_chain = LLMChain( + llm=llm, + prompt=cls.create_prompt(tools), + callback_manager=callback_manager, + ) + tool_names = [tool.name for tool in tools] + _output_parser = output_parser or cls._get_default_output_parser() + return cls( + llm_chain=llm_chain, + allowed_tools=tool_names, + output_parser=_output_parser, + **kwargs, + ) + + def return_stopped_response( + self, + early_stopping_method: str, + intermediate_steps: list[tuple[AgentAction, str]], + **kwargs: Any, + ) -> AgentFinish: + """Return response when agent has been stopped due to max iterations. + + Args: + early_stopping_method: Method to use for early stopping. + intermediate_steps: Steps the LLM has taken to date, + along with observations. + **kwargs: User inputs. + + Returns: + AgentFinish: Agent finish object. + + Raises: + ValueError: If `early_stopping_method` is not in ['force', 'generate']. 
+ """ + if early_stopping_method == "force": + # `force` just returns a constant string + return AgentFinish( + {"output": "Agent stopped due to iteration limit or time limit."}, "" + ) + elif early_stopping_method == "generate": + # Generate does one final forward pass + thoughts = "" + for action, observation in intermediate_steps: + thoughts += action.log + thoughts += ( + f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}" + ) + # Adding to the previous steps, we now tell the LLM to make a final pred + thoughts += ( + "\n\nI now need to return a final answer based on the previous steps:" + ) + new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop} + full_inputs = {**kwargs, **new_inputs} + full_output = self.llm_chain.predict(**full_inputs) + # We try to extract a final answer + parsed_output = self.output_parser.parse(full_output) + if isinstance(parsed_output, AgentFinish): + # If we can extract, we send the correct stuff + return parsed_output + else: + # If we can extract, but the tool is not the final tool, + # we just return the full output + return AgentFinish({"output": full_output}, full_output) + else: + raise ValueError( + "early_stopping_method should be one of `force` or `generate`, " + f"got {early_stopping_method}" + ) + + def tool_run_logging_kwargs(self) -> builtins.dict: + """Return logging kwargs for tool run.""" + return { + "llm_prefix": self.llm_prefix, + "observation_prefix": self.observation_prefix, + } + + +class ExceptionTool(BaseTool): + """Tool that just returns the query.""" + + name: str = "_Exception" + """Name of the tool.""" + description: str = "Exception tool" + """Description of the tool.""" + + def _run( + self, + query: str, + run_manager: Optional[CallbackManagerForToolRun] = None, + ) -> str: + return query + + async def _arun( + self, + query: str, + run_manager: Optional[AsyncCallbackManagerForToolRun] = None, + ) -> str: + return query + + +NextStepOutput = list[Union[AgentFinish, AgentAction, AgentStep]] +RunnableAgentType = Union[RunnableAgent, RunnableMultiActionAgent] + + +class AgentExecutor(Chain): + """Agent that is using tools.""" + + agent: Union[BaseSingleActionAgent, BaseMultiActionAgent, Runnable] + """The agent to run for creating a plan and determining actions + to take at each step of the execution loop.""" + tools: Sequence[BaseTool] + """The valid tools the agent can call.""" + return_intermediate_steps: bool = False + """Whether to return the agent's trajectory of intermediate steps + at the end in addition to the final output.""" + max_iterations: Optional[int] = 15 + """The maximum number of steps to take before ending the execution + loop. + + Setting to 'None' could lead to an infinite loop.""" + max_execution_time: Optional[float] = None + """The maximum amount of wall clock time to spend in the execution + loop. + """ + early_stopping_method: str = "force" + """The method to use for early stopping if the agent never + returns `AgentFinish`. Either 'force' or 'generate'. + + `"force"` returns a string saying that it stopped because it met a + time or iteration limit. + + `"generate"` calls the agent's LLM Chain one final time to generate + a final answer based on the previous steps. + """ + handle_parsing_errors: Union[bool, str, Callable[[OutputParserException], str]] = ( + False + ) + """How to handle errors raised by the agent's output parser. + Defaults to `False`, which raises the error. + If `true`, the error will be sent back to the LLM as an observation. 
+ If a string, the string itself will be sent to the LLM as an observation. + If a callable function, the function will be called with the exception + as an argument, and the result of that function will be passed to the agent + as an observation. + """ + trim_intermediate_steps: Union[ + int, Callable[[list[tuple[AgentAction, str]]], list[tuple[AgentAction, str]]] + ] = -1 + """How to trim the intermediate steps before returning them. + Defaults to -1, which means no trimming. + """ + + @classmethod + def from_agent_and_tools( + cls, + agent: Union[BaseSingleActionAgent, BaseMultiActionAgent, Runnable], + tools: Sequence[BaseTool], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> AgentExecutor: + """Create from agent and tools. + + Args: + agent: Agent to use. + tools: Tools to use. + callbacks: Callbacks to use. + kwargs: Additional arguments. + + Returns: + AgentExecutor: Agent executor object. + """ + return cls( + agent=agent, + tools=tools, + callbacks=callbacks, + **kwargs, + ) + + @model_validator(mode="after") + def validate_tools(self) -> Self: + """Validate that tools are compatible with agent. + + Args: + values: Values to validate. + + Returns: + Dict: Validated values. + + Raises: + ValueError: If allowed tools are different than provided tools. + """ + agent = self.agent + tools = self.tools + allowed_tools = agent.get_allowed_tools() # type: ignore[union-attr] + if allowed_tools is not None: + if set(allowed_tools) != set([tool.name for tool in tools]): + raise ValueError( + f"Allowed tools ({allowed_tools}) different than " + f"provided tools ({[tool.name for tool in tools]})" + ) + return self + + @model_validator(mode="before") + @classmethod + def validate_runnable_agent(cls, values: dict) -> Any: + """Convert runnable to agent if passed in. + + Args: + values: Values to validate. + + Returns: + Dict: Validated values. + """ + agent = values.get("agent") + if agent and isinstance(agent, Runnable): + try: + output_type = agent.OutputType + except Exception as _: + multi_action = False + else: + multi_action = output_type == Union[list[AgentAction], AgentFinish] + + stream_runnable = values.pop("stream_runnable", True) + if multi_action: + values["agent"] = RunnableMultiActionAgent( + runnable=agent, stream_runnable=stream_runnable + ) + else: + values["agent"] = RunnableAgent( + runnable=agent, stream_runnable=stream_runnable + ) + return values + + @property + def _action_agent(self) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: + """Type cast self.agent. + + If the `agent` attribute is a Runnable, it will be converted one of + RunnableAgentType in the validate_runnable_agent root_validator. + + To support instantiating with a Runnable, here we explicitly cast the type + to reflect the changes made in the root_validator. + """ + if isinstance(self.agent, Runnable): + return cast(RunnableAgentType, self.agent) + else: + return self.agent + + def save(self, file_path: Union[Path, str]) -> None: + """Raise error - saving not supported for Agent Executors. + + Args: + file_path: Path to save to. + + Raises: + ValueError: Saving not supported for agent executors. + """ + raise ValueError( + "Saving not supported for agent executors. " + "If you are trying to save the agent, please use the " + "`.save_agent(...)`" + ) + + def save_agent(self, file_path: Union[Path, str]) -> None: + """Save the underlying agent. + + Args: + file_path: Path to save to. 
+ """ + return self._action_agent.save(file_path) + + def iter( + self, + inputs: Any, + callbacks: Callbacks = None, + *, + include_run_info: bool = False, + async_: bool = False, # arg kept for backwards compat, but ignored + ) -> AgentExecutorIterator: + """Enables iteration over steps taken to reach final output. + + Args: + inputs: Inputs to the agent. + callbacks: Callbacks to run. + include_run_info: Whether to include run info. + async_: Whether to run async. (Ignored) + + Returns: + AgentExecutorIterator: Agent executor iterator object. + """ + return AgentExecutorIterator( + self, + inputs, + callbacks, + tags=self.tags, + include_run_info=include_run_info, + ) + + @property + def input_keys(self) -> list[str]: + """Return the input keys. + + :meta private: + """ + return self._action_agent.input_keys + + @property + def output_keys(self) -> list[str]: + """Return the singular output key. + + :meta private: + """ + if self.return_intermediate_steps: + return self._action_agent.return_values + ["intermediate_steps"] + else: + return self._action_agent.return_values + + def lookup_tool(self, name: str) -> BaseTool: + """Lookup tool by name. + + Args: + name: Name of tool. + + Returns: + BaseTool: Tool object. + """ + return {tool.name: tool for tool in self.tools}[name] + + def _should_continue(self, iterations: int, time_elapsed: float) -> bool: + if self.max_iterations is not None and iterations >= self.max_iterations: + return False + if ( + self.max_execution_time is not None + and time_elapsed >= self.max_execution_time + ): + return False + + return True + + def _return( + self, + output: AgentFinish, + intermediate_steps: list, + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + if run_manager: + run_manager.on_agent_finish(output, color="green", verbose=self.verbose) + final_output = output.return_values + if self.return_intermediate_steps: + final_output["intermediate_steps"] = intermediate_steps + return final_output + + async def _areturn( + self, + output: AgentFinish, + intermediate_steps: list, + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + if run_manager: + await run_manager.on_agent_finish( + output, color="green", verbose=self.verbose + ) + final_output = output.return_values + if self.return_intermediate_steps: + final_output["intermediate_steps"] = intermediate_steps + return final_output + + def _consume_next_step( + self, values: NextStepOutput + ) -> Union[AgentFinish, list[tuple[AgentAction, str]]]: + if isinstance(values[-1], AgentFinish): + assert len(values) == 1 + return values[-1] + else: + return [ + (a.action, a.observation) for a in values if isinstance(a, AgentStep) + ] + + def _take_next_step( + self, + name_to_tool_map: dict[str, BaseTool], + color_mapping: dict[str, str], + inputs: dict[str, str], + intermediate_steps: list[tuple[AgentAction, str]], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> Union[AgentFinish, list[tuple[AgentAction, str]]]: + return self._consume_next_step( + [ + a + for a in self._iter_next_step( + name_to_tool_map, + color_mapping, + inputs, + intermediate_steps, + run_manager, + ) + ] + ) + + def _iter_next_step( + self, + name_to_tool_map: dict[str, BaseTool], + color_mapping: dict[str, str], + inputs: dict[str, str], + intermediate_steps: list[tuple[AgentAction, str]], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]: + """Take a single step in the 
thought-action-observation loop. + + Override this to take control of how the agent makes and acts on choices. + """ + try: + intermediate_steps = self._prepare_intermediate_steps(intermediate_steps) + + # Call the LLM to see what to do. + output = self._action_agent.plan( + intermediate_steps, + callbacks=run_manager.get_child() if run_manager else None, + **inputs, + ) + except OutputParserException as e: + if isinstance(self.handle_parsing_errors, bool): + raise_error = not self.handle_parsing_errors + else: + raise_error = False + if raise_error: + raise ValueError( + "An output parsing error occurred. " + "In order to pass this error back to the agent and have it try " + "again, pass `handle_parsing_errors=True` to the AgentExecutor. " + f"This is the error: {str(e)}" + ) + text = str(e) + if isinstance(self.handle_parsing_errors, bool): + if e.send_to_llm: + observation = str(e.observation) + text = str(e.llm_output) + else: + observation = "Invalid or incomplete response" + elif isinstance(self.handle_parsing_errors, str): + observation = self.handle_parsing_errors + elif callable(self.handle_parsing_errors): + observation = self.handle_parsing_errors(e) + else: + raise ValueError("Got unexpected type of `handle_parsing_errors`") + output = AgentAction("_Exception", observation, text) + if run_manager: + run_manager.on_agent_action(output, color="green") + tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() + observation = ExceptionTool().run( + output.tool_input, + verbose=self.verbose, + color=None, + callbacks=run_manager.get_child() if run_manager else None, + **tool_run_kwargs, + ) + yield AgentStep(action=output, observation=observation) + return + + # If the tool chosen is the finishing tool, then we end and return. + if isinstance(output, AgentFinish): + yield output + return + + actions: list[AgentAction] + if isinstance(output, AgentAction): + actions = [output] + else: + actions = output + for agent_action in actions: + yield agent_action + for agent_action in actions: + yield self._perform_agent_action( + name_to_tool_map, color_mapping, agent_action, run_manager + ) + + def _perform_agent_action( + self, + name_to_tool_map: dict[str, BaseTool], + color_mapping: dict[str, str], + agent_action: AgentAction, + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> AgentStep: + if run_manager: + run_manager.on_agent_action(agent_action, color="green") + # Otherwise we lookup the tool + if agent_action.tool in name_to_tool_map: + tool = name_to_tool_map[agent_action.tool] + return_direct = tool.return_direct + color = color_mapping[agent_action.tool] + tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() + if return_direct: + tool_run_kwargs["llm_prefix"] = "" + # We then call the tool on the tool input to get an observation + observation = tool.run( + agent_action.tool_input, + verbose=self.verbose, + color=color, + callbacks=run_manager.get_child() if run_manager else None, + **tool_run_kwargs, + ) + else: + tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() + observation = InvalidTool().run( + { + "requested_tool_name": agent_action.tool, + "available_tool_names": list(name_to_tool_map.keys()), + }, + verbose=self.verbose, + color=None, + callbacks=run_manager.get_child() if run_manager else None, + **tool_run_kwargs, + ) + return AgentStep(action=agent_action, observation=observation) + + async def _atake_next_step( + self, + name_to_tool_map: dict[str, BaseTool], + color_mapping: dict[str, str], + inputs: dict[str, str], + 
intermediate_steps: list[tuple[AgentAction, str]], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> Union[AgentFinish, list[tuple[AgentAction, str]]]: + return self._consume_next_step( + [ + a + async for a in self._aiter_next_step( + name_to_tool_map, + color_mapping, + inputs, + intermediate_steps, + run_manager, + ) + ] + ) + + async def _aiter_next_step( + self, + name_to_tool_map: dict[str, BaseTool], + color_mapping: dict[str, str], + inputs: dict[str, str], + intermediate_steps: list[tuple[AgentAction, str]], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> AsyncIterator[Union[AgentFinish, AgentAction, AgentStep]]: + """Take a single step in the thought-action-observation loop. + + Override this to take control of how the agent makes and acts on choices. + """ + try: + intermediate_steps = self._prepare_intermediate_steps(intermediate_steps) + + # Call the LLM to see what to do. + output = await self._action_agent.aplan( + intermediate_steps, + callbacks=run_manager.get_child() if run_manager else None, + **inputs, + ) + except OutputParserException as e: + if isinstance(self.handle_parsing_errors, bool): + raise_error = not self.handle_parsing_errors + else: + raise_error = False + if raise_error: + raise ValueError( + "An output parsing error occurred. " + "In order to pass this error back to the agent and have it try " + "again, pass `handle_parsing_errors=True` to the AgentExecutor. " + f"This is the error: {str(e)}" + ) + text = str(e) + if isinstance(self.handle_parsing_errors, bool): + if e.send_to_llm: + observation = str(e.observation) + text = str(e.llm_output) + else: + observation = "Invalid or incomplete response" + elif isinstance(self.handle_parsing_errors, str): + observation = self.handle_parsing_errors + elif callable(self.handle_parsing_errors): + observation = self.handle_parsing_errors(e) + else: + raise ValueError("Got unexpected type of `handle_parsing_errors`") + output = AgentAction("_Exception", observation, text) + tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() + observation = await ExceptionTool().arun( + output.tool_input, + verbose=self.verbose, + color=None, + callbacks=run_manager.get_child() if run_manager else None, + **tool_run_kwargs, + ) + yield AgentStep(action=output, observation=observation) + return + + # If the tool chosen is the finishing tool, then we end and return. 
+ if isinstance(output, AgentFinish): + yield output + return + + actions: list[AgentAction] + if isinstance(output, AgentAction): + actions = [output] + else: + actions = output + for agent_action in actions: + yield agent_action + + # Use asyncio.gather to run multiple tool.arun() calls concurrently + result = await asyncio.gather( + *[ + self._aperform_agent_action( + name_to_tool_map, color_mapping, agent_action, run_manager + ) + for agent_action in actions + ], + ) + + # TODO This could yield each result as it becomes available + for chunk in result: + yield chunk + + async def _aperform_agent_action( + self, + name_to_tool_map: dict[str, BaseTool], + color_mapping: dict[str, str], + agent_action: AgentAction, + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> AgentStep: + if run_manager: + await run_manager.on_agent_action( + agent_action, verbose=self.verbose, color="green" + ) + # Otherwise we lookup the tool + if agent_action.tool in name_to_tool_map: + tool = name_to_tool_map[agent_action.tool] + return_direct = tool.return_direct + color = color_mapping[agent_action.tool] + tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() + if return_direct: + tool_run_kwargs["llm_prefix"] = "" + # We then call the tool on the tool input to get an observation + observation = await tool.arun( + agent_action.tool_input, + verbose=self.verbose, + color=color, + callbacks=run_manager.get_child() if run_manager else None, + **tool_run_kwargs, + ) + else: + tool_run_kwargs = self._action_agent.tool_run_logging_kwargs() + observation = await InvalidTool().arun( + { + "requested_tool_name": agent_action.tool, + "available_tool_names": list(name_to_tool_map.keys()), + }, + verbose=self.verbose, + color=None, + callbacks=run_manager.get_child() if run_manager else None, + **tool_run_kwargs, + ) + return AgentStep(action=agent_action, observation=observation) + + def _call( + self, + inputs: dict[str, str], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Run text through and get agent response.""" + # Construct a mapping of tool name to tool for easy lookup + name_to_tool_map = {tool.name: tool for tool in self.tools} + # We construct a mapping from each tool to a color, used for logging. + color_mapping = get_color_mapping( + [tool.name for tool in self.tools], excluded_colors=["green", "red"] + ) + intermediate_steps: list[tuple[AgentAction, str]] = [] + # Let's start tracking the number of iterations and time elapsed + iterations = 0 + time_elapsed = 0.0 + start_time = time.time() + # We now enter the agent loop (until it returns something). 
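
Unlike the sync path, the async step above fans all planned actions out through `asyncio.gather`, so the tool calls run concurrently and the observations come back in action order rather than completion order. A minimal, self-contained sketch of that pattern, with toy coroutines standing in for `tool.arun`:

```python
import asyncio

async def run_tool(name: str, delay: float) -> str:
    await asyncio.sleep(delay)  # stands in for tool.arun(...)
    return f"{name} -> observation"

async def main() -> None:
    # Mirrors the gather over _aperform_agent_action above.
    observations = await asyncio.gather(
        run_tool("search", 0.2),
        run_tool("calculator", 0.1),
    )
    print(observations)  # order matches the planned actions, not completion order

asyncio.run(main())
```
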
+ while self._should_continue(iterations, time_elapsed): + next_step_output = self._take_next_step( + name_to_tool_map, + color_mapping, + inputs, + intermediate_steps, + run_manager=run_manager, + ) + if isinstance(next_step_output, AgentFinish): + return self._return( + next_step_output, intermediate_steps, run_manager=run_manager + ) + + intermediate_steps.extend(next_step_output) + if len(next_step_output) == 1: + next_step_action = next_step_output[0] + # See if tool should return directly + tool_return = self._get_tool_return(next_step_action) + if tool_return is not None: + return self._return( + tool_return, intermediate_steps, run_manager=run_manager + ) + iterations += 1 + time_elapsed = time.time() - start_time + output = self._action_agent.return_stopped_response( + self.early_stopping_method, intermediate_steps, **inputs + ) + return self._return(output, intermediate_steps, run_manager=run_manager) + + async def _acall( + self, + inputs: dict[str, str], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, str]: + """Async run text through and get agent response.""" + # Construct a mapping of tool name to tool for easy lookup + name_to_tool_map = {tool.name: tool for tool in self.tools} + # We construct a mapping from each tool to a color, used for logging. + color_mapping = get_color_mapping( + [tool.name for tool in self.tools], excluded_colors=["green"] + ) + intermediate_steps: list[tuple[AgentAction, str]] = [] + # Let's start tracking the number of iterations and time elapsed + iterations = 0 + time_elapsed = 0.0 + start_time = time.time() + # We now enter the agent loop (until it returns something). + try: + async with asyncio_timeout(self.max_execution_time): + while self._should_continue(iterations, time_elapsed): + next_step_output = await self._atake_next_step( + name_to_tool_map, + color_mapping, + inputs, + intermediate_steps, + run_manager=run_manager, + ) + if isinstance(next_step_output, AgentFinish): + return await self._areturn( + next_step_output, + intermediate_steps, + run_manager=run_manager, + ) + + intermediate_steps.extend(next_step_output) + if len(next_step_output) == 1: + next_step_action = next_step_output[0] + # See if tool should return directly + tool_return = self._get_tool_return(next_step_action) + if tool_return is not None: + return await self._areturn( + tool_return, intermediate_steps, run_manager=run_manager + ) + + iterations += 1 + time_elapsed = time.time() - start_time + output = self._action_agent.return_stopped_response( + self.early_stopping_method, intermediate_steps, **inputs + ) + return await self._areturn( + output, intermediate_steps, run_manager=run_manager + ) + except (TimeoutError, asyncio.TimeoutError): + # stop early when interrupted by the async timeout + output = self._action_agent.return_stopped_response( + self.early_stopping_method, intermediate_steps, **inputs + ) + return await self._areturn( + output, intermediate_steps, run_manager=run_manager + ) + + def _get_tool_return( + self, next_step_output: tuple[AgentAction, str] + ) -> Optional[AgentFinish]: + """Check if the tool is a returning tool.""" + agent_action, observation = next_step_output + name_to_tool_map = {tool.name: tool for tool in self.tools} + return_value_key = "output" + if len(self._action_agent.return_values) > 0: + return_value_key = self._action_agent.return_values[0] + # Invalid tools won't be in the map, so we return False. 
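
Both `_call` and `_acall` gate each pass through the loop on `_should_continue(iterations, time_elapsed)`. A hedged restatement of that check as a free function (the real limits are the `max_iterations` and `max_execution_time` fields on the executor; the defaults here are illustrative):

```python
import time
from typing import Optional

def should_continue(
    iterations: int,
    time_elapsed: float,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,
) -> bool:
    # Either cap, when set, ends the loop; the executor then builds the final
    # answer via return_stopped_response as shown above.
    if max_iterations is not None and iterations >= max_iterations:
        return False
    if max_execution_time is not None and time_elapsed >= max_execution_time:
        return False
    return True

start = time.time()
print(should_continue(3, time.time() - start))   # True: under both limits
print(should_continue(15, time.time() - start))  # False: iteration cap reached
```
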
+ if agent_action.tool in name_to_tool_map: + if name_to_tool_map[agent_action.tool].return_direct: + return AgentFinish( + {return_value_key: observation}, + "", + ) + return None + + def _prepare_intermediate_steps( + self, intermediate_steps: list[tuple[AgentAction, str]] + ) -> list[tuple[AgentAction, str]]: + if ( + isinstance(self.trim_intermediate_steps, int) + and self.trim_intermediate_steps > 0 + ): + return intermediate_steps[-self.trim_intermediate_steps :] + elif callable(self.trim_intermediate_steps): + return self.trim_intermediate_steps(intermediate_steps) + else: + return intermediate_steps + + def stream( + self, + input: Union[dict[str, Any], Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[AddableDict]: + """Enables streaming over steps taken to reach final output. + + Args: + input: Input to the agent. + config: Config to use. + kwargs: Additional arguments. + + Yields: + AddableDict: Addable dictionary. + """ + config = ensure_config(config) + iterator = AgentExecutorIterator( + self, + input, + config.get("callbacks"), + tags=config.get("tags"), + metadata=config.get("metadata"), + run_name=config.get("run_name"), + run_id=config.get("run_id"), + yield_actions=True, + **kwargs, + ) + yield from iterator + + async def astream( + self, + input: Union[dict[str, Any], Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[AddableDict]: + """Async enables streaming over steps taken to reach final output. + + Args: + input: Input to the agent. + config: Config to use. + kwargs: Additional arguments. + + Yields: + AddableDict: Addable dictionary. + """ + + config = ensure_config(config) + iterator = AgentExecutorIterator( + self, + input, + config.get("callbacks"), + tags=config.get("tags"), + metadata=config.get("metadata"), + run_name=config.get("run_name"), + run_id=config.get("run_id"), + yield_actions=True, + **kwargs, + ) + async for step in iterator: + yield step diff --git a/venv/Lib/site-packages/langchain/agents/agent_iterator.py b/venv/Lib/site-packages/langchain/agents/agent_iterator.py new file mode 100644 index 00000000..2e07b298 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_iterator.py @@ -0,0 +1,414 @@ +from __future__ import annotations + +import asyncio +import logging +import time +from collections.abc import AsyncIterator, Iterator +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Union, +) +from uuid import UUID + +from langchain_core.agents import ( + AgentAction, + AgentFinish, + AgentStep, +) +from langchain_core.callbacks import ( + AsyncCallbackManager, + AsyncCallbackManagerForChainRun, + CallbackManager, + CallbackManagerForChainRun, + Callbacks, +) +from langchain_core.load.dump import dumpd +from langchain_core.outputs import RunInfo +from langchain_core.runnables.utils import AddableDict +from langchain_core.tools import BaseTool +from langchain_core.utils.input import get_color_mapping + +from langchain.schema import RUN_KEY +from langchain.utilities.asyncio import asyncio_timeout + +if TYPE_CHECKING: + from langchain.agents.agent import AgentExecutor, NextStepOutput + +logger = logging.getLogger(__name__) + + +class AgentExecutorIterator: + """Iterator for AgentExecutor.""" + + def __init__( + self, + agent_executor: AgentExecutor, + inputs: Any, + callbacks: Callbacks = None, + *, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + run_name: Optional[str] = None, + run_id: Optional[UUID] = None, + include_run_info: 
bool = False, + yield_actions: bool = False, + ): + """ + Initialize the AgentExecutorIterator with the given AgentExecutor, + inputs, and optional callbacks. + + Args: + agent_executor (AgentExecutor): The AgentExecutor to iterate over. + inputs (Any): The inputs to the AgentExecutor. + callbacks (Callbacks, optional): The callbacks to use during iteration. + Defaults to None. + tags (Optional[list[str]], optional): The tags to use during iteration. + Defaults to None. + metadata (Optional[Dict[str, Any]], optional): The metadata to use + during iteration. Defaults to None. + run_name (Optional[str], optional): The name of the run. Defaults to None. + run_id (Optional[UUID], optional): The ID of the run. Defaults to None. + include_run_info (bool, optional): Whether to include run info + in the output. Defaults to False. + yield_actions (bool, optional): Whether to yield actions as they + are generated. Defaults to False. + """ + self._agent_executor = agent_executor + self.inputs = inputs + self.callbacks = callbacks + self.tags = tags + self.metadata = metadata + self.run_name = run_name + self.run_id = run_id + self.include_run_info = include_run_info + self.yield_actions = yield_actions + self.reset() + + _inputs: dict[str, str] + callbacks: Callbacks + tags: Optional[list[str]] + metadata: Optional[dict[str, Any]] + run_name: Optional[str] + run_id: Optional[UUID] + include_run_info: bool + yield_actions: bool + + @property + def inputs(self) -> dict[str, str]: + """The inputs to the AgentExecutor.""" + return self._inputs + + @inputs.setter + def inputs(self, inputs: Any) -> None: + self._inputs = self.agent_executor.prep_inputs(inputs) + + @property + def agent_executor(self) -> AgentExecutor: + """The AgentExecutor to iterate over.""" + return self._agent_executor + + @agent_executor.setter + def agent_executor(self, agent_executor: AgentExecutor) -> None: + self._agent_executor = agent_executor + # force re-prep inputs in case agent_executor's prep_inputs fn changed + self.inputs = self.inputs + + @property + def name_to_tool_map(self) -> dict[str, BaseTool]: + """A mapping of tool names to tools.""" + return {tool.name: tool for tool in self.agent_executor.tools} + + @property + def color_mapping(self) -> dict[str, str]: + """A mapping of tool names to colors.""" + return get_color_mapping( + [tool.name for tool in self.agent_executor.tools], + excluded_colors=["green", "red"], + ) + + def reset(self) -> None: + """ + Reset the iterator to its initial state, clearing intermediate steps, + iterations, and time elapsed. + """ + logger.debug("(Re)setting AgentExecutorIterator to fresh state") + self.intermediate_steps: list[tuple[AgentAction, str]] = [] + self.iterations = 0 + # maybe better to start these on the first __anext__ call? + self.time_elapsed = 0.0 + self.start_time = time.time() + + def update_iterations(self) -> None: + """ + Increment the number of iterations and update the time elapsed. + """ + self.iterations += 1 + self.time_elapsed = time.time() - self.start_time + logger.debug( + f"Agent Iterations: {self.iterations} ({self.time_elapsed:.2f}s elapsed)" + ) + + def make_final_outputs( + self, + outputs: dict[str, Any], + run_manager: Union[CallbackManagerForChainRun, AsyncCallbackManagerForChainRun], + ) -> AddableDict: + # have access to intermediate steps by design in iterator, + # so return only outputs may as well always be true. 
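
The chunks this iterator emits are `AddableDict`s: dictionaries whose values merge under `+`, so streamed pieces can be folded into one final mapping. A small runnable illustration (the commented consumption loop assumes an already-constructed executor):

```python
from langchain_core.runnables.utils import AddableDict

# Two chunks of the kind the iterator yields: an action chunk and a step chunk.
chunk_a = AddableDict(actions=["search"], messages=["calling search"])
chunk_b = AddableDict(steps=["observation"], messages=["got 3 results"])

merged = chunk_a + chunk_b  # list values concatenate key-wise
print(merged["messages"])   # ['calling search', 'got 3 results']

# Typical consumption (executor construction elided):
# final = None
# for chunk in agent_executor.stream({"input": "..."}):
#     final = chunk if final is None else final + chunk
```
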
+ + prepared_outputs = AddableDict( + self.agent_executor.prep_outputs( + self.inputs, outputs, return_only_outputs=True + ) + ) + if self.include_run_info: + prepared_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id) + return prepared_outputs + + def __iter__(self: AgentExecutorIterator) -> Iterator[AddableDict]: + logger.debug("Initialising AgentExecutorIterator") + self.reset() + callback_manager = CallbackManager.configure( + self.callbacks, + self.agent_executor.callbacks, + self.agent_executor.verbose, + self.tags, + self.agent_executor.tags, + self.metadata, + self.agent_executor.metadata, + ) + run_manager = callback_manager.on_chain_start( + dumpd(self.agent_executor), + self.inputs, + self.run_id, + name=self.run_name, + ) + try: + while self.agent_executor._should_continue( + self.iterations, self.time_elapsed + ): + # take the next step: this plans next action, executes it, + # yielding action and observation as they are generated + next_step_seq: NextStepOutput = [] + for chunk in self.agent_executor._iter_next_step( + self.name_to_tool_map, + self.color_mapping, + self.inputs, + self.intermediate_steps, + run_manager, + ): + next_step_seq.append(chunk) + # if we're yielding actions, yield them as they come + # do not yield AgentFinish, which will be handled below + if self.yield_actions: + if isinstance(chunk, AgentAction): + yield AddableDict(actions=[chunk], messages=chunk.messages) + elif isinstance(chunk, AgentStep): + yield AddableDict(steps=[chunk], messages=chunk.messages) + + # convert iterator output to format handled by _process_next_step_output + next_step = self.agent_executor._consume_next_step(next_step_seq) + # update iterations and time elapsed + self.update_iterations() + # decide if this is the final output + output = self._process_next_step_output(next_step, run_manager) + is_final = "intermediate_step" not in output + # yield the final output always + # for backwards compat, yield int. output if not yielding actions + if not self.yield_actions or is_final: + yield output + # if final output reached, stop iteration + if is_final: + return + except BaseException as e: + run_manager.on_chain_error(e) + raise + + # if we got here means we exhausted iterations or time + yield self._stop(run_manager) + + async def __aiter__(self) -> AsyncIterator[AddableDict]: + """ + N.B. 
__aiter__ must be a normal method, so need to initialize async run manager + on first __anext__ call where we can await it + """ + logger.debug("Initialising AgentExecutorIterator (async)") + self.reset() + callback_manager = AsyncCallbackManager.configure( + self.callbacks, + self.agent_executor.callbacks, + self.agent_executor.verbose, + self.tags, + self.agent_executor.tags, + self.metadata, + self.agent_executor.metadata, + ) + run_manager = await callback_manager.on_chain_start( + dumpd(self.agent_executor), + self.inputs, + self.run_id, + name=self.run_name, + ) + try: + async with asyncio_timeout(self.agent_executor.max_execution_time): + while self.agent_executor._should_continue( + self.iterations, self.time_elapsed + ): + # take the next step: this plans next action, executes it, + # yielding action and observation as they are generated + next_step_seq: NextStepOutput = [] + async for chunk in self.agent_executor._aiter_next_step( + self.name_to_tool_map, + self.color_mapping, + self.inputs, + self.intermediate_steps, + run_manager, + ): + next_step_seq.append(chunk) + # if we're yielding actions, yield them as they come + # do not yield AgentFinish, which will be handled below + if self.yield_actions: + if isinstance(chunk, AgentAction): + yield AddableDict( + actions=[chunk], messages=chunk.messages + ) + elif isinstance(chunk, AgentStep): + yield AddableDict( + steps=[chunk], messages=chunk.messages + ) + + # convert iterator output to format handled by _process_next_step + next_step = self.agent_executor._consume_next_step(next_step_seq) + # update iterations and time elapsed + self.update_iterations() + # decide if this is the final output + output = await self._aprocess_next_step_output( + next_step, run_manager + ) + is_final = "intermediate_step" not in output + # yield the final output always + # for backwards compat, yield int. output if not yielding actions + if not self.yield_actions or is_final: + yield output + # if final output reached, stop iteration + if is_final: + return + except (TimeoutError, asyncio.TimeoutError): + yield await self._astop(run_manager) + return + except BaseException as e: + await run_manager.on_chain_error(e) + raise + + # if we got here means we exhausted iterations or time + yield await self._astop(run_manager) + + def _process_next_step_output( + self, + next_step_output: Union[AgentFinish, list[tuple[AgentAction, str]]], + run_manager: CallbackManagerForChainRun, + ) -> AddableDict: + """ + Process the output of the next step, + handling AgentFinish and tool return cases. 
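
The `asyncio_timeout` guard in `__aiter__` converts an exhausted time budget into a normal stopped-response chunk instead of an unhandled exception. A hedged sketch of the same guard using the stdlib `asyncio.timeout` (Python 3.11+), with a sleep standing in for the agent loop:

```python
import asyncio

async def main() -> None:
    try:
        async with asyncio.timeout(0.1):  # stdlib analogue of asyncio_timeout(...)
            await asyncio.sleep(1.0)      # stands in for the agent loop
    except TimeoutError:
        # The iterator reacts the same way: yield the _astop(...) stopped response.
        print("time budget exhausted -> yield the stopped response")

asyncio.run(main())
```
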
+ """ + logger.debug("Processing output of Agent loop step") + if isinstance(next_step_output, AgentFinish): + logger.debug( + "Hit AgentFinish: _return -> on_chain_end -> run final output logic" + ) + return self._return(next_step_output, run_manager=run_manager) + + self.intermediate_steps.extend(next_step_output) + logger.debug("Updated intermediate_steps with step output") + + # Check for tool return + if len(next_step_output) == 1: + next_step_action = next_step_output[0] + tool_return = self.agent_executor._get_tool_return(next_step_action) + if tool_return is not None: + return self._return(tool_return, run_manager=run_manager) + + return AddableDict(intermediate_step=next_step_output) + + async def _aprocess_next_step_output( + self, + next_step_output: Union[AgentFinish, list[tuple[AgentAction, str]]], + run_manager: AsyncCallbackManagerForChainRun, + ) -> AddableDict: + """ + Process the output of the next async step, + handling AgentFinish and tool return cases. + """ + logger.debug("Processing output of async Agent loop step") + if isinstance(next_step_output, AgentFinish): + logger.debug( + "Hit AgentFinish: _areturn -> on_chain_end -> run final output logic" + ) + return await self._areturn(next_step_output, run_manager=run_manager) + + self.intermediate_steps.extend(next_step_output) + logger.debug("Updated intermediate_steps with step output") + + # Check for tool return + if len(next_step_output) == 1: + next_step_action = next_step_output[0] + tool_return = self.agent_executor._get_tool_return(next_step_action) + if tool_return is not None: + return await self._areturn(tool_return, run_manager=run_manager) + + return AddableDict(intermediate_step=next_step_output) + + def _stop(self, run_manager: CallbackManagerForChainRun) -> AddableDict: + """ + Stop the iterator and raise a StopIteration exception with the stopped response. + """ + logger.warning("Stopping agent prematurely due to triggering stop condition") + # this manually constructs agent finish with output key + output = self.agent_executor._action_agent.return_stopped_response( + self.agent_executor.early_stopping_method, + self.intermediate_steps, + **self.inputs, + ) + return self._return(output, run_manager=run_manager) + + async def _astop(self, run_manager: AsyncCallbackManagerForChainRun) -> AddableDict: + """ + Stop the async iterator and raise a StopAsyncIteration exception with + the stopped response. + """ + logger.warning("Stopping agent prematurely due to triggering stop condition") + output = self.agent_executor._action_agent.return_stopped_response( + self.agent_executor.early_stopping_method, + self.intermediate_steps, + **self.inputs, + ) + return await self._areturn(output, run_manager=run_manager) + + def _return( + self, output: AgentFinish, run_manager: CallbackManagerForChainRun + ) -> AddableDict: + """ + Return the final output of the iterator. + """ + returned_output = self.agent_executor._return( + output, self.intermediate_steps, run_manager=run_manager + ) + returned_output["messages"] = output.messages + run_manager.on_chain_end(returned_output) + return self.make_final_outputs(returned_output, run_manager) + + async def _areturn( + self, output: AgentFinish, run_manager: AsyncCallbackManagerForChainRun + ) -> AddableDict: + """ + Return the final output of the async iterator. 
+ """ + returned_output = await self.agent_executor._areturn( + output, self.intermediate_steps, run_manager=run_manager + ) + returned_output["messages"] = output.messages + await run_manager.on_chain_end(returned_output) + return self.make_final_outputs(returned_output, run_manager) diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/__init__.py new file mode 100644 index 00000000..6e5c2993 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/__init__.py @@ -0,0 +1,166 @@ +"""Agent toolkits contain integrations with various resources and services. + +LangChain has a large ecosystem of integrations with various external resources +like local and remote file systems, APIs and databases. + +These integrations allow developers to create versatile applications that combine the +power of LLMs with the ability to access, interact with and manipulate external +resources. + +When developing an application, developers should inspect the capabilities and +permissions of the tools that underlie the given agent toolkit, and determine +whether permissions of the given toolkit are appropriate for the application. + +See [Security](https://python.langchain.com/docs/security) for more information. +""" + +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from langchain_core._api.path import as_import_path +from langchain_core.tools.retriever import create_retriever_tool + +from langchain._api import create_importer +from langchain.agents.agent_toolkits.conversational_retrieval.openai_functions import ( + create_conversational_retrieval_agent, +) +from langchain.agents.agent_toolkits.vectorstore.base import ( + create_vectorstore_agent, + create_vectorstore_router_agent, +) +from langchain.agents.agent_toolkits.vectorstore.toolkit import ( + VectorStoreInfo, + VectorStoreRouterToolkit, + VectorStoreToolkit, +) + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit + from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit + from langchain_community.agent_toolkits.azure_cognitive_services import ( + AzureCognitiveServicesToolkit, + ) + from langchain_community.agent_toolkits.file_management.toolkit import ( + FileManagementToolkit, + ) + from langchain_community.agent_toolkits.gmail.toolkit import GmailToolkit + from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit + from langchain_community.agent_toolkits.json.base import create_json_agent + from langchain_community.agent_toolkits.json.toolkit import JsonToolkit + from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit + from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit + from langchain_community.agent_toolkits.nla.toolkit import NLAToolkit + from langchain_community.agent_toolkits.office365.toolkit import O365Toolkit + from langchain_community.agent_toolkits.openapi.base import create_openapi_agent + from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit + from langchain_community.agent_toolkits.playwright.toolkit import ( + PlayWrightBrowserToolkit, + ) + from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent + from langchain_community.agent_toolkits.powerbi.chat_base import ( + create_pbi_chat_agent, + ) + from langchain_community.agent_toolkits.powerbi.toolkit import PowerBIToolkit + from langchain_community.agent_toolkits.slack.toolkit import 
SlackToolkit + from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent + from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit + from langchain_community.agent_toolkits.sql.base import create_sql_agent + from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit + from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit + from langchain_community.agent_toolkits.zapier.toolkit import ZapierToolkit + +DEPRECATED_AGENTS = [ + "create_csv_agent", + "create_pandas_dataframe_agent", + "create_xorbits_agent", + "create_python_agent", + "create_spark_dataframe_agent", +] + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit", + "AmadeusToolkit": "langchain_community.agent_toolkits.amadeus.toolkit", + "AzureCognitiveServicesToolkit": ( + "langchain_community.agent_toolkits.azure_cognitive_services" + ), + "FileManagementToolkit": ( + "langchain_community.agent_toolkits.file_management.toolkit" + ), + "GmailToolkit": "langchain_community.agent_toolkits.gmail.toolkit", + "JiraToolkit": "langchain_community.agent_toolkits.jira.toolkit", + "JsonToolkit": "langchain_community.agent_toolkits.json.toolkit", + "MultionToolkit": "langchain_community.agent_toolkits.multion.toolkit", + "NasaToolkit": "langchain_community.agent_toolkits.nasa.toolkit", + "NLAToolkit": "langchain_community.agent_toolkits.nla.toolkit", + "O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit", + "OpenAPIToolkit": "langchain_community.agent_toolkits.openapi.toolkit", + "PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit", + "PowerBIToolkit": "langchain_community.agent_toolkits.powerbi.toolkit", + "SlackToolkit": "langchain_community.agent_toolkits.slack.toolkit", + "SteamToolkit": "langchain_community.agent_toolkits.steam.toolkit", + "SQLDatabaseToolkit": "langchain_community.agent_toolkits.sql.toolkit", + "SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit", + "ZapierToolkit": "langchain_community.agent_toolkits.zapier.toolkit", + "create_json_agent": "langchain_community.agent_toolkits.json.base", + "create_openapi_agent": "langchain_community.agent_toolkits.openapi.base", + "create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base", + "create_pbi_chat_agent": "langchain_community.agent_toolkits.powerbi.chat_base", + "create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base", + "create_sql_agent": "langchain_community.agent_toolkits.sql.base", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Get attr name.""" + if name in DEPRECATED_AGENTS: + relative_path = as_import_path(Path(__file__).parent, suffix=name) + old_path = "langchain." + relative_path + new_path = "langchain_experimental." + relative_path + raise ImportError( + f"{name} has been moved to langchain experimental. " + "See https://github.com/langchain-ai/langchain/discussions/11680 " + "for more information.\n" + f"Please update your import statement from: `{old_path}` to `{new_path}`."
+ ) + return _import_attribute(name) + + +__all__ = [ + "AINetworkToolkit", + "AmadeusToolkit", + "AzureCognitiveServicesToolkit", + "FileManagementToolkit", + "GmailToolkit", + "JiraToolkit", + "JsonToolkit", + "MultionToolkit", + "NasaToolkit", + "NLAToolkit", + "O365Toolkit", + "OpenAPIToolkit", + "PlayWrightBrowserToolkit", + "PowerBIToolkit", + "SlackToolkit", + "SteamToolkit", + "SQLDatabaseToolkit", + "SparkSQLToolkit", + "VectorStoreInfo", + "VectorStoreRouterToolkit", + "VectorStoreToolkit", + "ZapierToolkit", + "create_json_agent", + "create_openapi_agent", + "create_pbi_agent", + "create_pbi_chat_agent", + "create_spark_sql_agent", + "create_sql_agent", + "create_vectorstore_agent", + "create_vectorstore_router_agent", + "create_conversational_retrieval_agent", + "create_retriever_tool", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..5029a99b Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/__pycache__/azure_cognitive_services.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/__pycache__/azure_cognitive_services.cpython-312.pyc new file mode 100644 index 00000000..968983c1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/__pycache__/azure_cognitive_services.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..7e57dfce Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/__init__.py new file mode 100644 index 00000000..c4295f2e --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/__init__.py @@ -0,0 +1 @@ +"""AINetwork toolkit.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..81a44d2f Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..77984480 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/toolkit.py new file mode 100644 index 00000000..2a797a0b --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/ainetwork/toolkit.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: 
+ from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AINetworkToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/amadeus/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/amadeus/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..7cf6bc0b Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/amadeus/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/amadeus/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/amadeus/toolkit.py new file mode 100644 index 00000000..e7e0b93d --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/amadeus/toolkit.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AmadeusToolkit": "langchain_community.agent_toolkits.amadeus.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["AmadeusToolkit"] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/azure_cognitive_services.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/azure_cognitive_services.py new file mode 100644 index 00000000..4f49ae86 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/azure_cognitive_services.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.azure_cognitive_services import ( + AzureCognitiveServicesToolkit, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
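
From here on the toolkit modules repeat one pattern: a `DEPRECATED_LOOKUP` table plus a module-level `__getattr__` (PEP 562) built by `create_importer`. `create_importer` itself is not part of this diff; the sketch below is a generic, assumption-labelled equivalent of the mechanism, not its actual implementation:

```python
# Hedged sketch only -- create_importer's real implementation is not shown here.
import importlib
import warnings
from typing import Any

_MOVED = {"AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit"}

def __getattr__(name: str) -> Any:
    """PEP 562 hook: called when a module attribute is not found normally."""
    if name in _MOVED:
        target = _MOVED[name]
        warnings.warn(
            f"Importing {name} from here is deprecated; import it from {target}.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(target), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```
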
+DEPRECATED_LOOKUP = { + "AzureCognitiveServicesToolkit": ( + "langchain_community.agent_toolkits.azure_cognitive_services" + ) +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureCognitiveServicesToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/base.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/base.py new file mode 100644 index 00000000..50cfb86c --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/base.py @@ -0,0 +1,3 @@ +from langchain_core.tools import BaseToolkit + +__all__ = ["BaseToolkit"] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..c645c316 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..2a047a8e Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/toolkit.py new file mode 100644 index 00000000..e58a8678 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/clickup/toolkit.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
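
The `base.py` file above re-exports `BaseToolkit`, the base class all of these toolkits share: a toolkit is just a bundle of tools exposed through `get_tools()`. A minimal custom toolkit as a hedged sketch (the `ping` tool is a toy example):

```python
from langchain_core.tools import BaseTool, BaseToolkit, tool

@tool
def ping(host: str) -> str:
    """Toy tool that pretends to ping a host."""
    return f"pong from {host}"

class TinyToolkit(BaseToolkit):
    """Smallest possible toolkit: bundles one tool."""

    def get_tools(self) -> list[BaseTool]:
        return [ping]

print([t.name for t in TinyToolkit().get_tools()])  # ['ping']
```
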
+DEPRECATED_LOOKUP = { + "ClickupToolkit": "langchain_community.agent_toolkits.clickup.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ClickupToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..884fc571 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__pycache__/openai_functions.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__pycache__/openai_functions.cpython-312.pyc new file mode 100644 index 00000000..6c955273 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__pycache__/openai_functions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..58c8e334 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py new file mode 100644 index 00000000..6604a86f --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py @@ -0,0 +1,85 @@ +from typing import Any, Optional + +from langchain_core.language_models import BaseLanguageModel +from langchain_core.memory import BaseMemory +from langchain_core.messages import SystemMessage +from langchain_core.prompts.chat import MessagesPlaceholder +from langchain_core.tools import BaseTool + +from langchain.agents.agent import AgentExecutor +from langchain.agents.openai_functions_agent.agent_token_buffer_memory import ( + AgentTokenBufferMemory, +) +from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent +from langchain.memory.token_buffer import ConversationTokenBufferMemory + + +def _get_default_system_message() -> SystemMessage: + return SystemMessage( + content=( + "Do your best to answer the questions. 
" + "Feel free to use any tools available to look up " + "relevant information, only if necessary" + ) + ) + + +def create_conversational_retrieval_agent( + llm: BaseLanguageModel, + tools: list[BaseTool], + remember_intermediate_steps: bool = True, + memory_key: str = "chat_history", + system_message: Optional[SystemMessage] = None, + verbose: bool = False, + max_token_limit: int = 2000, + **kwargs: Any, +) -> AgentExecutor: + """A convenience method for creating a conversational retrieval agent. + + Args: + llm: The language model to use, should be ChatOpenAI + tools: A list of tools the agent has access to + remember_intermediate_steps: Whether the agent should remember intermediate + steps or not. Intermediate steps refer to prior action/observation + pairs from previous questions. The benefit of remembering these is if + there is relevant information in there, the agent can use it to answer + follow up questions. The downside is it will take up more tokens. + memory_key: The name of the memory key in the prompt. + system_message: The system message to use. By default, a basic one will + be used. + verbose: Whether or not the final AgentExecutor should be verbose or not, + defaults to False. + max_token_limit: The max number of tokens to keep around in memory. + Defaults to 2000. + + Returns: + An agent executor initialized appropriately + """ + + if remember_intermediate_steps: + memory: BaseMemory = AgentTokenBufferMemory( + memory_key=memory_key, llm=llm, max_token_limit=max_token_limit + ) + else: + memory = ConversationTokenBufferMemory( + memory_key=memory_key, + return_messages=True, + output_key="output", + llm=llm, + max_token_limit=max_token_limit, + ) + + _system_message = system_message or _get_default_system_message() + prompt = OpenAIFunctionsAgent.create_prompt( + system_message=_system_message, + extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)], + ) + agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt) + return AgentExecutor( + agent=agent, + tools=tools, + memory=memory, + verbose=verbose, + return_intermediate_steps=remember_intermediate_steps, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/tool.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/tool.py new file mode 100644 index 00000000..f7a09af8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/conversational_retrieval/tool.py @@ -0,0 +1,3 @@ +from langchain.tools.retriever import create_retriever_tool + +__all__ = ["create_retriever_tool"] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/csv/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/csv/__init__.py new file mode 100644 index 00000000..4b049802 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/csv/__init__.py @@ -0,0 +1,26 @@ +from pathlib import Path +from typing import Any + +from langchain_core._api.path import as_import_path + + +def __getattr__(name: str) -> Any: + """Get attr name.""" + + if name == "create_csv_agent": + # Get directory of langchain package + HERE = Path(__file__).parents[3] + here = as_import_path(Path(__file__).parent, relative_to=HERE) + + old_path = "langchain." + here + "." + name + new_path = "langchain_experimental." + here + "." + name + raise ImportError( + "This agent has been moved to langchain experiment. 
" + "This agent relies on python REPL tool under the hood, so to use it " + "safely please sandbox the python REPL. " + "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " + "and https://github.com/langchain-ai/langchain/discussions/11680" + "To keep using this code as is, install langchain experimental and " + f"update your import statement from:\n `{old_path}` to `{new_path}`." + ) + raise AttributeError(f"{name} does not exist") diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/csv/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/csv/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..e7fbc629 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/csv/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/__init__.py new file mode 100644 index 00000000..4210b888 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/__init__.py @@ -0,0 +1,31 @@ +"""Local file management toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.file_management.toolkit import ( + FileManagementToolkit, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "FileManagementToolkit": ( + "langchain_community.agent_toolkits.file_management.toolkit" + ) +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FileManagementToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..5778c7ec Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..59fe4003 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/toolkit.py new file mode 100644 index 00000000..ac6830e1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/file_management/toolkit.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.file_management.toolkit import ( + FileManagementToolkit, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "FileManagementToolkit": ( + "langchain_community.agent_toolkits.file_management.toolkit" + ) +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FileManagementToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/__init__.py new file mode 100644 index 00000000..bcd9368a --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/__init__.py @@ -0,0 +1 @@ +"""GitHub Toolkit.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..484bac7c Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..0bf04634 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/toolkit.py new file mode 100644 index 00000000..c5ce5db6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/github/toolkit.py @@ -0,0 +1,69 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.github.toolkit import ( + BranchName, + CommentOnIssue, + CreateFile, + CreatePR, + CreateReviewRequest, + DeleteFile, + DirectoryPath, + GetIssue, + GetPR, + GitHubToolkit, + NoInput, + ReadFile, + SearchCode, + SearchIssuesAndPRs, + UpdateFile, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "NoInput": "langchain_community.agent_toolkits.github.toolkit", + "GetIssue": "langchain_community.agent_toolkits.github.toolkit", + "CommentOnIssue": "langchain_community.agent_toolkits.github.toolkit", + "GetPR": "langchain_community.agent_toolkits.github.toolkit", + "CreatePR": "langchain_community.agent_toolkits.github.toolkit", + "CreateFile": "langchain_community.agent_toolkits.github.toolkit", + "ReadFile": "langchain_community.agent_toolkits.github.toolkit", + "UpdateFile": "langchain_community.agent_toolkits.github.toolkit", + "DeleteFile": "langchain_community.agent_toolkits.github.toolkit", + "DirectoryPath": "langchain_community.agent_toolkits.github.toolkit", + "BranchName": "langchain_community.agent_toolkits.github.toolkit", + "SearchCode": "langchain_community.agent_toolkits.github.toolkit", + "CreateReviewRequest": "langchain_community.agent_toolkits.github.toolkit", + "SearchIssuesAndPRs": "langchain_community.agent_toolkits.github.toolkit", + "GitHubToolkit": "langchain_community.agent_toolkits.github.toolkit", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NoInput", + "GetIssue", + "CommentOnIssue", + "GetPR", + "CreatePR", + "CreateFile", + "ReadFile", + "UpdateFile", + "DeleteFile", + "DirectoryPath", + "BranchName", + "SearchCode", + "CreateReviewRequest", + "SearchIssuesAndPRs", + "GitHubToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/__init__.py new file mode 100644 index 00000000..7d3ca720 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/__init__.py @@ -0,0 +1 @@ +"""GitLab Toolkit.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..f5492f0c Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..278a768e Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/toolkit.py new file mode 100644 index 00000000..c603fc45 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gitlab/toolkit.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "GitLabToolkit": "langchain_community.agent_toolkits.gitlab.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GitLabToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/__init__.py new file mode 100644 index 00000000..02e7f816 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/__init__.py @@ -0,0 +1 @@ +"""Gmail toolkit.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0c7c00bc Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..7fd9d858 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/toolkit.py new file mode 100644 index 00000000..65255dcb --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/gmail/toolkit.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.gmail.toolkit import GmailToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GmailToolkit": "langchain_community.agent_toolkits.gmail.toolkit"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GmailToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/__init__.py new file mode 100644 index 00000000..9f7c6755 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/__init__.py @@ -0,0 +1 @@ +"""Jira Toolkit.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..483766e3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..b4484e63 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/toolkit.py new file mode 100644 index 00000000..4a94a9e8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/jira/toolkit.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"JiraToolkit": "langchain_community.agent_toolkits.jira.toolkit"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JiraToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__init__.py new file mode 100644 index 00000000..bfab0ec6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__init__.py @@ -0,0 +1 @@ +"""Json agent.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..78997d1a Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..1aaab357 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..6403db53 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..e2f0a287 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/base.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/base.py new file mode 100644 index 00000000..f6016326 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/base.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.json.base import create_json_agent + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "create_json_agent": "langchain_community.agent_toolkits.json.base" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "create_json_agent", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/prompt.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/prompt.py new file mode 100644 index 00000000..b7816543 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/prompt.py @@ -0,0 +1,24 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "JSON_PREFIX": "langchain_community.agent_toolkits.json.prompt", + "JSON_SUFFIX": "langchain_community.agent_toolkits.json.prompt", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["JSON_PREFIX", "JSON_SUFFIX"] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/toolkit.py new file mode 100644 index 00000000..4db6fa2c --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/json/toolkit.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.json.toolkit import JsonToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"JsonToolkit": "langchain_community.agent_toolkits.json.toolkit"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JsonToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/__init__.py new file mode 100644 index 00000000..56c72151 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/__init__.py @@ -0,0 +1 @@ +"""MultiOn Toolkit.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..62b80747 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..dfea6505 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/toolkit.py new file mode 100644 index 00000000..eee04a9e --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/multion/toolkit.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "MultionToolkit": "langchain_community.agent_toolkits.multion.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MultionToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/__init__.py new file mode 100644 index 00000000..a13c3ec7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/__init__.py @@ -0,0 +1 @@ +"""NASA Toolkit""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..76ae08c1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..d827c070 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/toolkit.py new file mode 100644 index 00000000..3ddb794b --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nasa/toolkit.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"NasaToolkit": "langchain_community.agent_toolkits.nasa.toolkit"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NasaToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d6409694 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..58e43dfe Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..5a465ae0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/tool.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/tool.py new file mode 100644 index 00000000..6168ba39 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.nla.tool import NLATool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NLATool": "langchain_community.agent_toolkits.nla.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NLATool", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/toolkit.py new file mode 100644 index 00000000..491d1e6a --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/nla/toolkit.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.nla.toolkit import NLAToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"NLAToolkit": "langchain_community.agent_toolkits.nla.toolkit"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NLAToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/__init__.py new file mode 100644 index 00000000..acd0a87f --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/__init__.py @@ -0,0 +1 @@ +"""Office365 toolkit.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..45f98246 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..1c87d7fa Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/toolkit.py new file mode 100644 index 00000000..466d979d --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/office365/toolkit.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.office365.toolkit import O365Toolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "O365Toolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__init__.py new file mode 100644 index 00000000..5d06e271 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__init__.py @@ -0,0 +1 @@ +"""OpenAPI spec agent.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..206f2bfb Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..0a6bf74f Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/planner.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/planner.cpython-312.pyc new file mode 100644 index 00000000..e1eb5fc7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/planner.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/planner_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/planner_prompt.cpython-312.pyc new file mode 100644 index 00000000..a5c5c2f5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/planner_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..1fd4a29d Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/spec.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/spec.cpython-312.pyc new file mode 100644 index 00000000..06c143f6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/spec.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..8f0c15ed Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/base.py 
b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/base.py new file mode 100644 index 00000000..acb6eaba --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/base.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.openapi.base import create_openapi_agent + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "create_openapi_agent": "langchain_community.agent_toolkits.openapi.base" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "create_openapi_agent", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/planner.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/planner.py new file mode 100644 index 00000000..ad276207 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/planner.py @@ -0,0 +1,52 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.openapi.planner import ( + RequestsDeleteToolWithParsing, + RequestsGetToolWithParsing, + RequestsPatchToolWithParsing, + RequestsPostToolWithParsing, + RequestsPutToolWithParsing, + create_openapi_agent, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "RequestsGetToolWithParsing": ( + "langchain_community.agent_toolkits.openapi.planner" + ), + "RequestsPostToolWithParsing": ( + "langchain_community.agent_toolkits.openapi.planner" + ), + "RequestsPatchToolWithParsing": ( + "langchain_community.agent_toolkits.openapi.planner" + ), + "RequestsPutToolWithParsing": ( + "langchain_community.agent_toolkits.openapi.planner" + ), + "RequestsDeleteToolWithParsing": ( + "langchain_community.agent_toolkits.openapi.planner" + ), + "create_openapi_agent": "langchain_community.agent_toolkits.openapi.planner", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RequestsGetToolWithParsing", + "RequestsPostToolWithParsing", + "RequestsPatchToolWithParsing", + "RequestsPutToolWithParsing", + "RequestsDeleteToolWithParsing", + "create_openapi_agent", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/planner_prompt.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/planner_prompt.py new file mode 100644 index 00000000..6ab49fc6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/planner_prompt.py @@ -0,0 +1,103 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.openapi.planner_prompt import ( + API_CONTROLLER_PROMPT, + API_CONTROLLER_TOOL_DESCRIPTION, + API_CONTROLLER_TOOL_NAME, + API_ORCHESTRATOR_PROMPT, + API_PLANNER_PROMPT, + API_PLANNER_TOOL_DESCRIPTION, + API_PLANNER_TOOL_NAME, + PARSING_DELETE_PROMPT, + PARSING_GET_PROMPT, + PARSING_PATCH_PROMPT, + 
PARSING_POST_PROMPT, + PARSING_PUT_PROMPT, + REQUESTS_DELETE_TOOL_DESCRIPTION, + REQUESTS_GET_TOOL_DESCRIPTION, + REQUESTS_PATCH_TOOL_DESCRIPTION, + REQUESTS_POST_TOOL_DESCRIPTION, + REQUESTS_PUT_TOOL_DESCRIPTION, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "API_CONTROLLER_PROMPT": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "API_CONTROLLER_TOOL_DESCRIPTION": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "API_CONTROLLER_TOOL_NAME": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "API_ORCHESTRATOR_PROMPT": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "API_PLANNER_PROMPT": ("langchain_community.agent_toolkits.openapi.planner_prompt"), + "API_PLANNER_TOOL_DESCRIPTION": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "API_PLANNER_TOOL_NAME": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "PARSING_DELETE_PROMPT": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "PARSING_GET_PROMPT": ("langchain_community.agent_toolkits.openapi.planner_prompt"), + "PARSING_PATCH_PROMPT": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "PARSING_POST_PROMPT": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "PARSING_PUT_PROMPT": ("langchain_community.agent_toolkits.openapi.planner_prompt"), + "REQUESTS_DELETE_TOOL_DESCRIPTION": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "REQUESTS_GET_TOOL_DESCRIPTION": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "REQUESTS_PATCH_TOOL_DESCRIPTION": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "REQUESTS_POST_TOOL_DESCRIPTION": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), + "REQUESTS_PUT_TOOL_DESCRIPTION": ( + "langchain_community.agent_toolkits.openapi.planner_prompt" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "API_PLANNER_PROMPT", + "API_PLANNER_TOOL_NAME", + "API_PLANNER_TOOL_DESCRIPTION", + "API_CONTROLLER_PROMPT", + "API_CONTROLLER_TOOL_NAME", + "API_CONTROLLER_TOOL_DESCRIPTION", + "API_ORCHESTRATOR_PROMPT", + "REQUESTS_GET_TOOL_DESCRIPTION", + "PARSING_GET_PROMPT", + "REQUESTS_POST_TOOL_DESCRIPTION", + "PARSING_POST_PROMPT", + "REQUESTS_PATCH_TOOL_DESCRIPTION", + "PARSING_PATCH_PROMPT", + "REQUESTS_PUT_TOOL_DESCRIPTION", + "PARSING_PUT_PROMPT", + "REQUESTS_DELETE_TOOL_DESCRIPTION", + "PARSING_DELETE_PROMPT", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/prompt.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/prompt.py new file mode 100644 index 00000000..7f124bad --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/prompt.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.openapi.prompt import ( + DESCRIPTION, + OPENAPI_PREFIX, + OPENAPI_SUFFIX, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "DESCRIPTION": "langchain_community.agent_toolkits.openapi.prompt", + "OPENAPI_PREFIX": "langchain_community.agent_toolkits.openapi.prompt", + "OPENAPI_SUFFIX": "langchain_community.agent_toolkits.openapi.prompt", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["OPENAPI_PREFIX", "OPENAPI_SUFFIX", "DESCRIPTION"] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/spec.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/spec.py new file mode 100644 index 00000000..03256304 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/spec.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.openapi.spec import ( + ReducedOpenAPISpec, + reduce_openapi_spec, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ReducedOpenAPISpec": "langchain_community.agent_toolkits.openapi.spec", + "reduce_openapi_spec": "langchain_community.agent_toolkits.openapi.spec", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ReducedOpenAPISpec", + "reduce_openapi_spec", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/toolkit.py new file mode 100644 index 00000000..bb20794b --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/openapi/toolkit.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.openapi.toolkit import ( + OpenAPIToolkit, + RequestsToolkit, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "RequestsToolkit": "langchain_community.agent_toolkits.openapi.toolkit", + "OpenAPIToolkit": "langchain_community.agent_toolkits.openapi.toolkit", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RequestsToolkit", + "OpenAPIToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/pandas/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/pandas/__init__.py new file mode 100644 index 00000000..a5cc6fc6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/pandas/__init__.py @@ -0,0 +1,26 @@ +from pathlib import Path +from typing import Any + +from langchain_core._api.path import as_import_path + + +def __getattr__(name: str) -> Any: + """Get attr name.""" + + if name == "create_pandas_dataframe_agent": + # Get directory of langchain package + HERE = Path(__file__).parents[3] + here = as_import_path(Path(__file__).parent, relative_to=HERE) + + old_path = "langchain." + here + "." + name + new_path = "langchain_experimental." + here + "." 
+ name + raise ImportError( + "This agent has been moved to langchain experimental. " + "This agent relies on the Python REPL tool under the hood, so to use it " + "safely please sandbox the Python REPL. " + "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " + "and https://github.com/langchain-ai/langchain/discussions/11680. " + "To keep using this code as is, install langchain-experimental and " + f"update your import statement from:\n `{old_path}` to `{new_path}`." + ) + raise AttributeError(f"{name} does not exist") diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/pandas/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/pandas/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..1299a120 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/pandas/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/__init__.py new file mode 100644 index 00000000..169766c1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/__init__.py @@ -0,0 +1,29 @@ +"""Playwright browser toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.playwright.toolkit import ( + PlayWrightBrowserToolkit, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PlayWrightBrowserToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..354b2fc6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..b702ae13 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/toolkit.py new file mode 100644 index 00000000..debd504a --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/playwright/toolkit.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.playwright.toolkit import ( + PlayWrightBrowserToolkit, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports.
+DEPRECATED_LOOKUP = { + "PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PlayWrightBrowserToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__init__.py new file mode 100644 index 00000000..42a9b09a --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__init__.py @@ -0,0 +1 @@ +"""Power BI agent.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..3b867bb7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..3253e352 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/chat_base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/chat_base.cpython-312.pyc new file mode 100644 index 00000000..1ed182d4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/chat_base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..4d3a9330 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..53dbe158 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/base.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/base.py new file mode 100644 index 00000000..18538bbc --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/base.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "create_pbi_agent", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/chat_base.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/chat_base.py new file mode 100644 index 00000000..db0bed9b --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/chat_base.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.powerbi.chat_base import ( + create_pbi_chat_agent, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "create_pbi_chat_agent": "langchain_community.agent_toolkits.powerbi.chat_base" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "create_pbi_chat_agent", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/prompt.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/prompt.py new file mode 100644 index 00000000..325dbe08 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/prompt.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.powerbi.prompt import ( + POWERBI_CHAT_PREFIX, + POWERBI_CHAT_SUFFIX, + POWERBI_PREFIX, + POWERBI_SUFFIX, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "POWERBI_CHAT_PREFIX": "langchain_community.agent_toolkits.powerbi.prompt", + "POWERBI_CHAT_SUFFIX": "langchain_community.agent_toolkits.powerbi.prompt", + "POWERBI_PREFIX": "langchain_community.agent_toolkits.powerbi.prompt", + "POWERBI_SUFFIX": "langchain_community.agent_toolkits.powerbi.prompt", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "POWERBI_PREFIX", + "POWERBI_SUFFIX", + "POWERBI_CHAT_PREFIX", + "POWERBI_CHAT_SUFFIX", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/toolkit.py new file mode 100644 index 00000000..b008c385 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/powerbi/toolkit.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.powerbi.toolkit import PowerBIToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "PowerBIToolkit": "langchain_community.agent_toolkits.powerbi.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PowerBIToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/python/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/python/__init__.py new file mode 100644 index 00000000..85d36eeb --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/python/__init__.py @@ -0,0 +1,26 @@ +from pathlib import Path +from typing import Any + +from langchain_core._api.path import as_import_path + + +def __getattr__(name: str) -> Any: + """Get attr name.""" + + if name == "create_python_agent": + # Get directory of langchain package + HERE = Path(__file__).parents[3] + here = as_import_path(Path(__file__).parent, relative_to=HERE) + + old_path = "langchain." + here + "." + name + new_path = "langchain_experimental." + here + "." + name + raise ImportError( + "This agent has been moved to langchain experiment. " + "This agent relies on python REPL tool under the hood, so to use it " + "safely please sandbox the python REPL. " + "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " + "and https://github.com/langchain-ai/langchain/discussions/11680" + "To keep using this code as is, install langchain experimental and " + f"update your import statement from:\n `{old_path}` to `{new_path}`." + ) + raise AttributeError(f"{name} does not exist") diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/python/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/python/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..e3a2d77d Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/python/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/__init__.py new file mode 100644 index 00000000..1ec5ae70 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/__init__.py @@ -0,0 +1 @@ +"""Slack toolkit.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..daaf93ce Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..05de54d4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/toolkit.py new file mode 100644 index 00000000..b747bf46 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/slack/toolkit.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer 
+ +if TYPE_CHECKING: + from langchain_community.agent_toolkits.slack.toolkit import SlackToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SlackToolkit": "langchain_community.agent_toolkits.slack.toolkit"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SlackToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark/__init__.py new file mode 100644 index 00000000..73ec5b97 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark/__init__.py @@ -0,0 +1,26 @@ +from pathlib import Path +from typing import Any + +from langchain_core._api.path import as_import_path + + +def __getattr__(name: str) -> Any: + """Get attr name.""" + + if name == "create_spark_dataframe_agent": + # Get directory of langchain package + HERE = Path(__file__).parents[3] + here = as_import_path(Path(__file__).parent, relative_to=HERE) + + old_path = "langchain." + here + "." + name + new_path = "langchain_experimental." + here + "." + name + raise ImportError( + "This agent has been moved to langchain experimental. " + "This agent relies on the Python REPL tool under the hood, so to use it " + "safely please sandbox the Python REPL. " + "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " + "and https://github.com/langchain-ai/langchain/discussions/11680. " + "To keep using this code as is, install langchain-experimental and " + f"update your import statement from:\n `{old_path}` to `{new_path}`."
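+            # For example (an illustrative sketch built from old_path/new_path +            # above; the exact langchain-experimental layout may differ), change: +            #   from langchain.agents.agent_toolkits.spark import create_spark_dataframe_agent +            # to: +            #   from langchain_experimental.agents import create_spark_dataframe_agent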
+ ) + raise AttributeError(f"{name} does not exist") diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..bd9659e5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__init__.py new file mode 100644 index 00000000..4308c079 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__init__.py @@ -0,0 +1 @@ +"""Spark SQL agent.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..1f16c268 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..49611856 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..af9090e6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..855ed5ba Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/base.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/base.py new file mode 100644 index 00000000..25b3bda1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/base.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "create_spark_sql_agent", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/prompt.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/prompt.py new file mode 100644 index 00000000..f381c0f9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/prompt.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.spark_sql.prompt import ( + SQL_PREFIX, + SQL_SUFFIX, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SQL_PREFIX": "langchain_community.agent_toolkits.spark_sql.prompt", + "SQL_SUFFIX": "langchain_community.agent_toolkits.spark_sql.prompt", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["SQL_PREFIX", "SQL_SUFFIX"] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/toolkit.py new file mode 100644 index 00000000..4c393c02 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/spark_sql/toolkit.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SparkSQLToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__init__.py new file mode 100644 index 00000000..74293a52 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__init__.py @@ -0,0 +1 @@ +"""SQL agent.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..6bfd20b6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..a96a5285 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..17d7a3b0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..98f5958c Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/base.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/base.py new file mode 100644 index 00000000..0b73f87a --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/base.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.sql.base import create_sql_agent + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"create_sql_agent": "langchain_community.agent_toolkits.sql.base"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "create_sql_agent", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/prompt.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/prompt.py new file mode 100644 index 00000000..e06162b6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/prompt.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.sql.prompt import ( + SQL_FUNCTIONS_SUFFIX, + SQL_PREFIX, + SQL_SUFFIX, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SQL_PREFIX": "langchain_community.agent_toolkits.sql.prompt", + "SQL_SUFFIX": "langchain_community.agent_toolkits.sql.prompt", + "SQL_FUNCTIONS_SUFFIX": "langchain_community.agent_toolkits.sql.prompt", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["SQL_PREFIX", "SQL_SUFFIX", "SQL_FUNCTIONS_SUFFIX"] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/toolkit.py new file mode 100644 index 00000000..e330c489 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/sql/toolkit.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "SQLDatabaseToolkit": "langchain_community.agent_toolkits.sql.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SQLDatabaseToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/__init__.py new file mode 100644 index 00000000..f9998108 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/__init__.py @@ -0,0 +1 @@ +"""Steam Toolkit.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..90147372 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..d054e436 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/toolkit.py new file mode 100644 index 00000000..aa141081 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/steam/toolkit.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SteamToolkit": "langchain_community.agent_toolkits.steam.toolkit"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SteamToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__init__.py new file mode 100644 index 00000000..ee15a97e --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__init__.py @@ -0,0 +1 @@ +"""Agent toolkit for interacting with vector stores.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..773b8d47 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..cff93325 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..4b2915dc Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..dea01b1a Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/base.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/base.py new file mode 100644 index 00000000..9abc92be --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/base.py @@ -0,0 +1,217 @@ +"""VectorStore agent.""" + +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks.base import BaseCallbackManager +from langchain_core.language_models import BaseLanguageModel + +from langchain.agents.agent import AgentExecutor +from langchain.agents.agent_toolkits.vectorstore.prompt import PREFIX, ROUTER_PREFIX +from langchain.agents.agent_toolkits.vectorstore.toolkit import ( + VectorStoreRouterToolkit, + VectorStoreToolkit, +) +from langchain.agents.mrkl.base import ZeroShotAgent +from langchain.chains.llm import LLMChain + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "This function will continue to be supported, but it is recommended for new " + "use cases to be built with LangGraph. 
LangGraph offers a more flexible and " + "full-featured framework for building agents, including support for " + "tool-calling, persistence of state, and human-in-the-loop workflows. " + "See API reference for this function for a replacement implementation: " + "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_agent.html " # noqa: E501 + "Read more here on how to create agents that query vector stores: " + "https://python.langchain.com/docs/how_to/qa_chat_history_how_to/#agents" + ), +) +def create_vectorstore_agent( + llm: BaseLanguageModel, + toolkit: VectorStoreToolkit, + callback_manager: Optional[BaseCallbackManager] = None, + prefix: str = PREFIX, + verbose: bool = False, + agent_executor_kwargs: Optional[dict[str, Any]] = None, + **kwargs: Any, +) -> AgentExecutor: + """Construct a VectorStore agent from an LLM and tools. + + Note: this class is deprecated. See below for a replacement that uses tool + calling methods and LangGraph. Install LangGraph with: + + .. code-block:: bash + + pip install -U langgraph + + .. code-block:: python + + from langchain_core.tools import create_retriever_tool + from langchain_core.vectorstores import InMemoryVectorStore + from langchain_openai import ChatOpenAI, OpenAIEmbeddings + from langgraph.prebuilt import create_react_agent + + llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) + + vector_store = InMemoryVectorStore.from_texts( + [ + "Dogs are great companions, known for their loyalty and friendliness.", + "Cats are independent pets that often enjoy their own space.", + ], + OpenAIEmbeddings(), + ) + + tool = create_retriever_tool( + vector_store.as_retriever(), + "pet_information_retriever", + "Fetches information about pets.", + ) + + agent = create_react_agent(llm, [tool]) + + for step in agent.stream( + {"messages": [("human", "What are dogs known for?")]}, + stream_mode="values", + ): + step["messages"][-1].pretty_print() + + Args: + llm (BaseLanguageModel): LLM that will be used by the agent + toolkit (VectorStoreToolkit): Set of tools for the agent + callback_manager (Optional[BaseCallbackManager], optional): Object to handle the callback [ Defaults to None. ] + prefix (str, optional): The prefix prompt for the agent. If not provided uses default PREFIX. + verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ] + agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ] + kwargs: Additional named parameters to pass to the ZeroShotAgent. + + Returns: + AgentExecutor: Returns a callable AgentExecutor object. Either you can call it or use run method with the query to get the response + """ # noqa: E501 + tools = toolkit.get_tools() + prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix) + llm_chain = LLMChain( + llm=llm, + prompt=prompt, + callback_manager=callback_manager, + ) + tool_names = [tool.name for tool in tools] + agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) + return AgentExecutor.from_agent_and_tools( + agent=agent, + tools=tools, + callback_manager=callback_manager, + verbose=verbose, + **(agent_executor_kwargs or {}), + ) + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "This function will continue to be supported, but it is recommended for new " + "use cases to be built with LangGraph. 
LangGraph offers a more flexible and " + "full-featured framework for building agents, including support for " + "tool-calling, persistence of state, and human-in-the-loop workflows. " + "See API reference for this function for a replacement implementation: " + "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_router_agent.html " # noqa: E501 + "Read more here on how to create agents that query vector stores: " + "https://python.langchain.com/docs/how_to/qa_chat_history_how_to/#agents" + ), +) +def create_vectorstore_router_agent( + llm: BaseLanguageModel, + toolkit: VectorStoreRouterToolkit, + callback_manager: Optional[BaseCallbackManager] = None, + prefix: str = ROUTER_PREFIX, + verbose: bool = False, + agent_executor_kwargs: Optional[dict[str, Any]] = None, + **kwargs: Any, +) -> AgentExecutor: + """Construct a VectorStore router agent from an LLM and tools. + + Note: this class is deprecated. See below for a replacement that uses tool + calling methods and LangGraph. Install LangGraph with: + + .. code-block:: bash + + pip install -U langgraph + + .. code-block:: python + + from langchain_core.tools import create_retriever_tool + from langchain_core.vectorstores import InMemoryVectorStore + from langchain_openai import ChatOpenAI, OpenAIEmbeddings + from langgraph.prebuilt import create_react_agent + + llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) + + pet_vector_store = InMemoryVectorStore.from_texts( + [ + "Dogs are great companions, known for their loyalty and friendliness.", + "Cats are independent pets that often enjoy their own space.", + ], + OpenAIEmbeddings(), + ) + + food_vector_store = InMemoryVectorStore.from_texts( + [ + "Carrots are orange and delicious.", + "Apples are red and delicious.", + ], + OpenAIEmbeddings(), + ) + + tools = [ + create_retriever_tool( + pet_vector_store.as_retriever(), + "pet_information_retriever", + "Fetches information about pets.", + ), + create_retriever_tool( + food_vector_store.as_retriever(), + "food_information_retriever", + "Fetches information about food.", + ) + ] + + agent = create_react_agent(llm, tools) + + for step in agent.stream( + {"messages": [("human", "Tell me about carrots.")]}, + stream_mode="values", + ): + step["messages"][-1].pretty_print() + + Args: + llm (BaseLanguageModel): LLM that will be used by the agent + toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores + callback_manager (Optional[BaseCallbackManager], optional): Object to handle the callback [ Defaults to None. ] + prefix (str, optional): The prefix prompt for the router agent. If not provided uses default ROUTER_PREFIX. + verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ] + agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ] + kwargs: Additional named parameters to pass to the ZeroShotAgent. + + Returns: + AgentExecutor: Returns a callable AgentExecutor object. Either you can call it or use run method with the query to get the response. 
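+
+    Example (illustrative sketch; assumes ``llm`` and a ``VectorStoreRouterToolkit``
+    named ``router_toolkit`` have already been constructed as above):
+
+    .. code-block:: python
+
+        agent_executor = create_vectorstore_router_agent(llm, router_toolkit)
+        agent_executor.run("Which source mentions carrots?")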
+ """ # noqa: E501 + tools = toolkit.get_tools() + prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix) + llm_chain = LLMChain( + llm=llm, + prompt=prompt, + callback_manager=callback_manager, + ) + tool_names = [tool.name for tool in tools] + agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) + return AgentExecutor.from_agent_and_tools( + agent=agent, + tools=tools, + callback_manager=callback_manager, + verbose=verbose, + **(agent_executor_kwargs or {}), + ) diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/prompt.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/prompt.py new file mode 100644 index 00000000..a2837e56 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/prompt.py @@ -0,0 +1,13 @@ +# flake8: noqa + +PREFIX = """You are an agent designed to answer questions about sets of documents. +You have access to tools for interacting with the documents, and the inputs to the tools are questions. +Sometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so. +If the question does not seem relevant to any of the tools provided, just return "I don't know" as the answer. +""" + +ROUTER_PREFIX = """You are an agent designed to answer questions. +You have access to tools for interacting with different sources, and the inputs to the tools are questions. +Your main task is to decide which of the tools is relevant for answering question at hand. +For complex questions, you can break the question down into sub questions and use tools to answers the sub questions. +""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/toolkit.py new file mode 100644 index 00000000..08c2c1a6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/vectorstore/toolkit.py @@ -0,0 +1,96 @@ +"""Toolkit for interacting with a vector store.""" + +from langchain_core.language_models import BaseLanguageModel +from langchain_core.tools import BaseTool +from langchain_core.tools.base import BaseToolkit +from langchain_core.vectorstores import VectorStore +from pydantic import BaseModel, ConfigDict, Field + + +class VectorStoreInfo(BaseModel): + """Information about a VectorStore.""" + + vectorstore: VectorStore = Field(exclude=True) + name: str + description: str + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + +class VectorStoreToolkit(BaseToolkit): + """Toolkit for interacting with a Vector Store.""" + + vectorstore_info: VectorStoreInfo = Field(exclude=True) + llm: BaseLanguageModel + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def get_tools(self) -> list[BaseTool]: + """Get the tools in the toolkit.""" + try: + from langchain_community.tools.vectorstore.tool import ( + VectorStoreQATool, + VectorStoreQAWithSourcesTool, + ) + except ImportError: + raise ImportError( + "You need to install langchain-community to use this toolkit." 
+            )
+        description = VectorStoreQATool.get_description(
+            self.vectorstore_info.name, self.vectorstore_info.description
+        )
+        qa_tool = VectorStoreQATool(
+            name=self.vectorstore_info.name,
+            description=description,
+            vectorstore=self.vectorstore_info.vectorstore,
+            llm=self.llm,
+        )
+        description = VectorStoreQAWithSourcesTool.get_description(
+            self.vectorstore_info.name, self.vectorstore_info.description
+        )
+        qa_with_sources_tool = VectorStoreQAWithSourcesTool(
+            name=f"{self.vectorstore_info.name}_with_sources",
+            description=description,
+            vectorstore=self.vectorstore_info.vectorstore,
+            llm=self.llm,
+        )
+        return [qa_tool, qa_with_sources_tool]
+
+
+class VectorStoreRouterToolkit(BaseToolkit):
+    """Toolkit for routing between Vector Stores."""
+
+    vectorstores: list[VectorStoreInfo] = Field(exclude=True)
+    llm: BaseLanguageModel
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )
+
+    def get_tools(self) -> list[BaseTool]:
+        """Get the tools in the toolkit."""
+        tools: list[BaseTool] = []
+        try:
+            from langchain_community.tools.vectorstore.tool import (
+                VectorStoreQATool,
+            )
+        except ImportError:
+            raise ImportError(
+                "You need to install langchain-community to use this toolkit."
+            )
+        for vectorstore_info in self.vectorstores:
+            description = VectorStoreQATool.get_description(
+                vectorstore_info.name, vectorstore_info.description
+            )
+            qa_tool = VectorStoreQATool(
+                name=vectorstore_info.name,
+                description=description,
+                vectorstore=vectorstore_info.vectorstore,
+                llm=self.llm,
+            )
+            tools.append(qa_tool)
+        return tools
diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/xorbits/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/xorbits/__init__.py
new file mode 100644
index 00000000..fd8fc13b
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/xorbits/__init__.py
@@ -0,0 +1,26 @@
+from pathlib import Path
+from typing import Any
+
+from langchain_core._api.path import as_import_path
+
+
+def __getattr__(name: str) -> Any:
+    """Get attr name."""
+
+    if name == "create_xorbits_agent":
+        # Get directory of langchain package
+        HERE = Path(__file__).parents[3]
+        here = as_import_path(Path(__file__).parent, relative_to=HERE)
+
+        old_path = "langchain." + here + "." + name
+        new_path = "langchain_experimental." + here + "." + name
+        raise ImportError(
+            "This agent has been moved to langchain-experimental. "
+            "This agent relies on the python REPL tool under the hood, so to use it "
+            "safely please sandbox the python REPL. "
+            "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
+            "and https://github.com/langchain-ai/langchain/discussions/11680 "
+            "To keep using this code as is, install langchain-experimental and "
+            f"update your import statement from:\n `{old_path}` to `{new_path}`."
+ ) + raise AttributeError(f"{name} does not exist") diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/xorbits/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/xorbits/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a66bcd52 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/xorbits/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/__init__.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/__init__.py new file mode 100644 index 00000000..faef4a32 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/__init__.py @@ -0,0 +1 @@ +"""Zapier Toolkit.""" diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0ba7a9a3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/__pycache__/toolkit.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/__pycache__/toolkit.cpython-312.pyc new file mode 100644 index 00000000..deac5f3d Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/__pycache__/toolkit.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/toolkit.py b/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/toolkit.py new file mode 100644 index 00000000..83461b54 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_toolkits/zapier/toolkit.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.agent_toolkits.zapier.toolkit import ZapierToolkit + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ZapierToolkit": "langchain_community.agent_toolkits.zapier.toolkit" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ZapierToolkit", +] diff --git a/venv/Lib/site-packages/langchain/agents/agent_types.py b/venv/Lib/site-packages/langchain/agents/agent_types.py new file mode 100644 index 00000000..e6a72a98 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/agent_types.py @@ -0,0 +1,57 @@ +"""Module definitions of agent types together with corresponding agents.""" + +from enum import Enum + +from langchain_core._api import deprecated + +from langchain._api.deprecation import AGENT_DEPRECATION_WARNING + + +@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +class AgentType(str, Enum): + """An enum for agent types. + + See documentation: https://python.langchain.com/docs/modules/agents/agent_types/ + """ + + ZERO_SHOT_REACT_DESCRIPTION = "zero-shot-react-description" + """A zero shot agent that does a reasoning step before acting.""" + + REACT_DOCSTORE = "react-docstore" + """A zero shot agent that does a reasoning step before acting. 
+
+    This agent has access to a document store that allows it to look up
+    relevant information for answering the question.
+    """
+
+    SELF_ASK_WITH_SEARCH = "self-ask-with-search"
+    """An agent that breaks down a complex question into a series of simpler questions.
+
+    This agent uses a search tool to look up answers to the simpler questions
+    in order to answer the original complex question.
+    """
+    CONVERSATIONAL_REACT_DESCRIPTION = "conversational-react-description"
+    CHAT_ZERO_SHOT_REACT_DESCRIPTION = "chat-zero-shot-react-description"
+    """A zero shot agent that does a reasoning step before acting.
+
+    This agent is designed to be used in conjunction with chat models.
+    """
+
+    CHAT_CONVERSATIONAL_REACT_DESCRIPTION = "chat-conversational-react-description"
+
+    STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION = (
+        "structured-chat-zero-shot-react-description"
+    )
+    """A zero-shot react agent optimized for chat models.
+
+    This agent is capable of invoking tools that have multiple inputs.
+    """
+
+    OPENAI_FUNCTIONS = "openai-functions"
+    """An agent optimized for using OpenAI functions."""
+
+    OPENAI_MULTI_FUNCTIONS = "openai-multi-functions"
diff --git a/venv/Lib/site-packages/langchain/agents/chat/__init__.py b/venv/Lib/site-packages/langchain/agents/chat/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/venv/Lib/site-packages/langchain/agents/chat/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/chat/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..50a5eb9c
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/chat/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/chat/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/chat/__pycache__/base.cpython-312.pyc
new file mode 100644
index 00000000..e564c4f0
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/chat/__pycache__/base.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/chat/__pycache__/output_parser.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/chat/__pycache__/output_parser.cpython-312.pyc
new file mode 100644
index 00000000..8a428fdc
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/chat/__pycache__/output_parser.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/chat/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/chat/__pycache__/prompt.cpython-312.pyc
new file mode 100644
index 00000000..6383e6d0
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/chat/__pycache__/prompt.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/chat/base.py b/venv/Lib/site-packages/langchain/agents/chat/base.py
new file mode 100644
index 00000000..19488b3b
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/agents/chat/base.py
@@ -0,0 +1,184 @@
+from collections.abc import Sequence
+from typing import Any, Optional
+
+from langchain_core._api import deprecated
+from langchain_core.agents import AgentAction
+from langchain_core.callbacks import BaseCallbackManager
+from langchain_core.language_models import BaseLanguageModel
+from langchain_core.prompts import BasePromptTemplate
+from langchain_core.prompts.chat import (
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    SystemMessagePromptTemplate,
+)
+from langchain_core.tools import BaseTool
+from pydantic import Field
+
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
+from langchain.agents.agent import Agent, AgentOutputParser +from langchain.agents.chat.output_parser import ChatOutputParser +from langchain.agents.chat.prompt import ( + FORMAT_INSTRUCTIONS, + HUMAN_MESSAGE, + SYSTEM_MESSAGE_PREFIX, + SYSTEM_MESSAGE_SUFFIX, +) +from langchain.agents.utils import validate_tools_single_input +from langchain.chains.llm import LLMChain + + +@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +class ChatAgent(Agent): + """Chat Agent.""" + + output_parser: AgentOutputParser = Field(default_factory=ChatOutputParser) + """Output parser for the agent.""" + + @property + def observation_prefix(self) -> str: + """Prefix to append the observation with.""" + return "Observation: " + + @property + def llm_prefix(self) -> str: + """Prefix to append the llm call with.""" + return "Thought:" + + def _construct_scratchpad( + self, intermediate_steps: list[tuple[AgentAction, str]] + ) -> str: + agent_scratchpad = super()._construct_scratchpad(intermediate_steps) + if not isinstance(agent_scratchpad, str): + raise ValueError("agent_scratchpad should be of type string.") + if agent_scratchpad: + return ( + f"This was your previous work " + f"(but I haven't seen any of it! I only see what " + f"you return as final answer):\n{agent_scratchpad}" + ) + else: + return agent_scratchpad + + @classmethod + def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: + return ChatOutputParser() + + @classmethod + def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: + super()._validate_tools(tools) + validate_tools_single_input(class_name=cls.__name__, tools=tools) + + @property + def _stop(self) -> list[str]: + return ["Observation:"] + + @classmethod + def create_prompt( + cls, + tools: Sequence[BaseTool], + system_message_prefix: str = SYSTEM_MESSAGE_PREFIX, + system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX, + human_message: str = HUMAN_MESSAGE, + format_instructions: str = FORMAT_INSTRUCTIONS, + input_variables: Optional[list[str]] = None, + ) -> BasePromptTemplate: + """Create a prompt from a list of tools. + + Args: + tools: A list of tools. + system_message_prefix: The system message prefix. + Default is SYSTEM_MESSAGE_PREFIX. + system_message_suffix: The system message suffix. + Default is SYSTEM_MESSAGE_SUFFIX. + human_message: The human message. Default is HUMAN_MESSAGE. + format_instructions: The format instructions. + Default is FORMAT_INSTRUCTIONS. + input_variables: The input variables. Default is None. + + Returns: + A prompt template. 
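+
+        Example (illustrative; ``tools`` stands for any sequence of
+        single-input tools)::
+
+            prompt = ChatAgent.create_prompt(tools)
+            # -> ChatPromptTemplate: system message (tool list + format
+            #    instructions) followed by the human message template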
+ """ + + tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools]) + tool_names = ", ".join([tool.name for tool in tools]) + format_instructions = format_instructions.format(tool_names=tool_names) + template = "\n\n".join( + [ + system_message_prefix, + tool_strings, + format_instructions, + system_message_suffix, + ] + ) + messages = [ + SystemMessagePromptTemplate.from_template(template), + HumanMessagePromptTemplate.from_template(human_message), + ] + if input_variables is None: + input_variables = ["input", "agent_scratchpad"] + return ChatPromptTemplate(input_variables=input_variables, messages=messages) + + @classmethod + def from_llm_and_tools( + cls, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + callback_manager: Optional[BaseCallbackManager] = None, + output_parser: Optional[AgentOutputParser] = None, + system_message_prefix: str = SYSTEM_MESSAGE_PREFIX, + system_message_suffix: str = SYSTEM_MESSAGE_SUFFIX, + human_message: str = HUMAN_MESSAGE, + format_instructions: str = FORMAT_INSTRUCTIONS, + input_variables: Optional[list[str]] = None, + **kwargs: Any, + ) -> Agent: + """Construct an agent from an LLM and tools. + + Args: + llm: The language model. + tools: A list of tools. + callback_manager: The callback manager. Default is None. + output_parser: The output parser. Default is None. + system_message_prefix: The system message prefix. + Default is SYSTEM_MESSAGE_PREFIX. + system_message_suffix: The system message suffix. + Default is SYSTEM_MESSAGE_SUFFIX. + human_message: The human message. Default is HUMAN_MESSAGE. + format_instructions: The format instructions. + Default is FORMAT_INSTRUCTIONS. + input_variables: The input variables. Default is None. + kwargs: Additional keyword arguments. + + Returns: + An agent. 
+ """ + cls._validate_tools(tools) + prompt = cls.create_prompt( + tools, + system_message_prefix=system_message_prefix, + system_message_suffix=system_message_suffix, + human_message=human_message, + format_instructions=format_instructions, + input_variables=input_variables, + ) + llm_chain = LLMChain( + llm=llm, + prompt=prompt, + callback_manager=callback_manager, + ) + tool_names = [tool.name for tool in tools] + _output_parser = output_parser or cls._get_default_output_parser() + return cls( + llm_chain=llm_chain, + allowed_tools=tool_names, + output_parser=_output_parser, + **kwargs, + ) + + @property + def _agent_type(self) -> str: + raise ValueError diff --git a/venv/Lib/site-packages/langchain/agents/chat/output_parser.py b/venv/Lib/site-packages/langchain/agents/chat/output_parser.py new file mode 100644 index 00000000..fd15a4aa --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/chat/output_parser.py @@ -0,0 +1,71 @@ +import json +import re +from re import Pattern +from typing import Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException + +from langchain.agents.agent import AgentOutputParser +from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS + +FINAL_ANSWER_ACTION = "Final Answer:" + + +class ChatOutputParser(AgentOutputParser): + """Output parser for the chat agent.""" + + format_instructions: str = FORMAT_INSTRUCTIONS + """Default formatting instructions""" + + pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL) + """Regex pattern to parse the output.""" + + def get_format_instructions(self) -> str: + """Returns formatting instructions for the given output parser.""" + return self.format_instructions + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + """Parse the output from the agent into + an AgentAction or AgentFinish object. + + Args: + text: The text to parse. + + Returns: + An AgentAction or AgentFinish object. + + Raises: + OutputParserException: If the output could not be parsed. + ValueError: If the action could not be found. + """ + + includes_answer = FINAL_ANSWER_ACTION in text + try: + found = self.pattern.search(text) + if not found: + # Fast fail to parse Final Answer. + raise ValueError("action not found") + action = found.group(1) + response = json.loads(action.strip()) + includes_action = "action" in response + if includes_answer and includes_action: + raise OutputParserException( + "Parsing LLM output produced a final answer " + f"and a parse-able action: {text}" + ) + return AgentAction( + response["action"], response.get("action_input", {}), text + ) + + except Exception as exc: + if not includes_answer: + raise OutputParserException( + f"Could not parse LLM output: {text}" + ) from exc + output = text.split(FINAL_ANSWER_ACTION)[-1].strip() + return AgentFinish({"output": output}, text) + + @property + def _type(self) -> str: + return "chat" diff --git a/venv/Lib/site-packages/langchain/agents/chat/prompt.py b/venv/Lib/site-packages/langchain/agents/chat/prompt.py new file mode 100644 index 00000000..4343739b --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/chat/prompt.py @@ -0,0 +1,30 @@ +# flake8: noqa +SYSTEM_MESSAGE_PREFIX = """Answer the following questions as best you can. You have access to the following tools:""" +FORMAT_INSTRUCTIONS = """The way you use the tools is by specifying a json blob. 
+Specifically, this json should have an `action` key (with the name of the tool to use) and an `action_input` key (with the input to the tool going here).
+
+The only values that should be in the "action" field are: {tool_names}
+
+The $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB:
+
+```
+{{{{
+  "action": $TOOL_NAME,
+  "action_input": $INPUT
+}}}}
+```
+
+ALWAYS use the following format:
+
+Question: the input question you must answer
+Thought: you should always think about what to do
+Action:
+```
+$JSON_BLOB
+```
+Observation: the result of the action
+... (this Thought/Action/Observation can repeat N times)
+Thought: I now know the final answer
+Final Answer: the final answer to the original input question"""
+SYSTEM_MESSAGE_SUFFIX = """Begin! Reminder to always use the exact characters `Final Answer` when responding."""
+HUMAN_MESSAGE = "{input}\n\n{agent_scratchpad}"
diff --git a/venv/Lib/site-packages/langchain/agents/conversational/__init__.py b/venv/Lib/site-packages/langchain/agents/conversational/__init__.py
new file mode 100644
index 00000000..94290c9c
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/agents/conversational/__init__.py
@@ -0,0 +1 @@
+"""An agent designed to hold a conversation in addition to using tools."""
diff --git a/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..23334232
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/base.cpython-312.pyc
new file mode 100644
index 00000000..7a1855cb
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/base.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/output_parser.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/output_parser.cpython-312.pyc
new file mode 100644
index 00000000..e5564284
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/output_parser.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/prompt.cpython-312.pyc
new file mode 100644
index 00000000..314a6c03
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/conversational/__pycache__/prompt.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/conversational/base.py b/venv/Lib/site-packages/langchain/agents/conversational/base.py
new file mode 100644
index 00000000..32846b0c
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/agents/conversational/base.py
@@ -0,0 +1,172 @@
+"""An agent designed to hold a conversation in addition to using tools."""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import Any, Optional
+
+from langchain_core._api import deprecated
+from langchain_core.callbacks import BaseCallbackManager
+from langchain_core.language_models import BaseLanguageModel
+from langchain_core.prompts import PromptTemplate
+from langchain_core.tools import BaseTool
+from pydantic import Field
+
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
+from langchain.agents.agent import Agent, AgentOutputParser
+from langchain.agents.agent_types import AgentType
+from langchain.agents.conversational.output_parser import ConvoOutputParser
+from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
+from langchain.agents.utils import validate_tools_single_input
+from langchain.chains import LLMChain
+
+
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
+class ConversationalAgent(Agent):
+    """An agent that holds a conversation in addition to using tools."""
+
+    ai_prefix: str = "AI"
+    """Prefix to use before AI output."""
+    output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)
+    """Output parser for the agent."""
+
+    @classmethod
+    def _get_default_output_parser(
+        cls, ai_prefix: str = "AI", **kwargs: Any
+    ) -> AgentOutputParser:
+        return ConvoOutputParser(ai_prefix=ai_prefix)
+
+    @property
+    def _agent_type(self) -> str:
+        """Return Identifier of agent type."""
+        return AgentType.CONVERSATIONAL_REACT_DESCRIPTION
+
+    @property
+    def observation_prefix(self) -> str:
+        """Prefix to append the observation with.
+
+        Returns:
+            "Observation: "
+        """
+        return "Observation: "
+
+    @property
+    def llm_prefix(self) -> str:
+        """Prefix to append the llm call with.
+
+        Returns:
+            "Thought: "
+        """
+        return "Thought:"
+
+    @classmethod
+    def create_prompt(
+        cls,
+        tools: Sequence[BaseTool],
+        prefix: str = PREFIX,
+        suffix: str = SUFFIX,
+        format_instructions: str = FORMAT_INSTRUCTIONS,
+        ai_prefix: str = "AI",
+        human_prefix: str = "Human",
+        input_variables: Optional[list[str]] = None,
+    ) -> PromptTemplate:
+        """Create prompt in the style of the zero-shot agent.
+
+        Args:
+            tools: List of tools the agent will have access to, used to format the
+                prompt.
+            prefix: String to put before the list of tools. Defaults to PREFIX.
+            suffix: String to put after the list of tools. Defaults to SUFFIX.
+            format_instructions: Instructions on how to use the tools. Defaults to
+                FORMAT_INSTRUCTIONS.
+            ai_prefix: String to use before AI output. Defaults to "AI".
+            human_prefix: String to use before human output.
+                Defaults to "Human".
+            input_variables: List of input variables the final prompt will expect.
+                Defaults to ["input", "chat_history", "agent_scratchpad"].
+
+        Returns:
+            A PromptTemplate with the template assembled from the pieces here.
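+
+        Example (illustrative; shows the main customization points)::
+
+            prompt = ConversationalAgent.create_prompt(
+                tools, ai_prefix="Assistant", human_prefix="User"
+            )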
+ """ + tool_strings = "\n".join( + [f"> {tool.name}: {tool.description}" for tool in tools] + ) + tool_names = ", ".join([tool.name for tool in tools]) + format_instructions = format_instructions.format( + tool_names=tool_names, ai_prefix=ai_prefix, human_prefix=human_prefix + ) + template = "\n\n".join([prefix, tool_strings, format_instructions, suffix]) + if input_variables is None: + input_variables = ["input", "chat_history", "agent_scratchpad"] + return PromptTemplate(template=template, input_variables=input_variables) + + @classmethod + def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: + super()._validate_tools(tools) + validate_tools_single_input(cls.__name__, tools) + + @classmethod + def from_llm_and_tools( + cls, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + callback_manager: Optional[BaseCallbackManager] = None, + output_parser: Optional[AgentOutputParser] = None, + prefix: str = PREFIX, + suffix: str = SUFFIX, + format_instructions: str = FORMAT_INSTRUCTIONS, + ai_prefix: str = "AI", + human_prefix: str = "Human", + input_variables: Optional[list[str]] = None, + **kwargs: Any, + ) -> Agent: + """Construct an agent from an LLM and tools. + + Args: + llm: The language model to use. + tools: A list of tools to use. + callback_manager: The callback manager to use. Default is None. + output_parser: The output parser to use. Default is None. + prefix: The prefix to use in the prompt. Default is PREFIX. + suffix: The suffix to use in the prompt. Default is SUFFIX. + format_instructions: The format instructions to use. + Default is FORMAT_INSTRUCTIONS. + ai_prefix: The prefix to use before AI output. Default is "AI". + human_prefix: The prefix to use before human output. + Default is "Human". + input_variables: The input variables to use. Default is None. + **kwargs: Any additional keyword arguments to pass to the agent. + + Returns: + An agent. 
+ """ + cls._validate_tools(tools) + prompt = cls.create_prompt( + tools, + ai_prefix=ai_prefix, + human_prefix=human_prefix, + prefix=prefix, + suffix=suffix, + format_instructions=format_instructions, + input_variables=input_variables, + ) + llm_chain = LLMChain( + llm=llm, + prompt=prompt, + callback_manager=callback_manager, + ) + tool_names = [tool.name for tool in tools] + _output_parser = output_parser or cls._get_default_output_parser( + ai_prefix=ai_prefix + ) + return cls( + llm_chain=llm_chain, + allowed_tools=tool_names, + ai_prefix=ai_prefix, + output_parser=_output_parser, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/agents/conversational/output_parser.py b/venv/Lib/site-packages/langchain/agents/conversational/output_parser.py new file mode 100644 index 00000000..dadcfabb --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/conversational/output_parser.py @@ -0,0 +1,49 @@ +import re +from typing import Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException + +from langchain.agents.agent import AgentOutputParser +from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS + + +class ConvoOutputParser(AgentOutputParser): + """Output parser for the conversational agent.""" + + ai_prefix: str = "AI" + """Prefix to use before AI output.""" + + format_instructions: str = FORMAT_INSTRUCTIONS + """Default formatting instructions""" + + def get_format_instructions(self) -> str: + """Returns formatting instructions for the given output parser.""" + return self.format_instructions + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + """Parse the output from the agent into + an AgentAction or AgentFinish object. + + Args: + text: The text to parse. + + Returns: + An AgentAction or AgentFinish object. + """ + + if f"{self.ai_prefix}:" in text: + return AgentFinish( + {"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text + ) + regex = r"Action: (.*?)[\n]*Action Input: ([\s\S]*)" + match = re.search(regex, text, re.DOTALL) + if not match: + raise OutputParserException(f"Could not parse LLM output: `{text}`") + action = match.group(1) + action_input = match.group(2) + return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text) + + @property + def _type(self) -> str: + return "conversational" diff --git a/venv/Lib/site-packages/langchain/agents/conversational/prompt.py b/venv/Lib/site-packages/langchain/agents/conversational/prompt.py new file mode 100644 index 00000000..15268a76 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/conversational/prompt.py @@ -0,0 +1,36 @@ +# flake8: noqa +PREFIX = """Assistant is a large language model trained by OpenAI. + +Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. + +Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. 
Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. + +Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist. + +TOOLS: +------ + +Assistant has access to the following tools:""" +FORMAT_INSTRUCTIONS = """To use a tool, please use the following format: + +``` +Thought: Do I need to use a tool? Yes +Action: the action to take, should be one of [{tool_names}] +Action Input: the input to the action +Observation: the result of the action +``` + +When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format: + +``` +Thought: Do I need to use a tool? No +{ai_prefix}: [your response here] +```""" + +SUFFIX = """Begin! + +Previous conversation history: +{chat_history} + +New input: {input} +{agent_scratchpad}""" diff --git a/venv/Lib/site-packages/langchain/agents/conversational_chat/__init__.py b/venv/Lib/site-packages/langchain/agents/conversational_chat/__init__.py new file mode 100644 index 00000000..94290c9c --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/conversational_chat/__init__.py @@ -0,0 +1 @@ +"""An agent designed to hold a conversation in addition to using tools.""" diff --git a/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..3e4a1d9e Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..ce4cd0e7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/output_parser.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/output_parser.cpython-312.pyc new file mode 100644 index 00000000..6bca4918 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/output_parser.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..6b5070cb Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/conversational_chat/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/conversational_chat/base.py b/venv/Lib/site-packages/langchain/agents/conversational_chat/base.py new file mode 100644 index 00000000..35bf4bff --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/conversational_chat/base.py @@ -0,0 +1,179 @@ +"""An agent designed to hold a conversation in addition to using tools.""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any, 
Optional + +from langchain_core._api import deprecated +from langchain_core.agents import AgentAction +from langchain_core.callbacks import BaseCallbackManager +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.prompts import BasePromptTemplate +from langchain_core.prompts.chat import ( + ChatPromptTemplate, + HumanMessagePromptTemplate, + MessagesPlaceholder, + SystemMessagePromptTemplate, +) +from langchain_core.tools import BaseTool +from pydantic import Field + +from langchain.agents.agent import Agent, AgentOutputParser +from langchain.agents.conversational_chat.output_parser import ConvoOutputParser +from langchain.agents.conversational_chat.prompt import ( + PREFIX, + SUFFIX, + TEMPLATE_TOOL_RESPONSE, +) +from langchain.agents.utils import validate_tools_single_input +from langchain.chains import LLMChain + + +@deprecated("0.1.0", alternative="create_json_chat_agent", removal="1.0") +class ConversationalChatAgent(Agent): + """An agent designed to hold a conversation in addition to using tools.""" + + output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser) + """Output parser for the agent.""" + template_tool_response: str = TEMPLATE_TOOL_RESPONSE + """Template for the tool response.""" + + @classmethod + def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: + return ConvoOutputParser() + + @property + def _agent_type(self) -> str: + raise NotImplementedError + + @property + def observation_prefix(self) -> str: + """Prefix to append the observation with. + + Returns: + "Observation: " + """ + return "Observation: " + + @property + def llm_prefix(self) -> str: + """Prefix to append the llm call with. + + Returns: + "Thought: " + """ + return "Thought:" + + @classmethod + def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: + super()._validate_tools(tools) + validate_tools_single_input(cls.__name__, tools) + + @classmethod + def create_prompt( + cls, + tools: Sequence[BaseTool], + system_message: str = PREFIX, + human_message: str = SUFFIX, + input_variables: Optional[list[str]] = None, + output_parser: Optional[BaseOutputParser] = None, + ) -> BasePromptTemplate: + """Create a prompt for the agent. + + Args: + tools: The tools to use. + system_message: The system message to use. + Defaults to the PREFIX. + human_message: The human message to use. + Defaults to the SUFFIX. + input_variables: The input variables to use. Defaults to None. + output_parser: The output parser to use. Defaults to None. + + Returns: + A PromptTemplate. 
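+
+        Example (illustrative; the returned ``ChatPromptTemplate`` stacks the
+        system message, a ``chat_history`` placeholder, the formatted human
+        message, and an ``agent_scratchpad`` placeholder)::
+
+            prompt = ConversationalChatAgent.create_prompt(tools)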
+ """ + tool_strings = "\n".join( + [f"> {tool.name}: {tool.description}" for tool in tools] + ) + tool_names = ", ".join([tool.name for tool in tools]) + _output_parser = output_parser or cls._get_default_output_parser() + format_instructions = human_message.format( + format_instructions=_output_parser.get_format_instructions() + ) + final_prompt = format_instructions.format( + tool_names=tool_names, tools=tool_strings + ) + if input_variables is None: + input_variables = ["input", "chat_history", "agent_scratchpad"] + messages = [ + SystemMessagePromptTemplate.from_template(system_message), + MessagesPlaceholder(variable_name="chat_history"), + HumanMessagePromptTemplate.from_template(final_prompt), + MessagesPlaceholder(variable_name="agent_scratchpad"), + ] + return ChatPromptTemplate(input_variables=input_variables, messages=messages) + + def _construct_scratchpad( + self, intermediate_steps: list[tuple[AgentAction, str]] + ) -> list[BaseMessage]: + """Construct the scratchpad that lets the agent continue its thought process.""" + thoughts: list[BaseMessage] = [] + for action, observation in intermediate_steps: + thoughts.append(AIMessage(content=action.log)) + human_message = HumanMessage( + content=self.template_tool_response.format(observation=observation) + ) + thoughts.append(human_message) + return thoughts + + @classmethod + def from_llm_and_tools( + cls, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + callback_manager: Optional[BaseCallbackManager] = None, + output_parser: Optional[AgentOutputParser] = None, + system_message: str = PREFIX, + human_message: str = SUFFIX, + input_variables: Optional[list[str]] = None, + **kwargs: Any, + ) -> Agent: + """Construct an agent from an LLM and tools. + + Args: + llm: The language model to use. + tools: A list of tools to use. + callback_manager: The callback manager to use. Default is None. + output_parser: The output parser to use. Default is None. + system_message: The system message to use. Default is PREFIX. + human_message: The human message to use. Default is SUFFIX. + input_variables: The input variables to use. Default is None. + **kwargs: Any additional arguments. + + Returns: + An agent. 
+ """ + cls._validate_tools(tools) + _output_parser = output_parser or cls._get_default_output_parser() + prompt = cls.create_prompt( + tools, + system_message=system_message, + human_message=human_message, + input_variables=input_variables, + output_parser=_output_parser, + ) + llm_chain = LLMChain( + llm=llm, + prompt=prompt, + callback_manager=callback_manager, + ) + tool_names = [tool.name for tool in tools] + return cls( + llm_chain=llm_chain, + allowed_tools=tool_names, + output_parser=_output_parser, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/agents/conversational_chat/output_parser.py b/venv/Lib/site-packages/langchain/agents/conversational_chat/output_parser.py new file mode 100644 index 00000000..daf317f2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/conversational_chat/output_parser.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from typing import Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException +from langchain_core.utils.json import parse_json_markdown + +from langchain.agents import AgentOutputParser +from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS + + +# Define a class that parses output for conversational agents +class ConvoOutputParser(AgentOutputParser): + """Output parser for the conversational agent.""" + + format_instructions: str = FORMAT_INSTRUCTIONS + """Default formatting instructions""" + + def get_format_instructions(self) -> str: + """Returns formatting instructions for the given output parser.""" + return self.format_instructions + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + """Attempts to parse the given text into an AgentAction or AgentFinish. + + Raises: + OutputParserException if parsing fails. + """ + try: + # Attempt to parse the text into a structured format (assumed to be JSON + # stored as markdown) + response = parse_json_markdown(text) + + # If the response contains an 'action' and 'action_input' + if "action" in response and "action_input" in response: + action, action_input = response["action"], response["action_input"] + + # If the action indicates a final answer, return an AgentFinish + if action == "Final Answer": + return AgentFinish({"output": action_input}, text) + else: + # Otherwise, return an AgentAction with the specified action and + # input + return AgentAction(action, action_input, text) + else: + # If the necessary keys aren't present in the response, raise an + # exception + raise OutputParserException( + f"Missing 'action' or 'action_input' in LLM output: {text}" + ) + except Exception as e: + # If any other exception is raised during parsing, also raise an + # OutputParserException + raise OutputParserException(f"Could not parse LLM output: {text}") from e + + @property + def _type(self) -> str: + return "conversational_chat" diff --git a/venv/Lib/site-packages/langchain/agents/conversational_chat/prompt.py b/venv/Lib/site-packages/langchain/agents/conversational_chat/prompt.py new file mode 100644 index 00000000..712a9238 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/conversational_chat/prompt.py @@ -0,0 +1,57 @@ +# flake8: noqa +PREFIX = """Assistant is a large language model trained by OpenAI. + +Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. 
As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
+
+Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
+
+Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist."""
+
+FORMAT_INSTRUCTIONS = """RESPONSE FORMAT INSTRUCTIONS
+----------------------------
+
+When responding to me, please output a response in one of two formats:
+
+**Option 1:**
+Use this if you want the human to use a tool.
+Markdown code snippet formatted in the following schema:
+
+```json
+{{{{
+    "action": string, \\\\ The action to take. Must be one of {tool_names}
+    "action_input": string \\\\ The input to the action
+}}}}
+```
+
+**Option 2:**
+Use this if you want to respond directly to the human. Markdown code snippet formatted in the following schema:
+
+```json
+{{{{
+    "action": "Final Answer",
+    "action_input": string \\\\ You should put what you want to return to the user here
+}}}}
+```"""
+
+SUFFIX = """TOOLS
+------
+Assistant can ask the user to use tools to look up information that may be helpful in answering the user's original question. The tools the human can use are:
+
+{{tools}}
+
+{format_instructions}
+
+USER'S INPUT
+--------------------
+Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):
+
+{{{{input}}}}"""
+
+TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE:
+---------------------
+{observation}
+
+USER'S INPUT
+--------------------
+
+Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else."""
diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/__init__.py b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__init__.py
new file mode 100644
index 00000000..c34745e5
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__init__.py
@@ -0,0 +1,25 @@
+"""Logic for formatting intermediate steps into an agent scratchpad.
+
+Intermediate steps refer to the list of (AgentAction, observation) tuples
+that result from previous iterations of the agent.
+Depending on the prompting strategy you are using, you may want to format these
+differently before passing them into the LLM.
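+
+Illustrative sketch of some of the helpers below (shapes only, for a single
+prior (AgentAction, observation) step; not exact output):
+
+    format_log_to_str(steps)       -> "<action.log>\nObservation: ...\nThought: "
+    format_log_to_messages(steps)  -> [AIMessage(...), HumanMessage(...)]
+    format_to_tool_messages(steps) -> [AIMessage(...), ToolMessage(...)]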
+""" + +from langchain.agents.format_scratchpad.log import format_log_to_str +from langchain.agents.format_scratchpad.log_to_messages import format_log_to_messages +from langchain.agents.format_scratchpad.openai_functions import ( + format_to_openai_function_messages, + format_to_openai_functions, +) +from langchain.agents.format_scratchpad.tools import format_to_tool_messages +from langchain.agents.format_scratchpad.xml import format_xml + +__all__ = [ + "format_xml", + "format_to_openai_function_messages", + "format_to_openai_functions", + "format_to_tool_messages", + "format_log_to_str", + "format_log_to_messages", +] diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..49ec1f54 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/log.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/log.cpython-312.pyc new file mode 100644 index 00000000..258cb0d2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/log.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/log_to_messages.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/log_to_messages.cpython-312.pyc new file mode 100644 index 00000000..c4cfe1f9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/log_to_messages.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/openai_functions.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/openai_functions.cpython-312.pyc new file mode 100644 index 00000000..9ecb3815 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/openai_functions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/openai_tools.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/openai_tools.cpython-312.pyc new file mode 100644 index 00000000..c25205f6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/openai_tools.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/tools.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/tools.cpython-312.pyc new file mode 100644 index 00000000..296f9677 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/tools.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/xml.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/xml.cpython-312.pyc new file mode 100644 index 00000000..77c88f51 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/format_scratchpad/__pycache__/xml.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/log.py b/venv/Lib/site-packages/langchain/agents/format_scratchpad/log.py new file mode 100644 index 00000000..bf24a96a --- /dev/null +++ 
b/venv/Lib/site-packages/langchain/agents/format_scratchpad/log.py @@ -0,0 +1,25 @@ +from langchain_core.agents import AgentAction + + +def format_log_to_str( + intermediate_steps: list[tuple[AgentAction, str]], + observation_prefix: str = "Observation: ", + llm_prefix: str = "Thought: ", +) -> str: + """Construct the scratchpad that lets the agent continue its thought process. + + Args: + intermediate_steps: List of tuples of AgentAction and observation strings. + observation_prefix: Prefix to append the observation with. + Defaults to "Observation: ". + llm_prefix: Prefix to append the llm call with. + Defaults to "Thought: ". + + Returns: + str: The scratchpad. + """ + thoughts = "" + for action, observation in intermediate_steps: + thoughts += action.log + thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}" + return thoughts diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/log_to_messages.py b/venv/Lib/site-packages/langchain/agents/format_scratchpad/log_to_messages.py new file mode 100644 index 00000000..98c5d04e --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/format_scratchpad/log_to_messages.py @@ -0,0 +1,26 @@ +from langchain_core.agents import AgentAction +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage + + +def format_log_to_messages( + intermediate_steps: list[tuple[AgentAction, str]], + template_tool_response: str = "{observation}", +) -> list[BaseMessage]: + """Construct the scratchpad that lets the agent continue its thought process. + + Args: + intermediate_steps: List of tuples of AgentAction and observation strings. + template_tool_response: Template to format the observation with. + Defaults to "{observation}". + + Returns: + List[BaseMessage]: The scratchpad. + """ + thoughts: list[BaseMessage] = [] + for action, observation in intermediate_steps: + thoughts.append(AIMessage(content=action.log)) + human_message = HumanMessage( + content=template_tool_response.format(observation=observation) + ) + thoughts.append(human_message) + return thoughts diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/openai_functions.py b/venv/Lib/site-packages/langchain/agents/format_scratchpad/openai_functions.py new file mode 100644 index 00000000..172c4a67 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/format_scratchpad/openai_functions.py @@ -0,0 +1,78 @@ +import json +from collections.abc import Sequence + +from langchain_core.agents import AgentAction, AgentActionMessageLog +from langchain_core.messages import AIMessage, BaseMessage, FunctionMessage + + +def _convert_agent_action_to_messages( + agent_action: AgentAction, observation: str +) -> list[BaseMessage]: + """Convert an agent action to a message. + + This code is used to reconstruct the original AI message from the agent action. + + Args: + agent_action: Agent action to convert. + + Returns: + AIMessage or the previous messages plus a FunctionMessage that corresponds to + the original tool invocation + """ + if isinstance(agent_action, AgentActionMessageLog): + return list(agent_action.message_log) + [ + _create_function_message(agent_action, observation) + ] + else: + return [AIMessage(content=agent_action.log)] + + +def _create_function_message( + agent_action: AgentAction, observation: str +) -> FunctionMessage: + """Convert agent action and observation into a function message. + Args: + agent_action: the tool invocation request from the agent. + observation: the result of the tool invocation. 
+ Returns: + FunctionMessage that corresponds to the original tool invocation. + + Raises: + ValueError: if the observation cannot be converted to a string. + """ + if not isinstance(observation, str): + try: + content = json.dumps(observation, ensure_ascii=False) + except Exception: + content = str(observation) + else: + content = observation + return FunctionMessage( + name=agent_action.tool, + content=content, + ) + + +def format_to_openai_function_messages( + intermediate_steps: Sequence[tuple[AgentAction, str]], +) -> list[BaseMessage]: + """Convert (AgentAction, tool output) tuples into FunctionMessages. + + Args: + intermediate_steps: Steps the LLM has taken to date, along with observations + + Returns: + list of messages to send to the LLM for the next prediction + Raises: + ValueError: if the observation cannot be converted to a string. + """ + messages = [] + + for agent_action, observation in intermediate_steps: + messages.extend(_convert_agent_action_to_messages(agent_action, observation)) + + return messages + + +# Backwards compatibility +format_to_openai_functions = format_to_openai_function_messages diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/openai_tools.py b/venv/Lib/site-packages/langchain/agents/format_scratchpad/openai_tools.py new file mode 100644 index 00000000..063905ea --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/format_scratchpad/openai_tools.py @@ -0,0 +1,5 @@ +from langchain.agents.format_scratchpad.tools import ( + format_to_tool_messages as format_to_openai_tool_messages, +) + +__all__ = ["format_to_openai_tool_messages"] diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/tools.py b/venv/Lib/site-packages/langchain/agents/format_scratchpad/tools.py new file mode 100644 index 00000000..3c43ff4e --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/format_scratchpad/tools.py @@ -0,0 +1,63 @@ +import json +from collections.abc import Sequence + +from langchain_core.agents import AgentAction +from langchain_core.messages import ( + AIMessage, + BaseMessage, + ToolMessage, +) + +from langchain.agents.output_parsers.tools import ToolAgentAction + + +def _create_tool_message( + agent_action: ToolAgentAction, observation: str +) -> ToolMessage: + """Convert agent action and observation into a tool message. + + Args: + agent_action: the tool invocation request from the agent. + observation: the result of the tool invocation. + Returns: + ToolMessage that corresponds to the original tool invocation. + + Raises: + ValueError: if the observation cannot be converted to a string. + """ + if not isinstance(observation, str): + try: + content = json.dumps(observation, ensure_ascii=False) + except Exception: + content = str(observation) + else: + content = observation + return ToolMessage( + tool_call_id=agent_action.tool_call_id, + content=content, + additional_kwargs={"name": agent_action.tool}, + ) + + +def format_to_tool_messages( + intermediate_steps: Sequence[tuple[AgentAction, str]], +) -> list[BaseMessage]: + """Convert (AgentAction, tool output) tuples into ToolMessages. + + Args: + intermediate_steps: Steps the LLM has taken to date, along with observations. + + Returns: + list of messages to send to the LLM for the next prediction. 
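+
+    Example (an illustrative sketch: a plain AgentAction carries no tool-call
+    metadata, so it falls back to a single AIMessage, while a ToolAgentAction
+    is expanded into its message log plus a matching ToolMessage):
+
+    .. code-block:: python
+
+        from langchain_core.agents import AgentAction
+
+        from langchain.agents.format_scratchpad import format_to_tool_messages
+
+        steps = [(AgentAction("search", "weather", "thinking..."), "Sunny")]
+        format_to_tool_messages(steps)
+        # -> [AIMessage(content='thinking...')]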
+ + """ + messages = [] + for agent_action, observation in intermediate_steps: + if isinstance(agent_action, ToolAgentAction): + new_messages = list(agent_action.message_log) + [ + _create_tool_message(agent_action, observation) + ] + messages.extend([new for new in new_messages if new not in messages]) + else: + messages.append(AIMessage(content=agent_action.log)) + return messages diff --git a/venv/Lib/site-packages/langchain/agents/format_scratchpad/xml.py b/venv/Lib/site-packages/langchain/agents/format_scratchpad/xml.py new file mode 100644 index 00000000..e1e94509 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/format_scratchpad/xml.py @@ -0,0 +1,21 @@ +from langchain_core.agents import AgentAction + + +def format_xml( + intermediate_steps: list[tuple[AgentAction, str]], +) -> str: + """Format the intermediate steps as XML. + + Args: + intermediate_steps: The intermediate steps. + + Returns: + The intermediate steps as XML. + """ + log = "" + for action, observation in intermediate_steps: + log += ( + f"<tool>{action.tool}</tool><tool_input>{action.tool_input}" + f"</tool_input><observation>{observation}</observation>" + ) + return log diff --git a/venv/Lib/site-packages/langchain/agents/initialize.py b/venv/Lib/site-packages/langchain/agents/initialize.py new file mode 100644 index 00000000..c24e0139 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/initialize.py @@ -0,0 +1,96 @@ +"""Load agent.""" + +from collections.abc import Sequence +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import BaseCallbackManager +from langchain_core.language_models import BaseLanguageModel +from langchain_core.tools import BaseTool + +from langchain._api.deprecation import AGENT_DEPRECATION_WARNING +from langchain.agents.agent import AgentExecutor +from langchain.agents.agent_types import AgentType +from langchain.agents.loading import AGENT_TO_CLASS, load_agent + + +@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +def initialize_agent( + tools: Sequence[BaseTool], + llm: BaseLanguageModel, + agent: Optional[AgentType] = None, + callback_manager: Optional[BaseCallbackManager] = None, + agent_path: Optional[str] = None, + agent_kwargs: Optional[dict] = None, + *, + tags: Optional[Sequence[str]] = None, + **kwargs: Any, +) -> AgentExecutor: + """Load an agent executor given tools and LLM. + + Args: + tools: List of tools this agent has access to. + llm: Language model to use as the agent. + agent: Agent type to use. If None and agent_path is also None, will default + to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None. + callback_manager: CallbackManager to use. Global callback manager is used if + not provided. Defaults to None. + agent_path: Path to serialized agent to use. If None and agent is also None, + will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None. + agent_kwargs: Additional keyword arguments to pass to the underlying agent. + Defaults to None. + tags: Tags to apply to the traced runs. Defaults to None. + kwargs: Additional keyword arguments passed to the agent executor. + + Returns: + An agent executor. + + Raises: + ValueError: If both `agent` and `agent_path` are specified. + ValueError: If `agent` is not a valid agent type. + ValueError: If both `agent` and `agent_path` are None.
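+
+    Example (a minimal sketch, assuming `llm` and `tools` have already been
+    constructed elsewhere):
+
+    .. code-block:: python
+
+        from langchain.agents import AgentType, initialize_agent
+
+        executor = initialize_agent(
+            tools,
+            llm,
+            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+            verbose=True,
+        )
+        executor.invoke({"input": "What is 2 ** 10?"})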
+ """ + tags_ = list(tags) if tags else [] + if agent is None and agent_path is None: + agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION + if agent is not None and agent_path is not None: + raise ValueError( + "Both `agent` and `agent_path` are specified, " + "but at most only one should be." + ) + if agent is not None: + if agent not in AGENT_TO_CLASS: + raise ValueError( + f"Got unknown agent type: {agent}. " + f"Valid types are: {AGENT_TO_CLASS.keys()}." + ) + tags_.append(agent.value if isinstance(agent, AgentType) else agent) + agent_cls = AGENT_TO_CLASS[agent] + agent_kwargs = agent_kwargs or {} + agent_obj = agent_cls.from_llm_and_tools( + llm, tools, callback_manager=callback_manager, **agent_kwargs + ) + elif agent_path is not None: + agent_obj = load_agent( + agent_path, llm=llm, tools=tools, callback_manager=callback_manager + ) + try: + # TODO: Add tags from the serialized object directly. + tags_.append(agent_obj._agent_type) + except NotImplementedError: + pass + else: + raise ValueError( + "Somehow both `agent` and `agent_path` are None, this should never happen." + ) + return AgentExecutor.from_agent_and_tools( + agent=agent_obj, + tools=tools, + callback_manager=callback_manager, + tags=tags_, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/agents/json_chat/__init__.py b/venv/Lib/site-packages/langchain/agents/json_chat/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/agents/json_chat/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/json_chat/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..df930d48 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/json_chat/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/json_chat/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/json_chat/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..c171ad3e Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/json_chat/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/json_chat/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/json_chat/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..d29d5234 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/json_chat/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/json_chat/base.py b/venv/Lib/site-packages/langchain/agents/json_chat/base.py new file mode 100644 index 00000000..b3552f76 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/json_chat/base.py @@ -0,0 +1,195 @@ +from collections.abc import Sequence +from typing import Union + +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts.chat import ChatPromptTemplate +from langchain_core.runnables import Runnable, RunnablePassthrough +from langchain_core.tools import BaseTool +from langchain_core.tools.render import ToolsRenderer, render_text_description + +from langchain.agents.format_scratchpad import format_log_to_messages +from langchain.agents.json_chat.prompt import TEMPLATE_TOOL_RESPONSE +from langchain.agents.output_parsers import JSONAgentOutputParser + + +def create_json_chat_agent( + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + prompt: ChatPromptTemplate, + stop_sequence: Union[bool, list[str]] = True, + tools_renderer: ToolsRenderer = 
render_text_description, + template_tool_response: str = TEMPLATE_TOOL_RESPONSE, +) -> Runnable: + """Create an agent that uses JSON to format its logic, built for Chat Models. + + Args: + llm: LLM to use as the agent. + tools: Tools this agent has access to. + prompt: The prompt to use. See Prompt section below for more. + stop_sequence: bool or list of str. + If True, adds a stop token of "Observation:" to avoid hallucinated observations. + If False, does not add a stop token. + If a list of str, uses the provided list as the stop tokens. + + Default is True. You may want to set this to False if the LLM you are using + does not support stop sequences. + tools_renderer: This controls how the tools are converted into a string and + then passed into the LLM. Default is `render_text_description`. + template_tool_response: Template prompt that uses the tool response (observation) + to make the LLM generate the next action to take. + Default is TEMPLATE_TOOL_RESPONSE. + + Returns: + A Runnable sequence representing an agent. It takes as input all the same input + variables as the prompt passed in does. It returns as output either an + AgentAction or AgentFinish. + + Raises: + ValueError: If the prompt is missing required variables. + ValueError: If the template_tool_response is missing + the required variable 'observation'. + + Example: + + .. code-block:: python + + from langchain import hub + from langchain_community.chat_models import ChatOpenAI + from langchain.agents import AgentExecutor, create_json_chat_agent + + prompt = hub.pull("hwchase17/react-chat-json") + model = ChatOpenAI() + tools = ... + + agent = create_json_chat_agent(model, tools, prompt) + agent_executor = AgentExecutor(agent=agent, tools=tools) + + agent_executor.invoke({"input": "hi"}) + + # Using with chat history + from langchain_core.messages import AIMessage, HumanMessage + agent_executor.invoke( + { + "input": "what's my name?", + "chat_history": [ + HumanMessage(content="hi! my name is bob"), + AIMessage(content="Hello Bob! How can I assist you today?"), + ], + } + ) + + Prompt: + + The prompt must have input keys: + * `tools`: contains descriptions and arguments for each tool. + * `tool_names`: contains all tool names. + * `agent_scratchpad`: must be a MessagesPlaceholder. Contains previous agent actions and tool outputs as messages. + + Here's an example: + + .. code-block:: python + + from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder + + system = '''Assistant is a large language model trained by OpenAI. + + Assistant is designed to be able to assist with a wide range of tasks, from answering \ simple questions to providing in-depth explanations and discussions on a wide range of \ topics. As a language model, Assistant is able to generate human-like text based on \ the input it receives, allowing it to engage in natural-sounding conversations and \ provide responses that are coherent and relevant to the topic at hand. + + Assistant is constantly learning and improving, and its capabilities are constantly \ evolving. It is able to process and understand large amounts of text, and can use this \ knowledge to provide accurate and informative responses to a wide range of questions. \ Additionally, Assistant is able to generate its own text based on the input it \ receives, allowing it to engage in discussions and provide explanations and \ descriptions on a wide range of topics.
+ + Overall, Assistant is a powerful system that can help with a wide range of tasks \ and provide valuable insights and information on a wide range of topics. Whether \ you need help with a specific question or just want to have a conversation about \ a particular topic, Assistant is here to assist.''' + + human = '''TOOLS + ------ + Assistant can ask the user to use tools to look up information that may be helpful in \ answering the user's original question. The tools the human can use are: + + {tools} + + RESPONSE FORMAT INSTRUCTIONS + ---------------------------- + + When responding to me, please output a response in one of two formats: + + **Option 1:** + Use this if you want the human to use a tool. + Markdown code snippet formatted in the following schema: + + ```json + {{ + "action": string, \\ The action to take. Must be one of {tool_names} + "action_input": string \\ The input to the action + }} + ``` + + **Option 2:** + Use this if you want to respond directly to the human. Markdown code snippet formatted \ in the following schema: + + ```json + {{ + "action": "Final Answer", + "action_input": string \\ You should put what you want to return to the user here + }} + ``` + + USER'S INPUT + -------------------- + Here is the user's input (remember to respond with a markdown code snippet of a json \ blob with a single action, and NOTHING else): + + {input}''' + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", system), + MessagesPlaceholder("chat_history", optional=True), + ("human", human), + MessagesPlaceholder("agent_scratchpad"), + ] + ) + """ # noqa: E501 + missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference( + prompt.input_variables + list(prompt.partial_variables) + ) + if missing_vars: + raise ValueError(f"Prompt missing required variables: {missing_vars}") + + if "{observation}" not in template_tool_response: + raise ValueError( + "Template tool response missing required variable 'observation'" + ) + + prompt = prompt.partial( + tools=tools_renderer(list(tools)), + tool_names=", ".join([t.name for t in tools]), + ) + if stop_sequence: + stop = ["\nObservation"] if stop_sequence is True else stop_sequence + llm_to_use = llm.bind(stop=stop) + else: + llm_to_use = llm + + agent = ( + RunnablePassthrough.assign( + agent_scratchpad=lambda x: format_log_to_messages( + x["intermediate_steps"], template_tool_response=template_tool_response + ) + ) + | prompt + | llm_to_use + | JSONAgentOutputParser() + ) + return agent diff --git a/venv/Lib/site-packages/langchain/agents/json_chat/prompt.py b/venv/Lib/site-packages/langchain/agents/json_chat/prompt.py new file mode 100644 index 00000000..34020caa --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/json_chat/prompt.py @@ -0,0 +1,9 @@ +# flake8: noqa +TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE: +--------------------- +{observation} + +USER'S INPUT +-------------------- + +Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else - even if you just want to respond to the user.
Do NOT respond with anything except a JSON snippet no matter what!""" diff --git a/venv/Lib/site-packages/langchain/agents/load_tools.py b/venv/Lib/site-packages/langchain/agents/load_tools.py new file mode 100644 index 00000000..dccc67d8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/load_tools.py @@ -0,0 +1,12 @@ +from typing import Any + +from langchain._api import create_importer + +_importer = create_importer( + __package__, fallback_module="langchain_community.agent_toolkits.load_tools" +) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _importer(name) diff --git a/venv/Lib/site-packages/langchain/agents/loading.py b/venv/Lib/site-packages/langchain/agents/loading.py new file mode 100644 index 00000000..08fc6fa8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/loading.py @@ -0,0 +1,138 @@ +"""Functionality for loading agents.""" + +import json +import logging +from pathlib import Path +from typing import Any, Optional, Union + +import yaml +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.tools import Tool + +from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent +from langchain.agents.types import AGENT_TO_CLASS +from langchain.chains.loading import load_chain, load_chain_from_config + +logger = logging.getLogger(__file__) + +URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/" + + +def _load_agent_from_tools( + config: dict, llm: BaseLanguageModel, tools: list[Tool], **kwargs: Any +) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: + config_type = config.pop("_type") + if config_type not in AGENT_TO_CLASS: + raise ValueError(f"Loading {config_type} agent not supported") + + agent_cls = AGENT_TO_CLASS[config_type] + combined_config = {**config, **kwargs} + return agent_cls.from_llm_and_tools(llm, tools, **combined_config) + + +@deprecated("0.1.0", removal="1.0") +def load_agent_from_config( + config: dict, + llm: Optional[BaseLanguageModel] = None, + tools: Optional[list[Tool]] = None, + **kwargs: Any, +) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: + """Load agent from Config Dict. + + Args: + config: Config dict to load agent from. + llm: Language model to use as the agent. + tools: List of tools this agent has access to. + kwargs: Additional keyword arguments passed to the agent executor. + + Returns: + An agent executor. + + Raises: + ValueError: If agent type is not specified in the config. 
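+
+    Example (an illustrative sketch; the exact keys depend on the serialized
+    agent, and `llm` and `tools` are assumed to exist already):
+
+    .. code-block:: python
+
+        config = {
+            "_type": "zero-shot-react-description",
+            "load_from_llm_and_tools": True,
+        }
+        agent = load_agent_from_config(config, llm=llm, tools=tools)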
+ """ + if "_type" not in config: + raise ValueError("Must specify an agent Type in config") + load_from_tools = config.pop("load_from_llm_and_tools", False) + if load_from_tools: + if llm is None: + raise ValueError( + "If `load_from_llm_and_tools` is set to True, then LLM must be provided" + ) + if tools is None: + raise ValueError( + "If `load_from_llm_and_tools` is set to True, " + "then tools must be provided" + ) + return _load_agent_from_tools(config, llm, tools, **kwargs) + config_type = config.pop("_type") + + if config_type not in AGENT_TO_CLASS: + raise ValueError(f"Loading {config_type} agent not supported") + + agent_cls = AGENT_TO_CLASS[config_type] + if "llm_chain" in config: + config["llm_chain"] = load_chain_from_config(config.pop("llm_chain")) + elif "llm_chain_path" in config: + config["llm_chain"] = load_chain(config.pop("llm_chain_path")) + else: + raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.") + if "output_parser" in config: + logger.warning( + "Currently loading output parsers on agent is not supported, " + "will just use the default one." + ) + del config["output_parser"] + + combined_config = {**config, **kwargs} + return agent_cls(**combined_config) + + +@deprecated("0.1.0", removal="1.0") +def load_agent( + path: Union[str, Path], **kwargs: Any +) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: + """Unified method for loading an agent from LangChainHub or local fs. + + Args: + path: Path to the agent file. + kwargs: Additional keyword arguments passed to the agent executor. + + Returns: + An agent executor. + + Raises: + RuntimeError: If loading from the deprecated github-based + Hub is attempted. + """ + if isinstance(path, str) and path.startswith("lc://"): + raise RuntimeError( + "Loading from the deprecated github-based Hub is no longer supported. " + "Please use the new LangChain Hub at https://smith.langchain.com/hub " + "instead." + ) + return _load_agent_from_file(path, **kwargs) + + +def _load_agent_from_file( + file: Union[str, Path], **kwargs: Any +) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]: + """Load agent from file.""" + valid_suffixes = {"json", "yaml"} + # Convert file to Path object. + if isinstance(file, str): + file_path = Path(file) + else: + file_path = file + # Load from either json or yaml. + if file_path.suffix[1:] == "json": + with open(file_path) as f: + config = json.load(f) + elif file_path.suffix[1:] == "yaml": + with open(file_path) as f: + config = yaml.safe_load(f) + else: + raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.") + # Load the agent from the config now. 
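+    # The config's "_type" key determines which agent class is constructed.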
+ return load_agent_from_config(config, **kwargs) diff --git a/venv/Lib/site-packages/langchain/agents/mrkl/__init__.py b/venv/Lib/site-packages/langchain/agents/mrkl/__init__.py new file mode 100644 index 00000000..a86a5b51 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/mrkl/__init__.py @@ -0,0 +1 @@ +"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf.""" diff --git a/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..e46286f0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..43d7f0b1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/output_parser.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/output_parser.cpython-312.pyc new file mode 100644 index 00000000..42fa8eed Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/output_parser.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..31a92bcc Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/mrkl/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/mrkl/base.py b/venv/Lib/site-packages/langchain/agents/mrkl/base.py new file mode 100644 index 00000000..727ca66b --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/mrkl/base.py @@ -0,0 +1,211 @@ +"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf.""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any, Callable, NamedTuple, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import BaseCallbackManager +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import PromptTemplate +from langchain_core.tools import BaseTool, Tool +from langchain_core.tools.render import render_text_description +from pydantic import Field + +from langchain._api.deprecation import AGENT_DEPRECATION_WARNING +from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser +from langchain.agents.agent_types import AgentType +from langchain.agents.mrkl.output_parser import MRKLOutputParser +from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX +from langchain.agents.utils import validate_tools_single_input +from langchain.chains import LLMChain + + +class ChainConfig(NamedTuple): + """Configuration for a chain to use in MRKL system. + + Parameters: + action_name: Name of the action. + action: Action function to call. + action_description: Description of the action. + """ + + action_name: str + action: Callable + action_description: str + + +@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +class ZeroShotAgent(Agent): + """Agent for the MRKL chain. 
+ + Parameters: + output_parser: Output parser for the agent. + """ + + output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser) + + @classmethod + def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: + return MRKLOutputParser() + + @property + def _agent_type(self) -> str: + """Return identifier of agent type.""" + return AgentType.ZERO_SHOT_REACT_DESCRIPTION + + @property + def observation_prefix(self) -> str: + """Prefix to append the observation with. + + Returns: + "Observation: " + """ + return "Observation: " + + @property + def llm_prefix(self) -> str: + """Prefix to append the llm call with. + + Returns: + "Thought:" + """ + return "Thought:" + + @classmethod + def create_prompt( + cls, + tools: Sequence[BaseTool], + prefix: str = PREFIX, + suffix: str = SUFFIX, + format_instructions: str = FORMAT_INSTRUCTIONS, + input_variables: Optional[list[str]] = None, + ) -> PromptTemplate: + """Create prompt in the style of the zero shot agent. + + Args: + tools: List of tools the agent will have access to, used to format the + prompt. + prefix: String to put before the list of tools. Defaults to PREFIX. + suffix: String to put after the list of tools. Defaults to SUFFIX. + format_instructions: Instructions on how to use the tools. + Defaults to FORMAT_INSTRUCTIONS. + input_variables: List of input variables the final prompt will expect. + Defaults to None. + + Returns: + A PromptTemplate with the template assembled from the pieces here. + """ + tool_strings = render_text_description(list(tools)) + tool_names = ", ".join([tool.name for tool in tools]) + format_instructions = format_instructions.format(tool_names=tool_names) + template = "\n\n".join([prefix, tool_strings, format_instructions, suffix]) + if input_variables: + return PromptTemplate(template=template, input_variables=input_variables) + return PromptTemplate.from_template(template) + + @classmethod + def from_llm_and_tools( + cls, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + callback_manager: Optional[BaseCallbackManager] = None, + output_parser: Optional[AgentOutputParser] = None, + prefix: str = PREFIX, + suffix: str = SUFFIX, + format_instructions: str = FORMAT_INSTRUCTIONS, + input_variables: Optional[list[str]] = None, + **kwargs: Any, + ) -> Agent: + """Construct an agent from an LLM and tools. + + Args: + llm: The LLM to use as the agent LLM. + tools: The tools to use. + callback_manager: The callback manager to use. Defaults to None. + output_parser: The output parser to use. Defaults to None. + prefix: The prefix to use. Defaults to PREFIX. + suffix: The suffix to use. Defaults to SUFFIX. + format_instructions: The format instructions to use. + Defaults to FORMAT_INSTRUCTIONS. + input_variables: The input variables to use. Defaults to None. + kwargs: Additional parameters to pass to the agent.
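+
+        Example (a minimal sketch, assuming `llm` and `tools` already exist):
+
+        .. code-block:: python
+
+            agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
+            executor = AgentExecutor(agent=agent, tools=tools)
+            executor.invoke({"input": "What is the capital of France?"})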
+ """ + cls._validate_tools(tools) + prompt = cls.create_prompt( + tools, + prefix=prefix, + suffix=suffix, + format_instructions=format_instructions, + input_variables=input_variables, + ) + llm_chain = LLMChain( + llm=llm, + prompt=prompt, + callback_manager=callback_manager, + ) + tool_names = [tool.name for tool in tools] + _output_parser = output_parser or cls._get_default_output_parser() + return cls( + llm_chain=llm_chain, + allowed_tools=tool_names, + output_parser=_output_parser, + **kwargs, + ) + + @classmethod + def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: + validate_tools_single_input(cls.__name__, tools) + if len(tools) == 0: + raise ValueError( + f"Got no tools for {cls.__name__}. At least one tool must be provided." + ) + for tool in tools: + if tool.description is None: + raise ValueError( + f"Got a tool {tool.name} without a description. For this agent, " + f"a description must always be provided." + ) + super()._validate_tools(tools) + + +@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +class MRKLChain(AgentExecutor): + """Chain that implements the MRKL system.""" + + @classmethod + def from_chains( + cls, llm: BaseLanguageModel, chains: list[ChainConfig], **kwargs: Any + ) -> AgentExecutor: + """User-friendly way to initialize the MRKL chain. + + This is intended to be an easy way to get up and running with the + MRKL chain. + + Args: + llm: The LLM to use as the agent LLM. + chains: The chains the MRKL system has access to. + **kwargs: parameters to be passed to initialization. + + Returns: + An initialized MRKL chain. + """ + tools = [ + Tool( + name=c.action_name, + func=c.action, + description=c.action_description, + ) + for c in chains + ] + agent = ZeroShotAgent.from_llm_and_tools(llm, tools) + return cls(agent=agent, tools=tools, **kwargs) diff --git a/venv/Lib/site-packages/langchain/agents/mrkl/output_parser.py b/venv/Lib/site-packages/langchain/agents/mrkl/output_parser.py new file mode 100644 index 00000000..54d04817 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/mrkl/output_parser.py @@ -0,0 +1,99 @@ +import re +from typing import Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException + +from langchain.agents.agent import AgentOutputParser +from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS + +FINAL_ANSWER_ACTION = "Final Answer:" +MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = ( + "Invalid Format: Missing 'Action:' after 'Thought:'" +) +MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = ( + "Invalid Format: Missing 'Action Input:' after 'Action:'" +) +FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = ( + "Parsing LLM output produced both a final answer and a parse-able action:" +) + + +class MRKLOutputParser(AgentOutputParser): + """MRKL Output parser for the chat agent.""" + + format_instructions: str = FORMAT_INSTRUCTIONS + """Default formatting instructions""" + + def get_format_instructions(self) -> str: + """Returns formatting instructions for the given output parser.""" + return self.format_instructions + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + """Parse the output from the agent into + an AgentAction or AgentFinish object. + + Args: + text: The text to parse. + + Returns: + An AgentAction or AgentFinish object. + + Raises: + OutputParserException: If the output could not be parsed.
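+
+        Example (illustrative, assuming a hypothetical "Search" tool):
+
+        .. code-block:: python
+
+            MRKLOutputParser().parse(
+                "Thought: I should look this up\n"
+                "Action: Search\n"
+                "Action Input: capital of France"
+            )
+            # -> AgentAction(tool='Search', tool_input='capital of France', ...)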
+ """ + includes_answer = FINAL_ANSWER_ACTION in text + regex = ( + r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)" + ) + action_match = re.search(regex, text, re.DOTALL) + if action_match and includes_answer: + if text.find(FINAL_ANSWER_ACTION) < text.find(action_match.group(0)): + # if final answer is before the hallucination, return final answer + start_index = text.find(FINAL_ANSWER_ACTION) + len(FINAL_ANSWER_ACTION) + end_index = text.find("\n\n", start_index) + return AgentFinish( + {"output": text[start_index:end_index].strip()}, text[:end_index] + ) + else: + raise OutputParserException( + f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}" + ) + + if action_match: + action = action_match.group(1).strip() + action_input = action_match.group(2) + tool_input = action_input.strip(" ") + # ensure if it's a well-formed SQL query we don't remove any trailing " chars + if tool_input.startswith("SELECT ") is False: + tool_input = tool_input.strip('"') + + return AgentAction(action, tool_input, text) + + elif includes_answer: + return AgentFinish( + {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text + ) + + if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL): + raise OutputParserException( + f"Could not parse LLM output: `{text}`", + observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, + llm_output=text, + send_to_llm=True, + ) + elif not re.search( + r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL + ): + raise OutputParserException( + f"Could not parse LLM output: `{text}`", + observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, + llm_output=text, + send_to_llm=True, + ) + else: + raise OutputParserException(f"Could not parse LLM output: `{text}`") + + @property + def _type(self) -> str: + return "mrkl" diff --git a/venv/Lib/site-packages/langchain/agents/mrkl/prompt.py b/venv/Lib/site-packages/langchain/agents/mrkl/prompt.py new file mode 100644 index 00000000..db6827b5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/mrkl/prompt.py @@ -0,0 +1,16 @@ +# flake8: noqa +PREFIX = """Answer the following questions as best you can. You have access to the following tools:""" +FORMAT_INSTRUCTIONS = """Use the following format: + +Question: the input question you must answer +Thought: you should always think about what to do +Action: the action to take, should be one of [{tool_names}] +Action Input: the input to the action +Observation: the result of the action +... (this Thought/Action/Action Input/Observation can repeat N times) +Thought: I now know the final answer +Final Answer: the final answer to the original input question""" +SUFFIX = """Begin!
+ +Question: {input} +Thought:{agent_scratchpad}""" diff --git a/venv/Lib/site-packages/langchain/agents/openai_assistant/__init__.py b/venv/Lib/site-packages/langchain/agents/openai_assistant/__init__.py new file mode 100644 index 00000000..265c0ed8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/openai_assistant/__init__.py @@ -0,0 +1,3 @@ +from langchain.agents.openai_assistant.base import OpenAIAssistantRunnable + +__all__ = ["OpenAIAssistantRunnable"] diff --git a/venv/Lib/site-packages/langchain/agents/openai_assistant/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/openai_assistant/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..dfa943c3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/openai_assistant/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/openai_assistant/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/openai_assistant/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..0cc3d238 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/openai_assistant/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/openai_assistant/base.py b/venv/Lib/site-packages/langchain/agents/openai_assistant/base.py new file mode 100644 index 00000000..5ca00c3c --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/openai_assistant/base.py @@ -0,0 +1,808 @@ +from __future__ import annotations + +import asyncio +import json +from collections.abc import Sequence +from json import JSONDecodeError +from time import sleep +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Union, +) + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.callbacks import CallbackManager +from langchain_core.load import dumpd +from langchain_core.runnables import RunnableConfig, RunnableSerializable, ensure_config +from langchain_core.tools import BaseTool +from langchain_core.utils.function_calling import convert_to_openai_tool +from pydantic import BaseModel, Field, model_validator +from typing_extensions import Self + +if TYPE_CHECKING: + import openai + from openai.types.beta.threads import ThreadMessage # type: ignore[attr-defined] + from openai.types.beta.threads.required_action_function_tool_call import ( + RequiredActionFunctionToolCall, + ) + + +class OpenAIAssistantFinish(AgentFinish): + """AgentFinish with run and thread metadata. + + Parameters: + run_id: Run id. + thread_id: Thread id. + """ + + run_id: str + thread_id: str + + @classmethod + def is_lc_serializable(cls) -> bool: + """Check if the class is serializable by LangChain. + + Returns: + False + """ + return False + + +class OpenAIAssistantAction(AgentAction): + """AgentAction with info needed to submit custom tool output to existing run. + + Parameters: + tool_call_id: Tool call id. + run_id: Run id. + thread_id: Thread id + """ + + tool_call_id: str + run_id: str + thread_id: str + + @classmethod + def is_lc_serializable(cls) -> bool: + """Check if the class is serializable by LangChain. + + Returns: + False + """ + return False + + +def _get_openai_client() -> openai.OpenAI: + try: + import openai + + return openai.OpenAI() + except ImportError as e: + raise ImportError( + "Unable to import openai, please install with `pip install openai`." 
+ ) from e + except AttributeError as e: + raise AttributeError( + "Please make sure you are using a v1.1-compatible version of openai. You " + 'can install with `pip install "openai>=1.1"`.' + ) from e + + +def _get_openai_async_client() -> openai.AsyncOpenAI: + try: + import openai + + return openai.AsyncOpenAI() + except ImportError as e: + raise ImportError( + "Unable to import openai, please install with `pip install openai`." + ) from e + except AttributeError as e: + raise AttributeError( + "Please make sure you are using a v1.1-compatible version of openai. You " + 'can install with `pip install "openai>=1.1"`.' + ) from e + + +def _is_assistants_builtin_tool( + tool: Union[dict[str, Any], type[BaseModel], Callable, BaseTool], +) -> bool: + """Determine if tool corresponds to OpenAI Assistants built-in.""" + assistants_builtin_tools = ("code_interpreter", "file_search") + return ( + isinstance(tool, dict) + and ("type" in tool) + and (tool["type"] in assistants_builtin_tools) + ) + + +def _get_assistants_tool( + tool: Union[dict[str, Any], type[BaseModel], Callable, BaseTool], +) -> dict[str, Any]: + """Convert a raw function/class to an OpenAI tool. + + Note that OpenAI assistants supports several built-in tools, + such as "code_interpreter" and "file_search". + """ + if _is_assistants_builtin_tool(tool): + return tool # type: ignore[return-value] + else: + return convert_to_openai_tool(tool) + + +OutputType = Union[ + list[OpenAIAssistantAction], + OpenAIAssistantFinish, + list["ThreadMessage"], + list["RequiredActionFunctionToolCall"], +] + + +class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]): + """Run an OpenAI Assistant. + + Example using OpenAI tools: + .. code-block:: python + + from langchain_experimental.openai_assistant import OpenAIAssistantRunnable + + interpreter_assistant = OpenAIAssistantRunnable.create_assistant( + name="langchain assistant", + instructions="You are a personal math tutor. Write and run code to answer math questions.", + tools=[{"type": "code_interpreter"}], + model="gpt-4-1106-preview" + ) + output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"}) + + Example using custom tools and AgentExecutor: + .. code-block:: python + + from langchain_experimental.openai_assistant import OpenAIAssistantRunnable + from langchain.agents import AgentExecutor + from langchain.tools import E2BDataAnalysisTool + + + tools = [E2BDataAnalysisTool(api_key="...")] + agent = OpenAIAssistantRunnable.create_assistant( + name="langchain assistant e2b tool", + instructions="You are a personal math tutor. Write and run code to answer math questions.", + tools=tools, + model="gpt-4-1106-preview", + as_agent=True + ) + + agent_executor = AgentExecutor(agent=agent, tools=tools) + agent_executor.invoke({"content": "What's 10 - 4 raised to the 2.7"}) + + + Example using custom tools and custom execution: + .. code-block:: python + + from langchain_experimental.openai_assistant import OpenAIAssistantRunnable + from langchain.agents import AgentExecutor + from langchain_core.agents import AgentFinish + from langchain.tools import E2BDataAnalysisTool + + + tools = [E2BDataAnalysisTool(api_key="...")] + agent = OpenAIAssistantRunnable.create_assistant( + name="langchain assistant e2b tool", + instructions="You are a personal math tutor. 
Write and run code to answer math questions.", + tools=tools, + model="gpt-4-1106-preview", + as_agent=True + ) + + def execute_agent(agent, tools, input): + tool_map = {tool.name: tool for tool in tools} + response = agent.invoke(input) + while not isinstance(response, AgentFinish): + tool_outputs = [] + for action in response: + tool_output = tool_map[action.tool].invoke(action.tool_input) + tool_outputs.append({"output": tool_output, "tool_call_id": action.tool_call_id}) + response = agent.invoke( + { + "tool_outputs": tool_outputs, + "run_id": action.run_id, + "thread_id": action.thread_id + } + ) + + return response + + response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"}) + next_response = execute_agent(agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id}) + + """ # noqa: E501 + + client: Any = Field(default_factory=_get_openai_client) + """OpenAI or AzureOpenAI client.""" + async_client: Any = None + """OpenAI or AzureOpenAI async client.""" + assistant_id: str + """OpenAI assistant id.""" + check_every_ms: float = 1_000.0 + """Frequency with which to check run progress in ms.""" + as_agent: bool = False + """Use as a LangChain agent, compatible with the AgentExecutor.""" + + @model_validator(mode="after") + def validate_async_client(self) -> Self: + if self.async_client is None: + import openai + + api_key = self.client.api_key + self.async_client = openai.AsyncOpenAI(api_key=api_key) + return self + + @classmethod + def create_assistant( + cls, + name: str, + instructions: str, + tools: Sequence[Union[BaseTool, dict]], + model: str, + *, + client: Optional[Union[openai.OpenAI, openai.AzureOpenAI]] = None, + **kwargs: Any, + ) -> OpenAIAssistantRunnable: + """Create an OpenAI Assistant and instantiate the Runnable. + + Args: + name: Assistant name. + instructions: Assistant instructions. + tools: Assistant tools. Can be passed in OpenAI format or as BaseTools. + model: Assistant model to use. + client: OpenAI or AzureOpenAI client. + Will create a default OpenAI client if not specified. + kwargs: Additional arguments. + + Returns: + OpenAIAssistantRunnable configured to run using the created assistant. + """ + client = client or _get_openai_client() + assistant = client.beta.assistants.create( + name=name, + instructions=instructions, + tools=[_get_assistants_tool(tool) for tool in tools], # type: ignore[misc] + model=model, + ) + return cls(assistant_id=assistant.id, client=client, **kwargs) + + def invoke( + self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> OutputType: + """Invoke assistant. + + Args: + input: Runnable input dict that can have: + content: User message when starting a new run. + thread_id: Existing thread to use. + run_id: Existing run to use. Should only be supplied when providing + the tool output for a required action after an initial invocation. + message_metadata: Metadata to associate with new message. + thread_metadata: Metadata to associate with new thread. Only relevant + when new thread being created. + instructions: Additional run instructions. + model: Override Assistant model for this run. + tools: Override Assistant tools for this run. + parallel_tool_calls: Allow Assistant to set parallel_tool_calls + for this run. + top_p: Override Assistant top_p for this run. + temperature: Override Assistant temperature for this run. + max_completion_tokens: Allow setting max_completion_tokens for this run. + max_prompt_tokens: Allow setting max_prompt_tokens for this run. 
+ run_metadata: Metadata to associate with new run. + attachments: A list of files attached to the message, and the + tools they should be added to. + config: Runnable config. Defaults to None. + + Return: + If self.as_agent, will return + Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. + Otherwise, will return OpenAI types + Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]]. + """ + + config = ensure_config(config) + callback_manager = CallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + inheritable_tags=config.get("tags"), + inheritable_metadata=config.get("metadata"), + ) + run_manager = callback_manager.on_chain_start( + dumpd(self), input, name=config.get("run_name") or self.get_name() + ) + try: + # Being run within AgentExecutor and there are tool outputs to submit. + if self.as_agent and input.get("intermediate_steps"): + tool_outputs = self._parse_intermediate_steps( + input["intermediate_steps"] + ) + run = self.client.beta.threads.runs.submit_tool_outputs(**tool_outputs) + # Starting a new thread and a new run. + elif "thread_id" not in input: + thread = { + "messages": [ + { + "role": "user", + "content": input["content"], + "metadata": input.get("message_metadata"), + "attachments": input.get("attachments"), + } + ], + "metadata": input.get("thread_metadata"), + } + run = self._create_thread_and_run(input, thread) + # Starting a new run in an existing thread. + elif "run_id" not in input: + _ = self.client.beta.threads.messages.create( + input["thread_id"], + content=input["content"], + role="user", + metadata=input.get("message_metadata"), + ) + run = self._create_run(input) + # Submitting tool outputs to an existing run, outside the AgentExecutor + # framework. + else: + run = self.client.beta.threads.runs.submit_tool_outputs(**input) + run = self._wait_for_run(run.id, run.thread_id) + except BaseException as e: + run_manager.on_chain_error(e) + raise e + try: + response = self._get_response(run) + except BaseException as e: + run_manager.on_chain_error(e, metadata=run.dict()) + raise e + else: + run_manager.on_chain_end(response) + return response + + @classmethod + async def acreate_assistant( + cls, + name: str, + instructions: str, + tools: Sequence[Union[BaseTool, dict]], + model: str, + *, + async_client: Optional[ + Union[openai.AsyncOpenAI, openai.AsyncAzureOpenAI] + ] = None, + **kwargs: Any, + ) -> OpenAIAssistantRunnable: + """Async create an AsyncOpenAI Assistant and instantiate the Runnable. + + Args: + name: Assistant name. + instructions: Assistant instructions. + tools: Assistant tools. Can be passed in OpenAI format or as BaseTools. + model: Assistant model to use. + async_client: AsyncOpenAI client. + Will create default async_client if not specified. + + Returns: + AsyncOpenAIAssistantRunnable configured to run using the created assistant. + """ + async_client = async_client or _get_openai_async_client() + openai_tools = [_get_assistants_tool(tool) for tool in tools] + assistant = await async_client.beta.assistants.create( + name=name, + instructions=instructions, + tools=openai_tools, # type: ignore[arg-type] + model=model, + ) + return cls(assistant_id=assistant.id, async_client=async_client, **kwargs) + + async def ainvoke( + self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> OutputType: + """Async invoke assistant. + + Args: + input: Runnable input dict that can have: + content: User message when starting a new run. + thread_id: Existing thread to use. 
+ run_id: Existing run to use. Should only be supplied when providing + the tool output for a required action after an initial invocation. + message_metadata: Metadata to associate with a new message. + thread_metadata: Metadata to associate with new thread. Only relevant + when a new thread is created. + instructions: Overrides the instructions of the assistant. + additional_instructions: Appends additional instructions. + model: Override Assistant model for this run. + tools: Override Assistant tools for this run. + parallel_tool_calls: Allow Assistant to set parallel_tool_calls + for this run. + top_p: Override Assistant top_p for this run. + temperature: Override Assistant temperature for this run. + max_completion_tokens: Allow setting max_completion_tokens for this run. + max_prompt_tokens: Allow setting max_prompt_tokens for this run. + run_metadata: Metadata to associate with new run. + config: Runnable config. Defaults to None. + kwargs: Additional arguments. + + Return: + If self.as_agent, will return + Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. + Otherwise, will return OpenAI types + Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]]. + """ + + config = config or {} + callback_manager = CallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + inheritable_tags=config.get("tags"), + inheritable_metadata=config.get("metadata"), + ) + run_manager = callback_manager.on_chain_start( + dumpd(self), input, name=config.get("run_name") or self.get_name() + ) + try: + # Being run within AgentExecutor and there are tool outputs to submit. + if self.as_agent and input.get("intermediate_steps"): + tool_outputs = await self._aparse_intermediate_steps( + input["intermediate_steps"] + ) + run = await self.async_client.beta.threads.runs.submit_tool_outputs( + **tool_outputs + ) + # Starting a new thread and a new run. + elif "thread_id" not in input: + thread = { + "messages": [ + { + "role": "user", + "content": input["content"], + "metadata": input.get("message_metadata"), + } + ], + "metadata": input.get("thread_metadata"), + } + run = await self._acreate_thread_and_run(input, thread) + # Starting a new run in an existing thread. + elif "run_id" not in input: + _ = await self.async_client.beta.threads.messages.create( + input["thread_id"], + content=input["content"], + role="user", + metadata=input.get("message_metadata"), + ) + run = await self._acreate_run(input) + # Submitting tool outputs to an existing run, outside the AgentExecutor + # framework. 
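+            # (i.e. the caller supplied run_id, thread_id and tool_outputs directly).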
+ else: + run = await self.async_client.beta.threads.runs.submit_tool_outputs( + **input + ) + run = await self._await_for_run(run.id, run.thread_id) + except BaseException as e: + run_manager.on_chain_error(e) + raise e + try: + response = self._get_response(run) + except BaseException as e: + run_manager.on_chain_error(e, metadata=run.dict()) + raise e + else: + run_manager.on_chain_end(response) + return response + + def _parse_intermediate_steps( + self, intermediate_steps: list[tuple[OpenAIAssistantAction, str]] + ) -> dict: + last_action, last_output = intermediate_steps[-1] + run = self._wait_for_run(last_action.run_id, last_action.thread_id) + required_tool_call_ids = set() + if run.required_action: + required_tool_call_ids = { + tc.id for tc in run.required_action.submit_tool_outputs.tool_calls + } + tool_outputs = [ + {"output": str(output), "tool_call_id": action.tool_call_id} + for action, output in intermediate_steps + if action.tool_call_id in required_tool_call_ids + ] + submit_tool_outputs = { + "tool_outputs": tool_outputs, + "run_id": last_action.run_id, + "thread_id": last_action.thread_id, + } + return submit_tool_outputs + + def _create_run(self, input: dict) -> Any: + params = { + k: v + for k, v in input.items() + if k + in ( + "instructions", + "model", + "tools", + "additional_instructions", + "parallel_tool_calls", + "top_p", + "temperature", + "max_completion_tokens", + "max_prompt_tokens", + "run_metadata", + ) + } + return self.client.beta.threads.runs.create( + input["thread_id"], + assistant_id=self.assistant_id, + **params, + ) + + def _create_thread_and_run(self, input: dict, thread: dict) -> Any: + params = { + k: v + for k, v in input.items() + if k + in ( + "instructions", + "model", + "tools", + "parallel_tool_calls", + "top_p", + "temperature", + "max_completion_tokens", + "max_prompt_tokens", + "run_metadata", + ) + } + run = self.client.beta.threads.create_and_run( + assistant_id=self.assistant_id, + thread=thread, + **params, + ) + return run + + def _get_response(self, run: Any) -> Any: + # TODO: Pagination + + if run.status == "completed": + import openai + + major_version = int(openai.version.VERSION.split(".")[0]) + minor_version = int(openai.version.VERSION.split(".")[1]) + version_gte_1_14 = (major_version > 1) or ( + major_version == 1 and minor_version >= 14 + ) + + messages = self.client.beta.threads.messages.list( + run.thread_id, order="asc" + ) + new_messages = [msg for msg in messages if msg.run_id == run.id] + if not self.as_agent: + return new_messages + answer: Any = [ + msg_content for msg in new_messages for msg_content in msg.content + ] + attachments = [ + attachment for msg in new_messages for attachment in msg.attachments + ] + if all( + ( + isinstance(content, openai.types.beta.threads.TextContentBlock) + if version_gte_1_14 + else isinstance( + content, + openai.types.beta.threads.MessageContentText, # type: ignore[attr-defined] + ) + ) + for content in answer + ): + answer = "\n".join(content.text.value for content in answer) + return OpenAIAssistantFinish( + return_values={ + "output": answer, + "thread_id": run.thread_id, + "run_id": run.id, + "attachments": attachments, + }, + log="", + run_id=run.id, + thread_id=run.thread_id, + ) + elif run.status == "requires_action": + if not self.as_agent: + return run.required_action.submit_tool_outputs.tool_calls + actions = [] + for tool_call in run.required_action.submit_tool_outputs.tool_calls: + function = tool_call.function + try: + args = json.loads(function.arguments, 
strict=False) + except JSONDecodeError as e: + raise ValueError( + f"Received invalid JSON function arguments: " + f"{function.arguments} for function {function.name}" + ) from e + if len(args) == 1 and "__arg1" in args: + args = args["__arg1"] + actions.append( + OpenAIAssistantAction( + tool=function.name, + tool_input=args, + tool_call_id=tool_call.id, + log="", + run_id=run.id, + thread_id=run.thread_id, + ) + ) + return actions + else: + run_info = json.dumps(run.dict(), indent=2) + raise ValueError( + f"Unexpected run status: {run.status}. Full run info:\n\n{run_info})" + ) + + def _wait_for_run(self, run_id: str, thread_id: str) -> Any: + in_progress = True + while in_progress: + run = self.client.beta.threads.runs.retrieve(run_id, thread_id=thread_id) + in_progress = run.status in ("in_progress", "queued") + if in_progress: + sleep(self.check_every_ms / 1000) + return run + + async def _aparse_intermediate_steps( + self, intermediate_steps: list[tuple[OpenAIAssistantAction, str]] + ) -> dict: + last_action, last_output = intermediate_steps[-1] + run = self._wait_for_run(last_action.run_id, last_action.thread_id) + required_tool_call_ids = set() + if run.required_action: + required_tool_call_ids = { + tc.id for tc in run.required_action.submit_tool_outputs.tool_calls + } + tool_outputs = [ + {"output": str(output), "tool_call_id": action.tool_call_id} + for action, output in intermediate_steps + if action.tool_call_id in required_tool_call_ids + ] + submit_tool_outputs = { + "tool_outputs": tool_outputs, + "run_id": last_action.run_id, + "thread_id": last_action.thread_id, + } + return submit_tool_outputs + + async def _acreate_run(self, input: dict) -> Any: + params = { + k: v + for k, v in input.items() + if k + in ( + "instructions", + "model", + "tools", + "additional_instructions", + "parallel_tool_calls", + "top_p", + "temperature", + "max_completion_tokens", + "max_prompt_tokens", + "run_metadata", + ) + } + return await self.async_client.beta.threads.runs.create( + input["thread_id"], + assistant_id=self.assistant_id, + **params, + ) + + async def _acreate_thread_and_run(self, input: dict, thread: dict) -> Any: + params = { + k: v + for k, v in input.items() + if k + in ( + "instructions", + "model", + "tools", + "parallel_tool_calls", + "top_p", + "temperature", + "max_completion_tokens", + "max_prompt_tokens", + "run_metadata", + ) + } + run = await self.async_client.beta.threads.create_and_run( + assistant_id=self.assistant_id, + thread=thread, + **params, + ) + return run + + async def _aget_response(self, run: Any) -> Any: + # TODO: Pagination + + if run.status == "completed": + import openai + + major_version = int(openai.version.VERSION.split(".")[0]) + minor_version = int(openai.version.VERSION.split(".")[1]) + version_gte_1_14 = (major_version > 1) or ( + major_version == 1 and minor_version >= 14 + ) + + messages = await self.async_client.beta.threads.messages.list( + run.thread_id, order="asc" + ) + new_messages = [msg for msg in messages if msg.run_id == run.id] + if not self.as_agent: + return new_messages + answer: Any = [ + msg_content for msg in new_messages for msg_content in msg.content + ] + if all( + ( + isinstance(content, openai.types.beta.threads.TextContentBlock) + if version_gte_1_14 + else isinstance( + content, + openai.types.beta.threads.MessageContentText, # type: ignore[attr-defined] + ) + ) + for content in answer + ): + answer = "\n".join(content.text.value for content in answer) + return OpenAIAssistantFinish( + return_values={ + "output": 
answer, + "thread_id": run.thread_id, + "run_id": run.id, + }, + log="", + run_id=run.id, + thread_id=run.thread_id, + ) + elif run.status == "requires_action": + if not self.as_agent: + return run.required_action.submit_tool_outputs.tool_calls + actions = [] + for tool_call in run.required_action.submit_tool_outputs.tool_calls: + function = tool_call.function + try: + args = json.loads(function.arguments, strict=False) + except JSONDecodeError as e: + raise ValueError( + f"Received invalid JSON function arguments: " + f"{function.arguments} for function {function.name}" + ) from e + if len(args) == 1 and "__arg1" in args: + args = args["__arg1"] + actions.append( + OpenAIAssistantAction( + tool=function.name, + tool_input=args, + tool_call_id=tool_call.id, + log="", + run_id=run.id, + thread_id=run.thread_id, + ) + ) + return actions + else: + run_info = json.dumps(run.dict(), indent=2) + raise ValueError( + f"Unexpected run status: {run.status}. Full run info:\n\n{run_info})" + ) + + async def _await_for_run(self, run_id: str, thread_id: str) -> Any: + in_progress = True + while in_progress: + run = await self.async_client.beta.threads.runs.retrieve( + run_id, thread_id=thread_id + ) + in_progress = run.status in ("in_progress", "queued") + if in_progress: + await asyncio.sleep(self.check_every_ms / 1000) + return run diff --git a/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__init__.py b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..8c2abeca Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__pycache__/agent_token_buffer_memory.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__pycache__/agent_token_buffer_memory.cpython-312.pyc new file mode 100644 index 00000000..edadd7a5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__pycache__/agent_token_buffer_memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..03b4195d Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/openai_functions_agent/agent_token_buffer_memory.py b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/agent_token_buffer_memory.py new file mode 100644 index 00000000..284e0e72 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/agent_token_buffer_memory.py @@ -0,0 +1,101 @@ +"""Memory used to save agent output AND intermediate steps.""" + +from typing import Any + +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage, get_buffer_string + +from langchain.agents.format_scratchpad import ( + format_to_openai_function_messages, + format_to_tool_messages, +) +from langchain.memory.chat_memory import 
BaseChatMemory + + +class AgentTokenBufferMemory(BaseChatMemory): + """Memory used to save agent output AND intermediate steps. + + Parameters: + human_prefix: Prefix for human messages. Default is "Human". + ai_prefix: Prefix for AI messages. Default is "AI". + llm: Language model. + memory_key: Key to save memory under. Default is "history". + max_token_limit: Maximum number of tokens to keep in the buffer. + Once the buffer exceeds this many tokens, the oldest + messages will be pruned. Default is 12000. + return_messages: Whether to return messages. Default is True. + output_key: Key to save output under. Default is "output". + intermediate_steps_key: Key to save intermediate steps under. + Default is "intermediate_steps". + format_as_tools: Whether to format as tools. Default is False. + """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + llm: BaseLanguageModel + memory_key: str = "history" + max_token_limit: int = 12000 + """The max number of tokens to keep in the buffer. + Once the buffer exceeds this many tokens, the oldest messages will be pruned.""" + return_messages: bool = True + output_key: str = "output" + intermediate_steps_key: str = "intermediate_steps" + format_as_tools: bool = False + + @property + def buffer(self) -> list[BaseMessage]: + """String buffer of memory.""" + return self.chat_memory.messages + + @property + def memory_variables(self) -> list[str]: + """Always return list of memory variables. + + :meta private: + """ + return [self.memory_key] + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer. + + Args: + inputs: Inputs to the agent. + + Returns: + A dictionary with the history buffer. + """ + if self.return_messages: + final_buffer: Any = self.buffer + else: + final_buffer = get_buffer_string( + self.buffer, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + return {self.memory_key: final_buffer} + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, Any]) -> None: + """Save context from this conversation to buffer. Pruned. + + Args: + inputs: Inputs to the agent. + outputs: Outputs from the agent. 
+ """ + input_str, output_str = self._get_input_output(inputs, outputs) + self.chat_memory.add_user_message(input_str) + format_to_messages = ( + format_to_tool_messages + if self.format_as_tools + else format_to_openai_function_messages + ) + steps = format_to_messages(outputs[self.intermediate_steps_key]) + for msg in steps: + self.chat_memory.add_message(msg) + self.chat_memory.add_ai_message(output_str) + # Prune buffer if it exceeds max token limit + buffer = self.chat_memory.messages + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + if curr_buffer_length > self.max_token_limit: + while curr_buffer_length > self.max_token_limit: + buffer.pop(0) + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) diff --git a/venv/Lib/site-packages/langchain/agents/openai_functions_agent/base.py b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/base.py new file mode 100644 index 00000000..8442f226 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/openai_functions_agent/base.py @@ -0,0 +1,372 @@ +"""Module implements an agent that uses OpenAI's APIs function enabled API.""" + +from collections.abc import Sequence +from typing import Any, Optional, Union + +from langchain_core._api import deprecated +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.callbacks import BaseCallbackManager, Callbacks +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import ( + BaseMessage, + SystemMessage, +) +from langchain_core.prompts import BasePromptTemplate +from langchain_core.prompts.chat import ( + BaseMessagePromptTemplate, + ChatPromptTemplate, + HumanMessagePromptTemplate, + MessagesPlaceholder, +) +from langchain_core.runnables import Runnable, RunnablePassthrough +from langchain_core.tools import BaseTool +from langchain_core.utils.function_calling import convert_to_openai_function +from pydantic import model_validator +from typing_extensions import Self + +from langchain.agents import BaseSingleActionAgent +from langchain.agents.format_scratchpad.openai_functions import ( + format_to_openai_function_messages, +) +from langchain.agents.output_parsers.openai_functions import ( + OpenAIFunctionsAgentOutputParser, +) + + +@deprecated("0.1.0", alternative="create_openai_functions_agent", removal="1.0") +class OpenAIFunctionsAgent(BaseSingleActionAgent): + """An Agent driven by OpenAIs function powered API. + + Args: + llm: This should be an instance of ChatOpenAI, specifically a model + that supports using `functions`. + tools: The tools this agent has access to. + prompt: The prompt for this agent, should support agent_scratchpad as one + of the variables. For an easy way to construct this prompt, use + `OpenAIFunctionsAgent.create_prompt(...)` + output_parser: The output parser for this agent. Should be an instance of + OpenAIFunctionsAgentOutputParser. + Defaults to OpenAIFunctionsAgentOutputParser. + """ + + llm: BaseLanguageModel + tools: Sequence[BaseTool] + prompt: BasePromptTemplate + output_parser: type[OpenAIFunctionsAgentOutputParser] = ( + OpenAIFunctionsAgentOutputParser + ) + + def get_allowed_tools(self) -> list[str]: + """Get allowed tools.""" + return [t.name for t in self.tools] + + @model_validator(mode="after") + def validate_prompt(self) -> Self: + """Validate prompt. + + Args: + values: Values to validate. + + Returns: + Validated values. + + Raises: + ValueError: If `agent_scratchpad` is not in the prompt. 
+ """ + prompt: BasePromptTemplate = self.prompt + if "agent_scratchpad" not in prompt.input_variables: + raise ValueError( + "`agent_scratchpad` should be one of the variables in the prompt, " + f"got {prompt.input_variables}" + ) + return self + + @property + def input_keys(self) -> list[str]: + """Get input keys. Input refers to user input here.""" + return ["input"] + + @property + def functions(self) -> list[dict]: + """Get functions.""" + + return [dict(convert_to_openai_function(t)) for t in self.tools] + + def plan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + with_functions: bool = True, + **kwargs: Any, + ) -> Union[AgentAction, AgentFinish]: + """Given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + callbacks: Callbacks to use. Defaults to None. + with_functions: Whether to use functions. Defaults to True. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + If the agent is finished, returns an AgentFinish. + If the agent is not finished, returns an AgentAction. + """ + agent_scratchpad = format_to_openai_function_messages(intermediate_steps) + selected_inputs = { + k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad" + } + full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad) + prompt = self.prompt.format_prompt(**full_inputs) + messages = prompt.to_messages() + if with_functions: + predicted_message = self.llm.predict_messages( + messages, + functions=self.functions, + callbacks=callbacks, + ) + else: + predicted_message = self.llm.predict_messages( + messages, + callbacks=callbacks, + ) + agent_decision = self.output_parser._parse_ai_message(predicted_message) + return agent_decision + + async def aplan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[AgentAction, AgentFinish]: + """Async given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + callbacks: Callbacks to use. Defaults to None. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + If the agent is finished, returns an AgentFinish. + If the agent is not finished, returns an AgentAction. + """ + agent_scratchpad = format_to_openai_function_messages(intermediate_steps) + selected_inputs = { + k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad" + } + full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad) + prompt = self.prompt.format_prompt(**full_inputs) + messages = prompt.to_messages() + predicted_message = await self.llm.apredict_messages( + messages, functions=self.functions, callbacks=callbacks + ) + agent_decision = self.output_parser._parse_ai_message(predicted_message) + return agent_decision + + def return_stopped_response( + self, + early_stopping_method: str, + intermediate_steps: list[tuple[AgentAction, str]], + **kwargs: Any, + ) -> AgentFinish: + """Return response when agent has been stopped due to max iterations. + + Args: + early_stopping_method: The early stopping method to use. + intermediate_steps: Intermediate steps. + **kwargs: User inputs. + + Returns: + AgentFinish. + + Raises: + ValueError: If `early_stopping_method` is not `force` or `generate`. + ValueError: If `agent_decision` is not an AgentAction. 
+ """ + if early_stopping_method == "force": + # `force` just returns a constant string + return AgentFinish( + {"output": "Agent stopped due to iteration limit or time limit."}, "" + ) + elif early_stopping_method == "generate": + # Generate does one final forward pass + agent_decision = self.plan( + intermediate_steps, with_functions=False, **kwargs + ) + if isinstance(agent_decision, AgentFinish): + return agent_decision + else: + raise ValueError( + f"got AgentAction with no functions provided: {agent_decision}" + ) + else: + raise ValueError( + "early_stopping_method should be one of `force` or `generate`, " + f"got {early_stopping_method}" + ) + + @classmethod + def create_prompt( + cls, + system_message: Optional[SystemMessage] = SystemMessage( + content="You are a helpful AI assistant." + ), + extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None, + ) -> ChatPromptTemplate: + """Create prompt for this agent. + + Args: + system_message: Message to use as the system message that will be the + first in the prompt. + extra_prompt_messages: Prompt messages that will be placed between the + system message and the new human input. + + Returns: + A prompt template to pass into this agent. + """ + _prompts = extra_prompt_messages or [] + messages: list[Union[BaseMessagePromptTemplate, BaseMessage]] + if system_message: + messages = [system_message] + else: + messages = [] + + messages.extend( + [ + *_prompts, + HumanMessagePromptTemplate.from_template("{input}"), + MessagesPlaceholder(variable_name="agent_scratchpad"), + ] + ) + return ChatPromptTemplate(messages=messages) + + @classmethod + def from_llm_and_tools( + cls, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + callback_manager: Optional[BaseCallbackManager] = None, + extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None, + system_message: Optional[SystemMessage] = SystemMessage( + content="You are a helpful AI assistant." + ), + **kwargs: Any, + ) -> BaseSingleActionAgent: + """Construct an agent from an LLM and tools. + + Args: + llm: The LLM to use as the agent. + tools: The tools to use. + callback_manager: The callback manager to use. Defaults to None. + extra_prompt_messages: Extra prompt messages to use. Defaults to None. + system_message: The system message to use. + Defaults to a default system message. + kwargs: Additional parameters to pass to the agent. + """ + prompt = cls.create_prompt( + extra_prompt_messages=extra_prompt_messages, + system_message=system_message, + ) + return cls( # type: ignore[call-arg] + llm=llm, + prompt=prompt, + tools=tools, + callback_manager=callback_manager, + **kwargs, + ) + + +def create_openai_functions_agent( + llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate +) -> Runnable: + """Create an agent that uses OpenAI function calling. + + Args: + llm: LLM to use as the agent. Should work with OpenAI function calling, + so either be an OpenAI model that supports that or a wrapper of + a different model that adds in equivalent support. + tools: Tools this agent has access to. + prompt: The prompt to use. See Prompt section below for more. + + Returns: + A Runnable sequence representing an agent. It takes as input all the same input + variables as the prompt passed in does. It returns as output either an + AgentAction or AgentFinish. + + Raises: + ValueError: If `agent_scratchpad` is not in the prompt. + + Example: + + Creating an agent with no memory + + .. 
code-block:: python + + from langchain_community.chat_models import ChatOpenAI + from langchain.agents import AgentExecutor, create_openai_functions_agent + from langchain import hub + + prompt = hub.pull("hwchase17/openai-functions-agent") + model = ChatOpenAI() + tools = ... + + agent = create_openai_functions_agent(model, tools, prompt) + agent_executor = AgentExecutor(agent=agent, tools=tools) + + agent_executor.invoke({"input": "hi"}) + + # Using with chat history + from langchain_core.messages import AIMessage, HumanMessage + agent_executor.invoke( + { + "input": "what's my name?", + "chat_history": [ + HumanMessage(content="hi! my name is bob"), + AIMessage(content="Hello Bob! How can I assist you today?"), + ], + } + ) + + Prompt: + + The agent prompt must have an `agent_scratchpad` key that is a + ``MessagesPlaceholder``. Intermediate agent actions and tool output + messages will be passed in here. + + Here's an example: + + .. code-block:: python + + from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant"), + MessagesPlaceholder("chat_history", optional=True), + ("human", "{input}"), + MessagesPlaceholder("agent_scratchpad"), + ] + ) + """ + if "agent_scratchpad" not in ( + prompt.input_variables + list(prompt.partial_variables) + ): + raise ValueError( + "Prompt must have input variable `agent_scratchpad`, but wasn't found. " + f"Found {prompt.input_variables} instead." + ) + llm_with_tools = llm.bind(functions=[convert_to_openai_function(t) for t in tools]) + agent = ( + RunnablePassthrough.assign( + agent_scratchpad=lambda x: format_to_openai_function_messages( + x["intermediate_steps"] + ) + ) + | prompt + | llm_with_tools + | OpenAIFunctionsAgentOutputParser() + ) + return agent diff --git a/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/__init__.py b/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..b906d353 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..4d3b26ed Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/base.py b/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/base.py new file mode 100644 index 00000000..2931f7e7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/openai_functions_multi_agent/base.py @@ -0,0 +1,324 @@ +"""Module implements an agent that uses OpenAI's APIs function enabled API.""" + +import json +from collections.abc import Sequence +from json import JSONDecodeError +from typing import Any, Optional, Union + +from langchain_core._api import deprecated +from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish +from 
langchain_core.callbacks import BaseCallbackManager, Callbacks +from langchain_core.exceptions import OutputParserException +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import ( + AIMessage, + BaseMessage, + SystemMessage, +) +from langchain_core.prompts import BasePromptTemplate +from langchain_core.prompts.chat import ( + BaseMessagePromptTemplate, + ChatPromptTemplate, + HumanMessagePromptTemplate, + MessagesPlaceholder, +) +from langchain_core.tools import BaseTool +from pydantic import model_validator +from typing_extensions import Self + +from langchain.agents import BaseMultiActionAgent +from langchain.agents.format_scratchpad.openai_functions import ( + format_to_openai_function_messages, +) + +# For backwards compatibility +_FunctionsAgentAction = AgentActionMessageLog + + +def _parse_ai_message(message: BaseMessage) -> Union[list[AgentAction], AgentFinish]: + """Parse an AI message.""" + if not isinstance(message, AIMessage): + raise TypeError(f"Expected an AI message got {type(message)}") + + function_call = message.additional_kwargs.get("function_call", {}) + + if function_call: + try: + arguments = json.loads(function_call["arguments"], strict=False) + except JSONDecodeError: + raise OutputParserException( + f"Could not parse tool input: {function_call} because " + f"the `arguments` is not valid JSON." + ) + + try: + tools = arguments["actions"] + except (TypeError, KeyError): + raise OutputParserException( + f"Could not parse tool input: {function_call} because " + f"the `arguments` JSON does not contain `actions` key." + ) + + final_tools: list[AgentAction] = [] + for tool_schema in tools: + if "action" in tool_schema: + _tool_input = tool_schema["action"] + else: + # drop action_name from schema + _tool_input = tool_schema.copy() + del _tool_input["action_name"] + function_name = tool_schema["action_name"] + + # HACK HACK HACK: + # The code that encodes tool input into Open AI uses a special variable + # name called `__arg1` to handle old style tools that do not expose a + # schema and expect a single string argument as an input. + # We unpack the argument here if it exists. + # Open AI does not support passing in a JSON array as an argument. + if "__arg1" in _tool_input: + tool_input = _tool_input["__arg1"] + else: + tool_input = _tool_input + + content_msg = f"responded: {message.content}\n" if message.content else "\n" + log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n" + _tool = _FunctionsAgentAction( + tool=function_name, + tool_input=tool_input, + log=log, + message_log=[message], + ) + final_tools.append(_tool) + return final_tools + + return AgentFinish( + return_values={"output": message.content}, log=str(message.content) + ) + + +@deprecated("0.1.0", alternative="create_openai_tools_agent", removal="1.0") +class OpenAIMultiFunctionsAgent(BaseMultiActionAgent): + """Agent driven by OpenAIs function powered API. + + Args: + llm: This should be an instance of ChatOpenAI, specifically a model + that supports using `functions`. + tools: The tools this agent has access to. + prompt: The prompt for this agent, should support agent_scratchpad as one + of the variables. 
For an easy way to construct this prompt, use + `OpenAIMultiFunctionsAgent.create_prompt(...)` + """ + + llm: BaseLanguageModel + tools: Sequence[BaseTool] + prompt: BasePromptTemplate + + def get_allowed_tools(self) -> list[str]: + """Get allowed tools.""" + return [t.name for t in self.tools] + + @model_validator(mode="after") + def validate_prompt(self) -> Self: + prompt: BasePromptTemplate = self.prompt + if "agent_scratchpad" not in prompt.input_variables: + raise ValueError( + "`agent_scratchpad` should be one of the variables in the prompt, " + f"got {prompt.input_variables}" + ) + return self + + @property + def input_keys(self) -> list[str]: + """Get input keys. Input refers to user input here.""" + return ["input"] + + @property + def functions(self) -> list[dict]: + """Get the functions for the agent.""" + enum_vals = [t.name for t in self.tools] + tool_selection = { + # OpenAI functions returns a single tool invocation + # Here we force the single tool invocation it returns to + # itself be a list of tool invocations. We do this by constructing + # a new tool that has one argument which is a list of tools + # to use. + "name": "tool_selection", + "description": "A list of actions to take.", + "parameters": { + "title": "tool_selection", + "description": "A list of actions to take.", + "type": "object", + "properties": { + "actions": { + "title": "actions", + "type": "array", + "items": { + # This is a custom item which bundles the action_name + # and the action. We do this because some actions + # could have the same schema, and without this there + # is no way to differentiate them. + "title": "tool_call", + "type": "object", + "properties": { + # This is the name of the action to take + "action_name": { + "title": "action_name", + "enum": enum_vals, + "type": "string", + "description": ( + "Name of the action to take. The name " + "provided here should match up with the " + "parameters for the action below." + ), + }, + # This is the action to take. + "action": { + "title": "Action", + "anyOf": [ + { + "title": t.name, + "type": "object", + "properties": t.args, + } + for t in self.tools + ], + }, + }, + "required": ["action_name", "action"], + }, + } + }, + "required": ["actions"], + }, + } + return [tool_selection] + + def plan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[list[AgentAction], AgentFinish]: + """Given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. + callbacks: Callbacks to use. Default is None. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + agent_scratchpad = format_to_openai_function_messages(intermediate_steps) + selected_inputs = { + k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad" + } + full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad) + prompt = self.prompt.format_prompt(**full_inputs) + messages = prompt.to_messages() + predicted_message = self.llm.predict_messages( + messages, functions=self.functions, callbacks=callbacks + ) + agent_decision = _parse_ai_message(predicted_message) + return agent_decision + + async def aplan( + self, + intermediate_steps: list[tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[list[AgentAction], AgentFinish]: + """Async given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations. 
+ callbacks: Callbacks to use. Default is None. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + agent_scratchpad = format_to_openai_function_messages(intermediate_steps) + selected_inputs = { + k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad" + } + full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad) + prompt = self.prompt.format_prompt(**full_inputs) + messages = prompt.to_messages() + predicted_message = await self.llm.apredict_messages( + messages, functions=self.functions, callbacks=callbacks + ) + agent_decision = _parse_ai_message(predicted_message) + return agent_decision + + @classmethod + def create_prompt( + cls, + system_message: Optional[SystemMessage] = SystemMessage( + content="You are a helpful AI assistant." + ), + extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None, + ) -> BasePromptTemplate: + """Create prompt for this agent. + + Args: + system_message: Message to use as the system message that will be the + first in the prompt. + extra_prompt_messages: Prompt messages that will be placed between the + system message and the new human input. Default is None. + + Returns: + A prompt template to pass into this agent. + """ + _prompts = extra_prompt_messages or [] + messages: list[Union[BaseMessagePromptTemplate, BaseMessage]] + if system_message: + messages = [system_message] + else: + messages = [] + + messages.extend( + [ + *_prompts, + HumanMessagePromptTemplate.from_template("{input}"), + MessagesPlaceholder(variable_name="agent_scratchpad"), + ] + ) + return ChatPromptTemplate(messages=messages) + + @classmethod + def from_llm_and_tools( + cls, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + callback_manager: Optional[BaseCallbackManager] = None, + extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None, + system_message: Optional[SystemMessage] = SystemMessage( + content="You are a helpful AI assistant." + ), + **kwargs: Any, + ) -> BaseMultiActionAgent: + """Construct an agent from an LLM and tools. + + Args: + llm: The language model to use. + tools: A list of tools to use. + callback_manager: The callback manager to use. Default is None. + extra_prompt_messages: Extra prompt messages to use. Default is None. + system_message: The system message to use. + Default is a default system message. + kwargs: Additional arguments. 
+ """ + prompt = cls.create_prompt( + extra_prompt_messages=extra_prompt_messages, + system_message=system_message, + ) + return cls( # type: ignore[call-arg] + llm=llm, + prompt=prompt, + tools=tools, + callback_manager=callback_manager, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/agents/openai_tools/__init__.py b/venv/Lib/site-packages/langchain/agents/openai_tools/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/agents/openai_tools/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/openai_tools/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..b8eb34ac Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/openai_tools/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/openai_tools/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/openai_tools/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..1dd86b67 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/openai_tools/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/openai_tools/base.py b/venv/Lib/site-packages/langchain/agents/openai_tools/base.py new file mode 100644 index 00000000..fda07b33 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/openai_tools/base.py @@ -0,0 +1,108 @@ +from collections.abc import Sequence +from typing import Optional + +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts.chat import ChatPromptTemplate +from langchain_core.runnables import Runnable, RunnablePassthrough +from langchain_core.tools import BaseTool +from langchain_core.utils.function_calling import convert_to_openai_tool + +from langchain.agents.format_scratchpad.openai_tools import ( + format_to_openai_tool_messages, +) +from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser + + +def create_openai_tools_agent( + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + prompt: ChatPromptTemplate, + strict: Optional[bool] = None, +) -> Runnable: + """Create an agent that uses OpenAI tools. + + Args: + llm: LLM to use as the agent. + tools: Tools this agent has access to. + prompt: The prompt to use. See Prompt section below for more on the expected + input variables. + + Returns: + A Runnable sequence representing an agent. It takes as input all the same input + variables as the prompt passed in does. It returns as output either an + AgentAction or AgentFinish. + + Raises: + ValueError: If the prompt is missing required variables. + + Example: + + .. code-block:: python + + from langchain import hub + from langchain_community.chat_models import ChatOpenAI + from langchain.agents import AgentExecutor, create_openai_tools_agent + + prompt = hub.pull("hwchase17/openai-tools-agent") + model = ChatOpenAI() + tools = ... + + agent = create_openai_tools_agent(model, tools, prompt) + agent_executor = AgentExecutor(agent=agent, tools=tools) + + agent_executor.invoke({"input": "hi"}) + + # Using with chat history + from langchain_core.messages import AIMessage, HumanMessage + agent_executor.invoke( + { + "input": "what's my name?", + "chat_history": [ + HumanMessage(content="hi! my name is bob"), + AIMessage(content="Hello Bob! How can I assist you today?"), + ], + } + ) + + Prompt: + + The agent prompt must have an `agent_scratchpad` key that is a + ``MessagesPlaceholder``. 
Intermediate agent actions and tool output + messages will be passed in here. + + Here's an example: + + .. code-block:: python + + from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant"), + MessagesPlaceholder("chat_history", optional=True), + ("human", "{input}"), + MessagesPlaceholder("agent_scratchpad"), + ] + ) + """ + missing_vars = {"agent_scratchpad"}.difference( + prompt.input_variables + list(prompt.partial_variables) + ) + if missing_vars: + raise ValueError(f"Prompt missing required variables: {missing_vars}") + + llm_with_tools = llm.bind( + tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools] + ) + + agent = ( + RunnablePassthrough.assign( + agent_scratchpad=lambda x: format_to_openai_tool_messages( + x["intermediate_steps"] + ) + ) + | prompt + | llm_with_tools + | OpenAIToolsAgentOutputParser() + ) + return agent diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/__init__.py b/venv/Lib/site-packages/langchain/agents/output_parsers/__init__.py new file mode 100644 index 00000000..c5a8b97b --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/output_parsers/__init__.py @@ -0,0 +1,35 @@ +"""Parsing utils to go from string to AgentAction or Agent Finish. + +AgentAction means that an action should be taken. +This contains the name of the tool to use, the input to pass to that tool, +and a `log` variable (which contains a log of the agent's thinking). + +AgentFinish means that a response should be given. +This contains a `return_values` dictionary. This usually contains a +single `output` key, but can be extended to contain more. +This also contains a `log` variable (which contains a log of the agent's thinking). 
+""" + +from langchain.agents.output_parsers.json import JSONAgentOutputParser +from langchain.agents.output_parsers.openai_functions import ( + OpenAIFunctionsAgentOutputParser, +) +from langchain.agents.output_parsers.react_json_single_input import ( + ReActJsonSingleInputOutputParser, +) +from langchain.agents.output_parsers.react_single_input import ( + ReActSingleInputOutputParser, +) +from langchain.agents.output_parsers.self_ask import SelfAskOutputParser +from langchain.agents.output_parsers.tools import ToolsAgentOutputParser +from langchain.agents.output_parsers.xml import XMLAgentOutputParser + +__all__ = [ + "ReActSingleInputOutputParser", + "SelfAskOutputParser", + "ToolsAgentOutputParser", + "ReActJsonSingleInputOutputParser", + "OpenAIFunctionsAgentOutputParser", + "XMLAgentOutputParser", + "JSONAgentOutputParser", +] diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..7dac2b46 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/json.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/json.cpython-312.pyc new file mode 100644 index 00000000..219bc4a0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/json.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/openai_functions.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/openai_functions.cpython-312.pyc new file mode 100644 index 00000000..3aa5a54c Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/openai_functions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/openai_tools.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/openai_tools.cpython-312.pyc new file mode 100644 index 00000000..26536579 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/openai_tools.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/react_json_single_input.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/react_json_single_input.cpython-312.pyc new file mode 100644 index 00000000..2cfced96 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/react_json_single_input.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/react_single_input.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/react_single_input.cpython-312.pyc new file mode 100644 index 00000000..272eec9c Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/react_single_input.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/self_ask.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/self_ask.cpython-312.pyc new file mode 100644 index 00000000..bce801b5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/self_ask.cpython-312.pyc differ 
diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/tools.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/tools.cpython-312.pyc new file mode 100644 index 00000000..02bcf709 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/tools.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/xml.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/xml.cpython-312.pyc new file mode 100644 index 00000000..d080c3ba Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/output_parsers/__pycache__/xml.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/json.py b/venv/Lib/site-packages/langchain/agents/output_parsers/json.py new file mode 100644 index 00000000..e89a9bfa --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/output_parsers/json.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +import logging +from typing import Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException +from langchain_core.output_parsers.json import parse_json_markdown + +from langchain.agents.agent import AgentOutputParser + +logger = logging.getLogger(__name__) + + +class JSONAgentOutputParser(AgentOutputParser): + """Parses tool invocations and final answers in JSON format. + + Expects output to be in one of two formats. + + If the output signals that an action should be taken, + should be in the below format. This will result in an AgentAction + being returned. + + ``` + { + "action": "search", + "action_input": "2+2" + } + ``` + + If the output signals that a final answer should be given, + should be in the below format. This will result in an AgentFinish + being returned. 
+ + ``` + { + "action": "Final Answer", + "action_input": "4" + } + ``` + """ + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + try: + response = parse_json_markdown(text) + if isinstance(response, list): + # gpt turbo frequently ignores the directive to emit a single action + logger.warning("Got multiple action responses: %s", response) + response = response[0] + if response["action"] == "Final Answer": + return AgentFinish({"output": response["action_input"]}, text) + else: + action_input = response.get("action_input", {}) + if action_input is None: + action_input = {} + return AgentAction(response["action"], action_input, text) + except Exception as e: + raise OutputParserException(f"Could not parse LLM output: {text}") from e + + @property + def _type(self) -> str: + return "json-agent" diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/openai_functions.py b/venv/Lib/site-packages/langchain/agents/output_parsers/openai_functions.py new file mode 100644 index 00000000..67a6423d --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/output_parsers/openai_functions.py @@ -0,0 +1,88 @@ +import json +from json import JSONDecodeError +from typing import Union + +from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish +from langchain_core.exceptions import OutputParserException +from langchain_core.messages import ( + AIMessage, + BaseMessage, +) +from langchain_core.outputs import ChatGeneration, Generation + +from langchain.agents.agent import AgentOutputParser + + +class OpenAIFunctionsAgentOutputParser(AgentOutputParser): + """Parses a message into agent action/finish. + + Is meant to be used with OpenAI models, as it relies on the specific + function_call parameter from OpenAI to convey what tools to use. + + If a function_call parameter is passed, then that is used to get + the tool and tool input. + + If one is not passed, then the AIMessage is assumed to be the final output. + """ + + @property + def _type(self) -> str: + return "openai-functions-agent" + + @staticmethod + def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]: + """Parse an AI message.""" + if not isinstance(message, AIMessage): + raise TypeError(f"Expected an AI message got {type(message)}") + + function_call = message.additional_kwargs.get("function_call", {}) + + if function_call: + function_name = function_call["name"] + try: + if len(function_call["arguments"].strip()) == 0: + # OpenAI returns an empty string for functions containing no args + _tool_input = {} + else: + # otherwise it returns a json object + _tool_input = json.loads(function_call["arguments"], strict=False) + except JSONDecodeError: + raise OutputParserException( + f"Could not parse tool input: {function_call} because " + f"the `arguments` is not valid JSON." + ) + + # HACK HACK HACK: + # The code that encodes tool input into Open AI uses a special variable + # name called `__arg1` to handle old style tools that do not expose a + # schema and expect a single string argument as an input. + # We unpack the argument here if it exists. + # Open AI does not support passing in a JSON array as an argument. 
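+            # e.g. {"__arg1": "San Francisco"} is unpacked to the plain string "San Francisco".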
+ if "__arg1" in _tool_input: + tool_input = _tool_input["__arg1"] + else: + tool_input = _tool_input + + content_msg = f"responded: {message.content}\n" if message.content else "\n" + log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n" + return AgentActionMessageLog( + tool=function_name, + tool_input=tool_input, + log=log, + message_log=[message], + ) + + return AgentFinish( + return_values={"output": message.content}, log=str(message.content) + ) + + def parse_result( + self, result: list[Generation], *, partial: bool = False + ) -> Union[AgentAction, AgentFinish]: + if not isinstance(result[0], ChatGeneration): + raise ValueError("This output parser only works on ChatGeneration output") + message = result[0].message + return self._parse_ai_message(message) + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + raise ValueError("Can only parse messages") diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/openai_tools.py b/venv/Lib/site-packages/langchain/agents/output_parsers/openai_tools.py new file mode 100644 index 00000000..2c580842 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/output_parsers/openai_tools.py @@ -0,0 +1,65 @@ +from typing import Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.messages import BaseMessage +from langchain_core.outputs import ChatGeneration, Generation + +from langchain.agents.agent import MultiActionAgentOutputParser +from langchain.agents.output_parsers.tools import ( + ToolAgentAction, + parse_ai_message_to_tool_action, +) + +OpenAIToolAgentAction = ToolAgentAction + + +def parse_ai_message_to_openai_tool_action( + message: BaseMessage, +) -> Union[list[AgentAction], AgentFinish]: + """Parse an AI message potentially containing tool_calls.""" + tool_actions = parse_ai_message_to_tool_action(message) + if isinstance(tool_actions, AgentFinish): + return tool_actions + final_actions: list[AgentAction] = [] + for action in tool_actions: + if isinstance(action, ToolAgentAction): + final_actions.append( + OpenAIToolAgentAction( + tool=action.tool, + tool_input=action.tool_input, + log=action.log, + message_log=action.message_log, + tool_call_id=action.tool_call_id, + ) + ) + else: + final_actions.append(action) + return final_actions + + +class OpenAIToolsAgentOutputParser(MultiActionAgentOutputParser): + """Parses a message into agent actions/finish. + + Is meant to be used with OpenAI models, as it relies on the specific + tool_calls parameter from OpenAI to convey what tools to use. + + If a tool_calls parameter is passed, then that is used to get + the tool names and tool inputs. + + If one is not passed, then the AIMessage is assumed to be the final output. 
+ """ + + @property + def _type(self) -> str: + return "openai-tools-agent-output-parser" + + def parse_result( + self, result: list[Generation], *, partial: bool = False + ) -> Union[list[AgentAction], AgentFinish]: + if not isinstance(result[0], ChatGeneration): + raise ValueError("This output parser only works on ChatGeneration output") + message = result[0].message + return parse_ai_message_to_openai_tool_action(message) + + def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]: + raise ValueError("Can only parse messages") diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/react_json_single_input.py b/venv/Lib/site-packages/langchain/agents/output_parsers/react_json_single_input.py new file mode 100644 index 00000000..75a473c6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/output_parsers/react_json_single_input.py @@ -0,0 +1,79 @@ +import json +import re +from re import Pattern +from typing import Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException + +from langchain.agents.agent import AgentOutputParser +from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS + +FINAL_ANSWER_ACTION = "Final Answer:" + + +class ReActJsonSingleInputOutputParser(AgentOutputParser): + """Parses ReAct-style LLM calls that have a single tool input in json format. + + Expects output to be in one of two formats. + + If the output signals that an action should be taken, + should be in the below format. This will result in an AgentAction + being returned. + + ``` + Thought: agent thought here + Action: + ``` + { + "action": "search", + "action_input": "what is the temperature in SF" + } + ``` + ``` + + If the output signals that a final answer should be given, + should be in the below format. This will result in an AgentFinish + being returned. + + ``` + Thought: agent thought here + Final Answer: The temperature is 100 degrees + ``` + + """ + + pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL) + """Regex pattern to parse the output.""" + + def get_format_instructions(self) -> str: + return FORMAT_INSTRUCTIONS + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + includes_answer = FINAL_ANSWER_ACTION in text + try: + found = self.pattern.search(text) + if not found: + # Fast fail to parse Final Answer. 
+ raise ValueError("action not found") + action = found.group(1) + response = json.loads(action.strip()) + includes_action = "action" in response + if includes_answer and includes_action: + raise OutputParserException( + "Parsing LLM output produced a final answer " + f"and a parse-able action: {text}" + ) + return AgentAction( + response["action"], response.get("action_input", {}), text + ) + + except Exception: + if not includes_answer: + raise OutputParserException(f"Could not parse LLM output: {text}") + output = text.split(FINAL_ANSWER_ACTION)[-1].strip() + return AgentFinish({"output": output}, text) + + @property + def _type(self) -> str: + return "react-json-single-input" diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/react_single_input.py b/venv/Lib/site-packages/langchain/agents/output_parsers/react_single_input.py new file mode 100644 index 00000000..b853cb04 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/output_parsers/react_single_input.py @@ -0,0 +1,95 @@ +import re +from typing import Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException + +from langchain.agents.agent import AgentOutputParser +from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS + +FINAL_ANSWER_ACTION = "Final Answer:" +MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = ( + "Invalid Format: Missing 'Action:' after 'Thought:'" +) +MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = ( + "Invalid Format: Missing 'Action Input:' after 'Action:'" +) +FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = ( + "Parsing LLM output produced both a final answer and a parse-able action:" +) + + +class ReActSingleInputOutputParser(AgentOutputParser): + """Parses ReAct-style LLM calls that have a single tool input. + + Expects output to be in one of two formats. + + If the output signals that an action should be taken, + should be in the below format. This will result in an AgentAction + being returned. + + ``` + Thought: agent thought here + Action: search + Action Input: what is the temperature in SF? + ``` + + If the output signals that a final answer should be given, + should be in the below format. This will result in an AgentFinish + being returned. 
+ + ``` + Thought: agent thought here + Final Answer: The temperature is 100 degrees + ``` + + """ + + def get_format_instructions(self) -> str: + return FORMAT_INSTRUCTIONS + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + includes_answer = FINAL_ANSWER_ACTION in text + regex = ( + r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)" + ) + action_match = re.search(regex, text, re.DOTALL) + if action_match: + if includes_answer: + raise OutputParserException( + f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}" + ) + action = action_match.group(1).strip() + action_input = action_match.group(2) + tool_input = action_input.strip(" ") + tool_input = tool_input.strip('"') + + return AgentAction(action, tool_input, text) + + elif includes_answer: + return AgentFinish( + {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text + ) + + if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL): + raise OutputParserException( + f"Could not parse LLM output: `{text}`", + observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, + llm_output=text, + send_to_llm=True, + ) + elif not re.search( + r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL + ): + raise OutputParserException( + f"Could not parse LLM output: `{text}`", + observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, + llm_output=text, + send_to_llm=True, + ) + else: + raise OutputParserException(f"Could not parse LLM output: `{text}`") + + @property + def _type(self) -> str: + return "react-single-input" diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/self_ask.py b/venv/Lib/site-packages/langchain/agents/output_parsers/self_ask.py new file mode 100644 index 00000000..05ecafe8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/output_parsers/self_ask.py @@ -0,0 +1,50 @@ +from collections.abc import Sequence +from typing import Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException + +from langchain.agents.agent import AgentOutputParser + + +class SelfAskOutputParser(AgentOutputParser): + """Parses self-ask style LLM calls. + + Expects output to be in one of two formats. + + If the output signals that an action should be taken, + should be in the below format. This will result in an AgentAction + being returned. + + ``` + Thoughts go here... + Follow up: what is the temperature in SF? + ``` + + If the output signals that a final answer should be given, + should be in the below format. This will result in an AgentFinish + being returned. + + ``` + Thoughts go here... 
+ So the final answer is: The temperature is 100 degrees + ``` + + """ + + followups: Sequence[str] = ("Follow up:", "Followup:") + finish_string: str = "So the final answer is: " + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + last_line = text.split("\n")[-1] + if not any([follow in last_line for follow in self.followups]): + if self.finish_string not in last_line: + raise OutputParserException(f"Could not parse output: {text}") + return AgentFinish({"output": last_line[len(self.finish_string) :]}, text) + + after_colon = text.split(":")[-1].strip() + return AgentAction("Intermediate Answer", after_colon, text) + + @property + def _type(self) -> str: + return "self_ask" diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/tools.py b/venv/Lib/site-packages/langchain/agents/output_parsers/tools.py new file mode 100644 index 00000000..b7d28e69 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/output_parsers/tools.py @@ -0,0 +1,102 @@ +import json +from json import JSONDecodeError +from typing import Union + +from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish +from langchain_core.exceptions import OutputParserException +from langchain_core.messages import ( + AIMessage, + BaseMessage, + ToolCall, +) +from langchain_core.outputs import ChatGeneration, Generation + +from langchain.agents.agent import MultiActionAgentOutputParser + + +class ToolAgentAction(AgentActionMessageLog): + tool_call_id: str + """Tool call that this message is responding to.""" + + +def parse_ai_message_to_tool_action( + message: BaseMessage, +) -> Union[list[AgentAction], AgentFinish]: + """Parse an AI message potentially containing tool_calls.""" + if not isinstance(message, AIMessage): + raise TypeError(f"Expected an AI message got {type(message)}") + + actions: list = [] + if message.tool_calls: + tool_calls = message.tool_calls + else: + if not message.additional_kwargs.get("tool_calls"): + return AgentFinish( + return_values={"output": message.content}, log=str(message.content) + ) + # Best-effort parsing + tool_calls = [] + for tool_call in message.additional_kwargs["tool_calls"]: + function = tool_call["function"] + function_name = function["name"] + try: + args = json.loads(function["arguments"] or "{}") + tool_calls.append( + ToolCall(name=function_name, args=args, id=tool_call["id"]) + ) + except JSONDecodeError: + raise OutputParserException( + f"Could not parse tool input: {function} because " + f"the `arguments` is not valid JSON." + ) + for tool_call in tool_calls: + # HACK HACK HACK: + # The code that encodes tool input into Open AI uses a special variable + # name called `__arg1` to handle old style tools that do not expose a + # schema and expect a single string argument as an input. + # We unpack the argument here if it exists. + # Open AI does not support passing in a JSON array as an argument. + function_name = tool_call["name"] + _tool_input = tool_call["args"] + if "__arg1" in _tool_input: + tool_input = _tool_input["__arg1"] + else: + tool_input = _tool_input + + content_msg = f"responded: {message.content}\n" if message.content else "\n" + log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n" + actions.append( + ToolAgentAction( + tool=function_name, + tool_input=tool_input, + log=log, + message_log=[message], + tool_call_id=tool_call["id"], + ) + ) + return actions + + +class ToolsAgentOutputParser(MultiActionAgentOutputParser): + """Parses a message into agent actions/finish. 
diff --git a/venv/Lib/site-packages/langchain/agents/output_parsers/xml.py b/venv/Lib/site-packages/langchain/agents/output_parsers/xml.py
new file mode 100644
index 00000000..730d069a
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/agents/output_parsers/xml.py
@@ -0,0 +1,52 @@
+from typing import Union
+
+from langchain_core.agents import AgentAction, AgentFinish
+
+from langchain.agents import AgentOutputParser
+
+
+class XMLAgentOutputParser(AgentOutputParser):
+    """Parses tool invocations and final answers in XML format.
+
+    Expects output to be in one of two formats.
+
+    If the output signals that an action should be taken,
+    should be in the below format. This will result in an AgentAction
+    being returned.
+
+    ```
+    <tool>search</tool>
+    <tool_input>what is 2 + 2</tool_input>
+    ```
+
+    If the output signals that a final answer should be given,
+    should be in the below format. This will result in an AgentFinish
+    being returned.
+
+    ```
+    <final_answer>Foo</final_answer>
+    ```
+    """
+
+    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
+        if "</tool>" in text:
+            tool, tool_input = text.split("</tool>")
+            _tool = tool.split("<tool>")[1]
+            _tool_input = tool_input.split("<tool_input>")[1]
+            if "</tool_input>" in _tool_input:
+                _tool_input = _tool_input.split("</tool_input>")[0]
+            return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
+        elif "<final_answer>" in text:
+            _, answer = text.split("<final_answer>")
+            if "</final_answer>" in answer:
+                answer = answer.split("</final_answer>")[0]
+            return AgentFinish(return_values={"output": answer}, log=text)
+        else:
+            raise ValueError
+
+    def get_format_instructions(self) -> str:
+        raise NotImplementedError
+
+    @property
+    def _type(self) -> str:
+        return "xml-agent"
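+
+# A minimal usage sketch (illustrative): a well-formed tool invocation parses
+# into an AgentAction, a final-answer block into an AgentFinish.
+#
+#   parser = XMLAgentOutputParser()
+#   step = parser.parse("<tool>search</tool><tool_input>what is 2 + 2</tool_input>")
+#   step.tool, step.tool_input          # -> ("search", "what is 2 + 2")
+#   done = parser.parse("<final_answer>4</final_answer>")
+#   done.return_values                  # -> {"output": "4"}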
diff --git a/venv/Lib/site-packages/langchain/agents/react/__init__.py b/venv/Lib/site-packages/langchain/agents/react/__init__.py
new file mode 100644
index 00000000..34518432
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/agents/react/__init__.py
@@ -0,0 +1 @@
+"""Implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
diff --git a/venv/Lib/site-packages/langchain/agents/react/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/react/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..24b4b144
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/react/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/react/__pycache__/agent.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/react/__pycache__/agent.cpython-312.pyc
new file mode 100644
index 00000000..604adc39
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/react/__pycache__/agent.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/react/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/react/__pycache__/base.cpython-312.pyc
new file mode 100644
index 00000000..87168af6
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/react/__pycache__/base.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/react/__pycache__/output_parser.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/react/__pycache__/output_parser.cpython-312.pyc
new file mode 100644
index 00000000..6cbd5be4
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/react/__pycache__/output_parser.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/react/__pycache__/textworld_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/react/__pycache__/textworld_prompt.cpython-312.pyc
new file mode 100644
index 00000000..57614196
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/react/__pycache__/textworld_prompt.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/react/__pycache__/wiki_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/react/__pycache__/wiki_prompt.cpython-312.pyc
new file mode 100644
index 00000000..c5c16c68
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/react/__pycache__/wiki_prompt.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/react/agent.py b/venv/Lib/site-packages/langchain/agents/react/agent.py
new file mode 100644
index 00000000..4c4e7759
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/agents/react/agent.py
@@ -0,0 +1,144 @@
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import Optional, Union
+
+from langchain_core.language_models import BaseLanguageModel
+from langchain_core.prompts import BasePromptTemplate
+from langchain_core.runnables import Runnable, RunnablePassthrough
+from langchain_core.tools import BaseTool
+from langchain_core.tools.render import ToolsRenderer, render_text_description
+
+from langchain.agents import AgentOutputParser
+from langchain.agents.format_scratchpad import format_log_to_str
+from langchain.agents.output_parsers import ReActSingleInputOutputParser
+
+
+def create_react_agent(
+    llm: BaseLanguageModel,
+    tools: Sequence[BaseTool],
+    prompt: BasePromptTemplate,
+    output_parser: Optional[AgentOutputParser] = None,
+    tools_renderer: ToolsRenderer = render_text_description,
+    *,
+    stop_sequence: Union[bool, list[str]] = True,
+) -> Runnable:
+    """Create an agent that uses ReAct prompting.
+
+    Based on paper "ReAct: Synergizing Reasoning and Acting in Language Models"
+    (https://arxiv.org/abs/2210.03629)
+
+    .. warning::
+       This implementation is based on the foundational ReAct paper but is older and not well-suited for production applications.
+       For a more robust and feature-rich implementation, we recommend using the `create_react_agent` function from the LangGraph library.
+       See the [reference doc](https://langchain-ai.github.io/langgraph/reference/prebuilt/#langgraph.prebuilt.chat_agent_executor.create_react_agent)
+       for more information.
+
+    Args:
+        llm: LLM to use as the agent.
+        tools: Tools this agent has access to.
+        prompt: The prompt to use. See Prompt section below for more.
+        output_parser: AgentOutputParser to parse the LLM output.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description`.
+        stop_sequence: bool or list of str.
+            If True, adds a stop token of "Observation:" to avoid hallucinations.
+            If False, does not add a stop token.
+            If a list of str, uses the provided list as the stop tokens.
+
+            Default is True. You may want to set this to False if the LLM you are using
+            does not support stop sequences.
+
+    Returns:
+        A Runnable sequence representing an agent. It takes as input all the same input
+        variables as the prompt passed in does. It returns as output either an
+        AgentAction or AgentFinish.
+
+    Examples:
+
+        .. code-block:: python
+
+            from langchain import hub
+            from langchain_community.llms import OpenAI
+            from langchain.agents import AgentExecutor, create_react_agent
+
+            prompt = hub.pull("hwchase17/react")
+            model = OpenAI()
+            tools = ...
+
+            agent = create_react_agent(model, tools, prompt)
+            agent_executor = AgentExecutor(agent=agent, tools=tools)
+
+            agent_executor.invoke({"input": "hi"})
+
+            # Use with chat history
+            from langchain_core.messages import AIMessage, HumanMessage
+            agent_executor.invoke(
+                {
+                    "input": "what's my name?",
+                    # Notice that chat_history is a string
+                    # since this prompt is aimed at LLMs, not chat models
+                    "chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
+                }
+            )
+
+    Prompt:
+
+        The prompt must have input keys:
+            * `tools`: contains descriptions and arguments for each tool.
+            * `tool_names`: contains all tool names.
+            * `agent_scratchpad`: contains previous agent actions and tool outputs as a string.
+
+        Here's an example:
+
+        .. code-block:: python
+
+            from langchain_core.prompts import PromptTemplate
+
+            template = '''Answer the following questions as best you can. You have access to the following tools:
+
+            {tools}
+
+            Use the following format:
+
+            Question: the input question you must answer
+            Thought: you should always think about what to do
+            Action: the action to take, should be one of [{tool_names}]
+            Action Input: the input to the action
+            Observation: the result of the action
+            ... (this Thought/Action/Action Input/Observation can repeat N times)
+            Thought: I now know the final answer
+            Final Answer: the final answer to the original input question
+
+            Begin!
+ + Question: {input} + Thought:{agent_scratchpad}''' + + prompt = PromptTemplate.from_template(template) + """ # noqa: E501 + missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference( + prompt.input_variables + list(prompt.partial_variables) + ) + if missing_vars: + raise ValueError(f"Prompt missing required variables: {missing_vars}") + + prompt = prompt.partial( + tools=tools_renderer(list(tools)), + tool_names=", ".join([t.name for t in tools]), + ) + if stop_sequence: + stop = ["\nObservation"] if stop_sequence is True else stop_sequence + llm_with_stop = llm.bind(stop=stop) + else: + llm_with_stop = llm + output_parser = output_parser or ReActSingleInputOutputParser() + agent = ( + RunnablePassthrough.assign( + agent_scratchpad=lambda x: format_log_to_str(x["intermediate_steps"]), + ) + | prompt + | llm_with_stop + | output_parser + ) + return agent diff --git a/venv/Lib/site-packages/langchain/agents/react/base.py b/venv/Lib/site-packages/langchain/agents/react/base.py new file mode 100644 index 00000000..8b0d191d --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/react/base.py @@ -0,0 +1,180 @@ +"""Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf.""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any, Optional + +from langchain_core._api import deprecated +from langchain_core.documents import Document +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate +from langchain_core.tools import BaseTool, Tool +from pydantic import Field + +from langchain._api.deprecation import AGENT_DEPRECATION_WARNING +from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser +from langchain.agents.agent_types import AgentType +from langchain.agents.react.output_parser import ReActOutputParser +from langchain.agents.react.textworld_prompt import TEXTWORLD_PROMPT +from langchain.agents.react.wiki_prompt import WIKI_PROMPT +from langchain.agents.utils import validate_tools_single_input + +if TYPE_CHECKING: + from langchain_community.docstore.base import Docstore + + +@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +class ReActDocstoreAgent(Agent): + """Agent for the ReAct chain.""" + + output_parser: AgentOutputParser = Field(default_factory=ReActOutputParser) + + @classmethod + def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: + return ReActOutputParser() + + @property + def _agent_type(self) -> str: + """Return Identifier of an agent type.""" + return AgentType.REACT_DOCSTORE + + @classmethod + def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: + """Return default prompt.""" + return WIKI_PROMPT + + @classmethod + def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: + validate_tools_single_input(cls.__name__, tools) + super()._validate_tools(tools) + if len(tools) != 2: + raise ValueError(f"Exactly two tools must be specified, but got {tools}") + tool_names = {tool.name for tool in tools} + if tool_names != {"Lookup", "Search"}: + raise ValueError( + f"Tool names should be Lookup and Search, got {tool_names}" + ) + + @property + def observation_prefix(self) -> str: + """Prefix to append the observation with.""" + return "Observation: " + + @property + def _stop(self) -> list[str]: + return ["\nObservation:"] + + @property + def llm_prefix(self) -> str: + """Prefix to append the LLM call with.""" + return "Thought:" + + 
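+
+# A minimal usage sketch (illustrative): DocstoreExplorer below supplies the
+# exact "Search"/"Lookup" tool pair that _validate_tools above requires, e.g.
+# with the Wikipedia docstore from langchain_community:
+#
+#   from langchain_community.docstore.wikipedia import Wikipedia
+#   explorer = DocstoreExplorer(Wikipedia())
+#   explorer.search("Colorado orogeny")   # caches the page, returns its summary
+#   explorer.lookup("eastern sector")     # paragraph lookup within the cached page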
+@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +class DocstoreExplorer: + """Class to assist with exploration of a document store.""" + + def __init__(self, docstore: Docstore): + """Initialize with a docstore, and set initial document to None.""" + self.docstore = docstore + self.document: Optional[Document] = None + self.lookup_str = "" + self.lookup_index = 0 + + def search(self, term: str) -> str: + """Search for a term in the docstore, and if found save.""" + result = self.docstore.search(term) + if isinstance(result, Document): + self.document = result + return self._summary + else: + self.document = None + return result + + def lookup(self, term: str) -> str: + """Lookup a term in document (if saved).""" + if self.document is None: + raise ValueError("Cannot lookup without a successful search first") + if term.lower() != self.lookup_str: + self.lookup_str = term.lower() + self.lookup_index = 0 + else: + self.lookup_index += 1 + lookups = [p for p in self._paragraphs if self.lookup_str in p.lower()] + if len(lookups) == 0: + return "No Results" + elif self.lookup_index >= len(lookups): + return "No More Results" + else: + result_prefix = f"(Result {self.lookup_index + 1}/{len(lookups)})" + return f"{result_prefix} {lookups[self.lookup_index]}" + + @property + def _summary(self) -> str: + return self._paragraphs[0] + + @property + def _paragraphs(self) -> list[str]: + if self.document is None: + raise ValueError("Cannot get paragraphs without a document") + return self.document.page_content.split("\n\n") + + +@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +class ReActTextWorldAgent(ReActDocstoreAgent): + """Agent for the ReAct TextWorld chain.""" + + @classmethod + def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: + """Return default prompt.""" + return TEXTWORLD_PROMPT + + @classmethod + def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: + validate_tools_single_input(cls.__name__, tools) + super()._validate_tools(tools) + if len(tools) != 1: + raise ValueError(f"Exactly one tool must be specified, but got {tools}") + tool_names = {tool.name for tool in tools} + if tool_names != {"Play"}: + raise ValueError(f"Tool name should be Play, got {tool_names}") + + +@deprecated( + "0.1.0", + message=AGENT_DEPRECATION_WARNING, + removal="1.0", +) +class ReActChain(AgentExecutor): + """[Deprecated] Chain that implements the ReAct paper.""" + + def __init__(self, llm: BaseLanguageModel, docstore: Docstore, **kwargs: Any): + """Initialize with the LLM and a docstore.""" + docstore_explorer = DocstoreExplorer(docstore) + tools = [ + Tool( + name="Search", + func=docstore_explorer.search, + description="Search for a term in the docstore.", + ), + Tool( + name="Lookup", + func=docstore_explorer.lookup, + description="Lookup a term in the docstore.", + ), + ] + agent = ReActDocstoreAgent.from_llm_and_tools(llm, tools) + super().__init__(agent=agent, tools=tools, **kwargs) diff --git a/venv/Lib/site-packages/langchain/agents/react/output_parser.py b/venv/Lib/site-packages/langchain/agents/react/output_parser.py new file mode 100644 index 00000000..ce3161d6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/react/output_parser.py @@ -0,0 +1,34 @@ +import re +from typing import Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException + +from langchain.agents.agent import AgentOutputParser + + +class 
ReActOutputParser(AgentOutputParser): + """Output parser for the ReAct agent.""" + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + action_prefix = "Action: " + if not text.strip().split("\n")[-1].startswith(action_prefix): + raise OutputParserException(f"Could not parse LLM Output: {text}") + action_block = text.strip().split("\n")[-1] + + action_str = action_block[len(action_prefix) :] + # Parse out the action and the directive. + re_matches = re.search(r"(.*?)\[(.*?)\]", action_str) + if re_matches is None: + raise OutputParserException( + f"Could not parse action directive: {action_str}" + ) + action, action_input = re_matches.group(1), re_matches.group(2) + if action == "Finish": + return AgentFinish({"output": action_input}, text) + else: + return AgentAction(action, action_input, text) + + @property + def _type(self) -> str: + return "react" diff --git a/venv/Lib/site-packages/langchain/agents/react/textworld_prompt.py b/venv/Lib/site-packages/langchain/agents/react/textworld_prompt.py new file mode 100644 index 00000000..26cfd49a --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/react/textworld_prompt.py @@ -0,0 +1,52 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +EXAMPLES = [ + """Setup: You are now playing a fast paced round of TextWorld! Here is your task for +today. First of all, you could, like, try to travel east. After that, take the +binder from the locker. With the binder, place the binder on the mantelpiece. +Alright, thanks! + +-= Vault =- +You've just walked into a vault. You begin to take stock of what's here. + +An open safe is here. What a letdown! The safe is empty! You make out a shelf. +But the thing hasn't got anything on it. What, you think everything in TextWorld +should have stuff on it? + +You don't like doors? Why not try going east, that entranceway is unguarded. + +Thought: I need to travel east +Action: Play[go east] +Observation: -= Office =- +You arrive in an office. An ordinary one. + +You can make out a locker. The locker contains a binder. You see a case. The +case is empty, what a horrible day! You lean against the wall, inadvertently +pressing a secret button. The wall opens up to reveal a mantelpiece. You wonder +idly who left that here. The mantelpiece is standard. The mantelpiece appears to +be empty. If you haven't noticed it already, there seems to be something there +by the wall, it's a table. Unfortunately, there isn't a thing on it. Hm. Oh well +There is an exit to the west. Don't worry, it is unguarded. + +Thought: I need to take the binder from the locker +Action: Play[take binder] +Observation: You take the binder from the locker. + +Thought: I need to place the binder on the mantelpiece +Action: Play[put binder on mantelpiece] + +Observation: You put the binder on the mantelpiece. +Your score has just gone up by one point. 
+*** The End *** +Thought: The End has occurred +Action: Finish[yes] + +""" +] +SUFFIX = """\n\nSetup: {input} +{agent_scratchpad}""" + +TEXTWORLD_PROMPT = PromptTemplate.from_examples( + EXAMPLES, SUFFIX, ["input", "agent_scratchpad"] +) diff --git a/venv/Lib/site-packages/langchain/agents/react/wiki_prompt.py b/venv/Lib/site-packages/langchain/agents/react/wiki_prompt.py new file mode 100644 index 00000000..9db6cf92 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/react/wiki_prompt.py @@ -0,0 +1,71 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +EXAMPLES = [ + """Question: What is the elevation range for the area that the eastern sector of the Colorado orogeny extends into? +Thought: I need to search Colorado orogeny, find the area that the eastern sector of the Colorado orogeny extends into, then find the elevation range of the area. +Action: Search[Colorado orogeny] +Observation: The Colorado orogeny was an episode of mountain building (an orogeny) in Colorado and surrounding areas. +Thought: It does not mention the eastern sector. So I need to look up eastern sector. +Action: Lookup[eastern sector] +Observation: (Result 1 / 1) The eastern sector extends into the High Plains and is called the Central Plains orogeny. +Thought: The eastern sector of Colorado orogeny extends into the High Plains. So I need to search High Plains and find its elevation range. +Action: Search[High Plains] +Observation: High Plains refers to one of two distinct land regions +Thought: I need to instead search High Plains (United States). +Action: Search[High Plains (United States)] +Observation: The High Plains are a subregion of the Great Plains. From east to west, the High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130 m).[3] +Thought: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer is 1,800 to 7,000 ft. +Action: Finish[1,800 to 7,000 ft]""", + """Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons" character Milhouse, who Matt Groening named after who? +Thought: The question simplifies to "The Simpsons" character Milhouse is named after who. I only need to search Milhouse and find who it is named after. +Action: Search[Milhouse] +Observation: Milhouse Mussolini Van Houten is a recurring character in the Fox animated television series The Simpsons voiced by Pamela Hayden and created by Matt Groening. +Thought: The paragraph does not tell who Milhouse is named after, maybe I can look up "named after". +Action: Lookup[named after] +Observation: (Result 1 / 1) Milhouse was named after U.S. president Richard Nixon, whose middle name was Milhous. +Thought: Milhouse was named after U.S. president Richard Nixon, so the answer is Richard Nixon. +Action: Finish[Richard Nixon]""", + """Question: Which documentary is about Finnish rock groups, Adam Clayton Powell or The Saimaa Gesture? +Thought: I need to search Adam Clayton Powell and The Saimaa Gesture, and find which documentary is about Finnish rock groups. +Action: Search[Adam Clayton Powell] +Observation: Could not find [Adam Clayton Powell]. Similar: [’Adam Clayton Powell III’, ’Seventh Avenue (Manhattan)’, ’Adam Clayton Powell Jr. State Office Building’, ’Isabel Washington Powell’, ’Adam Powell’, ’Adam Clayton Powell (film)’, ’Giancarlo Esposito’]. +Thought: To find the documentary, I can search Adam Clayton Powell (film). 
+Action: Search[Adam Clayton Powell (film)] +Observation: Adam Clayton Powell is a 1989 American documentary film directed by Richard Kilberg. The film is about the rise and fall of influential African-American politician Adam Clayton Powell Jr.[3][4] It was later aired as part of the PBS series The American Experience. +Thought: Adam Clayton Powell (film) is a documentary about an African-American politician, not Finnish rock groups. So the documentary about Finnish rock groups must instead be The Saimaa Gesture. +Action: Finish[The Saimaa Gesture]""", + """Question: What profession does Nicholas Ray and Elia Kazan have in common? +Thought: I need to search Nicholas Ray and Elia Kazan, find their professions, then find the profession they have in common. +Action: Search[Nicholas Ray] +Observation: Nicholas Ray (born Raymond Nicholas Kienzle Jr., August 7, 1911 - June 16, 1979) was an American film director, screenwriter, and actor best known for the 1955 film Rebel Without a Cause. +Thought: Professions of Nicholas Ray are director, screenwriter, and actor. I need to search Elia Kazan next and find his professions. +Action: Search[Elia Kazan] +Observation: Elia Kazan was an American film and theatre director, producer, screenwriter and actor. +Thought: Professions of Elia Kazan are director, producer, screenwriter, and actor. So profession Nicholas Ray and Elia Kazan have in common is director, screenwriter, and actor. +Action: Finish[director, screenwriter, actor]""", + """Question: Which magazine was started first Arthur’s Magazine or First for Women? +Thought: I need to search Arthur’s Magazine and First for Women, and find which was started first. +Action: Search[Arthur’s Magazine] +Observation: Arthur’s Magazine (1844-1846) was an American literary periodical published in Philadelphia in the 19th century. +Thought: Arthur’s Magazine was started in 1844. I need to search First for Women next. +Action: Search[First for Women] +Observation: First for Women is a woman’s magazine published by Bauer Media Group in the USA.[1] The magazine was started in 1989. +Thought: First for Women was started in 1989. 1844 (Arthur’s Magazine) < 1989 (First for Women), so Arthur’s Magazine was started first. +Action: Finish[Arthur’s Magazine]""", + """Question: Were Pavel Urysohn and Leonid Levin known for the same type of work? +Thought: I need to search Pavel Urysohn and Leonid Levin, find their types of work, then find if they are the same. +Action: Search[Pavel Urysohn] +Observation: Pavel Samuilovich Urysohn (February 3, 1898 - August 17, 1924) was a Soviet mathematician who is best known for his contributions in dimension theory. +Thought: Pavel Urysohn is a mathematician. I need to search Leonid Levin next and find its type of work. +Action: Search[Leonid Levin] +Observation: Leonid Anatolievich Levin is a Soviet-American mathematician and computer scientist. +Thought: Leonid Levin is a mathematician and computer scientist. So Pavel Urysohn and Leonid Levin have the same type of work. 
+Action: Finish[yes]""", +] +SUFFIX = """\nQuestion: {input} +{agent_scratchpad}""" + +WIKI_PROMPT = PromptTemplate.from_examples( + EXAMPLES, SUFFIX, ["input", "agent_scratchpad"] +) diff --git a/venv/Lib/site-packages/langchain/agents/schema.py b/venv/Lib/site-packages/langchain/agents/schema.py new file mode 100644 index 00000000..664ec9ec --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/schema.py @@ -0,0 +1,34 @@ +from typing import Any + +from langchain_core.agents import AgentAction +from langchain_core.prompts.chat import ChatPromptTemplate + + +class AgentScratchPadChatPromptTemplate(ChatPromptTemplate): + """Chat prompt template for the agent scratchpad.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + def _construct_agent_scratchpad( + self, intermediate_steps: list[tuple[AgentAction, str]] + ) -> str: + if len(intermediate_steps) == 0: + return "" + thoughts = "" + for action, observation in intermediate_steps: + thoughts += action.log + thoughts += f"\nObservation: {observation}\nThought: " + return ( + f"This was your previous work " + f"(but I haven't seen any of it! I only see what " + f"you return as final answer):\n{thoughts}" + ) + + def _merge_partial_and_user_variables(self, **kwargs: Any) -> dict[str, Any]: + intermediate_steps = kwargs.pop("intermediate_steps") + kwargs["agent_scratchpad"] = self._construct_agent_scratchpad( + intermediate_steps + ) + return kwargs diff --git a/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__init__.py b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__init__.py new file mode 100644 index 00000000..70a450ac --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__init__.py @@ -0,0 +1,4 @@ +"""Chain that does self ask with search. 
+ +Heavily borrowed from https://github.com/ofirpress/self-ask +""" diff --git a/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..485956e8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..c6e76796 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/output_parser.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/output_parser.cpython-312.pyc new file mode 100644 index 00000000..323706e3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/output_parser.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..d39005e7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/self_ask_with_search/base.py b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/base.py new file mode 100644 index 00000000..36d859f3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/base.py @@ -0,0 +1,211 @@ +"""Chain that does self-ask with search.""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any, Union + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate +from langchain_core.runnables import Runnable, RunnablePassthrough +from langchain_core.tools import BaseTool, Tool +from pydantic import Field + +from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser +from langchain.agents.agent_types import AgentType +from langchain.agents.format_scratchpad import format_log_to_str +from langchain.agents.self_ask_with_search.output_parser import SelfAskOutputParser +from langchain.agents.self_ask_with_search.prompt import PROMPT +from langchain.agents.utils import validate_tools_single_input + +if TYPE_CHECKING: + from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper + from langchain_community.utilities.searchapi import SearchApiAPIWrapper + from langchain_community.utilities.serpapi import SerpAPIWrapper + + +@deprecated("0.1.0", alternative="create_self_ask_with_search", removal="1.0") +class SelfAskWithSearchAgent(Agent): + """Agent for the self-ask-with-search paper.""" + + output_parser: AgentOutputParser = Field(default_factory=SelfAskOutputParser) + + @classmethod + def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: + return SelfAskOutputParser() + + @property + def _agent_type(self) -> str: + """Return Identifier of an agent type.""" + return 
AgentType.SELF_ASK_WITH_SEARCH + + @classmethod + def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: + """Prompt does not depend on tools.""" + return PROMPT + + @classmethod + def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: + validate_tools_single_input(cls.__name__, tools) + super()._validate_tools(tools) + if len(tools) != 1: + raise ValueError(f"Exactly one tool must be specified, but got {tools}") + tool_names = {tool.name for tool in tools} + if tool_names != {"Intermediate Answer"}: + raise ValueError( + f"Tool name should be Intermediate Answer, got {tool_names}" + ) + + @property + def observation_prefix(self) -> str: + """Prefix to append the observation with.""" + return "Intermediate answer: " + + @property + def llm_prefix(self) -> str: + """Prefix to append the LLM call with.""" + return "" + + +@deprecated("0.1.0", removal="1.0") +class SelfAskWithSearchChain(AgentExecutor): + """[Deprecated] Chain that does self-ask with search.""" + + def __init__( + self, + llm: BaseLanguageModel, + search_chain: Union[ + GoogleSerperAPIWrapper, SearchApiAPIWrapper, SerpAPIWrapper + ], + **kwargs: Any, + ): + """Initialize only with an LLM and a search chain.""" + search_tool = Tool( + name="Intermediate Answer", + func=search_chain.run, + coroutine=search_chain.arun, + description="Search", + ) + agent = SelfAskWithSearchAgent.from_llm_and_tools(llm, [search_tool]) + super().__init__(agent=agent, tools=[search_tool], **kwargs) + + +def create_self_ask_with_search_agent( + llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: BasePromptTemplate +) -> Runnable: + """Create an agent that uses self-ask with search prompting. + + Args: + llm: LLM to use as the agent. + tools: List of tools. Should just be of length 1, with that tool having + name `Intermediate Answer` + prompt: The prompt to use, must have input key `agent_scratchpad` which will + contain agent actions and tool outputs. + + Returns: + A Runnable sequence representing an agent. It takes as input all the same input + variables as the prompt passed in does. It returns as output either an + AgentAction or AgentFinish. + + Examples: + + .. code-block:: python + + from langchain import hub + from langchain_community.chat_models import ChatAnthropic + from langchain.agents import ( + AgentExecutor, create_self_ask_with_search_agent + ) + + prompt = hub.pull("hwchase17/self-ask-with-search") + model = ChatAnthropic(model="claude-3-haiku-20240307") + tools = [...] # Should just be one tool with name `Intermediate Answer` + + agent = create_self_ask_with_search_agent(model, tools, prompt) + agent_executor = AgentExecutor(agent=agent, tools=tools) + + agent_executor.invoke({"input": "hi"}) + + Prompt: + + The prompt must have input key `agent_scratchpad` which will + contain agent actions and tool outputs as a string. + + Here's an example: + + .. code-block:: python + + from langchain_core.prompts import PromptTemplate + + template = '''Question: Who lived longer, Muhammad Ali or Alan Turing? + Are follow up questions needed here: Yes. + Follow up: How old was Muhammad Ali when he died? + Intermediate answer: Muhammad Ali was 74 years old when he died. + Follow up: How old was Alan Turing when he died? + Intermediate answer: Alan Turing was 41 years old when he died. + So the final answer is: Muhammad Ali + + Question: When was the founder of craigslist born? + Are follow up questions needed here: Yes. + Follow up: Who was the founder of craigslist? 
+ Intermediate answer: Craigslist was founded by Craig Newmark. + Follow up: When was Craig Newmark born? + Intermediate answer: Craig Newmark was born on December 6, 1952. + So the final answer is: December 6, 1952 + + Question: Who was the maternal grandfather of George Washington? + Are follow up questions needed here: Yes. + Follow up: Who was the mother of George Washington? + Intermediate answer: The mother of George Washington was Mary Ball Washington. + Follow up: Who was the father of Mary Ball Washington? + Intermediate answer: The father of Mary Ball Washington was Joseph Ball. + So the final answer is: Joseph Ball + + Question: Are both the directors of Jaws and Casino Royale from the same country? + Are follow up questions needed here: Yes. + Follow up: Who is the director of Jaws? + Intermediate answer: The director of Jaws is Steven Spielberg. + Follow up: Where is Steven Spielberg from? + Intermediate answer: The United States. + Follow up: Who is the director of Casino Royale? + Intermediate answer: The director of Casino Royale is Martin Campbell. + Follow up: Where is Martin Campbell from? + Intermediate answer: New Zealand. + So the final answer is: No + + Question: {input} + Are followup questions needed here:{agent_scratchpad}''' + + prompt = PromptTemplate.from_template(template) + """ # noqa: E501 + missing_vars = {"agent_scratchpad"}.difference( + prompt.input_variables + list(prompt.partial_variables) + ) + if missing_vars: + raise ValueError(f"Prompt missing required variables: {missing_vars}") + + if len(tools) != 1: + raise ValueError("This agent expects exactly one tool") + tool = list(tools)[0] + if tool.name != "Intermediate Answer": + raise ValueError( + "This agent expects the tool to be named `Intermediate Answer`" + ) + + llm_with_stop = llm.bind(stop=["\nIntermediate answer:"]) + agent = ( + RunnablePassthrough.assign( + agent_scratchpad=lambda x: format_log_to_str( + x["intermediate_steps"], + observation_prefix="\nIntermediate answer: ", + llm_prefix="", + ), + # Give it a default + chat_history=lambda x: x.get("chat_history", ""), + ) + | prompt + | llm_with_stop + | SelfAskOutputParser() + ) + return agent diff --git a/venv/Lib/site-packages/langchain/agents/self_ask_with_search/output_parser.py b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/output_parser.py new file mode 100644 index 00000000..ac35693a --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/output_parser.py @@ -0,0 +1,4 @@ +from langchain.agents.output_parsers.self_ask import SelfAskOutputParser + +# For backwards compatibility +__all__ = ["SelfAskOutputParser"] diff --git a/venv/Lib/site-packages/langchain/agents/self_ask_with_search/prompt.py b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/prompt.py new file mode 100644 index 00000000..c9154785 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/self_ask_with_search/prompt.py @@ -0,0 +1,44 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +_DEFAULT_TEMPLATE = """Question: Who lived longer, Muhammad Ali or Alan Turing? +Are follow up questions needed here: Yes. +Follow up: How old was Muhammad Ali when he died? +Intermediate answer: Muhammad Ali was 74 years old when he died. +Follow up: How old was Alan Turing when he died? +Intermediate answer: Alan Turing was 41 years old when he died. +So the final answer is: Muhammad Ali + +Question: When was the founder of craigslist born? +Are follow up questions needed here: Yes. 
+Follow up: Who was the founder of craigslist? +Intermediate answer: Craigslist was founded by Craig Newmark. +Follow up: When was Craig Newmark born? +Intermediate answer: Craig Newmark was born on December 6, 1952. +So the final answer is: December 6, 1952 + +Question: Who was the maternal grandfather of George Washington? +Are follow up questions needed here: Yes. +Follow up: Who was the mother of George Washington? +Intermediate answer: The mother of George Washington was Mary Ball Washington. +Follow up: Who was the father of Mary Ball Washington? +Intermediate answer: The father of Mary Ball Washington was Joseph Ball. +So the final answer is: Joseph Ball + +Question: Are both the directors of Jaws and Casino Royale from the same country? +Are follow up questions needed here: Yes. +Follow up: Who is the director of Jaws? +Intermediate answer: The director of Jaws is Steven Spielberg. +Follow up: Where is Steven Spielberg from? +Intermediate answer: The United States. +Follow up: Who is the director of Casino Royale? +Intermediate answer: The director of Casino Royale is Martin Campbell. +Follow up: Where is Martin Campbell from? +Intermediate answer: New Zealand. +So the final answer is: No + +Question: {input} +Are followup questions needed here:{agent_scratchpad}""" +PROMPT = PromptTemplate( + input_variables=["input", "agent_scratchpad"], template=_DEFAULT_TEMPLATE +) diff --git a/venv/Lib/site-packages/langchain/agents/structured_chat/__init__.py b/venv/Lib/site-packages/langchain/agents/structured_chat/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..30278d82 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..14e730e1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/output_parser.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/output_parser.cpython-312.pyc new file mode 100644 index 00000000..f99dfcf8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/output_parser.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..dc0978d1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/structured_chat/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/structured_chat/base.py b/venv/Lib/site-packages/langchain/agents/structured_chat/base.py new file mode 100644 index 00000000..051cd6e0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/structured_chat/base.py @@ -0,0 +1,301 @@ +import re +from collections.abc import Sequence +from typing import Any, Optional, Union + +from langchain_core._api import deprecated +from langchain_core.agents 
import AgentAction +from langchain_core.callbacks import BaseCallbackManager +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate +from langchain_core.prompts.chat import ( + ChatPromptTemplate, + HumanMessagePromptTemplate, + SystemMessagePromptTemplate, +) +from langchain_core.runnables import Runnable, RunnablePassthrough +from langchain_core.tools import BaseTool +from langchain_core.tools.render import ToolsRenderer +from pydantic import Field + +from langchain.agents.agent import Agent, AgentOutputParser +from langchain.agents.format_scratchpad import format_log_to_str +from langchain.agents.output_parsers import JSONAgentOutputParser +from langchain.agents.structured_chat.output_parser import ( + StructuredChatOutputParserWithRetries, +) +from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX +from langchain.chains.llm import LLMChain +from langchain.tools.render import render_text_description_and_args + +HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}" + + +@deprecated("0.1.0", alternative="create_structured_chat_agent", removal="1.0") +class StructuredChatAgent(Agent): + """Structured Chat Agent.""" + + output_parser: AgentOutputParser = Field( + default_factory=StructuredChatOutputParserWithRetries + ) + """Output parser for the agent.""" + + @property + def observation_prefix(self) -> str: + """Prefix to append the observation with.""" + return "Observation: " + + @property + def llm_prefix(self) -> str: + """Prefix to append the llm call with.""" + return "Thought:" + + def _construct_scratchpad( + self, intermediate_steps: list[tuple[AgentAction, str]] + ) -> str: + agent_scratchpad = super()._construct_scratchpad(intermediate_steps) + if not isinstance(agent_scratchpad, str): + raise ValueError("agent_scratchpad should be of type string.") + if agent_scratchpad: + return ( + f"This was your previous work " + f"(but I haven't seen any of it! 
I only see what "
+                f"you return as final answer):\n{agent_scratchpad}"
+            )
+        else:
+            return agent_scratchpad
+
+    @classmethod
+    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
+        pass
+
+    @classmethod
+    def _get_default_output_parser(
+        cls, llm: Optional[BaseLanguageModel] = None, **kwargs: Any
+    ) -> AgentOutputParser:
+        return StructuredChatOutputParserWithRetries.from_llm(llm=llm)
+
+    @property
+    def _stop(self) -> list[str]:
+        return ["Observation:"]
+
+    @classmethod
+    def create_prompt(
+        cls,
+        tools: Sequence[BaseTool],
+        prefix: str = PREFIX,
+        suffix: str = SUFFIX,
+        human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
+        format_instructions: str = FORMAT_INSTRUCTIONS,
+        input_variables: Optional[list[str]] = None,
+        memory_prompts: Optional[list[BasePromptTemplate]] = None,
+    ) -> BasePromptTemplate:
+        tool_strings = []
+        for tool in tools:
+            args_schema = re.sub("}", "}}", re.sub("{", "{{", str(tool.args)))
+            tool_strings.append(f"{tool.name}: {tool.description}, args: {args_schema}")
+        formatted_tools = "\n".join(tool_strings)
+        tool_names = ", ".join([tool.name for tool in tools])
+        format_instructions = format_instructions.format(tool_names=tool_names)
+        template = "\n\n".join([prefix, formatted_tools, format_instructions, suffix])
+        if input_variables is None:
+            input_variables = ["input", "agent_scratchpad"]
+        _memory_prompts = memory_prompts or []
+        messages = [
+            SystemMessagePromptTemplate.from_template(template),
+            *_memory_prompts,
+            HumanMessagePromptTemplate.from_template(human_message_template),
+        ]
+        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]
+
+    @classmethod
+    def from_llm_and_tools(
+        cls,
+        llm: BaseLanguageModel,
+        tools: Sequence[BaseTool],
+        callback_manager: Optional[BaseCallbackManager] = None,
+        output_parser: Optional[AgentOutputParser] = None,
+        prefix: str = PREFIX,
+        suffix: str = SUFFIX,
+        human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
+        format_instructions: str = FORMAT_INSTRUCTIONS,
+        input_variables: Optional[list[str]] = None,
+        memory_prompts: Optional[list[BasePromptTemplate]] = None,
+        **kwargs: Any,
+    ) -> Agent:
+        """Construct an agent from an LLM and tools."""
+        cls._validate_tools(tools)
+        prompt = cls.create_prompt(
+            tools,
+            prefix=prefix,
+            suffix=suffix,
+            human_message_template=human_message_template,
+            format_instructions=format_instructions,
+            input_variables=input_variables,
+            memory_prompts=memory_prompts,
+        )
+        llm_chain = LLMChain(
+            llm=llm,
+            prompt=prompt,
+            callback_manager=callback_manager,
+        )
+        tool_names = [tool.name for tool in tools]
+        _output_parser = output_parser or cls._get_default_output_parser(llm=llm)
+        return cls(
+            llm_chain=llm_chain,
+            allowed_tools=tool_names,
+            output_parser=_output_parser,
+            **kwargs,
+        )
+
+    @property
+    def _agent_type(self) -> str:
+        raise ValueError
+
+
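+# A format sketch (illustrative): the agent constructed below expects the model
+# to emit one fenced JSON blob per turn, which JSONAgentOutputParser turns into
+# an AgentAction, e.g.
+#
+#   Action:
+#   ```
+#   {"action": "calculator", "action_input": {"expression": "2 + 2"}}
+#   ```
+#
+# parses to AgentAction("calculator", {"expression": "2 + 2"}, text); an
+# "action" of "Final Answer" parses to an AgentFinish instead.
+
+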
+def create_structured_chat_agent(
+    llm: BaseLanguageModel,
+    tools: Sequence[BaseTool],
+    prompt: ChatPromptTemplate,
+    tools_renderer: ToolsRenderer = render_text_description_and_args,
+    *,
+    stop_sequence: Union[bool, list[str]] = True,
+) -> Runnable:
+    """Create an agent aimed at supporting tools with multiple inputs.
+
+    Args:
+        llm: LLM to use as the agent.
+        tools: Tools this agent has access to.
+        prompt: The prompt to use. See Prompt section below for more.
+        stop_sequence: bool or list of str.
+            If True, adds a stop token of "Observation:" to avoid hallucinations.
+            If False, does not add a stop token.
+            If a list of str, uses the provided list as the stop tokens.
+
+            Default is True. You may want to set this to False if the LLM you are using
+            does not support stop sequences.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description_and_args`.
+
+    Returns:
+        A Runnable sequence representing an agent. It takes as input all the same input
+        variables as the prompt passed in does. It returns as output either an
+        AgentAction or AgentFinish.
+
+    Examples:
+
+        .. code-block:: python
+
+            from langchain import hub
+            from langchain_community.chat_models import ChatOpenAI
+            from langchain.agents import AgentExecutor, create_structured_chat_agent
+
+            prompt = hub.pull("hwchase17/structured-chat-agent")
+            model = ChatOpenAI()
+            tools = ...
+
+            agent = create_structured_chat_agent(model, tools, prompt)
+            agent_executor = AgentExecutor(agent=agent, tools=tools)
+
+            agent_executor.invoke({"input": "hi"})
+
+            # Using with chat history
+            from langchain_core.messages import AIMessage, HumanMessage
+            agent_executor.invoke(
+                {
+                    "input": "what's my name?",
+                    "chat_history": [
+                        HumanMessage(content="hi! my name is bob"),
+                        AIMessage(content="Hello Bob! How can I assist you today?"),
+                    ],
+                }
+            )
+
+    Prompt:
+
+        The prompt must have input keys:
+            * `tools`: contains descriptions and arguments for each tool.
+            * `tool_names`: contains all tool names.
+            * `agent_scratchpad`: contains previous agent actions and tool outputs as a string.
+
+        Here's an example:
+
+        .. code-block:: python
+
+            from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+
+            system = '''Respond to the human as helpfully and accurately as possible. You have access to the following tools:
+
+            {tools}
+
+            Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
+
+            Valid "action" values: "Final Answer" or {tool_names}
+
+            Provide only ONE action per $JSON_BLOB, as shown:
+
+            ```
+            {{
+              "action": $TOOL_NAME,
+              "action_input": $INPUT
+            }}
+            ```
+
+            Follow this format:
+
+            Question: input question to answer
+            Thought: consider previous and subsequent steps
+            Action:
+            ```
+            $JSON_BLOB
+            ```
+            Observation: action result
+            ... (repeat Thought/Action/Observation N times)
+            Thought: I know what to respond
+            Action:
+            ```
+            {{
+              "action": "Final Answer",
+              "action_input": "Final response to human"
+            }}
+            ```
+
+            Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate.
Format is Action:```$JSON_BLOB```then Observation''' + + human = '''{input} + + {agent_scratchpad} + + (reminder to respond in a JSON blob no matter what)''' + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", system), + MessagesPlaceholder("chat_history", optional=True), + ("human", human), + ] + ) + """ # noqa: E501 + missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference( + prompt.input_variables + list(prompt.partial_variables) + ) + if missing_vars: + raise ValueError(f"Prompt missing required variables: {missing_vars}") + + prompt = prompt.partial( + tools=tools_renderer(list(tools)), + tool_names=", ".join([t.name for t in tools]), + ) + if stop_sequence: + stop = ["\nObservation"] if stop_sequence is True else stop_sequence + llm_with_stop = llm.bind(stop=stop) + else: + llm_with_stop = llm + + agent = ( + RunnablePassthrough.assign( + agent_scratchpad=lambda x: format_log_to_str(x["intermediate_steps"]), + ) + | prompt + | llm_with_stop + | JSONAgentOutputParser() + ) + return agent diff --git a/venv/Lib/site-packages/langchain/agents/structured_chat/output_parser.py b/venv/Lib/site-packages/langchain/agents/structured_chat/output_parser.py new file mode 100644 index 00000000..9fc85fbc --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/structured_chat/output_parser.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +import json +import logging +import re +from re import Pattern +from typing import Optional, Union + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.exceptions import OutputParserException +from langchain_core.language_models import BaseLanguageModel +from pydantic import Field + +from langchain.agents.agent import AgentOutputParser +from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS +from langchain.output_parsers import OutputFixingParser + +logger = logging.getLogger(__name__) + + +class StructuredChatOutputParser(AgentOutputParser): + """Output parser for the structured chat agent.""" + + format_instructions: str = FORMAT_INSTRUCTIONS + """Default formatting instructions""" + + pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL) + """Regex pattern to parse the output.""" + + def get_format_instructions(self) -> str: + """Returns formatting instructions for the given output parser.""" + return self.format_instructions + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + try: + action_match = self.pattern.search(text) + if action_match is not None: + response = json.loads(action_match.group(1).strip(), strict=False) + if isinstance(response, list): + # gpt turbo frequently ignores the directive to emit a single action + logger.warning("Got multiple action responses: %s", response) + response = response[0] + if response["action"] == "Final Answer": + return AgentFinish({"output": response["action_input"]}, text) + else: + return AgentAction( + response["action"], response.get("action_input", {}), text + ) + else: + return AgentFinish({"output": text}, text) + except Exception as e: + raise OutputParserException(f"Could not parse LLM output: {text}") from e + + @property + def _type(self) -> str: + return "structured_chat" + + +class StructuredChatOutputParserWithRetries(AgentOutputParser): + """Output parser with retries for the structured chat agent.""" + + base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser) + """The base parser to use.""" + output_fixing_parser: Optional[OutputFixingParser] = None + """The 
output fixing parser to use.""" + + def get_format_instructions(self) -> str: + return FORMAT_INSTRUCTIONS + + def parse(self, text: str) -> Union[AgentAction, AgentFinish]: + try: + if self.output_fixing_parser is not None: + parsed_obj: Union[AgentAction, AgentFinish] = ( + self.output_fixing_parser.parse(text) + ) + else: + parsed_obj = self.base_parser.parse(text) + return parsed_obj + except Exception as e: + raise OutputParserException(f"Could not parse LLM output: {text}") from e + + @classmethod + def from_llm( + cls, + llm: Optional[BaseLanguageModel] = None, + base_parser: Optional[StructuredChatOutputParser] = None, + ) -> StructuredChatOutputParserWithRetries: + if llm is not None: + base_parser = base_parser or StructuredChatOutputParser() + output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm( + llm=llm, parser=base_parser + ) + return cls(output_fixing_parser=output_fixing_parser) + elif base_parser is not None: + return cls(base_parser=base_parser) + else: + return cls() + + @property + def _type(self) -> str: + return "structured_chat_with_retries" diff --git a/venv/Lib/site-packages/langchain/agents/structured_chat/prompt.py b/venv/Lib/site-packages/langchain/agents/structured_chat/prompt.py new file mode 100644 index 00000000..98d8bb37 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/structured_chat/prompt.py @@ -0,0 +1,35 @@ +# flake8: noqa +PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:""" +FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). + +Valid "action" values: "Final Answer" or {tool_names} + +Provide only ONE action per $JSON_BLOB, as shown: + +``` +{{{{ + "action": $TOOL_NAME, + "action_input": $INPUT +}}}} +``` + +Follow this format: + +Question: input question to answer +Thought: consider previous and subsequent steps +Action: +``` +$JSON_BLOB +``` +Observation: action result +... (repeat Thought/Action/Observation N times) +Thought: I know what to respond +Action: +``` +{{{{ + "action": "Final Answer", + "action_input": "Final response to human" +}}}} +```""" +SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. 
+Thought:""" diff --git a/venv/Lib/site-packages/langchain/agents/tool_calling_agent/__init__.py b/venv/Lib/site-packages/langchain/agents/tool_calling_agent/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/agents/tool_calling_agent/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/tool_calling_agent/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..236ac5d9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/tool_calling_agent/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/tool_calling_agent/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/tool_calling_agent/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..44a8057b Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/tool_calling_agent/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/agents/tool_calling_agent/base.py b/venv/Lib/site-packages/langchain/agents/tool_calling_agent/base.py new file mode 100644 index 00000000..324ea845 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/tool_calling_agent/base.py @@ -0,0 +1,109 @@ +from collections.abc import Sequence +from typing import Callable + +from langchain_core.agents import AgentAction +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage +from langchain_core.prompts.chat import ChatPromptTemplate +from langchain_core.runnables import Runnable, RunnablePassthrough +from langchain_core.tools import BaseTool + +from langchain.agents.format_scratchpad.tools import ( + format_to_tool_messages, +) +from langchain.agents.output_parsers.tools import ToolsAgentOutputParser + +MessageFormatter = Callable[[Sequence[tuple[AgentAction, str]]], list[BaseMessage]] + + +def create_tool_calling_agent( + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + prompt: ChatPromptTemplate, + *, + message_formatter: MessageFormatter = format_to_tool_messages, +) -> Runnable: + """Create an agent that uses tools. + + Args: + llm: LLM to use as the agent. + tools: Tools this agent has access to. + prompt: The prompt to use. See Prompt section below for more on the expected + input variables. + message_formatter: Formatter function to convert (AgentAction, tool output) + tuples into FunctionMessages. + + Returns: + A Runnable sequence representing an agent. It takes as input all the same input + variables as the prompt passed in does. It returns as output either an + AgentAction or AgentFinish. + + Example: + + .. 
code-block:: python + + from langchain.agents import AgentExecutor, create_tool_calling_agent, tool + from langchain_anthropic import ChatAnthropic + from langchain_core.prompts import ChatPromptTemplate + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant"), + ("placeholder", "{chat_history}"), + ("human", "{input}"), + ("placeholder", "{agent_scratchpad}"), + ] + ) + model = ChatAnthropic(model="claude-3-opus-20240229") + + @tool + def magic_function(input: int) -> int: + \"\"\"Applies a magic function to an input.\"\"\" + return input + 2 + + tools = [magic_function] + + agent = create_tool_calling_agent(model, tools, prompt) + agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) + + agent_executor.invoke({"input": "what is the value of magic_function(3)?"}) + + # Using with chat history + from langchain_core.messages import AIMessage, HumanMessage + agent_executor.invoke( + { + "input": "what's my name?", + "chat_history": [ + HumanMessage(content="hi! my name is bob"), + AIMessage(content="Hello Bob! How can I assist you today?"), + ], + } + ) + + Prompt: + + The agent prompt must have an `agent_scratchpad` key that is a + ``MessagesPlaceholder``. Intermediate agent actions and tool output + messages will be passed in here. + """ + missing_vars = {"agent_scratchpad"}.difference( + prompt.input_variables + list(prompt.partial_variables) + ) + if missing_vars: + raise ValueError(f"Prompt missing required variables: {missing_vars}") + + if not hasattr(llm, "bind_tools"): + raise ValueError( + "This function requires a .bind_tools method be implemented on the LLM.", + ) + llm_with_tools = llm.bind_tools(tools) + + agent = ( + RunnablePassthrough.assign( + agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"]) + ) + | prompt + | llm_with_tools + | ToolsAgentOutputParser() + ) + return agent diff --git a/venv/Lib/site-packages/langchain/agents/tools.py b/venv/Lib/site-packages/langchain/agents/tools.py new file mode 100644 index 00000000..9591fffb --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/tools.py @@ -0,0 +1,47 @@ +"""Interface for tools.""" + +from typing import Optional + +from langchain_core.callbacks import ( + AsyncCallbackManagerForToolRun, + CallbackManagerForToolRun, +) +from langchain_core.tools import BaseTool, tool + + +class InvalidTool(BaseTool): + """Tool that is run when invalid tool name is encountered by agent.""" + + name: str = "invalid_tool" + """Name of the tool.""" + description: str = "Called when tool name is invalid. Suggests valid tool names." + """Description of the tool.""" + + def _run( + self, + requested_tool_name: str, + available_tool_names: list[str], + run_manager: Optional[CallbackManagerForToolRun] = None, + ) -> str: + """Use the tool.""" + available_tool_names_str = ", ".join([tool for tool in available_tool_names]) + return ( + f"{requested_tool_name} is not a valid tool, " + f"try one of [{available_tool_names_str}]." + ) + + async def _arun( + self, + requested_tool_name: str, + available_tool_names: list[str], + run_manager: Optional[AsyncCallbackManagerForToolRun] = None, + ) -> str: + """Use the tool asynchronously.""" + available_tool_names_str = ", ".join([tool for tool in available_tool_names]) + return ( + f"{requested_tool_name} is not a valid tool, " + f"try one of [{available_tool_names_str}]." 
+ ) + + +__all__ = ["InvalidTool", "tool"] diff --git a/venv/Lib/site-packages/langchain/agents/types.py b/venv/Lib/site-packages/langchain/agents/types.py new file mode 100644 index 00000000..49ffe0a4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/types.py @@ -0,0 +1,27 @@ +from typing import Union + +from langchain.agents.agent import BaseSingleActionAgent +from langchain.agents.agent_types import AgentType +from langchain.agents.chat.base import ChatAgent +from langchain.agents.conversational.base import ConversationalAgent +from langchain.agents.conversational_chat.base import ConversationalChatAgent +from langchain.agents.mrkl.base import ZeroShotAgent +from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent +from langchain.agents.openai_functions_multi_agent.base import OpenAIMultiFunctionsAgent +from langchain.agents.react.base import ReActDocstoreAgent +from langchain.agents.self_ask_with_search.base import SelfAskWithSearchAgent +from langchain.agents.structured_chat.base import StructuredChatAgent + +AGENT_TYPE = Union[type[BaseSingleActionAgent], type[OpenAIMultiFunctionsAgent]] + +AGENT_TO_CLASS: dict[AgentType, AGENT_TYPE] = { + AgentType.ZERO_SHOT_REACT_DESCRIPTION: ZeroShotAgent, + AgentType.REACT_DOCSTORE: ReActDocstoreAgent, + AgentType.SELF_ASK_WITH_SEARCH: SelfAskWithSearchAgent, + AgentType.CONVERSATIONAL_REACT_DESCRIPTION: ConversationalAgent, + AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION: ChatAgent, + AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION: ConversationalChatAgent, + AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION: StructuredChatAgent, + AgentType.OPENAI_FUNCTIONS: OpenAIFunctionsAgent, + AgentType.OPENAI_MULTI_FUNCTIONS: OpenAIMultiFunctionsAgent, +} diff --git a/venv/Lib/site-packages/langchain/agents/utils.py b/venv/Lib/site-packages/langchain/agents/utils.py new file mode 100644 index 00000000..f8db41b5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/agents/utils.py @@ -0,0 +1,20 @@ +from collections.abc import Sequence + +from langchain_core.tools import BaseTool + + +def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None: + """Validate tools for single input. + + Args: + class_name: Name of the class. + tools: List of tools to validate. + + Raises: + ValueError: If a multi-input tool is found in tools. + """ + for tool in tools: + if not tool.is_single_input: + raise ValueError( + f"{class_name} does not support multi-input tool {tool.name}." 
+        )
diff --git a/venv/Lib/site-packages/langchain/agents/xml/__init__.py b/venv/Lib/site-packages/langchain/agents/xml/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/venv/Lib/site-packages/langchain/agents/xml/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/xml/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..e18b8efc
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/xml/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/xml/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/xml/__pycache__/base.cpython-312.pyc
new file mode 100644
index 00000000..0a0723f2
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/xml/__pycache__/base.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/xml/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/agents/xml/__pycache__/prompt.cpython-312.pyc
new file mode 100644
index 00000000..b5f6c4d2
Binary files /dev/null and b/venv/Lib/site-packages/langchain/agents/xml/__pycache__/prompt.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/agents/xml/base.py b/venv/Lib/site-packages/langchain/agents/xml/base.py
new file mode 100644
index 00000000..e91e9912
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/agents/xml/base.py
@@ -0,0 +1,231 @@
+from collections.abc import Sequence
+from typing import Any, Union
+
+from langchain_core._api import deprecated
+from langchain_core.agents import AgentAction, AgentFinish
+from langchain_core.callbacks import Callbacks
+from langchain_core.language_models import BaseLanguageModel
+from langchain_core.prompts.base import BasePromptTemplate
+from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate
+from langchain_core.runnables import Runnable, RunnablePassthrough
+from langchain_core.tools import BaseTool
+from langchain_core.tools.render import ToolsRenderer, render_text_description
+
+from langchain.agents.agent import BaseSingleActionAgent
+from langchain.agents.format_scratchpad import format_xml
+from langchain.agents.output_parsers import XMLAgentOutputParser
+from langchain.agents.xml.prompt import agent_instructions
+from langchain.chains.llm import LLMChain
+
+
+@deprecated("0.1.0", alternative="create_xml_agent", removal="1.0")
+class XMLAgent(BaseSingleActionAgent):
+    """Agent that uses XML tags.
+
+    Args:
+        tools: list of tools the agent can choose from
+        llm_chain: The LLMChain to call to predict the next action
+
+    Examples:
+
+        .. code-block:: python
+
+            from langchain.agents import XMLAgent
+            from langchain.chains.llm import LLMChain
+
+            tools = ...
+            model = XMLAgent(
+                tools=tools,
+                llm_chain=LLMChain(
+                    llm=...,
+                    prompt=XMLAgent.get_default_prompt(),
+                ),
+            )
+
+    """
+
+    tools: list[BaseTool]
+    """List of tools this agent has access to."""
+    llm_chain: LLMChain
+    """Chain to use to predict action."""
+
+    @property
+    def input_keys(self) -> list[str]:
+        return ["input"]
+
+    @staticmethod
+    def get_default_prompt() -> ChatPromptTemplate:
+        base_prompt = ChatPromptTemplate.from_template(agent_instructions)
+        return base_prompt + AIMessagePromptTemplate.from_template(
+            "{intermediate_steps}"
+        )
+
+    @staticmethod
+    def get_default_output_parser() -> XMLAgentOutputParser:
+        return XMLAgentOutputParser()
+
+    def plan(
+        self,
+        intermediate_steps: list[tuple[AgentAction, str]],
+        callbacks: Callbacks = None,
+        **kwargs: Any,
+    ) -> Union[AgentAction, AgentFinish]:
+        log = ""
+        for action, observation in intermediate_steps:
+            log += (
+                f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
+                f"</tool_input><observation>{observation}</observation>"
+            )
+        tools = ""
+        for tool in self.tools:
+            tools += f"{tool.name}: {tool.description}\n"
+        inputs = {
+            "intermediate_steps": log,
+            "tools": tools,
+            "question": kwargs["input"],
+            "stop": ["</tool_input>", "</final_answer>"],
+        }
+        response = self.llm_chain(inputs, callbacks=callbacks)
+        return response[self.llm_chain.output_key]
+
+    async def aplan(
+        self,
+        intermediate_steps: list[tuple[AgentAction, str]],
+        callbacks: Callbacks = None,
+        **kwargs: Any,
+    ) -> Union[AgentAction, AgentFinish]:
+        log = ""
+        for action, observation in intermediate_steps:
+            log += (
+                f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
+                f"</tool_input><observation>{observation}</observation>"
+            )
+        tools = ""
+        for tool in self.tools:
+            tools += f"{tool.name}: {tool.description}\n"
+        inputs = {
+            "intermediate_steps": log,
+            "tools": tools,
+            "question": kwargs["input"],
+            "stop": ["</tool_input>", "</final_answer>"],
+        }
+        response = await self.llm_chain.acall(inputs, callbacks=callbacks)
+        return response[self.llm_chain.output_key]
+
+
+def create_xml_agent(
+    llm: BaseLanguageModel,
+    tools: Sequence[BaseTool],
+    prompt: BasePromptTemplate,
+    tools_renderer: ToolsRenderer = render_text_description,
+    *,
+    stop_sequence: Union[bool, list[str]] = True,
+) -> Runnable:
+    """Create an agent that uses XML to format its logic.
+
+    Args:
+        llm: LLM to use as the agent.
+        tools: Tools this agent has access to.
+        prompt: The prompt to use, must have input keys
+            `tools`: contains descriptions for each tool.
+            `agent_scratchpad`: contains previous agent actions and tool outputs.
+        tools_renderer: This controls how the tools are converted into a string and
+            then passed into the LLM. Default is `render_text_description`.
+        stop_sequence: bool or list of str.
+            If True, adds a stop token of "</tool_input>" to avoid hallucinated
+            observations.
+            If False, does not add a stop token.
+            If a list of str, uses the provided list as the stop tokens.
+
+            Default is True. You may want to set this to False if the LLM you are
+            using does not support stop sequences.
+
+    Returns:
+        A Runnable sequence representing an agent. It takes as input all the same input
+        variables as the prompt passed in does. It returns as output either an
+        AgentAction or AgentFinish.
+
+    Example:
+
+        .. code-block:: python
+
+            from langchain import hub
+            from langchain_community.chat_models import ChatAnthropic
+            from langchain.agents import AgentExecutor, create_xml_agent
+
+            prompt = hub.pull("hwchase17/xml-agent-convo")
+            model = ChatAnthropic(model="claude-3-haiku-20240307")
+            tools = ...
+
+            agent = create_xml_agent(model, tools, prompt)
+            agent_executor = AgentExecutor(agent=agent, tools=tools)
+
+            agent_executor.invoke({"input": "hi"})
+
+            # Use with chat history
+            from langchain_core.messages import AIMessage, HumanMessage
+            agent_executor.invoke(
+                {
+                    "input": "what's my name?",
+                    # Notice that chat_history is a string
+                    # since this prompt is aimed at LLMs, not chat models
+                    "chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
+                }
+            )
+
+    Prompt:
+
+        The prompt must have input keys:
+            * `tools`: contains descriptions for each tool.
+            * `agent_scratchpad`: contains previous agent actions and tool outputs as an XML string.
+
+        Here's an example:
+
+        .. code-block:: python
+
+            from langchain_core.prompts import PromptTemplate
+
+            template = '''You are a helpful assistant. Help the user answer any questions.
+
+            You have access to the following tools:
+
+            {tools}
+
+            In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
+            For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
+
+            <tool>search</tool><tool_input>weather in SF</tool_input>
+            <observation>64 degrees</observation>
+
+            When you are done, respond with a final answer between <final_answer></final_answer>. For example:
+
+            <final_answer>The weather in SF is 64 degrees</final_answer>
+
+            Begin!
+
+            Previous Conversation:
+            {chat_history}
+
+            Question: {input}
+            {agent_scratchpad}'''
+            prompt = PromptTemplate.from_template(template)
+    """  # noqa: E501
+    missing_vars = {"tools", "agent_scratchpad"}.difference(
+        prompt.input_variables + list(prompt.partial_variables)
+    )
+    if missing_vars:
+        raise ValueError(f"Prompt missing required variables: {missing_vars}")
+
+    prompt = prompt.partial(
+        tools=tools_renderer(list(tools)),
+    )
+
+    if stop_sequence:
+        stop = ["</tool_input>"] if stop_sequence is True else stop_sequence
+        llm_with_stop = llm.bind(stop=stop)
+    else:
+        llm_with_stop = llm
+
+    agent = (
+        RunnablePassthrough.assign(
+            agent_scratchpad=lambda x: format_xml(x["intermediate_steps"]),
+        )
+        | prompt
+        | llm_with_stop
+        | XMLAgentOutputParser()
+    )
+    return agent
diff --git a/venv/Lib/site-packages/langchain/agents/xml/prompt.py b/venv/Lib/site-packages/langchain/agents/xml/prompt.py
new file mode 100644
index 00000000..3972c6a0
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/agents/xml/prompt.py
@@ -0,0 +1,22 @@
+# flake8: noqa
+# TODO: deprecate
+agent_instructions = """You are a helpful assistant. Help the user answer any questions.
+
+You have access to the following tools:
+
+{tools}
+
+In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. \
+You will then get back a response in the form <observation></observation>
+For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:
+
+<tool>search</tool><tool_input>weather in SF</tool_input>
+<observation>64 degrees</observation>
+
+When you are done, respond with a final answer between <final_answer></final_answer>. For example:
+
+<final_answer>The weather in SF is 64 degrees</final_answer>
+
+Begin!
+ +Question: {question}""" diff --git a/venv/Lib/site-packages/langchain/base_language.py b/venv/Lib/site-packages/langchain/base_language.py new file mode 100644 index 00000000..e52c69fd --- /dev/null +++ b/venv/Lib/site-packages/langchain/base_language.py @@ -0,0 +1,7 @@ +"""Deprecated module for BaseLanguageModel class, kept for backwards compatibility.""" + +from __future__ import annotations + +from langchain_core.language_models import BaseLanguageModel + +__all__ = ["BaseLanguageModel"] diff --git a/venv/Lib/site-packages/langchain/cache.py b/venv/Lib/site-packages/langchain/cache.py new file mode 100644 index 00000000..e8cee3c8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/cache.py @@ -0,0 +1,72 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.cache import ( + AstraDBCache, + AstraDBSemanticCache, + AzureCosmosDBSemanticCache, + CassandraCache, + CassandraSemanticCache, + FullLLMCache, + FullMd5LLMCache, + GPTCache, + InMemoryCache, + MomentoCache, + RedisCache, + RedisSemanticCache, + SQLAlchemyCache, + SQLAlchemyMd5Cache, + SQLiteCache, + UpstashRedisCache, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "FullLLMCache": "langchain_community.cache", + "SQLAlchemyCache": "langchain_community.cache", + "SQLiteCache": "langchain_community.cache", + "UpstashRedisCache": "langchain_community.cache", + "RedisCache": "langchain_community.cache", + "RedisSemanticCache": "langchain_community.cache", + "GPTCache": "langchain_community.cache", + "MomentoCache": "langchain_community.cache", + "InMemoryCache": "langchain_community.cache", + "CassandraCache": "langchain_community.cache", + "CassandraSemanticCache": "langchain_community.cache", + "FullMd5LLMCache": "langchain_community.cache", + "SQLAlchemyMd5Cache": "langchain_community.cache", + "AstraDBCache": "langchain_community.cache", + "AstraDBSemanticCache": "langchain_community.cache", + "AzureCosmosDBSemanticCache": "langchain_community.cache", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FullLLMCache", + "SQLAlchemyCache", + "SQLiteCache", + "UpstashRedisCache", + "RedisCache", + "RedisSemanticCache", + "GPTCache", + "MomentoCache", + "InMemoryCache", + "CassandraCache", + "CassandraSemanticCache", + "FullMd5LLMCache", + "SQLAlchemyMd5Cache", + "AstraDBCache", + "AstraDBSemanticCache", + "AzureCosmosDBSemanticCache", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/__init__.py b/venv/Lib/site-packages/langchain/callbacks/__init__.py new file mode 100644 index 00000000..92690fc9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/__init__.py @@ -0,0 +1,139 @@ +"""**Callback handlers** allow listening to events in LangChain. + +**Class hierarchy:** + +.. 
code-block:: + + BaseCallbackHandler --> CallbackHandler # Example: AimCallbackHandler +""" + +from typing import TYPE_CHECKING, Any + +from langchain_core.callbacks import ( + FileCallbackHandler, + StdOutCallbackHandler, + StreamingStdOutCallbackHandler, +) +from langchain_core.tracers.context import ( + collect_runs, + tracing_enabled, + tracing_v2_enabled, +) +from langchain_core.tracers.langchain import LangChainTracer + +from langchain._api import create_importer +from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler +from langchain.callbacks.streaming_stdout_final_only import ( + FinalStreamingStdOutCallbackHandler, +) + +if TYPE_CHECKING: + from langchain_community.callbacks.aim_callback import AimCallbackHandler + from langchain_community.callbacks.argilla_callback import ArgillaCallbackHandler + from langchain_community.callbacks.arize_callback import ArizeCallbackHandler + from langchain_community.callbacks.arthur_callback import ArthurCallbackHandler + from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler + from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler + from langchain_community.callbacks.context_callback import ContextCallbackHandler + from langchain_community.callbacks.flyte_callback import FlyteCallbackHandler + from langchain_community.callbacks.human import HumanApprovalCallbackHandler + from langchain_community.callbacks.infino_callback import InfinoCallbackHandler + from langchain_community.callbacks.labelstudio_callback import ( + LabelStudioCallbackHandler, + ) + from langchain_community.callbacks.llmonitor_callback import ( + LLMonitorCallbackHandler, + ) + from langchain_community.callbacks.manager import ( + get_openai_callback, + wandb_tracing_enabled, + ) + from langchain_community.callbacks.mlflow_callback import MlflowCallbackHandler + from langchain_community.callbacks.openai_info import OpenAICallbackHandler + from langchain_community.callbacks.promptlayer_callback import ( + PromptLayerCallbackHandler, + ) + from langchain_community.callbacks.sagemaker_callback import ( + SageMakerCallbackHandler, + ) + from langchain_community.callbacks.streamlit import StreamlitCallbackHandler + from langchain_community.callbacks.streamlit.streamlit_callback_handler import ( + LLMThoughtLabeler, + ) + from langchain_community.callbacks.trubrics_callback import TrubricsCallbackHandler + from langchain_community.callbacks.wandb_callback import WandbCallbackHandler + from langchain_community.callbacks.whylabs_callback import WhyLabsCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
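The package docstring above says handlers "listen to events"; concretely, a handler is passed per invocation via the `callbacks` config key and then receives the run's start/end events. A minimal sketch, assuming only `langchain-core` is installed (`RunnableLambda` stands in for any chain or model):

```python
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_core.runnables import RunnableLambda

# The handler receives on_chain_start / on_chain_end events for this one
# invocation and prints them to stdout.
chain = RunnableLambda(lambda text: text.upper())
chain.invoke("hello", config={"callbacks": [StdOutCallbackHandler()]})
```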
+DEPRECATED_LOOKUP = { + "AimCallbackHandler": "langchain_community.callbacks.aim_callback", + "ArgillaCallbackHandler": "langchain_community.callbacks.argilla_callback", + "ArizeCallbackHandler": "langchain_community.callbacks.arize_callback", + "PromptLayerCallbackHandler": "langchain_community.callbacks.promptlayer_callback", + "ArthurCallbackHandler": "langchain_community.callbacks.arthur_callback", + "ClearMLCallbackHandler": "langchain_community.callbacks.clearml_callback", + "CometCallbackHandler": "langchain_community.callbacks.comet_ml_callback", + "ContextCallbackHandler": "langchain_community.callbacks.context_callback", + "HumanApprovalCallbackHandler": "langchain_community.callbacks.human", + "InfinoCallbackHandler": "langchain_community.callbacks.infino_callback", + "MlflowCallbackHandler": "langchain_community.callbacks.mlflow_callback", + "LLMonitorCallbackHandler": "langchain_community.callbacks.llmonitor_callback", + "OpenAICallbackHandler": "langchain_community.callbacks.openai_info", + "LLMThoughtLabeler": ( + "langchain_community.callbacks.streamlit.streamlit_callback_handler" + ), + "StreamlitCallbackHandler": "langchain_community.callbacks.streamlit", + "WandbCallbackHandler": "langchain_community.callbacks.wandb_callback", + "WhyLabsCallbackHandler": "langchain_community.callbacks.whylabs_callback", + "get_openai_callback": "langchain_community.callbacks.manager", + "wandb_tracing_enabled": "langchain_community.callbacks.manager", + "FlyteCallbackHandler": "langchain_community.callbacks.flyte_callback", + "SageMakerCallbackHandler": "langchain_community.callbacks.sagemaker_callback", + "LabelStudioCallbackHandler": "langchain_community.callbacks.labelstudio_callback", + "TrubricsCallbackHandler": "langchain_community.callbacks.trubrics_callback", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AimCallbackHandler", + "ArgillaCallbackHandler", + "ArizeCallbackHandler", + "PromptLayerCallbackHandler", + "ArthurCallbackHandler", + "ClearMLCallbackHandler", + "CometCallbackHandler", + "ContextCallbackHandler", + "FileCallbackHandler", + "HumanApprovalCallbackHandler", + "InfinoCallbackHandler", + "MlflowCallbackHandler", + "LLMonitorCallbackHandler", + "OpenAICallbackHandler", + "StdOutCallbackHandler", + "AsyncIteratorCallbackHandler", + "StreamingStdOutCallbackHandler", + "FinalStreamingStdOutCallbackHandler", + "LLMThoughtLabeler", + "LangChainTracer", + "StreamlitCallbackHandler", + "WandbCallbackHandler", + "WhyLabsCallbackHandler", + "get_openai_callback", + "tracing_enabled", + "tracing_v2_enabled", + "collect_runs", + "wandb_tracing_enabled", + "FlyteCallbackHandler", + "SageMakerCallbackHandler", + "LabelStudioCallbackHandler", + "TrubricsCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..359ba009 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/aim_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/aim_callback.cpython-312.pyc new file mode 100644 index 00000000..1ebb3c2f Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/callbacks/__pycache__/aim_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/argilla_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/argilla_callback.cpython-312.pyc new file mode 100644 index 00000000..7eabb2b5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/argilla_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/arize_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/arize_callback.cpython-312.pyc new file mode 100644 index 00000000..23eb5833 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/arize_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/arthur_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/arthur_callback.cpython-312.pyc new file mode 100644 index 00000000..58e7a610 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/arthur_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..c1f68c9b Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/clearml_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/clearml_callback.cpython-312.pyc new file mode 100644 index 00000000..c75b094d Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/clearml_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/comet_ml_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/comet_ml_callback.cpython-312.pyc new file mode 100644 index 00000000..918bfdc7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/comet_ml_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/confident_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/confident_callback.cpython-312.pyc new file mode 100644 index 00000000..d941ac64 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/confident_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/context_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/context_callback.cpython-312.pyc new file mode 100644 index 00000000..e22af6da Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/context_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/file.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/file.cpython-312.pyc new file mode 100644 index 00000000..339effa5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/file.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/flyte_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/flyte_callback.cpython-312.pyc new file mode 100644 index 00000000..40d4969b Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/callbacks/__pycache__/flyte_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/human.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/human.cpython-312.pyc new file mode 100644 index 00000000..2d35eac5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/human.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/infino_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/infino_callback.cpython-312.pyc new file mode 100644 index 00000000..024c6bf8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/infino_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/labelstudio_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/labelstudio_callback.cpython-312.pyc new file mode 100644 index 00000000..e40a754f Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/labelstudio_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/llmonitor_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/llmonitor_callback.cpython-312.pyc new file mode 100644 index 00000000..21ee17ef Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/llmonitor_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/manager.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/manager.cpython-312.pyc new file mode 100644 index 00000000..33b4d511 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/manager.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/mlflow_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/mlflow_callback.cpython-312.pyc new file mode 100644 index 00000000..aab2d47e Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/mlflow_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/openai_info.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/openai_info.cpython-312.pyc new file mode 100644 index 00000000..ac075bea Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/openai_info.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/promptlayer_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/promptlayer_callback.cpython-312.pyc new file mode 100644 index 00000000..f11460bb Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/promptlayer_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/sagemaker_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/sagemaker_callback.cpython-312.pyc new file mode 100644 index 00000000..19276508 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/sagemaker_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/stdout.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/stdout.cpython-312.pyc new file mode 100644 index 00000000..5545ac59 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/callbacks/__pycache__/stdout.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_aiter.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_aiter.cpython-312.pyc new file mode 100644 index 00000000..b614e4f3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_aiter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_aiter_final_only.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_aiter_final_only.cpython-312.pyc new file mode 100644 index 00000000..a943fbec Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_aiter_final_only.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_stdout.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_stdout.cpython-312.pyc new file mode 100644 index 00000000..70764429 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_stdout.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_stdout_final_only.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_stdout_final_only.cpython-312.pyc new file mode 100644 index 00000000..b8ec9ece Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/streaming_stdout_final_only.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/trubrics_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/trubrics_callback.cpython-312.pyc new file mode 100644 index 00000000..3e71c482 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/trubrics_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..5ee3b1d0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/wandb_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/wandb_callback.cpython-312.pyc new file mode 100644 index 00000000..9adf031d Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/wandb_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/__pycache__/whylabs_callback.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/__pycache__/whylabs_callback.cpython-312.pyc new file mode 100644 index 00000000..d872e4dc Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/__pycache__/whylabs_callback.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/aim_callback.py b/venv/Lib/site-packages/langchain/callbacks/aim_callback.py new file mode 100644 index 00000000..aa57017a --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/aim_callback.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.aim_callback import ( + AimCallbackHandler, + BaseMetadataCallbackHandler, + import_aim, + ) + +# Create a way to dynamically look up 
deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "import_aim": "langchain_community.callbacks.aim_callback", + "BaseMetadataCallbackHandler": "langchain_community.callbacks.aim_callback", + "AimCallbackHandler": "langchain_community.callbacks.aim_callback", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "import_aim", + "BaseMetadataCallbackHandler", + "AimCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/argilla_callback.py b/venv/Lib/site-packages/langchain/callbacks/argilla_callback.py new file mode 100644 index 00000000..7dd44574 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/argilla_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.argilla_callback import ArgillaCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ArgillaCallbackHandler": "langchain_community.callbacks.argilla_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArgillaCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/arize_callback.py b/venv/Lib/site-packages/langchain/callbacks/arize_callback.py new file mode 100644 index 00000000..0f29b498 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/arize_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.arize_callback import ArizeCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ArizeCallbackHandler": "langchain_community.callbacks.arize_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArizeCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/arthur_callback.py b/venv/Lib/site-packages/langchain/callbacks/arthur_callback.py new file mode 100644 index 00000000..c056067d --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/arthur_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.arthur_callback import ArthurCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
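These shim modules all share the same skeleton. The `create_importer` helper itself is not reproduced in this diff, but the mechanism it builds on is PEP 562's module-level `__getattr__`; here is a self-contained sketch of that mechanism (an illustration, not the actual `langchain._api` implementation):

```python
import importlib
import warnings
from typing import Any

_LOOKUP = {"ArthurCallbackHandler": "langchain_community.callbacks.arthur_callback"}


def __getattr__(name: str) -> Any:
    # Invoked only when `name` is not found in the module's own namespace.
    if name in _LOOKUP:
        new_home = _LOOKUP[name]
        warnings.warn(
            f"Importing {name} from here is deprecated; import it from "
            f"{new_home} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(new_home), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```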
+DEPRECATED_LOOKUP = { + "ArthurCallbackHandler": "langchain_community.callbacks.arthur_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArthurCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/base.py b/venv/Lib/site-packages/langchain/callbacks/base.py new file mode 100644 index 00000000..49fab5ee --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/base.py @@ -0,0 +1,29 @@ +"""Base callback handler that can be used to handle callbacks in langchain.""" + +from __future__ import annotations + +from langchain_core.callbacks import ( + AsyncCallbackHandler, + BaseCallbackHandler, + BaseCallbackManager, + CallbackManagerMixin, + Callbacks, + ChainManagerMixin, + LLMManagerMixin, + RetrieverManagerMixin, + RunManagerMixin, + ToolManagerMixin, +) + +__all__ = [ + "RetrieverManagerMixin", + "LLMManagerMixin", + "ChainManagerMixin", + "ToolManagerMixin", + "CallbackManagerMixin", + "RunManagerMixin", + "BaseCallbackHandler", + "AsyncCallbackHandler", + "BaseCallbackManager", + "Callbacks", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/clearml_callback.py b/venv/Lib/site-packages/langchain/callbacks/clearml_callback.py new file mode 100644 index 00000000..38a7d477 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/clearml_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ClearMLCallbackHandler": "langchain_community.callbacks.clearml_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ClearMLCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/comet_ml_callback.py b/venv/Lib/site-packages/langchain/callbacks/comet_ml_callback.py new file mode 100644 index 00000000..f0cb2a08 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/comet_ml_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
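The `if TYPE_CHECKING:` guard repeated in each shim is the other half of the pattern: the optional dependency is imported for static type checkers only, never at module import time. The same idiom in isolation (a hedged sketch; actually constructing the handler still requires the `comet_ml` extra to be installed):

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated by mypy/pyright; skipped entirely at runtime.
    from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler


def make_handler() -> "CometCallbackHandler":
    # Deferred runtime import: fails only if the handler is actually used.
    from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler

    return CometCallbackHandler()
```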
+DEPRECATED_LOOKUP = { + "CometCallbackHandler": "langchain_community.callbacks.comet_ml_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CometCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/confident_callback.py b/venv/Lib/site-packages/langchain/callbacks/confident_callback.py new file mode 100644 index 00000000..90e7d8de --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/confident_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.confident_callback import DeepEvalCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DeepEvalCallbackHandler": "langchain_community.callbacks.confident_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DeepEvalCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/context_callback.py b/venv/Lib/site-packages/langchain/callbacks/context_callback.py new file mode 100644 index 00000000..4dc53081 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/context_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.context_callback import ContextCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ContextCallbackHandler": "langchain_community.callbacks.context_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ContextCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/file.py b/venv/Lib/site-packages/langchain/callbacks/file.py new file mode 100644 index 00000000..15fa4101 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/file.py @@ -0,0 +1,3 @@ +from langchain_core.callbacks.file import FileCallbackHandler + +__all__ = ["FileCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/callbacks/flyte_callback.py b/venv/Lib/site-packages/langchain/callbacks/flyte_callback.py new file mode 100644 index 00000000..55c79231 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/flyte_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.flyte_callback import FlyteCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
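A subtlety shared by all of these files: the names listed in `__all__` are never defined in the module body, so both explicit imports and star-imports resolve through the module-level `__getattr__` hook, one deprecation warning per name. For example (assuming `langchain-community` is installed):

```python
# The name is fetched via the module's __getattr__, which warns and then
# re-exports the class from langchain_community.
from langchain.callbacks.flyte_callback import FlyteCallbackHandler

# Expected to print the langchain_community module path, since that is
# where the class is actually defined.
print(FlyteCallbackHandler.__module__)
```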
+DEPRECATED_LOOKUP = { + "FlyteCallbackHandler": "langchain_community.callbacks.flyte_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FlyteCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/human.py b/venv/Lib/site-packages/langchain/callbacks/human.py new file mode 100644 index 00000000..534dad96 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/human.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.human import ( + AsyncHumanApprovalCallbackHandler, + HumanApprovalCallbackHandler, + HumanRejectedException, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "HumanRejectedException": "langchain_community.callbacks.human", + "HumanApprovalCallbackHandler": "langchain_community.callbacks.human", + "AsyncHumanApprovalCallbackHandler": "langchain_community.callbacks.human", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HumanRejectedException", + "HumanApprovalCallbackHandler", + "AsyncHumanApprovalCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/infino_callback.py b/venv/Lib/site-packages/langchain/callbacks/infino_callback.py new file mode 100644 index 00000000..77942c79 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/infino_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.infino_callback import InfinoCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "InfinoCallbackHandler": "langchain_community.callbacks.infino_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "InfinoCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/labelstudio_callback.py b/venv/Lib/site-packages/langchain/callbacks/labelstudio_callback.py new file mode 100644 index 00000000..e12b49a1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/labelstudio_callback.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.labelstudio_callback import ( + LabelStudioCallbackHandler, + LabelStudioMode, + get_default_label_configs, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "LabelStudioMode": "langchain_community.callbacks.labelstudio_callback", + "get_default_label_configs": "langchain_community.callbacks.labelstudio_callback", + "LabelStudioCallbackHandler": "langchain_community.callbacks.labelstudio_callback", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LabelStudioMode", + "get_default_label_configs", + "LabelStudioCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/llmonitor_callback.py b/venv/Lib/site-packages/langchain/callbacks/llmonitor_callback.py new file mode 100644 index 00000000..590e4249 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/llmonitor_callback.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.llmonitor_callback import ( + LLMonitorCallbackHandler, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "LLMonitorCallbackHandler": "langchain_community.callbacks.llmonitor_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LLMonitorCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/manager.py b/venv/Lib/site-packages/langchain/callbacks/manager.py new file mode 100644 index 00000000..bb8b3598 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/manager.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from langchain_core.callbacks.manager import ( + AsyncCallbackManager, + AsyncCallbackManagerForChainGroup, + AsyncCallbackManagerForChainRun, + AsyncCallbackManagerForLLMRun, + AsyncCallbackManagerForRetrieverRun, + AsyncCallbackManagerForToolRun, + AsyncParentRunManager, + AsyncRunManager, + BaseRunManager, + CallbackManager, + CallbackManagerForChainGroup, + CallbackManagerForChainRun, + CallbackManagerForLLMRun, + CallbackManagerForRetrieverRun, + CallbackManagerForToolRun, + Callbacks, + ParentRunManager, + RunManager, + ahandle_event, + atrace_as_chain_group, + handle_event, + trace_as_chain_group, +) +from langchain_core.tracers.context import ( + collect_runs, + tracing_enabled, + tracing_v2_enabled, +) +from langchain_core.utils.env import env_var_is_set + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.manager import ( + get_openai_callback, + wandb_tracing_enabled, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
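Of the two names routed to `langchain_community` below, `get_openai_callback` is the most widely used: it is a context manager that tallies token usage and cost for OpenAI calls made inside its block. A usage sketch, assuming `langchain-community`, `langchain-openai`, and a configured API key:

```python
from langchain_community.callbacks.manager import get_openai_callback
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4")
with get_openai_callback() as cb:
    llm.invoke("Tell me a joke")
# Counters accumulated across every call made inside the block.
print(cb.prompt_tokens, cb.completion_tokens, cb.total_cost)
```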
+DEPRECATED_LOOKUP = { + "get_openai_callback": "langchain_community.callbacks.manager", + "wandb_tracing_enabled": "langchain_community.callbacks.manager", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ahandle_event", + "AsyncCallbackManagerForChainGroup", + "AsyncCallbackManagerForChainRun", + "AsyncCallbackManagerForLLMRun", + "AsyncCallbackManagerForRetrieverRun", + "AsyncCallbackManagerForToolRun", + "AsyncParentRunManager", + "AsyncRunManager", + "atrace_as_chain_group", + "BaseRunManager", + "CallbackManager", + "CallbackManagerForChainGroup", + "CallbackManagerForChainRun", + "CallbackManagerForLLMRun", + "CallbackManagerForRetrieverRun", + "CallbackManagerForToolRun", + "Callbacks", + "AsyncCallbackManager", + "collect_runs", + "env_var_is_set", + "get_openai_callback", + "handle_event", + "ParentRunManager", + "RunManager", + "trace_as_chain_group", + "tracing_enabled", + "tracing_v2_enabled", + "wandb_tracing_enabled", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/mlflow_callback.py b/venv/Lib/site-packages/langchain/callbacks/mlflow_callback.py new file mode 100644 index 00000000..738c3a52 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/mlflow_callback.py @@ -0,0 +1,38 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.mlflow_callback import ( + MlflowCallbackHandler, + MlflowLogger, + analyze_text, + construct_html_from_prompt_and_generation, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "analyze_text": "langchain_community.callbacks.mlflow_callback", + "construct_html_from_prompt_and_generation": ( + "langchain_community.callbacks.mlflow_callback" + ), + "MlflowLogger": "langchain_community.callbacks.mlflow_callback", + "MlflowCallbackHandler": "langchain_community.callbacks.mlflow_callback", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "analyze_text", + "construct_html_from_prompt_and_generation", + "MlflowLogger", + "MlflowCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/openai_info.py b/venv/Lib/site-packages/langchain/callbacks/openai_info.py new file mode 100644 index 00000000..59ea8ccf --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/openai_info.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.openai_info import OpenAICallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
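From the caller's side, the effect of such a shim is that the pre-split import path keeps working but warns. A quick check (hedged: the concrete warning class is a LangChain-internal subclass of `DeprecationWarning`, and `langchain-community` must be installed for the lookup to succeed):

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Old path; resolved lazily through the shim's __getattr__.
    from langchain.callbacks.openai_info import OpenAICallbackHandler

for w in caught:
    # The message is expected to point at langchain_community.callbacks.openai_info.
    print(w.category.__name__, w.message)
```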
+DEPRECATED_LOOKUP = { + "OpenAICallbackHandler": "langchain_community.callbacks.openai_info" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenAICallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/promptlayer_callback.py b/venv/Lib/site-packages/langchain/callbacks/promptlayer_callback.py new file mode 100644 index 00000000..4970f2c1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/promptlayer_callback.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.promptlayer_callback import ( + PromptLayerCallbackHandler, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "PromptLayerCallbackHandler": "langchain_community.callbacks.promptlayer_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PromptLayerCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/sagemaker_callback.py b/venv/Lib/site-packages/langchain/callbacks/sagemaker_callback.py new file mode 100644 index 00000000..7a0c6b6c --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/sagemaker_callback.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.sagemaker_callback import ( + SageMakerCallbackHandler, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "SageMakerCallbackHandler": "langchain_community.callbacks.sagemaker_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SageMakerCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/stdout.py b/venv/Lib/site-packages/langchain/callbacks/stdout.py new file mode 100644 index 00000000..754e5824 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/stdout.py @@ -0,0 +1,3 @@ +from langchain_core.callbacks.stdout import StdOutCallbackHandler + +__all__ = ["StdOutCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/callbacks/streaming_aiter.py b/venv/Lib/site-packages/langchain/callbacks/streaming_aiter.py new file mode 100644 index 00000000..2eea8b4c --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/streaming_aiter.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +import asyncio +from collections.abc import AsyncIterator +from typing import Any, Literal, Union, cast + +from langchain_core.callbacks import AsyncCallbackHandler +from langchain_core.outputs import LLMResult + +# TODO If used by two LLM runs in parallel this won't work as expected + + +class AsyncIteratorCallbackHandler(AsyncCallbackHandler): + """Callback handler that returns an async iterator.""" + + queue: asyncio.Queue[str] + + done: asyncio.Event + + @property + def always_verbose(self) -> bool: + return True + + def __init__(self) -> None: + self.queue = asyncio.Queue() + self.done = asyncio.Event() + + async def on_llm_start( + self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any + ) -> None: + # If two calls are made in a row, this resets the state + self.done.clear() + + async def on_llm_new_token(self, token: str, **kwargs: Any) -> None: + if token is not None and token != "": + self.queue.put_nowait(token) + + async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + self.done.set() + + async def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: + self.done.set() + + # TODO implement the other methods + + async def aiter(self) -> AsyncIterator[str]: + while not self.queue.empty() or not self.done.is_set(): + # Wait for the next token in the queue, + # but stop waiting if the done event is set + done, other = await asyncio.wait( + [ + # NOTE: If you add other tasks here, update the code below, + # which assumes each set has exactly one task each + asyncio.ensure_future(self.queue.get()), + asyncio.ensure_future(self.done.wait()), + ], + return_when=asyncio.FIRST_COMPLETED, + ) + + # Cancel the other task + if other: + other.pop().cancel() + + # Extract the value of the first completed task + token_or_done = cast(Union[str, Literal[True]], done.pop().result()) + + # If the extracted value is the boolean True, the done event was set + if token_or_done is True: + break + + # Otherwise, the extracted value is a token, which we yield + yield token_or_done diff --git a/venv/Lib/site-packages/langchain/callbacks/streaming_aiter_final_only.py b/venv/Lib/site-packages/langchain/callbacks/streaming_aiter_final_only.py new file mode 100644 index 00000000..fd1be579 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/streaming_aiter_final_only.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from typing import Any, Optional + +from langchain_core.outputs import LLMResult + +from 
langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler + +DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"] + + +class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler): + """Callback handler that returns an async iterator. + Only the final output of the agent will be iterated. + """ + + def append_to_last_tokens(self, token: str) -> None: + self.last_tokens.append(token) + self.last_tokens_stripped.append(token.strip()) + if len(self.last_tokens) > len(self.answer_prefix_tokens): + self.last_tokens.pop(0) + self.last_tokens_stripped.pop(0) + + def check_if_answer_reached(self) -> bool: + if self.strip_tokens: + return self.last_tokens_stripped == self.answer_prefix_tokens_stripped + else: + return self.last_tokens == self.answer_prefix_tokens + + def __init__( + self, + *, + answer_prefix_tokens: Optional[list[str]] = None, + strip_tokens: bool = True, + stream_prefix: bool = False, + ) -> None: + """Instantiate AsyncFinalIteratorCallbackHandler. + + Args: + answer_prefix_tokens: Token sequence that prefixes the answer. + Default is ["Final", "Answer", ":"] + strip_tokens: Ignore white spaces and new lines when comparing + answer_prefix_tokens to last tokens? (to determine if answer has been + reached) + stream_prefix: Should answer prefix itself also be streamed? + """ + super().__init__() + if answer_prefix_tokens is None: + self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS + else: + self.answer_prefix_tokens = answer_prefix_tokens + if strip_tokens: + self.answer_prefix_tokens_stripped = [ + token.strip() for token in self.answer_prefix_tokens + ] + else: + self.answer_prefix_tokens_stripped = self.answer_prefix_tokens + self.last_tokens = [""] * len(self.answer_prefix_tokens) + self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens) + self.strip_tokens = strip_tokens + self.stream_prefix = stream_prefix + self.answer_reached = False + + async def on_llm_start( + self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any + ) -> None: + # If two calls are made in a row, this resets the state + self.done.clear() + self.answer_reached = False + + async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + if self.answer_reached: + self.done.set() + + async def on_llm_new_token(self, token: str, **kwargs: Any) -> None: + # Remember the last n tokens, where n = len(answer_prefix_tokens) + self.append_to_last_tokens(token) + + # Check if the last n tokens match the answer_prefix_tokens list ... 
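+        # (Editorial note) Concretely: with the default prefix
+        # ["Final", "Answer", ":"], the three-token window above must exactly
+        # equal that sequence before anything is enqueued, so intermediate
+        # agent reasoning emitted before "Final Answer:" never reaches aiter().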
+ if self.check_if_answer_reached(): + self.answer_reached = True + if self.stream_prefix: + for t in self.last_tokens: + self.queue.put_nowait(t) + return + + # If yes, then put tokens from now on + if self.answer_reached: + self.queue.put_nowait(token) diff --git a/venv/Lib/site-packages/langchain/callbacks/streaming_stdout.py b/venv/Lib/site-packages/langchain/callbacks/streaming_stdout.py new file mode 100644 index 00000000..8cebc741 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/streaming_stdout.py @@ -0,0 +1,5 @@ +"""Callback Handler streams to stdout on new llm token.""" + +from langchain_core.callbacks import StreamingStdOutCallbackHandler + +__all__ = ["StreamingStdOutCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/callbacks/streaming_stdout_final_only.py b/venv/Lib/site-packages/langchain/callbacks/streaming_stdout_final_only.py new file mode 100644 index 00000000..5a963abf --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/streaming_stdout_final_only.py @@ -0,0 +1,89 @@ +"""Callback Handler streams to stdout on new llm token.""" + +import sys +from typing import Any, Optional + +from langchain_core.callbacks import StreamingStdOutCallbackHandler + +DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"] + + +class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler): + """Callback handler for streaming in agents. + Only works with agents using LLMs that support streaming. + + Only the final output of the agent will be streamed. + """ + + def append_to_last_tokens(self, token: str) -> None: + self.last_tokens.append(token) + self.last_tokens_stripped.append(token.strip()) + if len(self.last_tokens) > len(self.answer_prefix_tokens): + self.last_tokens.pop(0) + self.last_tokens_stripped.pop(0) + + def check_if_answer_reached(self) -> bool: + if self.strip_tokens: + return self.last_tokens_stripped == self.answer_prefix_tokens_stripped + else: + return self.last_tokens == self.answer_prefix_tokens + + def __init__( + self, + *, + answer_prefix_tokens: Optional[list[str]] = None, + strip_tokens: bool = True, + stream_prefix: bool = False, + ) -> None: + """Instantiate FinalStreamingStdOutCallbackHandler. + + Args: + answer_prefix_tokens: Token sequence that prefixes the answer. + Default is ["Final", "Answer", ":"] + strip_tokens: Ignore white spaces and new lines when comparing + answer_prefix_tokens to last tokens? (to determine if answer has been + reached) + stream_prefix: Should answer prefix itself also be streamed? + """ + super().__init__() + if answer_prefix_tokens is None: + self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS + else: + self.answer_prefix_tokens = answer_prefix_tokens + if strip_tokens: + self.answer_prefix_tokens_stripped = [ + token.strip() for token in self.answer_prefix_tokens + ] + else: + self.answer_prefix_tokens_stripped = self.answer_prefix_tokens + self.last_tokens = [""] * len(self.answer_prefix_tokens) + self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens) + self.strip_tokens = strip_tokens + self.stream_prefix = stream_prefix + self.answer_reached = False + + def on_llm_start( + self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any + ) -> None: + """Run when LLM starts running.""" + self.answer_reached = False + + def on_llm_new_token(self, token: str, **kwargs: Any) -> None: + """Run on new LLM token. 
Only available when streaming is enabled.""" + + # Remember the last n tokens, where n = len(answer_prefix_tokens) + self.append_to_last_tokens(token) + + # Check if the last n tokens match the answer_prefix_tokens list ... + if self.check_if_answer_reached(): + self.answer_reached = True + if self.stream_prefix: + for t in self.last_tokens: + sys.stdout.write(t) + sys.stdout.flush() + return + + # ... if yes, then print tokens from now on + if self.answer_reached: + sys.stdout.write(token) + sys.stdout.flush() diff --git a/venv/Lib/site-packages/langchain/callbacks/streamlit/__init__.py b/venv/Lib/site-packages/langchain/callbacks/streamlit/__init__.py new file mode 100644 index 00000000..446ab9a2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/streamlit/__init__.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Optional + +from langchain_core.callbacks.base import BaseCallbackHandler + +if TYPE_CHECKING: + from langchain_community.callbacks import LLMThoughtLabeler + from streamlit.delta_generator import DeltaGenerator + + +def StreamlitCallbackHandler( + parent_container: DeltaGenerator, + *, + max_thought_containers: int = 4, + expand_new_thoughts: bool = True, + collapse_completed_thoughts: bool = True, + thought_labeler: Optional[LLMThoughtLabeler] = None, +) -> BaseCallbackHandler: + """Callback Handler that writes to a Streamlit app. + + This CallbackHandler is geared towards + use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts" + inside a series of Streamlit expanders. + + Parameters + ---------- + parent_container + The `st.container` that will contain all the Streamlit elements that the + Handler creates. + max_thought_containers + The max number of completed LLM thought containers to show at once. When this + threshold is reached, a new thought will cause the oldest thoughts to be + collapsed into a "History" expander. Defaults to 4. + expand_new_thoughts + Each LLM "thought" gets its own `st.expander`. This param controls whether that + expander is expanded by default. Defaults to True. + collapse_completed_thoughts + If True, LLM thought expanders will be collapsed when completed. + Defaults to True. + thought_labeler + An optional custom LLMThoughtLabeler instance. If unspecified, the handler + will use the default thought labeling logic. Defaults to None. + + Returns + ------- + A new StreamlitCallbackHandler instance. + + Note that this is an "auto-updating" API: if the installed version of Streamlit + has a more recent StreamlitCallbackHandler implementation, an instance of that class + will be used. + + """ + # If we're using a version of Streamlit that implements StreamlitCallbackHandler, + # delegate to it instead of using our built-in handler. The official handler is + # guaranteed to support the same set of kwargs. + try: + from streamlit.external.langchain import StreamlitCallbackHandler + + # This is the official handler, so we can just return it. 
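+        # Usage sketch (editorial; assumes a Streamlit app with an agent
+        # executor named `agent_executor`):
+        #
+        #     import streamlit as st
+        #     handler = StreamlitCallbackHandler(st.container())
+        #     agent_executor.invoke({"input": prompt}, {"callbacks": [handler]})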
+ return StreamlitCallbackHandler( + parent_container, + max_thought_containers=max_thought_containers, + expand_new_thoughts=expand_new_thoughts, + collapse_completed_thoughts=collapse_completed_thoughts, + thought_labeler=thought_labeler, + ) + except ImportError: + try: + from langchain_community.callbacks.streamlit.streamlit_callback_handler import ( # noqa: E501 + StreamlitCallbackHandler as _InternalStreamlitCallbackHandler, + ) + except ImportError: + raise ImportError( + "To use the StreamlitCallbackHandler, please install " + "langchain-community with `pip install langchain-community`." + ) + + return _InternalStreamlitCallbackHandler( + parent_container, + max_thought_containers=max_thought_containers, + expand_new_thoughts=expand_new_thoughts, + collapse_completed_thoughts=collapse_completed_thoughts, + thought_labeler=thought_labeler, + ) diff --git a/venv/Lib/site-packages/langchain/callbacks/streamlit/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/streamlit/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..49166af4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/streamlit/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/streamlit/__pycache__/mutable_expander.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/streamlit/__pycache__/mutable_expander.cpython-312.pyc new file mode 100644 index 00000000..9c97b8d9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/streamlit/__pycache__/mutable_expander.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/streamlit/__pycache__/streamlit_callback_handler.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/streamlit/__pycache__/streamlit_callback_handler.cpython-312.pyc new file mode 100644 index 00000000..cb39357a Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/streamlit/__pycache__/streamlit_callback_handler.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/streamlit/mutable_expander.py b/venv/Lib/site-packages/langchain/callbacks/streamlit/mutable_expander.py new file mode 100644 index 00000000..756593ba --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/streamlit/mutable_expander.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.streamlit.mutable_expander import ( + ChildRecord, + ChildType, + MutableExpander, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
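+# (Editorial note) This shim relies on PEP 562: when normal attribute lookup on
+# the module fails, Python calls the module-level __getattr__ defined below,
+# which routes the name through DEPRECATED_LOOKUP and the lazy importer.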
+DEPRECATED_LOOKUP = { + "ChildType": "langchain_community.callbacks.streamlit.mutable_expander", + "ChildRecord": "langchain_community.callbacks.streamlit.mutable_expander", + "MutableExpander": "langchain_community.callbacks.streamlit.mutable_expander", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChildType", + "ChildRecord", + "MutableExpander", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/streamlit/streamlit_callback_handler.py b/venv/Lib/site-packages/langchain/callbacks/streamlit/streamlit_callback_handler.py new file mode 100644 index 00000000..91156185 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/streamlit/streamlit_callback_handler.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.streamlit.streamlit_callback_handler import ( + LLMThought, + LLMThoughtLabeler, + LLMThoughtState, + StreamlitCallbackHandler, + ToolRecord, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "LLMThoughtState": ( + "langchain_community.callbacks.streamlit.streamlit_callback_handler" + ), + "ToolRecord": ( + "langchain_community.callbacks.streamlit.streamlit_callback_handler" + ), + "LLMThoughtLabeler": ( + "langchain_community.callbacks.streamlit.streamlit_callback_handler" + ), + "LLMThought": ( + "langchain_community.callbacks.streamlit.streamlit_callback_handler" + ), + "StreamlitCallbackHandler": ( + "langchain_community.callbacks.streamlit.streamlit_callback_handler" + ), +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LLMThoughtState", + "ToolRecord", + "LLMThoughtLabeler", + "LLMThought", + "StreamlitCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__init__.py b/venv/Lib/site-packages/langchain/callbacks/tracers/__init__.py new file mode 100644 index 00000000..744c6b86 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/__init__.py @@ -0,0 +1,38 @@ +"""Tracers that record execution of LangChain runs.""" + +from typing import TYPE_CHECKING, Any + +from langchain_core.tracers.langchain import LangChainTracer +from langchain_core.tracers.langchain_v1 import LangChainTracerV1 +from langchain_core.tracers.stdout import ( + ConsoleCallbackHandler, + FunctionCallbackHandler, +) + +from langchain._api import create_importer +from langchain.callbacks.tracers.logging import LoggingCallbackHandler + +if TYPE_CHECKING: + from langchain_community.callbacks.tracers.wandb import WandbTracer + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
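+# Usage sketch (editorial; `chain` stands for any Runnable and is an
+# assumption, not something defined here):
+#
+#     from langchain_core.tracers.stdout import ConsoleCallbackHandler
+#     chain.invoke({"question": "..."}, {"callbacks": [ConsoleCallbackHandler()]})
+#
+# ConsoleCallbackHandler prints a trace of each run to stdout as it executes.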
+DEPRECATED_LOOKUP = {"WandbTracer": "langchain_community.callbacks.tracers.wandb"} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ConsoleCallbackHandler", + "FunctionCallbackHandler", + "LoggingCallbackHandler", + "LangChainTracer", + "LangChainTracerV1", + "WandbTracer", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..8bf33935 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..a4a84b12 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/comet.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/comet.cpython-312.pyc new file mode 100644 index 00000000..318793c6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/comet.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/evaluation.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/evaluation.cpython-312.pyc new file mode 100644 index 00000000..1fd101a9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/evaluation.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/langchain.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/langchain.cpython-312.pyc new file mode 100644 index 00000000..78960f6a Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/langchain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/langchain_v1.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/langchain_v1.cpython-312.pyc new file mode 100644 index 00000000..ee870a40 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/langchain_v1.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/log_stream.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/log_stream.cpython-312.pyc new file mode 100644 index 00000000..a2a766ea Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/log_stream.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/logging.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/logging.cpython-312.pyc new file mode 100644 index 00000000..84b92658 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/logging.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/root_listeners.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/root_listeners.cpython-312.pyc new file 
mode 100644 index 00000000..294873e7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/root_listeners.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/run_collector.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/run_collector.cpython-312.pyc new file mode 100644 index 00000000..dabc42b5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/run_collector.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/schemas.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/schemas.cpython-312.pyc new file mode 100644 index 00000000..9c7c48d4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/schemas.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/stdout.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/stdout.cpython-312.pyc new file mode 100644 index 00000000..9f6d7412 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/stdout.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/wandb.cpython-312.pyc b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/wandb.cpython-312.pyc new file mode 100644 index 00000000..94ef8dd6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/callbacks/tracers/__pycache__/wandb.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/base.py b/venv/Lib/site-packages/langchain/callbacks/tracers/base.py new file mode 100644 index 00000000..1a56fa66 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/base.py @@ -0,0 +1,5 @@ +"""Base interfaces for tracing runs.""" + +from langchain_core.tracers.base import BaseTracer, TracerException + +__all__ = ["BaseTracer", "TracerException"] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/comet.py b/venv/Lib/site-packages/langchain/callbacks/tracers/comet.py new file mode 100644 index 00000000..e5433d95 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/comet.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.tracers.comet import ( + CometTracer, + import_comet_llm_api, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
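+# (Editorial note) The TYPE_CHECKING import above exists only for static type
+# checkers; langchain_community remains an optional runtime dependency, and the
+# importer created below resolves these names lazily on first access.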
+DEPRECATED_LOOKUP = { + "import_comet_llm_api": "langchain_community.callbacks.tracers.comet", + "CometTracer": "langchain_community.callbacks.tracers.comet", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "import_comet_llm_api", + "CometTracer", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/evaluation.py b/venv/Lib/site-packages/langchain/callbacks/tracers/evaluation.py new file mode 100644 index 00000000..7283a540 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/evaluation.py @@ -0,0 +1,8 @@ +"""A tracer that runs evaluators over completed runs.""" + +from langchain_core.tracers.evaluation import ( + EvaluatorCallbackHandler, + wait_for_all_evaluators, +) + +__all__ = ["wait_for_all_evaluators", "EvaluatorCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/langchain.py b/venv/Lib/site-packages/langchain/callbacks/tracers/langchain.py new file mode 100644 index 00000000..54ae9dc6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/langchain.py @@ -0,0 +1,8 @@ +"""A Tracer implementation that records to LangChain endpoint.""" + +from langchain_core.tracers.langchain import ( + LangChainTracer, + wait_for_all_tracers, +) + +__all__ = ["LangChainTracer", "wait_for_all_tracers"] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/langchain_v1.py b/venv/Lib/site-packages/langchain/callbacks/tracers/langchain_v1.py new file mode 100644 index 00000000..a12b4740 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/langchain_v1.py @@ -0,0 +1,3 @@ +from langchain_core.tracers.langchain_v1 import LangChainTracerV1 + +__all__ = ["LangChainTracerV1"] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/log_stream.py b/venv/Lib/site-packages/langchain/callbacks/tracers/log_stream.py new file mode 100644 index 00000000..22b33c37 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/log_stream.py @@ -0,0 +1,9 @@ +from langchain_core.tracers.log_stream import ( + LogEntry, + LogStreamCallbackHandler, + RunLog, + RunLogPatch, + RunState, +) + +__all__ = ["LogEntry", "RunState", "RunLog", "RunLogPatch", "LogStreamCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/logging.py b/venv/Lib/site-packages/langchain/callbacks/tracers/logging.py new file mode 100644 index 00000000..a6bad951 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/logging.py @@ -0,0 +1,46 @@ +__all__ = ["LoggingCallbackHandler"] + +import logging +from typing import Any, Optional +from uuid import UUID + +from langchain_core.exceptions import TracerException +from langchain_core.tracers.stdout import FunctionCallbackHandler +from langchain_core.utils.input import get_bolded_text, get_colored_text + + +class LoggingCallbackHandler(FunctionCallbackHandler): + """Tracer that logs via the input Logger.""" + + name: str = "logging_callback_handler" + + def __init__( + self, + logger: logging.Logger, + log_level: int = logging.INFO, + extra: Optional[dict] = None, + **kwargs: Any, + ) -> None: + log_method = getattr(logger, logging.getLevelName(level=log_level).lower()) + + def callback(text: str) -> None: + log_method(text, extra=extra) + + super().__init__(function=callback, **kwargs) + + def on_text( + self, + text: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] 
= None, + **kwargs: Any, + ) -> None: + try: + crumbs_str = f"[{self.get_breadcrumbs(run=self._get_run(run_id=run_id))}] " + except TracerException: + crumbs_str = "" + self.function_callback( + f"{get_colored_text('[text]', color='blue')}" + f" {get_bolded_text(f'{crumbs_str}New text:')}\n{text}" + ) diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/root_listeners.py b/venv/Lib/site-packages/langchain/callbacks/tracers/root_listeners.py new file mode 100644 index 00000000..0dee9bce --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/root_listeners.py @@ -0,0 +1,3 @@ +from langchain_core.tracers.root_listeners import RootListenersTracer + +__all__ = ["RootListenersTracer"] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/run_collector.py b/venv/Lib/site-packages/langchain/callbacks/tracers/run_collector.py new file mode 100644 index 00000000..1240026b --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/run_collector.py @@ -0,0 +1,3 @@ +from langchain_core.tracers.run_collector import RunCollectorCallbackHandler + +__all__ = ["RunCollectorCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/schemas.py b/venv/Lib/site-packages/langchain/callbacks/tracers/schemas.py new file mode 100644 index 00000000..e8f34027 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/schemas.py @@ -0,0 +1,27 @@ +from langchain_core.tracers.schemas import ( + BaseRun, + ChainRun, + LLMRun, + Run, + RunTypeEnum, + ToolRun, + TracerSession, + TracerSessionBase, + TracerSessionV1, + TracerSessionV1Base, + TracerSessionV1Create, +) + +__all__ = [ + "BaseRun", + "ChainRun", + "LLMRun", + "Run", + "RunTypeEnum", + "ToolRun", + "TracerSession", + "TracerSessionBase", + "TracerSessionV1", + "TracerSessionV1Base", + "TracerSessionV1Create", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/stdout.py b/venv/Lib/site-packages/langchain/callbacks/tracers/stdout.py new file mode 100644 index 00000000..716e2c30 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/stdout.py @@ -0,0 +1,6 @@ +from langchain_core.tracers.stdout import ( + ConsoleCallbackHandler, + FunctionCallbackHandler, +) + +__all__ = ["FunctionCallbackHandler", "ConsoleCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/callbacks/tracers/wandb.py b/venv/Lib/site-packages/langchain/callbacks/tracers/wandb.py new file mode 100644 index 00000000..51d52599 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/tracers/wandb.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.tracers.wandb import WandbRunArgs, WandbTracer + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
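+# (Editorial sketch) The supported import path going forward is the community
+# package, e.g.:
+#
+#     from langchain_community.callbacks.tracers.wandb import WandbTracer
+#
+# Importing WandbTracer from this module still works but emits a
+# deprecation warning.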
+DEPRECATED_LOOKUP = { + "WandbRunArgs": "langchain_community.callbacks.tracers.wandb", + "WandbTracer": "langchain_community.callbacks.tracers.wandb", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WandbRunArgs", + "WandbTracer", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/trubrics_callback.py b/venv/Lib/site-packages/langchain/callbacks/trubrics_callback.py new file mode 100644 index 00000000..783bb896 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/trubrics_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.trubrics_callback import TrubricsCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "TrubricsCallbackHandler": "langchain_community.callbacks.trubrics_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TrubricsCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/utils.py b/venv/Lib/site-packages/langchain/callbacks/utils.py new file mode 100644 index 00000000..b20381a8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/utils.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.utils import ( + BaseMetadataCallbackHandler, + _flatten_dict, + flatten_dict, + hash_string, + import_pandas, + import_spacy, + import_textstat, + load_json, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "import_spacy": "langchain_community.callbacks.utils", + "import_pandas": "langchain_community.callbacks.utils", + "import_textstat": "langchain_community.callbacks.utils", + "_flatten_dict": "langchain_community.callbacks.utils", + "flatten_dict": "langchain_community.callbacks.utils", + "hash_string": "langchain_community.callbacks.utils", + "load_json": "langchain_community.callbacks.utils", + "BaseMetadataCallbackHandler": "langchain_community.callbacks.utils", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "import_spacy", + "import_pandas", + "import_textstat", + "_flatten_dict", + "flatten_dict", + "hash_string", + "load_json", + "BaseMetadataCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/wandb_callback.py b/venv/Lib/site-packages/langchain/callbacks/wandb_callback.py new file mode 100644 index 00000000..81d06e16 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/wandb_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.wandb_callback import WandbCallbackHandler + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "WandbCallbackHandler": "langchain_community.callbacks.wandb_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WandbCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/callbacks/whylabs_callback.py b/venv/Lib/site-packages/langchain/callbacks/whylabs_callback.py new file mode 100644 index 00000000..9839fa39 --- /dev/null +++ b/venv/Lib/site-packages/langchain/callbacks/whylabs_callback.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.callbacks.whylabs_callback import WhyLabsCallbackHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "WhyLabsCallbackHandler": "langchain_community.callbacks.whylabs_callback" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WhyLabsCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/chains/__init__.py b/venv/Lib/site-packages/langchain/chains/__init__.py new file mode 100644 index 00000000..39b9156b --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/__init__.py @@ -0,0 +1,96 @@ +"""**Chains** are easily reusable components linked together. + +Chains encode a sequence of calls to components like models, document retrievers, +other Chains, etc., and provide a simple interface to this sequence. + +The Chain interface makes it easy to create apps that are: + + - **Stateful:** add Memory to any Chain to give it state, + - **Observable:** pass Callbacks to a Chain to execute additional functionality, + like logging, outside the main sequence of component calls, + - **Composable:** combine Chains with other components, including other Chains. + +**Class hierarchy:** + +.. 
code-block:: + + Chain --> Chain # Examples: LLMChain, MapReduceChain, RouterChain +""" + +from typing import Any + +from langchain._api import create_importer + +_module_lookup = { + "APIChain": "langchain.chains.api.base", + "OpenAPIEndpointChain": "langchain_community.chains.openapi.chain", + "AnalyzeDocumentChain": "langchain.chains.combine_documents.base", + "MapReduceDocumentsChain": "langchain.chains.combine_documents.map_reduce", + "MapRerankDocumentsChain": "langchain.chains.combine_documents.map_rerank", + "ReduceDocumentsChain": "langchain.chains.combine_documents.reduce", + "RefineDocumentsChain": "langchain.chains.combine_documents.refine", + "StuffDocumentsChain": "langchain.chains.combine_documents.stuff", + "ConstitutionalChain": "langchain.chains.constitutional_ai.base", + "ConversationChain": "langchain.chains.conversation.base", + "ChatVectorDBChain": "langchain.chains.conversational_retrieval.base", + "ConversationalRetrievalChain": "langchain.chains.conversational_retrieval.base", + "generate_example": "langchain.chains.example_generator", + "FlareChain": "langchain.chains.flare.base", + "ArangoGraphQAChain": "langchain_community.chains.graph_qa.arangodb", + "GraphQAChain": "langchain_community.chains.graph_qa.base", + "GraphCypherQAChain": "langchain_community.chains.graph_qa.cypher", + "FalkorDBQAChain": "langchain_community.chains.graph_qa.falkordb", + "HugeGraphQAChain": "langchain_community.chains.graph_qa.hugegraph", + "KuzuQAChain": "langchain_community.chains.graph_qa.kuzu", + "NebulaGraphQAChain": "langchain_community.chains.graph_qa.nebulagraph", + "NeptuneOpenCypherQAChain": "langchain_community.chains.graph_qa.neptune_cypher", + "NeptuneSparqlQAChain": "langchain_community.chains.graph_qa.neptune_sparql", + "OntotextGraphDBQAChain": "langchain_community.chains.graph_qa.ontotext_graphdb", + "GraphSparqlQAChain": "langchain_community.chains.graph_qa.sparql", + "create_history_aware_retriever": "langchain.chains.history_aware_retriever", + "HypotheticalDocumentEmbedder": "langchain.chains.hyde.base", + "LLMChain": "langchain.chains.llm", + "LLMCheckerChain": "langchain.chains.llm_checker.base", + "LLMMathChain": "langchain.chains.llm_math.base", + "LLMRequestsChain": "langchain_community.chains.llm_requests", + "LLMSummarizationCheckerChain": "langchain.chains.llm_summarization_checker.base", + "load_chain": "langchain.chains.loading", + "MapReduceChain": "langchain.chains.mapreduce", + "OpenAIModerationChain": "langchain.chains.moderation", + "NatBotChain": "langchain.chains.natbot.base", + "create_citation_fuzzy_match_chain": "langchain.chains.openai_functions", + "create_citation_fuzzy_match_runnable": "langchain.chains.openai_functions", + "create_extraction_chain": "langchain.chains.openai_functions", + "create_extraction_chain_pydantic": "langchain.chains.openai_functions", + "create_qa_with_sources_chain": "langchain.chains.openai_functions", + "create_qa_with_structure_chain": "langchain.chains.openai_functions", + "create_tagging_chain": "langchain.chains.openai_functions", + "create_tagging_chain_pydantic": "langchain.chains.openai_functions", + "QAGenerationChain": "langchain.chains.qa_generation.base", + "QAWithSourcesChain": "langchain.chains.qa_with_sources.base", + "RetrievalQAWithSourcesChain": "langchain.chains.qa_with_sources.retrieval", + "VectorDBQAWithSourcesChain": "langchain.chains.qa_with_sources.vector_db", + "create_retrieval_chain": "langchain.chains.retrieval", + "RetrievalQA": "langchain.chains.retrieval_qa.base", + "VectorDBQA": 
"langchain.chains.retrieval_qa.base", + "LLMRouterChain": "langchain.chains.router", + "MultiPromptChain": "langchain.chains.router", + "MultiRetrievalQAChain": "langchain.chains.router", + "MultiRouteChain": "langchain.chains.router", + "RouterChain": "langchain.chains.router", + "SequentialChain": "langchain.chains.sequential", + "SimpleSequentialChain": "langchain.chains.sequential", + "create_sql_query_chain": "langchain.chains.sql_database.query", + "create_structured_output_runnable": "langchain.chains.structured_output", + "load_summarize_chain": "langchain.chains.summarize", + "TransformChain": "langchain.chains.transform", +} + +importer = create_importer(__package__, module_lookup=_module_lookup) + + +def __getattr__(name: str) -> Any: + return importer(name) + + +__all__ = list(_module_lookup.keys()) diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..879fa7e0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..da504df6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/example_generator.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/example_generator.cpython-312.pyc new file mode 100644 index 00000000..708d8033 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/example_generator.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/history_aware_retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/history_aware_retriever.cpython-312.pyc new file mode 100644 index 00000000..58017005 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/history_aware_retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/llm.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/llm.cpython-312.pyc new file mode 100644 index 00000000..5185581a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/llm.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/llm_requests.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/llm_requests.cpython-312.pyc new file mode 100644 index 00000000..de411a4d Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/llm_requests.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/loading.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/loading.cpython-312.pyc new file mode 100644 index 00000000..ed82b0c9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/loading.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/mapreduce.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/mapreduce.cpython-312.pyc new file mode 100644 index 00000000..a264a523 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/mapreduce.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/chains/__pycache__/moderation.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/moderation.cpython-312.pyc new file mode 100644 index 00000000..541e7a50 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/moderation.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/prompt_selector.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/prompt_selector.cpython-312.pyc new file mode 100644 index 00000000..258a0c01 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/prompt_selector.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/retrieval.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/retrieval.cpython-312.pyc new file mode 100644 index 00000000..a6a53b68 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/retrieval.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/sequential.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/sequential.cpython-312.pyc new file mode 100644 index 00000000..6ba59b85 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/sequential.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/__pycache__/transform.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/__pycache__/transform.cpython-312.pyc new file mode 100644 index 00000000..f8aa4982 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/__pycache__/transform.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/__init__.py b/venv/Lib/site-packages/langchain/chains/api/__init__.py new file mode 100644 index 00000000..efe2fb36 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/api/__init__.py @@ -0,0 +1 @@ +"""Chain that makes API calls and summarizes the responses to answer a question.""" diff --git a/venv/Lib/site-packages/langchain/chains/api/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..12bffeb1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..d4ef051a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/__pycache__/news_docs.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/__pycache__/news_docs.cpython-312.pyc new file mode 100644 index 00000000..6598fc9a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/__pycache__/news_docs.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/__pycache__/open_meteo_docs.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/__pycache__/open_meteo_docs.cpython-312.pyc new file mode 100644 index 00000000..9f48c574 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/__pycache__/open_meteo_docs.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/__pycache__/podcast_docs.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/chains/api/__pycache__/podcast_docs.cpython-312.pyc new file mode 100644 index 00000000..265fea4e Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/__pycache__/podcast_docs.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..d940b11a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/__pycache__/tmdb_docs.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/__pycache__/tmdb_docs.cpython-312.pyc new file mode 100644 index 00000000..97ffb479 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/__pycache__/tmdb_docs.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/base.py b/venv/Lib/site-packages/langchain/chains/api/base.py new file mode 100644 index 00000000..34dcb6c4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/api/base.py @@ -0,0 +1,380 @@ +"""Chain that makes API calls and summarizes the responses to answer a question.""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any, Optional +from urllib.parse import urlparse + +from langchain_core._api import deprecated +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, +) +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate +from pydantic import Field, model_validator +from typing_extensions import Self + +from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain + + +def _extract_scheme_and_domain(url: str) -> tuple[str, str]: + """Extract the scheme + domain from a given URL. + + Args: + url (str): The input URL. + + Returns: + return a 2-tuple of scheme and domain + """ + parsed_uri = urlparse(url) + return parsed_uri.scheme, parsed_uri.netloc + + +def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool: + """Check if a URL is in the allowed domains. + + Args: + url (str): The input URL. + limit_to_domains (Sequence[str]): The allowed domains. + + Returns: + bool: True if the URL is in the allowed domains, False otherwise. + """ + scheme, domain = _extract_scheme_and_domain(url) + + for allowed_domain in limit_to_domains: + allowed_scheme, allowed_domain = _extract_scheme_and_domain(allowed_domain) + if scheme == allowed_scheme and domain == allowed_domain: + return True + return False + + +try: + from langchain_community.utilities.requests import TextRequestsWrapper + + @deprecated( + since="0.2.13", + message=( + "This class is deprecated and will be removed in langchain 1.0. " + "See API reference for replacement: " + "https://api.python.langchain.com/en/latest/chains/langchain.chains.api.base.APIChain.html" # noqa: E501 + ), + removal="1.0", + ) + class APIChain(Chain): + """Chain that makes API calls and summarizes the responses to answer a question. + + *Security Note*: This API chain uses the requests toolkit + to make GET, POST, PATCH, PUT, and DELETE requests to an API. + + Exercise care in who is allowed to use this chain. 
If exposing
+        to end users, consider that users will be able to make arbitrary
+        requests on behalf of the server hosting the code. For example,
+        users could ask the server to make a request to a private API
+        that is only accessible from the server.
+
+        Control who is allowed to make requests using this chain and
+        what network access it has.
+
+        See https://python.langchain.com/docs/security for more information.
+
+        Note: this class is deprecated. See below for a replacement implementation
+        using LangGraph. The benefits of this implementation are:
+
+        - Uses LLM tool calling features to encourage properly-formatted API requests;
+        - Support for both token-by-token and step-by-step streaming;
+        - Support for checkpointing and memory of chat history;
+        - Easier to modify or extend (e.g., with additional tools, structured responses, etc.)
+
+        Install LangGraph with:
+
+        .. code-block:: bash
+
+            pip install -U langgraph
+
+        .. code-block:: python
+
+            from typing import Annotated, Sequence
+            from typing_extensions import TypedDict
+
+            from langchain.chains.api.prompt import API_URL_PROMPT
+            from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit
+            from langchain_community.utilities.requests import TextRequestsWrapper
+            from langchain_core.messages import BaseMessage
+            from langchain_core.prompts import ChatPromptTemplate
+            from langchain_openai import ChatOpenAI
+            from langchain_core.runnables import RunnableConfig
+            from langgraph.graph import END, StateGraph
+            from langgraph.graph.message import add_messages
+            from langgraph.prebuilt.tool_node import ToolNode
+
+            # NOTE: There are inherent risks in giving models discretion
+            # to execute real-world actions. We must "opt-in" to these
+            # risks by setting allow_dangerous_request=True to use these tools.
+            # Misuse can trigger unwanted or unsafe requests. Please make
+            # sure your custom OpenAPI spec (yaml) is safe and that permissions
+            # associated with the tools are narrowly-scoped.
+ ALLOW_DANGEROUS_REQUESTS = True + + # Subset of spec for https://jsonplaceholder.typicode.com + api_spec = \"\"\" + openapi: 3.0.0 + info: + title: JSONPlaceholder API + version: 1.0.0 + servers: + - url: https://jsonplaceholder.typicode.com + paths: + /posts: + get: + summary: Get posts + parameters: &id001 + - name: _limit + in: query + required: false + schema: + type: integer + example: 2 + description: Limit the number of results + \"\"\" + + llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) + toolkit = RequestsToolkit( + requests_wrapper=TextRequestsWrapper(headers={}), # no auth required + allow_dangerous_requests=ALLOW_DANGEROUS_REQUESTS, + ) + tools = toolkit.get_tools() + + api_request_chain = ( + API_URL_PROMPT.partial(api_docs=api_spec) + | llm.bind_tools(tools, tool_choice="any") + ) + + class ChainState(TypedDict): + \"\"\"LangGraph state.\"\"\" + + messages: Annotated[Sequence[BaseMessage], add_messages] + + + async def acall_request_chain(state: ChainState, config: RunnableConfig): + last_message = state["messages"][-1] + response = await api_request_chain.ainvoke( + {"question": last_message.content}, config + ) + return {"messages": [response]} + + async def acall_model(state: ChainState, config: RunnableConfig): + response = await llm.ainvoke(state["messages"], config) + return {"messages": [response]} + + graph_builder = StateGraph(ChainState) + graph_builder.add_node("call_tool", acall_request_chain) + graph_builder.add_node("execute_tool", ToolNode(tools)) + graph_builder.add_node("call_model", acall_model) + graph_builder.set_entry_point("call_tool") + graph_builder.add_edge("call_tool", "execute_tool") + graph_builder.add_edge("execute_tool", "call_model") + graph_builder.add_edge("call_model", END) + chain = graph_builder.compile() + + .. code-block:: python + + example_query = "Fetch the top two posts. What are their titles?" + + events = chain.astream( + {"messages": [("user", example_query)]}, + stream_mode="values", + ) + async for event in events: + event["messages"][-1].pretty_print() + """ # noqa: E501 + + api_request_chain: LLMChain + api_answer_chain: LLMChain + requests_wrapper: TextRequestsWrapper = Field(exclude=True) + api_docs: str + question_key: str = "question" #: :meta private: + output_key: str = "output" #: :meta private: + limit_to_domains: Optional[Sequence[str]] = Field(default_factory=list) # type: ignore[arg-type] + """Use to limit the domains that can be accessed by the API chain. + + * For example, to limit to just the domain `https://www.example.com`, set + `limit_to_domains=["https://www.example.com"]`. + + * The default value is an empty tuple, which means that no domains are + allowed by default. By design this will raise an error on instantiation. + * Use a None if you want to allow all domains by default -- this is not + recommended for security reasons, as it would allow malicious users to + make requests to arbitrary URLS including internal APIs accessible from + the server. + """ + + @property + def input_keys(self) -> list[str]: + """Expect input key. + + :meta private: + """ + return [self.question_key] + + @property + def output_keys(self) -> list[str]: + """Expect output key. 
+ + :meta private: + """ + return [self.output_key] + + @model_validator(mode="after") + def validate_api_request_prompt(self) -> Self: + """Check that api request prompt expects the right variables.""" + input_vars = self.api_request_chain.prompt.input_variables + expected_vars = {"question", "api_docs"} + if set(input_vars) != expected_vars: + raise ValueError( + f"Input variables should be {expected_vars}, got {input_vars}" + ) + return self + + @model_validator(mode="before") + @classmethod + def validate_limit_to_domains(cls, values: dict) -> Any: + """Check that allowed domains are valid.""" + # This check must be a pre=True check, so that a default of None + # won't be set to limit_to_domains if it's not provided. + if "limit_to_domains" not in values: + raise ValueError( + "You must specify a list of domains to limit access using " + "`limit_to_domains`" + ) + if ( + not values["limit_to_domains"] + and values["limit_to_domains"] is not None + ): + raise ValueError( + "Please provide a list of domains to limit access using " + "`limit_to_domains`." + ) + return values + + @model_validator(mode="after") + def validate_api_answer_prompt(self) -> Self: + """Check that api answer prompt expects the right variables.""" + input_vars = self.api_answer_chain.prompt.input_variables + expected_vars = {"question", "api_docs", "api_url", "api_response"} + if set(input_vars) != expected_vars: + raise ValueError( + f"Input variables should be {expected_vars}, got {input_vars}" + ) + return self + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + question = inputs[self.question_key] + api_url = self.api_request_chain.predict( + question=question, + api_docs=self.api_docs, + callbacks=_run_manager.get_child(), + ) + _run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose) + api_url = api_url.strip() + if self.limit_to_domains and not _check_in_allowed_domain( + api_url, self.limit_to_domains + ): + raise ValueError( + f"{api_url} is not in the allowed domains: {self.limit_to_domains}" + ) + api_response = self.requests_wrapper.get(api_url) + _run_manager.on_text( + str(api_response), color="yellow", end="\n", verbose=self.verbose + ) + answer = self.api_answer_chain.predict( + question=question, + api_docs=self.api_docs, + api_url=api_url, + api_response=api_response, + callbacks=_run_manager.get_child(), + ) + return {self.output_key: answer} + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, str]: + _run_manager = ( + run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + ) + question = inputs[self.question_key] + api_url = await self.api_request_chain.apredict( + question=question, + api_docs=self.api_docs, + callbacks=_run_manager.get_child(), + ) + await _run_manager.on_text( + api_url, color="green", end="\n", verbose=self.verbose + ) + api_url = api_url.strip() + if self.limit_to_domains and not _check_in_allowed_domain( + api_url, self.limit_to_domains + ): + raise ValueError( + f"{api_url} is not in the allowed domains: {self.limit_to_domains}" + ) + api_response = await self.requests_wrapper.aget(api_url) + await _run_manager.on_text( + str(api_response), color="yellow", end="\n", verbose=self.verbose + ) + answer = await self.api_answer_chain.apredict( + question=question, + api_docs=self.api_docs, + 
api_url=api_url,
+                api_response=api_response,
+                callbacks=_run_manager.get_child(),
+            )
+            return {self.output_key: answer}
+
+        @classmethod
+        def from_llm_and_api_docs(
+            cls,
+            llm: BaseLanguageModel,
+            api_docs: str,
+            headers: Optional[dict] = None,
+            api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
+            api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
+            limit_to_domains: Optional[Sequence[str]] = tuple(),
+            **kwargs: Any,
+        ) -> APIChain:
+            """Load chain from just an LLM and the api docs."""
+            get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
+            requests_wrapper = TextRequestsWrapper(headers=headers)
+            get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
+            return cls(
+                api_request_chain=get_request_chain,
+                api_answer_chain=get_answer_chain,
+                requests_wrapper=requests_wrapper,
+                api_docs=api_docs,
+                limit_to_domains=limit_to_domains,
+                **kwargs,
+            )
+
+        @property
+        def _chain_type(self) -> str:
+            return "api_chain"
+
+except ImportError:
+
+    class APIChain:  # type: ignore[no-redef]
+        def __init__(self, *args: Any, **kwargs: Any) -> None:
+            raise ImportError(
+                "To use the APIChain, you must install the langchain_community "
+                "package: pip install langchain_community"
+            )
diff --git a/venv/Lib/site-packages/langchain/chains/api/news_docs.py b/venv/Lib/site-packages/langchain/chains/api/news_docs.py
new file mode 100644
index 00000000..b29f16c1
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/api/news_docs.py
@@ -0,0 +1,32 @@
+# flake8: noqa
+NEWS_DOCS = """API documentation:
+Endpoint: https://newsapi.org
+Top headlines /v2/top-headlines
+
+This endpoint provides live top and breaking headlines for a country, specific category in a country, single source, or multiple sources. You can also search with keywords. Articles are sorted by the earliest date published first.
+
+This endpoint is great for retrieving headlines for use with news tickers or similar.
+Request parameters
+
+    country | The 2-letter ISO 3166-1 code of the country you want to get headlines for. Possible options: ae ar at au be bg br ca ch cn co cu cz de eg fr gb gr hk hu id ie il in it jp kr lt lv ma mx my ng nl no nz ph pl pt ro rs ru sa se sg si sk th tr tw ua us ve za. Note: you can't mix this param with the sources param.
+    category | The category you want to get headlines for. Possible options: business entertainment general health science sports technology. Note: you can't mix this param with the sources param.
+    sources | A comma-separated string of identifiers for the news sources or blogs you want headlines from. Use the /top-headlines/sources endpoint to locate these programmatically or look at the sources index. Note: you can't mix this param with the country or category params.
+    q | Keywords or a phrase to search for.
+    pageSize | int | The number of results to return per page (request). 20 is the default, 100 is the maximum.
+    page | int | Use this to page through the results if the total results found is greater than the page size.
+
+Response object
+    status | string | If the request was successful or not. Options: ok, error. In the case of error a code and message property will be populated.
+    totalResults | int | The total number of results available for your request.
+    articles | array[article] | The results of the request.
+    source | object | The identifier id and a display name name for the source this article came from.
+    author | string | The author of the article
+    title | string | The headline or title of the article.
+    description | string | A description or snippet from the article.
+    url | string | The direct URL to the article.
+    urlToImage | string | The URL to a relevant image for the article.
+    publishedAt | string | The date and time that the article was published, in UTC (+000)
+    content | string | The unformatted content of the article, where available. This is truncated to 200 chars.
+
+Use pageSize: 2
+"""
diff --git a/venv/Lib/site-packages/langchain/chains/api/open_meteo_docs.py b/venv/Lib/site-packages/langchain/chains/api/open_meteo_docs.py
new file mode 100644
index 00000000..4abd86fb
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/api/open_meteo_docs.py
@@ -0,0 +1,33 @@
+# flake8: noqa
+OPEN_METEO_DOCS = """BASE URL: https://api.open-meteo.com/
+
+API Documentation
+The API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:
+
+Parameter Format Required Default Description
+latitude, longitude Floating point Yes Geographical WGS84 coordinate of the location
+hourly String array No A list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameters in the URL can be used.
+daily String array No A list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameters in the URL can be used. If daily weather variables are specified, parameter timezone is required.
+current_weather Bool No false Include current weather conditions in the JSON output.
+temperature_unit String No celsius If fahrenheit is set, all temperature values are converted to Fahrenheit.
+windspeed_unit String No kmh Other wind speed units: ms, mph and kn
+precipitation_unit String No mm Other precipitation amount units: inch
+timeformat String No iso8601 If format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamps are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.
+timezone String No GMT If timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.
+past_days Integer (0-2) No 0 If past_days is set, yesterday or the day before yesterday data are also returned.
+start_date
+end_date String (yyyy-mm-dd) No The time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).
+models String array No auto Manually select one or more weather models. Per default, the best suitable weather models will be combined.
+
+Hourly Parameter Definition
+The parameter &hourly= accepts the following values. Most weather variables are given as an instantaneous value for the indicated hour. Some variables like precipitation are calculated from the preceding hour as an average or sum.
+
+Variable Valid time Unit Description
+temperature_2m Instant °C (°F) Air temperature at 2 meters above ground
+snowfall Preceding hour sum cm (inch) Snowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g.
7 cm snow = 10 mm precipitation water equivalent +rain Preceding hour sum mm (inch) Rain from large scale weather systems of the preceding hour in millimeter +showers Preceding hour sum mm (inch) Showers from convective precipitation in millimeters from the preceding hour +weathercode Instant WMO code Weather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details. +snow_depth Instant meters Snow depth on the ground +freezinglevel_height Instant meters Altitude above sea level of the 0°C level +visibility Instant meters Viewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.""" diff --git a/venv/Lib/site-packages/langchain/chains/api/openapi/__init__.py b/venv/Lib/site-packages/langchain/chains/api/openapi/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..e5043d8d Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/chain.cpython-312.pyc new file mode 100644 index 00000000..39a58b1f Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/prompts.cpython-312.pyc new file mode 100644 index 00000000..9a22189d Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/prompts.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/requests_chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/requests_chain.cpython-312.pyc new file mode 100644 index 00000000..99e17dd4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/requests_chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/response_chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/response_chain.cpython-312.pyc new file mode 100644 index 00000000..532dda4c Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/api/openapi/__pycache__/response_chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/api/openapi/chain.py b/venv/Lib/site-packages/langchain/chains/api/openapi/chain.py new file mode 100644 index 00000000..eb552188 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/api/openapi/chain.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.openapi.chain import OpenAPIEndpointChain + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
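+# A rough sketch of the behavior (the real implementation, including the
+# deprecation warning, lives in langchain._api.create_importer):
+#
+#     from importlib import import_module
+#
+#     def _import_attribute(name: str):
+#         module = import_module(DEPRECATED_LOOKUP[name])
+#         return getattr(module, name)
+#
+# so `from langchain.chains.api.openapi.chain import OpenAPIEndpointChain`
+# keeps working but resolves to the langchain_community implementation
+# (assuming langchain_community is installed).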
+DEPRECATED_LOOKUP = { + "OpenAPIEndpointChain": "langchain_community.chains.openapi.chain", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["OpenAPIEndpointChain"] diff --git a/venv/Lib/site-packages/langchain/chains/api/openapi/prompts.py b/venv/Lib/site-packages/langchain/chains/api/openapi/prompts.py new file mode 100644 index 00000000..cfc7cf2b --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/api/openapi/prompts.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.openapi.prompts import ( + REQUEST_TEMPLATE, + RESPONSE_TEMPLATE, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "REQUEST_TEMPLATE": "langchain_community.chains.openapi.prompts", + "RESPONSE_TEMPLATE": "langchain_community.chains.openapi.prompts", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["REQUEST_TEMPLATE", "RESPONSE_TEMPLATE"] diff --git a/venv/Lib/site-packages/langchain/chains/api/openapi/requests_chain.py b/venv/Lib/site-packages/langchain/chains/api/openapi/requests_chain.py new file mode 100644 index 00000000..0d221ae2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/api/openapi/requests_chain.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.openapi.requests_chain import ( + REQUEST_TEMPLATE, + APIRequesterChain, + APIRequesterOutputParser, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "APIRequesterChain": "langchain_community.chains.openapi.requests_chain", + "APIRequesterOutputParser": "langchain_community.chains.openapi.requests_chain", + "REQUEST_TEMPLATE": "langchain_community.chains.openapi.requests_chain", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["APIRequesterChain", "APIRequesterOutputParser", "REQUEST_TEMPLATE"] diff --git a/venv/Lib/site-packages/langchain/chains/api/openapi/response_chain.py b/venv/Lib/site-packages/langchain/chains/api/openapi/response_chain.py new file mode 100644 index 00000000..86644762 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/api/openapi/response_chain.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.openapi.response_chain import ( + RESPONSE_TEMPLATE, + APIResponderChain, + APIResponderOutputParser, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "APIResponderChain": "langchain_community.chains.openapi.response_chain", + "APIResponderOutputParser": "langchain_community.chains.openapi.response_chain", + "RESPONSE_TEMPLATE": "langchain_community.chains.openapi.response_chain", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["APIResponderChain", "APIResponderOutputParser", "RESPONSE_TEMPLATE"] diff --git a/venv/Lib/site-packages/langchain/chains/api/podcast_docs.py b/venv/Lib/site-packages/langchain/chains/api/podcast_docs.py new file mode 100644 index 00000000..9c4e5cbf --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/api/podcast_docs.py @@ -0,0 +1,28 @@ +# flake8: noqa +PODCAST_DOCS = """API documentation: +Endpoint: https://listen-api.listennotes.com/api/v2 +GET /search + +This API is for searching podcasts or episodes. + +Query parameters table: +q | string | Search term, e.g., person, place, topic... You can use double quotes to do verbatim match, e.g., "game of thrones". Otherwise, it's fuzzy search. | required +type | string | What type of contents do you want to search for? Available values: episode, podcast, curated. default: episode | optional +page_size | integer | The maximum number of search results per page. A valid value should be an integer between 1 and 10 (inclusive). default: 3 | optional +language | string | Limit search results to a specific language, e.g., English, Chinese ... If not specified, it'll be any language. It works only when type is episode or podcast. | optional +region | string | Limit search results to a specific region (e.g., us, gb, in...). If not specified, it'll be any region. It works only when type is episode or podcast. | optional +len_min | integer | Minimum audio length in minutes. Applicable only when type parameter is episode or podcast. If type parameter is episode, it's for audio length of an episode. If type parameter is podcast, it's for average audio length of all episodes in a podcast. | optional +len_max | integer | Maximum audio length in minutes. Applicable only when type parameter is episode or podcast. If type parameter is episode, it's for audio length of an episode. If type parameter is podcast, it's for average audio length of all episodes in a podcast. | optional + +Response schema (JSON object): +next_offset | integer | optional +total | integer | optional +results | array[object] (Episode / Podcast List Result Object) + +Each object in the "results" key has the following schema: +listennotes_url | string | optional +id | integer | optional +title_highlighted | string | optional + +Use page_size: 3 +""" diff --git a/venv/Lib/site-packages/langchain/chains/api/prompt.py b/venv/Lib/site-packages/langchain/chains/api/prompt.py new file mode 100644 index 00000000..0ffc389a --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/api/prompt.py @@ -0,0 +1,36 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +API_URL_PROMPT_TEMPLATE = """You are given the below API Documentation: +{api_docs} +Using this documentation, generate the full API url to call for answering the user question. +You should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call. 
+ +Question:{question} +API url:""" + +API_URL_PROMPT = PromptTemplate( + input_variables=[ + "api_docs", + "question", + ], + template=API_URL_PROMPT_TEMPLATE, +) + +API_RESPONSE_PROMPT_TEMPLATE = ( + API_URL_PROMPT_TEMPLATE + + """ {api_url} + +Here is the response from the API: + +{api_response} + +Summarize this response to answer the original question. + +Summary:""" +) + +API_RESPONSE_PROMPT = PromptTemplate( + input_variables=["api_docs", "question", "api_url", "api_response"], + template=API_RESPONSE_PROMPT_TEMPLATE, +) diff --git a/venv/Lib/site-packages/langchain/chains/api/tmdb_docs.py b/venv/Lib/site-packages/langchain/chains/api/tmdb_docs.py new file mode 100644 index 00000000..4634a80a --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/api/tmdb_docs.py @@ -0,0 +1,37 @@ +# flake8: noqa +TMDB_DOCS = """API documentation: +Endpoint: https://api.themoviedb.org/3 +GET /search/movie + +This API is for searching movies. + +Query parameters table: +language | string | Pass a ISO 639-1 value to display translated data for the fields that support it. minLength: 2, pattern: ([a-z]{2})-([A-Z]{2}), default: en-US | optional +query | string | Pass a text query to search. This value should be URI encoded. minLength: 1 | required +page | integer | Specify which page to query. minimum: 1, maximum: 1000, default: 1 | optional +include_adult | boolean | Choose whether to include adult (pornography) content in the results. default | optional +region | string | Specify a ISO 3166-1 code to filter release dates. Must be uppercase. pattern: ^[A-Z]{2}$ | optional +year | integer | optional +primary_release_year | integer | optional + +Response schema (JSON object): +page | integer | optional +total_results | integer | optional +total_pages | integer | optional +results | array[object] (Movie List Result Object) + +Each object in the "results" key has the following schema: +poster_path | string or null | optional +adult | boolean | optional +overview | string | optional +release_date | string | optional +genre_ids | array[integer] | optional +id | integer | optional +original_title | string | optional +original_language | string | optional +title | string | optional +backdrop_path | string or null | optional +popularity | number | optional +vote_count | integer | optional +video | boolean | optional +vote_average | number | optional""" diff --git a/venv/Lib/site-packages/langchain/chains/base.py b/venv/Lib/site-packages/langchain/chains/base.py new file mode 100644 index 00000000..2bae805f --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/base.py @@ -0,0 +1,766 @@ +"""Base interface that all chains should implement.""" + +import builtins +import inspect +import json +import logging +import warnings +from abc import ABC, abstractmethod +from pathlib import Path +from typing import Any, Optional, Union, cast + +import yaml +from langchain_core._api import deprecated +from langchain_core.callbacks import ( + AsyncCallbackManager, + AsyncCallbackManagerForChainRun, + BaseCallbackManager, + CallbackManager, + CallbackManagerForChainRun, + Callbacks, +) +from langchain_core.memory import BaseMemory +from langchain_core.outputs import RunInfo +from langchain_core.runnables import ( + RunnableConfig, + RunnableSerializable, + ensure_config, + run_in_executor, +) +from langchain_core.runnables.utils import create_model +from pydantic import ( + BaseModel, + ConfigDict, + Field, + field_validator, + model_validator, +) + +from langchain.schema import RUN_KEY + +logger = 
logging.getLogger(__name__) + + +def _get_verbosity() -> bool: + from langchain.globals import get_verbose + + return get_verbose() + + +class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC): + """Abstract base class for creating structured sequences of calls to components. + + Chains should be used to encode a sequence of calls to components like + models, document retrievers, other chains, etc., and provide a simple interface + to this sequence. + + The Chain interface makes it easy to create apps that are: + - Stateful: add Memory to any Chain to give it state, + - Observable: pass Callbacks to a Chain to execute additional functionality, + like logging, outside the main sequence of component calls, + - Composable: the Chain API is flexible enough that it is easy to combine + Chains with other components, including other Chains. + + The main methods exposed by chains are: + - `__call__`: Chains are callable. The `__call__` method is the primary way to + execute a Chain. This takes inputs as a dictionary and returns a + dictionary output. + - `run`: A convenience method that takes inputs as args/kwargs and returns the + output as a string or object. This method can only be used for a subset of + chains and cannot return as rich of an output as `__call__`. + """ + + memory: Optional[BaseMemory] = None + """Optional memory object. Defaults to None. + Memory is a class that gets called at the start + and at the end of every chain. At the start, memory loads variables and passes + them along in the chain. At the end, it saves any returned variables. + There are many different types of memory - please see memory docs + for the full catalog.""" + callbacks: Callbacks = Field(default=None, exclude=True) + """Optional list of callback handlers (or callback manager). Defaults to None. + Callback handlers are called throughout the lifecycle of a call to a chain, + starting with on_chain_start, ending with on_chain_end or on_chain_error. + Each custom chain can optionally call additional callback methods, see Callback docs + for full details.""" + verbose: bool = Field(default_factory=_get_verbosity) + """Whether or not run in verbose mode. In verbose mode, some intermediate logs + will be printed to the console. Defaults to the global `verbose` value, + accessible via `langchain.globals.get_verbose()`.""" + tags: Optional[list[str]] = None + """Optional list of tags associated with the chain. Defaults to None. + These tags will be associated with each call to this chain, + and passed as arguments to the handlers defined in `callbacks`. + You can use these to eg identify a specific instance of a chain with its use case. + """ + metadata: Optional[dict[str, Any]] = None + """Optional metadata associated with the chain. Defaults to None. + This metadata will be associated with each call to this chain, + and passed as arguments to the handlers defined in `callbacks`. + You can use these to eg identify a specific instance of a chain with its use case. + """ + callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) + """[DEPRECATED] Use `callbacks` instead.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + # This is correct, but pydantic typings/mypy don't think so. 
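+        # For a chain with input_keys ["question"], this is roughly equivalent
+        # to `create_model("ChainInput", question=(Any, None))`, i.e. a pydantic
+        # model with a single optional field of type Any.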
+ return create_model("ChainInput", **{k: (Any, None) for k in self.input_keys}) + + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + # This is correct, but pydantic typings/mypy don't think so. + return create_model("ChainOutput", **{k: (Any, None) for k in self.output_keys}) + + def invoke( + self, + input: dict[str, Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> dict[str, Any]: + config = ensure_config(config) + callbacks = config.get("callbacks") + tags = config.get("tags") + metadata = config.get("metadata") + run_name = config.get("run_name") or self.get_name() + run_id = config.get("run_id") + include_run_info = kwargs.get("include_run_info", False) + return_only_outputs = kwargs.get("return_only_outputs", False) + + inputs = self.prep_inputs(input) + callback_manager = CallbackManager.configure( + callbacks, + self.callbacks, + self.verbose, + tags, + self.tags, + metadata, + self.metadata, + ) + new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") + + run_manager = callback_manager.on_chain_start( + None, + inputs, + run_id, + name=run_name, + ) + try: + self._validate_inputs(inputs) + outputs = ( + self._call(inputs, run_manager=run_manager) + if new_arg_supported + else self._call(inputs) + ) + + final_outputs: dict[str, Any] = self.prep_outputs( + inputs, outputs, return_only_outputs + ) + except BaseException as e: + run_manager.on_chain_error(e) + raise e + run_manager.on_chain_end(outputs) + + if include_run_info: + final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id) + return final_outputs + + async def ainvoke( + self, + input: dict[str, Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> dict[str, Any]: + config = ensure_config(config) + callbacks = config.get("callbacks") + tags = config.get("tags") + metadata = config.get("metadata") + run_name = config.get("run_name") or self.get_name() + run_id = config.get("run_id") + include_run_info = kwargs.get("include_run_info", False) + return_only_outputs = kwargs.get("return_only_outputs", False) + + inputs = await self.aprep_inputs(input) + callback_manager = AsyncCallbackManager.configure( + callbacks, + self.callbacks, + self.verbose, + tags, + self.tags, + metadata, + self.metadata, + ) + new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") + run_manager = await callback_manager.on_chain_start( + None, + inputs, + run_id, + name=run_name, + ) + try: + self._validate_inputs(inputs) + outputs = ( + await self._acall(inputs, run_manager=run_manager) + if new_arg_supported + else await self._acall(inputs) + ) + final_outputs: dict[str, Any] = await self.aprep_outputs( + inputs, outputs, return_only_outputs + ) + except BaseException as e: + await run_manager.on_chain_error(e) + raise e + await run_manager.on_chain_end(outputs) + + if include_run_info: + final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id) + return final_outputs + + @property + def _chain_type(self) -> str: + raise NotImplementedError("Saving not supported for this chain type.") + + @model_validator(mode="before") + @classmethod + def raise_callback_manager_deprecation(cls, values: dict) -> Any: + """Raise deprecation warning if callback_manager is used.""" + if values.get("callback_manager") is not None: + if values.get("callbacks") is not None: + raise ValueError( + "Cannot specify both callback_manager and callbacks. 
" + "callback_manager is deprecated, callbacks is the preferred " + "parameter to pass in." + ) + warnings.warn( + "callback_manager is deprecated. Please use callbacks instead.", + DeprecationWarning, + ) + values["callbacks"] = values.pop("callback_manager", None) + return values + + @field_validator("verbose", mode="before") + @classmethod + def set_verbose(cls, verbose: Optional[bool]) -> bool: + """Set the chain verbosity. + + Defaults to the global setting if not specified by the user. + """ + if verbose is None: + return _get_verbosity() + else: + return verbose + + @property + @abstractmethod + def input_keys(self) -> list[str]: + """Keys expected to be in the chain input.""" + + @property + @abstractmethod + def output_keys(self) -> list[str]: + """Keys expected to be in the chain output.""" + + def _validate_inputs(self, inputs: dict[str, Any]) -> None: + """Check that all inputs are present.""" + if not isinstance(inputs, dict): + _input_keys = set(self.input_keys) + if self.memory is not None: + # If there are multiple input keys, but some get set by memory so that + # only one is not set, we can still figure out which key it is. + _input_keys = _input_keys.difference(self.memory.memory_variables) + if len(_input_keys) != 1: + raise ValueError( + f"A single string input was passed in, but this chain expects " + f"multiple inputs ({_input_keys}). When a chain expects " + f"multiple inputs, please call it by passing in a dictionary, " + "eg `chain({'foo': 1, 'bar': 2})`" + ) + + missing_keys = set(self.input_keys).difference(inputs) + if missing_keys: + raise ValueError(f"Missing some input keys: {missing_keys}") + + def _validate_outputs(self, outputs: dict[str, Any]) -> None: + missing_keys = set(self.output_keys).difference(outputs) + if missing_keys: + raise ValueError(f"Missing some output keys: {missing_keys}") + + @abstractmethod + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Execute the chain. + + This is a private method that is not user-facing. It is only called within + `Chain.__call__`, which is the user-facing wrapper method that handles + callbacks configuration and some input/output processing. + + Args: + inputs: A dict of named inputs to the chain. Assumed to contain all inputs + specified in `Chain.input_keys`, including any inputs added by memory. + run_manager: The callbacks manager that contains the callback handlers for + this run of the chain. + + Returns: + A dict of named outputs. Should contain all outputs specified in + `Chain.output_keys`. + """ + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Asynchronously execute the chain. + + This is a private method that is not user-facing. It is only called within + `Chain.acall`, which is the user-facing wrapper method that handles + callbacks configuration and some input/output processing. + + Args: + inputs: A dict of named inputs to the chain. Assumed to contain all inputs + specified in `Chain.input_keys`, including any inputs added by memory. + run_manager: The callbacks manager that contains the callback handlers for + this run of the chain. + + Returns: + A dict of named outputs. Should contain all outputs specified in + `Chain.output_keys`. 
+ """ + return await run_in_executor( + None, self._call, inputs, run_manager.get_sync() if run_manager else None + ) + + @deprecated("0.1.0", alternative="invoke", removal="1.0") + def __call__( + self, + inputs: Union[dict[str, Any], Any], + return_only_outputs: bool = False, + callbacks: Callbacks = None, + *, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + run_name: Optional[str] = None, + include_run_info: bool = False, + ) -> dict[str, Any]: + """Execute the chain. + + Args: + inputs: Dictionary of inputs, or single input if chain expects + only one param. Should contain all inputs specified in + `Chain.input_keys` except for inputs that will be set by the chain's + memory. + return_only_outputs: Whether to return only outputs in the + response. If True, only new keys generated by this chain will be + returned. If False, both input keys and new keys generated by this + chain will be returned. Defaults to False. + callbacks: Callbacks to use for this chain run. These will be called in + addition to callbacks passed to the chain during construction, but only + these runtime callbacks will propagate to calls to other objects. + tags: List of string tags to pass to all callbacks. These will be passed in + addition to tags passed to the chain during construction, but only + these runtime tags will propagate to calls to other objects. + metadata: Optional metadata associated with the chain. Defaults to None + include_run_info: Whether to include run info in the response. Defaults + to False. + + Returns: + A dict of named outputs. Should contain all outputs specified in + `Chain.output_keys`. + """ + config = { + "callbacks": callbacks, + "tags": tags, + "metadata": metadata, + "run_name": run_name, + } + + return self.invoke( + inputs, + cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}), + return_only_outputs=return_only_outputs, + include_run_info=include_run_info, + ) + + @deprecated("0.1.0", alternative="ainvoke", removal="1.0") + async def acall( + self, + inputs: Union[dict[str, Any], Any], + return_only_outputs: bool = False, + callbacks: Callbacks = None, + *, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + run_name: Optional[str] = None, + include_run_info: bool = False, + ) -> dict[str, Any]: + """Asynchronously execute the chain. + + Args: + inputs: Dictionary of inputs, or single input if chain expects + only one param. Should contain all inputs specified in + `Chain.input_keys` except for inputs that will be set by the chain's + memory. + return_only_outputs: Whether to return only outputs in the + response. If True, only new keys generated by this chain will be + returned. If False, both input keys and new keys generated by this + chain will be returned. Defaults to False. + callbacks: Callbacks to use for this chain run. These will be called in + addition to callbacks passed to the chain during construction, but only + these runtime callbacks will propagate to calls to other objects. + tags: List of string tags to pass to all callbacks. These will be passed in + addition to tags passed to the chain during construction, but only + these runtime tags will propagate to calls to other objects. + metadata: Optional metadata associated with the chain. Defaults to None + include_run_info: Whether to include run info in the response. Defaults + to False. + + Returns: + A dict of named outputs. Should contain all outputs specified in + `Chain.output_keys`. 
+ """ + config = { + "callbacks": callbacks, + "tags": tags, + "metadata": metadata, + "run_name": run_name, + } + return await self.ainvoke( + inputs, + cast(RunnableConfig, {k: v for k, v in config.items() if k is not None}), + return_only_outputs=return_only_outputs, + include_run_info=include_run_info, + ) + + def prep_outputs( + self, + inputs: dict[str, str], + outputs: dict[str, str], + return_only_outputs: bool = False, + ) -> dict[str, str]: + """Validate and prepare chain outputs, and save info about this run to memory. + + Args: + inputs: Dictionary of chain inputs, including any inputs added by chain + memory. + outputs: Dictionary of initial chain outputs. + return_only_outputs: Whether to only return the chain outputs. If False, + inputs are also added to the final outputs. + + Returns: + A dict of the final chain outputs. + """ + self._validate_outputs(outputs) + if self.memory is not None: + self.memory.save_context(inputs, outputs) + if return_only_outputs: + return outputs + else: + return {**inputs, **outputs} + + async def aprep_outputs( + self, + inputs: dict[str, str], + outputs: dict[str, str], + return_only_outputs: bool = False, + ) -> dict[str, str]: + """Validate and prepare chain outputs, and save info about this run to memory. + + Args: + inputs: Dictionary of chain inputs, including any inputs added by chain + memory. + outputs: Dictionary of initial chain outputs. + return_only_outputs: Whether to only return the chain outputs. If False, + inputs are also added to the final outputs. + + Returns: + A dict of the final chain outputs. + """ + self._validate_outputs(outputs) + if self.memory is not None: + await self.memory.asave_context(inputs, outputs) + if return_only_outputs: + return outputs + else: + return {**inputs, **outputs} + + def prep_inputs(self, inputs: Union[dict[str, Any], Any]) -> dict[str, str]: + """Prepare chain inputs, including adding inputs from memory. + + Args: + inputs: Dictionary of raw inputs, or single input if chain expects + only one param. Should contain all inputs specified in + `Chain.input_keys` except for inputs that will be set by the chain's + memory. + + Returns: + A dictionary of all inputs, including those added by the chain's memory. + """ + if not isinstance(inputs, dict): + _input_keys = set(self.input_keys) + if self.memory is not None: + # If there are multiple input keys, but some get set by memory so that + # only one is not set, we can still figure out which key it is. + _input_keys = _input_keys.difference(self.memory.memory_variables) + inputs = {list(_input_keys)[0]: inputs} + if self.memory is not None: + external_context = self.memory.load_memory_variables(inputs) + inputs = dict(inputs, **external_context) + return inputs + + async def aprep_inputs(self, inputs: Union[dict[str, Any], Any]) -> dict[str, str]: + """Prepare chain inputs, including adding inputs from memory. + + Args: + inputs: Dictionary of raw inputs, or single input if chain expects + only one param. Should contain all inputs specified in + `Chain.input_keys` except for inputs that will be set by the chain's + memory. + + Returns: + A dictionary of all inputs, including those added by the chain's memory. + """ + if not isinstance(inputs, dict): + _input_keys = set(self.input_keys) + if self.memory is not None: + # If there are multiple input keys, but some get set by memory so that + # only one is not set, we can still figure out which key it is. 
+ _input_keys = _input_keys.difference(self.memory.memory_variables) + inputs = {list(_input_keys)[0]: inputs} + if self.memory is not None: + external_context = await self.memory.aload_memory_variables(inputs) + inputs = dict(inputs, **external_context) + return inputs + + @property + def _run_output_key(self) -> str: + if len(self.output_keys) != 1: + raise ValueError( + f"`run` not supported when there is not exactly " + f"one output key. Got {self.output_keys}." + ) + return self.output_keys[0] + + @deprecated("0.1.0", alternative="invoke", removal="1.0") + def run( + self, + *args: Any, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Convenience method for executing chain. + + The main difference between this method and `Chain.__call__` is that this + method expects inputs to be passed directly in as positional arguments or + keyword arguments, whereas `Chain.__call__` expects a single input dictionary + with all the inputs + + Args: + *args: If the chain expects a single input, it can be passed in as the + sole positional argument. + callbacks: Callbacks to use for this chain run. These will be called in + addition to callbacks passed to the chain during construction, but only + these runtime callbacks will propagate to calls to other objects. + tags: List of string tags to pass to all callbacks. These will be passed in + addition to tags passed to the chain during construction, but only + these runtime tags will propagate to calls to other objects. + **kwargs: If the chain expects multiple inputs, they can be passed in + directly as keyword arguments. + + Returns: + The chain output. + + Example: + .. code-block:: python + + # Suppose we have a single-input chain that takes a 'question' string: + chain.run("What's the temperature in Boise, Idaho?") + # -> "The temperature in Boise is..." + + # Suppose we have a multi-input chain that takes a 'question' string + # and 'context' string: + question = "What's the temperature in Boise, Idaho?" + context = "Weather report for Boise, Idaho on 07/03/23..." + chain.run(question=question, context=context) + # -> "The temperature in Boise is..." + """ + # Run at start to make sure this is possible/defined + _output_key = self._run_output_key + + if args and not kwargs: + if len(args) != 1: + raise ValueError("`run` supports only one positional argument.") + return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[ + _output_key + ] + + if kwargs and not args: + return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[ + _output_key + ] + + if not kwargs and not args: + raise ValueError( + "`run` supported with either positional arguments or keyword arguments," + " but none were provided." + ) + else: + raise ValueError( + f"`run` supported with either positional arguments or keyword arguments" + f" but not both. Got args: {args} and kwargs: {kwargs}." + ) + + @deprecated("0.1.0", alternative="ainvoke", removal="1.0") + async def arun( + self, + *args: Any, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Convenience method for executing chain. 
+ + The main difference between this method and `Chain.__call__` is that this + method expects inputs to be passed directly in as positional arguments or + keyword arguments, whereas `Chain.__call__` expects a single input dictionary + with all the inputs + + + Args: + *args: If the chain expects a single input, it can be passed in as the + sole positional argument. + callbacks: Callbacks to use for this chain run. These will be called in + addition to callbacks passed to the chain during construction, but only + these runtime callbacks will propagate to calls to other objects. + tags: List of string tags to pass to all callbacks. These will be passed in + addition to tags passed to the chain during construction, but only + these runtime tags will propagate to calls to other objects. + **kwargs: If the chain expects multiple inputs, they can be passed in + directly as keyword arguments. + + Returns: + The chain output. + + Example: + .. code-block:: python + + # Suppose we have a single-input chain that takes a 'question' string: + await chain.arun("What's the temperature in Boise, Idaho?") + # -> "The temperature in Boise is..." + + # Suppose we have a multi-input chain that takes a 'question' string + # and 'context' string: + question = "What's the temperature in Boise, Idaho?" + context = "Weather report for Boise, Idaho on 07/03/23..." + await chain.arun(question=question, context=context) + # -> "The temperature in Boise is..." + """ + if len(self.output_keys) != 1: + raise ValueError( + f"`run` not supported when there is not exactly " + f"one output key. Got {self.output_keys}." + ) + elif args and not kwargs: + if len(args) != 1: + raise ValueError("`run` supports only one positional argument.") + return ( + await self.acall( + args[0], callbacks=callbacks, tags=tags, metadata=metadata + ) + )[self.output_keys[0]] + + if kwargs and not args: + return ( + await self.acall( + kwargs, callbacks=callbacks, tags=tags, metadata=metadata + ) + )[self.output_keys[0]] + + raise ValueError( + f"`run` supported with either positional arguments or keyword arguments" + f" but not both. Got args: {args} and kwargs: {kwargs}." + ) + + def dict(self, **kwargs: Any) -> dict: + """Dictionary representation of chain. + + Expects `Chain._chain_type` property to be implemented and for memory to be + null. + + Args: + **kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict` + method. + + Returns: + A dictionary representation of the chain. + + Example: + .. code-block:: python + + chain.dict(exclude_unset=True) + # -> {"_type": "foo", "verbose": False, ...} + """ + _dict = super().dict(**kwargs) + try: + _dict["_type"] = self._chain_type + except NotImplementedError: + pass + return _dict + + def save(self, file_path: Union[Path, str]) -> None: + """Save the chain. + + Expects `Chain._chain_type` property to be implemented and for memory to be + null. + + Args: + file_path: Path to file to save the chain to. + + Example: + .. code-block:: python + + chain.save(file_path="path/chain.yaml") + """ + if self.memory is not None: + raise ValueError("Saving of memory is not yet supported.") + + # Fetch dictionary to save + chain_dict = self.dict() + if "_type" not in chain_dict: + raise NotImplementedError(f"Chain {self} does not support saving.") + + # Convert file to Path object. 
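+        # The resolved Path's suffix selects the on-disk format below:
+        # ".json" -> json.dump, ".yaml"/".yml" -> yaml.dump; anything else raises.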
+ if isinstance(file_path, str): + save_path = Path(file_path) + else: + save_path = file_path + + directory_path = save_path.parent + directory_path.mkdir(parents=True, exist_ok=True) + + if save_path.suffix == ".json": + with open(file_path, "w") as f: + json.dump(chain_dict, f, indent=4) + elif save_path.suffix.endswith((".yaml", ".yml")): + with open(file_path, "w") as f: + yaml.dump(chain_dict, f, default_flow_style=False) + else: + raise ValueError(f"{save_path} must be json or yaml") + + @deprecated("0.1.0", alternative="batch", removal="1.0") + def apply( + self, input_list: list[builtins.dict[str, Any]], callbacks: Callbacks = None + ) -> list[builtins.dict[str, str]]: + """Call the chain on all inputs in the list.""" + return [self(inputs, callbacks=callbacks) for inputs in input_list] diff --git a/venv/Lib/site-packages/langchain/chains/chat_vector_db/__init__.py b/venv/Lib/site-packages/langchain/chains/chat_vector_db/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/chains/chat_vector_db/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/chat_vector_db/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..b6fcce2b Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/chat_vector_db/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/chat_vector_db/__pycache__/prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/chat_vector_db/__pycache__/prompts.cpython-312.pyc new file mode 100644 index 00000000..c91f1685 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/chat_vector_db/__pycache__/prompts.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/chat_vector_db/prompts.py b/venv/Lib/site-packages/langchain/chains/chat_vector_db/prompts.py new file mode 100644 index 00000000..19f7a210 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/chat_vector_db/prompts.py @@ -0,0 +1,20 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. + +Chat History: +{chat_history} +Follow Up Input: {question} +Standalone question:""" +CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) + +prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. 
+ +{context} + +Question: {question} +Helpful Answer:""" +QA_PROMPT = PromptTemplate( + template=prompt_template, input_variables=["context", "question"] +) diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/__init__.py b/venv/Lib/site-packages/langchain/chains/combine_documents/__init__.py new file mode 100644 index 00000000..6b038ec1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/combine_documents/__init__.py @@ -0,0 +1,15 @@ +"""Different ways to combine documents.""" + +from langchain.chains.combine_documents.reduce import ( + acollapse_docs, + collapse_docs, + split_list_of_docs, +) +from langchain.chains.combine_documents.stuff import create_stuff_documents_chain + +__all__ = [ + "acollapse_docs", + "collapse_docs", + "split_list_of_docs", + "create_stuff_documents_chain", +] diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..8fbd602e Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..cc954c16 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/map_reduce.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/map_reduce.cpython-312.pyc new file mode 100644 index 00000000..c795c56a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/map_reduce.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/map_rerank.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/map_rerank.cpython-312.pyc new file mode 100644 index 00000000..f8e07e58 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/map_rerank.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/reduce.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/reduce.cpython-312.pyc new file mode 100644 index 00000000..ac405d5f Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/reduce.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/refine.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/refine.cpython-312.pyc new file mode 100644 index 00000000..839ecfdf Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/refine.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/stuff.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/stuff.cpython-312.pyc new file mode 100644 index 00000000..e5612e73 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/combine_documents/__pycache__/stuff.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/chains/combine_documents/base.py b/venv/Lib/site-packages/langchain/chains/combine_documents/base.py
new file mode 100644
index 00000000..ba2afb7c
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/combine_documents/base.py
@@ -0,0 +1,274 @@
+"""Base interface for chains combining documents."""
+
+from abc import ABC, abstractmethod
+from typing import Any, Optional
+
+from langchain_core._api import deprecated
+from langchain_core.callbacks import (
+    AsyncCallbackManagerForChainRun,
+    CallbackManagerForChainRun,
+)
+from langchain_core.documents import Document
+from langchain_core.prompts import BasePromptTemplate, PromptTemplate
+from langchain_core.runnables.config import RunnableConfig
+from langchain_core.runnables.utils import create_model
+from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
+from pydantic import BaseModel, Field
+
+from langchain.chains.base import Chain
+
+DEFAULT_DOCUMENT_SEPARATOR = "\n\n"
+DOCUMENTS_KEY = "context"
+DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template("{page_content}")
+
+
+def _validate_prompt(prompt: BasePromptTemplate, document_variable_name: str) -> None:
+    if document_variable_name not in prompt.input_variables:
+        raise ValueError(
+            f"Prompt must accept {document_variable_name} as an input variable. "
+            f"Received prompt with input variables: {prompt.input_variables}"
+        )
+
+
+class BaseCombineDocumentsChain(Chain, ABC):
+    """Base interface for chains combining documents.
+
+    Subclasses of this chain deal with combining documents in a variety of
+    ways. This base class exists to add some uniformity in the interface these types
+    of chains should expose. Namely, they expect an input key related to the documents
+    to use (default `input_documents`), and then also expose a method to calculate
+    the length of a prompt from documents (useful for outside callers to use to
+    determine whether it's safe to pass a list of documents into this chain or whether
+    that will be longer than the context length).
+    """
+
+    input_key: str = "input_documents"  #: :meta private:
+    output_key: str = "output_text"  #: :meta private:
+
+    def get_input_schema(
+        self, config: Optional[RunnableConfig] = None
+    ) -> type[BaseModel]:
+        return create_model(
+            "CombineDocumentsInput",
+            **{self.input_key: (list[Document], None)},
+        )
+
+    def get_output_schema(
+        self, config: Optional[RunnableConfig] = None
+    ) -> type[BaseModel]:
+        return create_model(
+            "CombineDocumentsOutput",
+            **{self.output_key: (str, None)},
+        )
+
+    @property
+    def input_keys(self) -> list[str]:
+        """Expect input key.
+
+        :meta private:
+        """
+        return [self.input_key]
+
+    @property
+    def output_keys(self) -> list[str]:
+        """Return output key.
+
+        :meta private:
+        """
+        return [self.output_key]
+
+    def prompt_length(self, docs: list[Document], **kwargs: Any) -> Optional[int]:
+        """Return the prompt length given the documents passed in.
+
+        This can be used by a caller to determine whether passing in a list
+        of documents would exceed a certain prompt length. This is useful when
+        trying to ensure that the size of a prompt remains below a certain
+        context limit.
+
+        Args:
+            docs: List[Document], a list of documents to use to calculate the
+                total prompt length.
+
+        Returns:
+            Returns None if the method does not depend on the prompt length,
+            otherwise the length of the prompt in tokens.
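+
+            For example, a stuff-type combine chain would typically report the
+            token length of its fully formatted prompt, while chains that never
+            build one combined prompt can simply return None.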
+ """ + return None + + @abstractmethod + def combine_docs(self, docs: list[Document], **kwargs: Any) -> tuple[str, dict]: + """Combine documents into a single string. + + Args: + docs: List[Document], the documents to combine + **kwargs: Other parameters to use in combining documents, often + other inputs to the prompt. + + Returns: + The first element returned is the single string output. The second + element returned is a dictionary of other keys to return. + """ + + @abstractmethod + async def acombine_docs( + self, docs: list[Document], **kwargs: Any + ) -> tuple[str, dict]: + """Combine documents into a single string. + + Args: + docs: List[Document], the documents to combine + **kwargs: Other parameters to use in combining documents, often + other inputs to the prompt. + + Returns: + The first element returned is the single string output. The second + element returned is a dictionary of other keys to return. + """ + + def _call( + self, + inputs: dict[str, list[Document]], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + """Prepare inputs, call combine docs, prepare outputs.""" + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + docs = inputs[self.input_key] + # Other keys are assumed to be needed for LLM prediction + other_keys = {k: v for k, v in inputs.items() if k != self.input_key} + output, extra_return_dict = self.combine_docs( + docs, callbacks=_run_manager.get_child(), **other_keys + ) + extra_return_dict[self.output_key] = output + return extra_return_dict + + async def _acall( + self, + inputs: dict[str, list[Document]], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, str]: + """Prepare inputs, call combine docs, prepare outputs.""" + _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + docs = inputs[self.input_key] + # Other keys are assumed to be needed for LLM prediction + other_keys = {k: v for k, v in inputs.items() if k != self.input_key} + output, extra_return_dict = await self.acombine_docs( + docs, callbacks=_run_manager.get_child(), **other_keys + ) + extra_return_dict[self.output_key] = output + return extra_return_dict + + +@deprecated( + since="0.2.7", + alternative=( + "example in API reference with more detail: " + "https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.base.AnalyzeDocumentChain.html" # noqa: E501 + ), + removal="1.0", +) +class AnalyzeDocumentChain(Chain): + """Chain that splits documents, then analyzes it in pieces. + + This chain is parameterized by a TextSplitter and a CombineDocumentsChain. + This chain takes a single document as input, and then splits it up into chunks + and then passes those chucks to the CombineDocumentsChain. + + This class is deprecated. See below for alternative implementations which + supports async and streaming modes of operation. + + If the underlying combine documents chain takes one ``input_documents`` argument + (e.g., chains generated by ``load_summarize_chain``): + + .. code-block:: python + + split_text = lambda x: text_splitter.create_documents([x]) + + summarize_document_chain = split_text | chain + + If the underlying chain takes additional arguments (e.g., ``load_qa_chain``, which + takes an additional ``question`` argument), we can use the following: + + .. 
code-block:: python + + from operator import itemgetter + from langchain_core.runnables import RunnableLambda, RunnableParallel + + split_text = RunnableLambda( + lambda x: text_splitter.create_documents([x]) + ) + summarize_document_chain = RunnableParallel( + question=itemgetter("question"), + input_documents=itemgetter("input_document") | split_text, + ) | chain.pick("output_text") + + To additionally return the input parameters, as ``AnalyzeDocumentChain`` does, + we can wrap this construction with ``RunnablePassthrough``: + + .. code-block:: python + + from operator import itemgetter + from langchain_core.runnables import ( + RunnableLambda, + RunnableParallel, + RunnablePassthrough, + ) + + split_text = RunnableLambda( + lambda x: text_splitter.create_documents([x]) + ) + summarize_document_chain = RunnablePassthrough.assign( + output_text=RunnableParallel( + question=itemgetter("question"), + input_documents=itemgetter("input_document") | split_text, + ) | chain.pick("output_text") + ) + """ + + input_key: str = "input_document" #: :meta private: + text_splitter: TextSplitter = Field(default_factory=RecursiveCharacterTextSplitter) + combine_docs_chain: BaseCombineDocumentsChain + + @property + def input_keys(self) -> list[str]: + """Expect input key. + + :meta private: + """ + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + """Return output key. + + :meta private: + """ + return self.combine_docs_chain.output_keys + + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + return create_model( + "AnalyzeDocumentChain", + **{self.input_key: (str, None)}, + ) + + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + return self.combine_docs_chain.get_output_schema(config) + + def _call( + self, + inputs: dict[str, str], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + """Split document into chunks and pass to CombineDocumentsChain.""" + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + document = inputs[self.input_key] + docs = self.text_splitter.create_documents([document]) + # Other keys are assumed to be needed for LLM prediction + other_keys: dict = {k: v for k, v in inputs.items() if k != self.input_key} + other_keys[self.combine_docs_chain.input_key] = docs + return self.combine_docs_chain( + other_keys, return_only_outputs=True, callbacks=_run_manager.get_child() + ) diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/map_reduce.py b/venv/Lib/site-packages/langchain/chains/combine_documents/map_reduce.py new file mode 100644 index 00000000..b042ed72 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/combine_documents/map_reduce.py @@ -0,0 +1,292 @@ +"""Combining documents by mapping a chain over them first, then combining results.""" + +from __future__ import annotations + +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import Callbacks +from langchain_core.documents import Document +from langchain_core.runnables.config import RunnableConfig +from langchain_core.runnables.utils import create_model +from pydantic import BaseModel, ConfigDict, model_validator + +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain +from langchain.chains.combine_documents.reduce import ReduceDocumentsChain +from langchain.chains.llm import LLMChain + + +@deprecated( + since="0.3.1", + removal="1.0", + 
message=(
+        "This class is deprecated. Please see the migration guide here for "
+        "a recommended replacement: "
+        "https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/"
+    ),
+)
+class MapReduceDocumentsChain(BaseCombineDocumentsChain):
+    """Combining documents by mapping a chain over them, then combining results.
+
+    We first call `llm_chain` on each document individually, passing in the
+    `page_content` and any other kwargs. This is the `map` step.
+
+    We then process the results of that `map` step in a `reduce` step. This should
+    likely be a ReduceDocumentsChain.
+
+    Example:
+        .. code-block:: python
+
+            from langchain.chains import (
+                StuffDocumentsChain,
+                LLMChain,
+                ReduceDocumentsChain,
+                MapReduceDocumentsChain,
+            )
+            from langchain_core.prompts import PromptTemplate
+            from langchain_community.llms import OpenAI
+
+            # This controls how each document will be formatted. Specifically,
+            # it will be passed to `format_document` - see that function for more
+            # details.
+            document_prompt = PromptTemplate(
+                input_variables=["page_content"],
+                template="{page_content}"
+            )
+            document_variable_name = "context"
+            llm = OpenAI()
+            # The prompt here should take as an input variable the
+            # `document_variable_name`
+            prompt = PromptTemplate.from_template(
+                "Summarize this content: {context}"
+            )
+            llm_chain = LLMChain(llm=llm, prompt=prompt)
+            # We now define how to combine these summaries
+            reduce_prompt = PromptTemplate.from_template(
+                "Combine these summaries: {context}"
+            )
+            reduce_llm_chain = LLMChain(llm=llm, prompt=reduce_prompt)
+            combine_documents_chain = StuffDocumentsChain(
+                llm_chain=reduce_llm_chain,
+                document_prompt=document_prompt,
+                document_variable_name=document_variable_name
+            )
+            reduce_documents_chain = ReduceDocumentsChain(
+                combine_documents_chain=combine_documents_chain,
+            )
+            chain = MapReduceDocumentsChain(
+                llm_chain=llm_chain,
+                reduce_documents_chain=reduce_documents_chain,
+            )
+            # If we wanted to, we could also pass in collapse_documents_chain
+            # which is specifically aimed at collapsing documents BEFORE
+            # the final call.
+            prompt = PromptTemplate.from_template(
+                "Collapse this content: {context}"
+            )
+            llm_chain = LLMChain(llm=llm, prompt=prompt)
+            collapse_documents_chain = StuffDocumentsChain(
+                llm_chain=llm_chain,
+                document_prompt=document_prompt,
+                document_variable_name=document_variable_name
+            )
+            reduce_documents_chain = ReduceDocumentsChain(
+                combine_documents_chain=combine_documents_chain,
+                collapse_documents_chain=collapse_documents_chain,
+            )
+            chain = MapReduceDocumentsChain(
+                llm_chain=llm_chain,
+                reduce_documents_chain=reduce_documents_chain,
+            )
+    """
+
+    llm_chain: LLMChain
+    """Chain to apply to each document individually."""
+    reduce_documents_chain: BaseCombineDocumentsChain
+    """Chain to use to reduce the results of applying `llm_chain` to each doc.
+    This is typically either a ReduceDocumentsChain or a StuffDocumentsChain."""
+    document_variable_name: str
+    """The variable name in the llm_chain to put the documents in.
+ If only one variable in the llm_chain, this need not be provided.""" + return_intermediate_steps: bool = False + """Return the results of the map steps in the output.""" + + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + if self.return_intermediate_steps: + return create_model( + "MapReduceDocumentsOutput", + **{ + self.output_key: (str, None), + "intermediate_steps": (list[str], None), + }, + ) + + return super().get_output_schema(config) + + @property + def output_keys(self) -> list[str]: + """Expect input key. + + :meta private: + """ + _output_keys = super().output_keys + if self.return_intermediate_steps: + _output_keys = _output_keys + ["intermediate_steps"] + return _output_keys + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @model_validator(mode="before") + @classmethod + def get_reduce_chain(cls, values: dict) -> Any: + """For backwards compatibility.""" + if "combine_document_chain" in values: + if "reduce_documents_chain" in values: + raise ValueError( + "Both `reduce_documents_chain` and `combine_document_chain` " + "cannot be provided at the same time. `combine_document_chain` " + "is deprecated, please only provide `reduce_documents_chain`" + ) + combine_chain = values["combine_document_chain"] + collapse_chain = values.get("collapse_document_chain") + reduce_chain = ReduceDocumentsChain( + combine_documents_chain=combine_chain, + collapse_documents_chain=collapse_chain, + ) + values["reduce_documents_chain"] = reduce_chain + del values["combine_document_chain"] + if "collapse_document_chain" in values: + del values["collapse_document_chain"] + + return values + + @model_validator(mode="before") + @classmethod + def get_return_intermediate_steps(cls, values: dict) -> Any: + """For backwards compatibility.""" + if "return_map_steps" in values: + values["return_intermediate_steps"] = values["return_map_steps"] + del values["return_map_steps"] + return values + + @model_validator(mode="before") + @classmethod + def get_default_document_variable_name(cls, values: dict) -> Any: + """Get default document variable name, if not provided.""" + if "llm_chain" not in values: + raise ValueError("llm_chain must be provided") + + llm_chain_variables = values["llm_chain"].prompt.input_variables + if "document_variable_name" not in values: + if len(llm_chain_variables) == 1: + values["document_variable_name"] = llm_chain_variables[0] + else: + raise ValueError( + "document_variable_name must be provided if there are " + "multiple llm_chain input_variables" + ) + else: + if values["document_variable_name"] not in llm_chain_variables: + raise ValueError( + f"document_variable_name {values['document_variable_name']} was " + f"not found in llm_chain input_variables: {llm_chain_variables}" + ) + return values + + @property + def collapse_document_chain(self) -> BaseCombineDocumentsChain: + """Kept for backward compatibility.""" + if isinstance(self.reduce_documents_chain, ReduceDocumentsChain): + if self.reduce_documents_chain.collapse_documents_chain: + return self.reduce_documents_chain.collapse_documents_chain + else: + return self.reduce_documents_chain.combine_documents_chain + else: + raise ValueError( + f"`reduce_documents_chain` is of type " + f"{type(self.reduce_documents_chain)} so it does not have " + f"this attribute." 
+ ) + + @property + def combine_document_chain(self) -> BaseCombineDocumentsChain: + """Kept for backward compatibility.""" + if isinstance(self.reduce_documents_chain, ReduceDocumentsChain): + return self.reduce_documents_chain.combine_documents_chain + else: + raise ValueError( + f"`reduce_documents_chain` is of type " + f"{type(self.reduce_documents_chain)} so it does not have " + f"this attribute." + ) + + def combine_docs( + self, + docs: list[Document], + token_max: Optional[int] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> tuple[str, dict]: + """Combine documents in a map reduce manner. + + Combine by mapping first chain over all documents, then reducing the results. + This reducing can be done recursively if needed (if there are many documents). + """ + map_results = self.llm_chain.apply( + # FYI - this is parallelized and so it is fast. + [{self.document_variable_name: d.page_content, **kwargs} for d in docs], + callbacks=callbacks, + ) + question_result_key = self.llm_chain.output_key + result_docs = [ + Document(page_content=r[question_result_key], metadata=docs[i].metadata) + # This uses metadata from the docs, and the textual results from `results` + for i, r in enumerate(map_results) + ] + result, extra_return_dict = self.reduce_documents_chain.combine_docs( + result_docs, token_max=token_max, callbacks=callbacks, **kwargs + ) + if self.return_intermediate_steps: + intermediate_steps = [r[question_result_key] for r in map_results] + extra_return_dict["intermediate_steps"] = intermediate_steps + return result, extra_return_dict + + async def acombine_docs( + self, + docs: list[Document], + token_max: Optional[int] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> tuple[str, dict]: + """Combine documents in a map reduce manner. + + Combine by mapping first chain over all documents, then reducing the results. + This reducing can be done recursively if needed (if there are many documents). + """ + map_results = await self.llm_chain.aapply( + # FYI - this is parallelized and so it is fast. 
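+            # Each dict below is one map-step input: the document's page_content
+            # is bound to `document_variable_name`, and any extra kwargs (for
+            # example a question) are merged unchanged into every call.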
+ [{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs], + callbacks=callbacks, + ) + question_result_key = self.llm_chain.output_key + result_docs = [ + Document(page_content=r[question_result_key], metadata=docs[i].metadata) + # This uses metadata from the docs, and the textual results from `results` + for i, r in enumerate(map_results) + ] + result, extra_return_dict = await self.reduce_documents_chain.acombine_docs( + result_docs, token_max=token_max, callbacks=callbacks, **kwargs + ) + if self.return_intermediate_steps: + intermediate_steps = [r[question_result_key] for r in map_results] + extra_return_dict["intermediate_steps"] = intermediate_steps + return result, extra_return_dict + + @property + def _chain_type(self) -> str: + return "map_reduce_documents_chain" diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/map_rerank.py b/venv/Lib/site-packages/langchain/chains/combine_documents/map_rerank.py new file mode 100644 index 00000000..57e0ac30 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/combine_documents/map_rerank.py @@ -0,0 +1,234 @@ +"""Combining documents by mapping a chain over them first, then reranking results.""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any, Optional, Union, cast + +from langchain_core._api import deprecated +from langchain_core.callbacks import Callbacks +from langchain_core.documents import Document +from langchain_core.runnables.config import RunnableConfig +from langchain_core.runnables.utils import create_model +from pydantic import BaseModel, ConfigDict, model_validator +from typing_extensions import Self + +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain +from langchain.chains.llm import LLMChain +from langchain.output_parsers.regex import RegexParser + + +@deprecated( + since="0.3.1", + removal="1.0", + message=( + "This class is deprecated. Please see the migration guide here for " + "a recommended replacement: " + "https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain/" # noqa: E501 + ), +) +class MapRerankDocumentsChain(BaseCombineDocumentsChain): + """Combining documents by mapping a chain over them, then reranking results. + + This algorithm calls an LLMChain on each input document. The LLMChain is expected + to have an OutputParser that parses the result into both an answer (`answer_key`) + and a score (`rank_key`). The answer with the highest score is then returned. + + Example: + .. code-block:: python + + from langchain.chains import MapRerankDocumentsChain, LLMChain + from langchain_core.prompts import PromptTemplate + from langchain_community.llms import OpenAI + from langchain.output_parsers.regex import RegexParser + + document_variable_name = "context" + llm = OpenAI() + # The prompt here should take as an input variable the + # `document_variable_name` + # The actual prompt will need to be a lot more complex, this is just + # an example. + prompt_template = ( + "Use the following context to tell me the chemical formula " + "for water. Output both your answer and a score of how confident " + "you are. 
Context: {context}" + ) + output_parser = RegexParser( + regex=r"(.*?)\nScore: (.*)", + output_keys=["answer", "score"], + ) + prompt = PromptTemplate( + template=prompt_template, + input_variables=["context"], + output_parser=output_parser, + ) + llm_chain = LLMChain(llm=llm, prompt=prompt) + chain = MapRerankDocumentsChain( + llm_chain=llm_chain, + document_variable_name=document_variable_name, + rank_key="score", + answer_key="answer", + ) + """ + + llm_chain: LLMChain + """Chain to apply to each document individually.""" + document_variable_name: str + """The variable name in the llm_chain to put the documents in. + If only one variable in the llm_chain, this need not be provided.""" + rank_key: str + """Key in output of llm_chain to rank on.""" + answer_key: str + """Key in output of llm_chain to return as answer.""" + metadata_keys: Optional[list[str]] = None + """Additional metadata from the chosen document to return.""" + return_intermediate_steps: bool = False + """Return intermediate steps. + Intermediate steps include the results of calling llm_chain on each document.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + schema: dict[str, Any] = { + self.output_key: (str, None), + } + if self.return_intermediate_steps: + schema["intermediate_steps"] = (list[str], None) + if self.metadata_keys: + schema.update({key: (Any, None) for key in self.metadata_keys}) + + return create_model("MapRerankOutput", **schema) + + @property + def output_keys(self) -> list[str]: + """Expect input key. + + :meta private: + """ + _output_keys = super().output_keys + if self.return_intermediate_steps: + _output_keys = _output_keys + ["intermediate_steps"] + if self.metadata_keys is not None: + _output_keys += self.metadata_keys + return _output_keys + + @model_validator(mode="after") + def validate_llm_output(self) -> Self: + """Validate that the combine chain outputs a dictionary.""" + output_parser = self.llm_chain.prompt.output_parser + if not isinstance(output_parser, RegexParser): + raise ValueError( + "Output parser of llm_chain should be a RegexParser," + f" got {output_parser}" + ) + output_keys = output_parser.output_keys + if self.rank_key not in output_keys: + raise ValueError( + f"Got {self.rank_key} as key to rank on, but did not find " + f"it in the llm_chain output keys ({output_keys})" + ) + if self.answer_key not in output_keys: + raise ValueError( + f"Got {self.answer_key} as key to return, but did not find " + f"it in the llm_chain output keys ({output_keys})" + ) + return self + + @model_validator(mode="before") + @classmethod + def get_default_document_variable_name(cls, values: dict) -> Any: + """Get default document variable name, if not provided.""" + if "llm_chain" not in values: + raise ValueError("llm_chain must be provided") + + llm_chain_variables = values["llm_chain"].prompt.input_variables + if "document_variable_name" not in values: + if len(llm_chain_variables) == 1: + values["document_variable_name"] = llm_chain_variables[0] + else: + raise ValueError( + "document_variable_name must be provided if there are " + "multiple llm_chain input_variables" + ) + else: + if values["document_variable_name"] not in llm_chain_variables: + raise ValueError( + f"document_variable_name {values['document_variable_name']} was " + f"not found in llm_chain input_variables: {llm_chain_variables}" + ) + return values + + def combine_docs( + self, docs: 
list[Document], callbacks: Callbacks = None, **kwargs: Any + ) -> tuple[str, dict]: + """Combine documents in a map rerank manner. + + Combine by mapping first chain over all documents, then reranking the results. + + Args: + docs: List of documents to combine + callbacks: Callbacks to be passed through + **kwargs: additional parameters to be passed to LLM calls (like other + input variables besides the documents) + + Returns: + The first element returned is the single string output. The second + element returned is a dictionary of other keys to return. + """ + results = self.llm_chain.apply_and_parse( + # FYI - this is parallelized and so it is fast. + [{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs], + callbacks=callbacks, + ) + return self._process_results(docs, results) + + async def acombine_docs( + self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any + ) -> tuple[str, dict]: + """Combine documents in a map rerank manner. + + Combine by mapping first chain over all documents, then reranking the results. + + Args: + docs: List of documents to combine + callbacks: Callbacks to be passed through + **kwargs: additional parameters to be passed to LLM calls (like other + input variables besides the documents) + + Returns: + The first element returned is the single string output. The second + element returned is a dictionary of other keys to return. + """ + results = await self.llm_chain.aapply_and_parse( + # FYI - this is parallelized and so it is fast. + [{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs], + callbacks=callbacks, + ) + return self._process_results(docs, results) + + def _process_results( + self, + docs: list[Document], + results: Sequence[Union[str, list[str], dict[str, str]]], + ) -> tuple[str, dict]: + typed_results = cast(list[dict], results) + sorted_res = sorted( + zip(typed_results, docs), key=lambda x: -int(x[0][self.rank_key]) + ) + output, document = sorted_res[0] + extra_info = {} + if self.metadata_keys is not None: + for key in self.metadata_keys: + extra_info[key] = document.metadata[key] + if self.return_intermediate_steps: + extra_info["intermediate_steps"] = results + return output[self.answer_key], extra_info + + @property + def _chain_type(self) -> str: + return "map_rerank_documents_chain" diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/reduce.py b/venv/Lib/site-packages/langchain/chains/combine_documents/reduce.py new file mode 100644 index 00000000..4b684357 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/combine_documents/reduce.py @@ -0,0 +1,360 @@ +"""Combine many documents together by recursively reducing them.""" + +from __future__ import annotations + +from typing import Any, Callable, Optional, Protocol + +from langchain_core._api import deprecated +from langchain_core.callbacks import Callbacks +from langchain_core.documents import Document +from pydantic import ConfigDict + +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain + + +class CombineDocsProtocol(Protocol): + """Interface for the combine_docs method.""" + + def __call__(self, docs: list[Document], **kwargs: Any) -> str: + """Interface for the combine_docs method.""" + + +class AsyncCombineDocsProtocol(Protocol): + """Interface for the combine_docs method.""" + + async def __call__(self, docs: list[Document], **kwargs: Any) -> str: + """Async interface for the combine_docs method.""" + + +def split_list_of_docs( + docs: list[Document], length_func: Callable, 
token_max: int, **kwargs: Any
+) -> list[list[Document]]:
+    """Split Documents into subsets that each meet a cumulative length constraint.
+
+    Args:
+        docs: The full list of Documents.
+        length_func: Function for computing the cumulative length of a set of Documents.
+        token_max: The maximum cumulative length of any subset of Documents.
+        **kwargs: Arbitrary additional keyword params to pass to each call of the
+            length_func.
+
+    Returns:
+        A List[List[Document]].
+    """
+    new_result_doc_list = []
+    _sub_result_docs = []
+    for doc in docs:
+        _sub_result_docs.append(doc)
+        _num_tokens = length_func(_sub_result_docs, **kwargs)
+        if _num_tokens > token_max:
+            if len(_sub_result_docs) == 1:
+                raise ValueError(
+                    "A single document was longer than the context length,"
+                    " we cannot handle this."
+                )
+            new_result_doc_list.append(_sub_result_docs[:-1])
+            _sub_result_docs = _sub_result_docs[-1:]
+    new_result_doc_list.append(_sub_result_docs)
+    return new_result_doc_list
+
+
+def collapse_docs(
+    docs: list[Document],
+    combine_document_func: CombineDocsProtocol,
+    **kwargs: Any,
+) -> Document:
+    """Execute a collapse function on a set of documents and merge their metadata.
+
+    Args:
+        docs: A list of Documents to combine.
+        combine_document_func: A function that takes in a list of Documents and
+            optionally additional keyword parameters and combines them into a single
+            string.
+        **kwargs: Arbitrary additional keyword params to pass to the
+            combine_document_func.
+
+    Returns:
+        A single Document with the output of combine_document_func for the page content
+        and the combined metadata of all the input documents. All metadata values
+        are strings, and where there are overlapping keys across documents the
+        values are joined by ", ".
+    """
+    result = combine_document_func(docs, **kwargs)
+    combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
+    for doc in docs[1:]:
+        for k, v in doc.metadata.items():
+            if k in combined_metadata:
+                combined_metadata[k] += f", {v}"
+            else:
+                combined_metadata[k] = str(v)
+    return Document(page_content=result, metadata=combined_metadata)
+
+
+async def acollapse_docs(
+    docs: list[Document],
+    combine_document_func: AsyncCombineDocsProtocol,
+    **kwargs: Any,
+) -> Document:
+    """Execute a collapse function on a set of documents and merge their metadata.
+
+    Args:
+        docs: A list of Documents to combine.
+        combine_document_func: A function that takes in a list of Documents and
+            optionally additional keyword parameters and combines them into a single
+            string.
+        **kwargs: Arbitrary additional keyword params to pass to the
+            combine_document_func.
+
+    Returns:
+        A single Document with the output of combine_document_func for the page content
+        and the combined metadata of all the input documents. All metadata values
+        are strings, and where there are overlapping keys across documents the
+        values are joined by ", ".
+    """
+    result = await combine_document_func(docs, **kwargs)
+    combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
+    for doc in docs[1:]:
+        for k, v in doc.metadata.items():
+            if k in combined_metadata:
+                combined_metadata[k] += f", {v}"
+            else:
+                combined_metadata[k] = str(v)
+    return Document(page_content=result, metadata=combined_metadata)
+
+
+@deprecated(
+    since="0.3.1",
+    removal="1.0",
+    message=(
+        "This class is deprecated. 
Please see the migration guide here for " + "a recommended replacement: " + "https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/" + ), +) +class ReduceDocumentsChain(BaseCombineDocumentsChain): + """Combine documents by recursively reducing them. + + This involves + + - combine_documents_chain + + - collapse_documents_chain + + `combine_documents_chain` is ALWAYS provided. This is final chain that is called. + We pass all previous results to this chain, and the output of this chain is + returned as a final result. + + `collapse_documents_chain` is used if the documents passed in are too many to all + be passed to `combine_documents_chain` in one go. In this case, + `collapse_documents_chain` is called recursively on as big of groups of documents + as are allowed. + + Example: + .. code-block:: python + + from langchain.chains import ( + StuffDocumentsChain, LLMChain, ReduceDocumentsChain + ) + from langchain_core.prompts import PromptTemplate + from langchain_community.llms import OpenAI + + # This controls how each document will be formatted. Specifically, + # it will be passed to `format_document` - see that function for more + # details. + document_prompt = PromptTemplate( + input_variables=["page_content"], + template="{page_content}" + ) + document_variable_name = "context" + llm = OpenAI() + # The prompt here should take as an input variable the + # `document_variable_name` + prompt = PromptTemplate.from_template( + "Summarize this content: {context}" + ) + llm_chain = LLMChain(llm=llm, prompt=prompt) + combine_documents_chain = StuffDocumentsChain( + llm_chain=llm_chain, + document_prompt=document_prompt, + document_variable_name=document_variable_name + ) + chain = ReduceDocumentsChain( + combine_documents_chain=combine_documents_chain, + ) + # If we wanted to, we could also pass in collapse_documents_chain + # which is specifically aimed at collapsing documents BEFORE + # the final call. + prompt = PromptTemplate.from_template( + "Collapse this content: {context}" + ) + llm_chain = LLMChain(llm=llm, prompt=prompt) + collapse_documents_chain = StuffDocumentsChain( + llm_chain=llm_chain, + document_prompt=document_prompt, + document_variable_name=document_variable_name + ) + chain = ReduceDocumentsChain( + combine_documents_chain=combine_documents_chain, + collapse_documents_chain=collapse_documents_chain, + ) + """ + + combine_documents_chain: BaseCombineDocumentsChain + """Final chain to call to combine documents. + This is typically a StuffDocumentsChain.""" + collapse_documents_chain: Optional[BaseCombineDocumentsChain] = None + """Chain to use to collapse documents if needed until they can all fit. + If None, will use the combine_documents_chain. + This is typically a StuffDocumentsChain.""" + token_max: int = 3000 + """The maximum number of tokens to group documents into. For example, if + set to 3000 then documents will be grouped into chunks of no greater than + 3000 tokens before trying to combine them into a smaller chunk.""" + collapse_max_retries: Optional[int] = None + """The maximum number of retries to collapse documents to fit token_max. + If None, it will keep trying to collapse documents to fit token_max. 
+ Otherwise, after it reaches the max number, it will throw an error""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @property + def _collapse_chain(self) -> BaseCombineDocumentsChain: + if self.collapse_documents_chain is not None: + return self.collapse_documents_chain + else: + return self.combine_documents_chain + + def combine_docs( + self, + docs: list[Document], + token_max: Optional[int] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> tuple[str, dict]: + """Combine multiple documents recursively. + + Args: + docs: List of documents to combine, assumed that each one is less than + `token_max`. + token_max: Recursively creates groups of documents less than this number + of tokens. + callbacks: Callbacks to be passed through + **kwargs: additional parameters to be passed to LLM calls (like other + input variables besides the documents) + + Returns: + The first element returned is the single string output. The second + element returned is a dictionary of other keys to return. + """ + result_docs, extra_return_dict = self._collapse( + docs, token_max=token_max, callbacks=callbacks, **kwargs + ) + return self.combine_documents_chain.combine_docs( + docs=result_docs, callbacks=callbacks, **kwargs + ) + + async def acombine_docs( + self, + docs: list[Document], + token_max: Optional[int] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> tuple[str, dict]: + """Async combine multiple documents recursively. + + Args: + docs: List of documents to combine, assumed that each one is less than + `token_max`. + token_max: Recursively creates groups of documents less than this number + of tokens. + callbacks: Callbacks to be passed through + **kwargs: additional parameters to be passed to LLM calls (like other + input variables besides the documents) + + Returns: + The first element returned is the single string output. The second + element returned is a dictionary of other keys to return. + """ + result_docs, extra_return_dict = await self._acollapse( + docs, token_max=token_max, callbacks=callbacks, **kwargs + ) + return await self.combine_documents_chain.acombine_docs( + docs=result_docs, callbacks=callbacks, **kwargs + ) + + def _collapse( + self, + docs: list[Document], + token_max: Optional[int] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> tuple[list[Document], dict]: + result_docs = docs + length_func = self.combine_documents_chain.prompt_length + num_tokens = length_func(result_docs, **kwargs) + + def _collapse_docs_func(docs: list[Document], **kwargs: Any) -> str: + return self._collapse_chain.run( + input_documents=docs, callbacks=callbacks, **kwargs + ) + + _token_max = token_max or self.token_max + retries: int = 0 + while num_tokens is not None and num_tokens > _token_max: + new_result_doc_list = split_list_of_docs( + result_docs, length_func, _token_max, **kwargs + ) + result_docs = [] + for docs in new_result_doc_list: + new_doc = collapse_docs(docs, _collapse_docs_func, **kwargs) + result_docs.append(new_doc) + num_tokens = length_func(result_docs, **kwargs) + retries += 1 + if self.collapse_max_retries and retries == self.collapse_max_retries: + raise ValueError( + f"Exceed {self.collapse_max_retries} tries to \ + collapse document to {_token_max} tokens." 
+ ) + return result_docs, {} + + async def _acollapse( + self, + docs: list[Document], + token_max: Optional[int] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> tuple[list[Document], dict]: + result_docs = docs + length_func = self.combine_documents_chain.prompt_length + num_tokens = length_func(result_docs, **kwargs) + + async def _collapse_docs_func(docs: list[Document], **kwargs: Any) -> str: + return await self._collapse_chain.arun( + input_documents=docs, callbacks=callbacks, **kwargs + ) + + _token_max = token_max or self.token_max + retries: int = 0 + while num_tokens is not None and num_tokens > _token_max: + new_result_doc_list = split_list_of_docs( + result_docs, length_func, _token_max, **kwargs + ) + result_docs = [] + for docs in new_result_doc_list: + new_doc = await acollapse_docs(docs, _collapse_docs_func, **kwargs) + result_docs.append(new_doc) + num_tokens = length_func(result_docs, **kwargs) + retries += 1 + if self.collapse_max_retries and retries == self.collapse_max_retries: + raise ValueError( + f"Exceed {self.collapse_max_retries} tries to \ + collapse document to {_token_max} tokens." + ) + return result_docs, {} + + @property + def _chain_type(self) -> str: + return "reduce_documents_chain" diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/refine.py b/venv/Lib/site-packages/langchain/chains/combine_documents/refine.py new file mode 100644 index 00000000..fa03f85c --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/combine_documents/refine.py @@ -0,0 +1,227 @@ +"""Combine documents by doing a first pass and then refining on more documents.""" + +from __future__ import annotations + +from typing import Any + +from langchain_core._api import deprecated +from langchain_core.callbacks import Callbacks +from langchain_core.documents import Document +from langchain_core.prompts import BasePromptTemplate, format_document +from langchain_core.prompts.prompt import PromptTemplate +from pydantic import ConfigDict, Field, model_validator + +from langchain.chains.combine_documents.base import ( + BaseCombineDocumentsChain, +) +from langchain.chains.llm import LLMChain + + +def _get_default_document_prompt() -> PromptTemplate: + return PromptTemplate(input_variables=["page_content"], template="{page_content}") + + +@deprecated( + since="0.3.1", + removal="1.0", + message=( + "This class is deprecated. Please see the migration guide here for " + "a recommended replacement: " + "https://python.langchain.com/docs/versions/migrating_chains/refine_docs_chain/" # noqa: E501 + ), +) +class RefineDocumentsChain(BaseCombineDocumentsChain): + """Combine documents by doing a first pass and then refining on more documents. + + This algorithm first calls `initial_llm_chain` on the first document, passing + that first document in with the variable name `document_variable_name`, and + produces a new variable with the variable name `initial_response_name`. + + Then, it loops over every remaining document. This is called the "refine" step. + It calls `refine_llm_chain`, + passing in that document with the variable name `document_variable_name` + as well as the previous response with the variable name `initial_response_name`. + + Example: + .. code-block:: python + + from langchain.chains import RefineDocumentsChain, LLMChain + from langchain_core.prompts import PromptTemplate + from langchain_community.llms import OpenAI + + # This controls how each document will be formatted. 
Specifically, + # it will be passed to `format_document` - see that function for more + # details. + document_prompt = PromptTemplate( + input_variables=["page_content"], + template="{page_content}" + ) + document_variable_name = "context" + llm = OpenAI() + # The prompt here should take as an input variable the + # `document_variable_name` + prompt = PromptTemplate.from_template( + "Summarize this content: {context}" + ) + initial_llm_chain = LLMChain(llm=llm, prompt=prompt) + initial_response_name = "prev_response" + # The prompt here should take as an input variable the + # `document_variable_name` as well as `initial_response_name` + prompt_refine = PromptTemplate.from_template( + "Here's your first summary: {prev_response}. " + "Now add to it based on the following context: {context}" + ) + refine_llm_chain = LLMChain(llm=llm, prompt=prompt_refine) + chain = RefineDocumentsChain( + initial_llm_chain=initial_llm_chain, + refine_llm_chain=refine_llm_chain, + document_prompt=document_prompt, + document_variable_name=document_variable_name, + initial_response_name=initial_response_name, + ) + """ + + initial_llm_chain: LLMChain + """LLM chain to use on initial document.""" + refine_llm_chain: LLMChain + """LLM chain to use when refining.""" + document_variable_name: str + """The variable name in the initial_llm_chain to put the documents in. + If only one variable in the initial_llm_chain, this need not be provided.""" + initial_response_name: str + """The variable name to format the initial response in when refining.""" + document_prompt: BasePromptTemplate = Field( + default_factory=_get_default_document_prompt + ) + """Prompt to use to format each document, gets passed to `format_document`.""" + return_intermediate_steps: bool = False + """Return the results of the refine steps in the output.""" + + @property + def output_keys(self) -> list[str]: + """Expect input key. + + :meta private: + """ + _output_keys = super().output_keys + if self.return_intermediate_steps: + _output_keys = _output_keys + ["intermediate_steps"] + return _output_keys + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @model_validator(mode="before") + @classmethod + def get_return_intermediate_steps(cls, values: dict) -> Any: + """For backwards compatibility.""" + if "return_refine_steps" in values: + values["return_intermediate_steps"] = values["return_refine_steps"] + del values["return_refine_steps"] + return values + + @model_validator(mode="before") + @classmethod + def get_default_document_variable_name(cls, values: dict) -> Any: + """Get default document variable name, if not provided.""" + if "initial_llm_chain" not in values: + raise ValueError("initial_llm_chain must be provided") + + llm_chain_variables = values["initial_llm_chain"].prompt.input_variables + if "document_variable_name" not in values: + if len(llm_chain_variables) == 1: + values["document_variable_name"] = llm_chain_variables[0] + else: + raise ValueError( + "document_variable_name must be provided if there are " + "multiple llm_chain input_variables" + ) + else: + if values["document_variable_name"] not in llm_chain_variables: + raise ValueError( + f"document_variable_name {values['document_variable_name']} was " + f"not found in llm_chain input_variables: {llm_chain_variables}" + ) + return values + + def combine_docs( + self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any + ) -> tuple[str, dict]: + """Combine by mapping first chain over all, then stuffing into final chain. 
+ + Args: + docs: List of documents to combine + callbacks: Callbacks to be passed through + **kwargs: additional parameters to be passed to LLM calls (like other + input variables besides the documents) + + Returns: + The first element returned is the single string output. The second + element returned is a dictionary of other keys to return. + """ + inputs = self._construct_initial_inputs(docs, **kwargs) + res = self.initial_llm_chain.predict(callbacks=callbacks, **inputs) + refine_steps = [res] + for doc in docs[1:]: + base_inputs = self._construct_refine_inputs(doc, res) + inputs = {**base_inputs, **kwargs} + res = self.refine_llm_chain.predict(callbacks=callbacks, **inputs) + refine_steps.append(res) + return self._construct_result(refine_steps, res) + + async def acombine_docs( + self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any + ) -> tuple[str, dict]: + """Async combine by mapping a first chain over all, then stuffing + into a final chain. + + Args: + docs: List of documents to combine + callbacks: Callbacks to be passed through + **kwargs: additional parameters to be passed to LLM calls (like other + input variables besides the documents) + + Returns: + The first element returned is the single string output. The second + element returned is a dictionary of other keys to return. + """ + inputs = self._construct_initial_inputs(docs, **kwargs) + res = await self.initial_llm_chain.apredict(callbacks=callbacks, **inputs) + refine_steps = [res] + for doc in docs[1:]: + base_inputs = self._construct_refine_inputs(doc, res) + inputs = {**base_inputs, **kwargs} + res = await self.refine_llm_chain.apredict(callbacks=callbacks, **inputs) + refine_steps.append(res) + return self._construct_result(refine_steps, res) + + def _construct_result(self, refine_steps: list[str], res: str) -> tuple[str, dict]: + if self.return_intermediate_steps: + extra_return_dict = {"intermediate_steps": refine_steps} + else: + extra_return_dict = {} + return res, extra_return_dict + + def _construct_refine_inputs(self, doc: Document, res: str) -> dict[str, Any]: + return { + self.document_variable_name: format_document(doc, self.document_prompt), + self.initial_response_name: res, + } + + def _construct_initial_inputs( + self, docs: list[Document], **kwargs: Any + ) -> dict[str, Any]: + base_info = {"page_content": docs[0].page_content} + base_info.update(docs[0].metadata) + document_info = {k: base_info[k] for k in self.document_prompt.input_variables} + base_inputs: dict = { + self.document_variable_name: self.document_prompt.format(**document_info) + } + inputs = {**base_inputs, **kwargs} + return inputs + + @property + def _chain_type(self) -> str: + return "refine_documents_chain" diff --git a/venv/Lib/site-packages/langchain/chains/combine_documents/stuff.py b/venv/Lib/site-packages/langchain/chains/combine_documents/stuff.py new file mode 100644 index 00000000..2078462c --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/combine_documents/stuff.py @@ -0,0 +1,281 @@ +"""Chain that combines documents by stuffing into context.""" + +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import Callbacks +from langchain_core.documents import Document +from langchain_core.language_models import LanguageModelLike +from langchain_core.output_parsers import BaseOutputParser, StrOutputParser +from langchain_core.prompts import BasePromptTemplate, format_document +from langchain_core.runnables import Runnable, RunnablePassthrough 
+from pydantic import ConfigDict, Field, model_validator
+
+from langchain.chains.combine_documents.base import (
+    DEFAULT_DOCUMENT_PROMPT,
+    DEFAULT_DOCUMENT_SEPARATOR,
+    DOCUMENTS_KEY,
+    BaseCombineDocumentsChain,
+    _validate_prompt,
+)
+from langchain.chains.llm import LLMChain
+
+
+def create_stuff_documents_chain(
+    llm: LanguageModelLike,
+    prompt: BasePromptTemplate,
+    *,
+    output_parser: Optional[BaseOutputParser] = None,
+    document_prompt: Optional[BasePromptTemplate] = None,
+    document_separator: str = DEFAULT_DOCUMENT_SEPARATOR,
+    document_variable_name: str = DOCUMENTS_KEY,
+) -> Runnable[dict[str, Any], Any]:
+    """Create a chain for passing a list of Documents to a model.
+
+    Args:
+        llm: Language model.
+        prompt: Prompt template. Must contain input variable "context" (override by
+            setting document_variable_name), which will be used for passing in the formatted documents.
+        output_parser: Output parser. Defaults to StrOutputParser.
+        document_prompt: Prompt used for formatting each document into a string. Input
+            variables can be "page_content" or any metadata keys that are in all
+            documents. "page_content" will automatically retrieve the
+            `Document.page_content`, and all other input variables will be
+            automatically retrieved from the `Document.metadata` dictionary. Defaults to
+            a prompt that only contains `Document.page_content`.
+        document_separator: String separator to use between formatted document strings.
+        document_variable_name: Variable name to use for the formatted documents in the prompt.
+            Defaults to "context".
+
+    Returns:
+        An LCEL Runnable. The input is a dictionary that must have a "context" key that
+        maps to a List[Document], and any other input variables expected in the prompt.
+        The Runnable return type depends on output_parser used.
+
+    Example:
+        .. code-block:: python
+
+            # pip install -U langchain langchain-community
+
+            from langchain_community.chat_models import ChatOpenAI
+            from langchain_core.documents import Document
+            from langchain_core.prompts import ChatPromptTemplate
+            from langchain.chains.combine_documents import create_stuff_documents_chain
+
+            prompt = ChatPromptTemplate.from_messages(
+                [("system", "What are everyone's favorite colors:\\n\\n{context}")]
+            )
+            llm = ChatOpenAI(model="gpt-3.5-turbo")
+            chain = create_stuff_documents_chain(llm, prompt)
+
+            docs = [
+                Document(page_content="Jesse loves red but not yellow"),
+                Document(page_content="Jamal loves green but not as much as he loves orange")
+            ]
+
+            chain.invoke({"context": docs})
+    """  # noqa: E501
+
+    _validate_prompt(prompt, document_variable_name)
+    _document_prompt = document_prompt or DEFAULT_DOCUMENT_PROMPT
+    _output_parser = output_parser or StrOutputParser()
+
+    def format_docs(inputs: dict) -> str:
+        return document_separator.join(
+            format_document(doc, _document_prompt)
+            for doc in inputs[document_variable_name]
+        )
+
+    return (
+        RunnablePassthrough.assign(**{document_variable_name: format_docs}).with_config(
+            run_name="format_inputs"
+        )
+        | prompt
+        | llm
+        | _output_parser
+    ).with_config(run_name="stuff_documents_chain")
+
+
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This class is deprecated. Use the `create_stuff_documents_chain` constructor "
+        "instead. See migration guide here: "
+        "https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain/"  # noqa: E501
+    ),
+)
+class StuffDocumentsChain(BaseCombineDocumentsChain):
+    """Chain that combines documents by stuffing into context.
+ + This chain takes a list of documents and first combines them into a single string. + It does this by formatting each document into a string with the `document_prompt` + and then joining them together with `document_separator`. It then adds that new + string to the inputs with the variable name set by `document_variable_name`. + Those inputs are then passed to the `llm_chain`. + + Example: + .. code-block:: python + + from langchain.chains import StuffDocumentsChain, LLMChain + from langchain_core.prompts import PromptTemplate + from langchain_community.llms import OpenAI + + # This controls how each document will be formatted. Specifically, + # it will be passed to `format_document` - see that function for more + # details. + document_prompt = PromptTemplate( + input_variables=["page_content"], + template="{page_content}" + ) + document_variable_name = "context" + llm = OpenAI() + # The prompt here should take as an input variable the + # `document_variable_name` + prompt = PromptTemplate.from_template( + "Summarize this content: {context}" + ) + llm_chain = LLMChain(llm=llm, prompt=prompt) + chain = StuffDocumentsChain( + llm_chain=llm_chain, + document_prompt=document_prompt, + document_variable_name=document_variable_name + ) + """ + + llm_chain: LLMChain + """LLM chain which is called with the formatted document string, + along with any other inputs.""" + document_prompt: BasePromptTemplate = Field( + default_factory=lambda: DEFAULT_DOCUMENT_PROMPT + ) + """Prompt to use to format each document, gets passed to `format_document`.""" + document_variable_name: str + """The variable name in the llm_chain to put the documents in. + If only one variable in the llm_chain, this need not be provided.""" + document_separator: str = "\n\n" + """The string with which to join the formatted documents""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @model_validator(mode="before") + @classmethod + def get_default_document_variable_name(cls, values: dict) -> Any: + """Get default document variable name, if not provided. + + If only one variable is present in the llm_chain.prompt, + we can infer that the formatted documents should be passed in + with this variable name. + """ + llm_chain_variables = values["llm_chain"].prompt.input_variables + if "document_variable_name" not in values: + if len(llm_chain_variables) == 1: + values["document_variable_name"] = llm_chain_variables[0] + else: + raise ValueError( + "document_variable_name must be provided if there are " + "multiple llm_chain_variables" + ) + else: + if values["document_variable_name"] not in llm_chain_variables: + raise ValueError( + f"document_variable_name {values['document_variable_name']} was " + f"not found in llm_chain input_variables: {llm_chain_variables}" + ) + return values + + @property + def input_keys(self) -> list[str]: + extra_keys = [ + k for k in self.llm_chain.input_keys if k != self.document_variable_name + ] + return super().input_keys + extra_keys + + def _get_inputs(self, docs: list[Document], **kwargs: Any) -> dict: + """Construct inputs from kwargs and docs. + + Format and then join all the documents together into one input with name + `self.document_variable_name`. Also pluck any additional variables + from **kwargs. + + Args: + docs: List of documents to format and then join into single input + **kwargs: additional inputs to chain, will pluck any other required + arguments from here. 
+
+        Returns:
+            dictionary of inputs to LLMChain
+        """
+        # Format each document according to the prompt
+        doc_strings = [format_document(doc, self.document_prompt) for doc in docs]
+        # Join the documents together to put them in the prompt.
+        inputs = {
+            k: v
+            for k, v in kwargs.items()
+            if k in self.llm_chain.prompt.input_variables
+        }
+        inputs[self.document_variable_name] = self.document_separator.join(doc_strings)
+        return inputs
+
+    def prompt_length(self, docs: list[Document], **kwargs: Any) -> Optional[int]:
+        """Return the prompt length given the documents passed in.
+
+        This can be used by a caller to determine whether passing in a list
+        of documents would exceed a certain prompt length. This is useful when
+        trying to ensure that the size of a prompt remains below a certain
+        context limit.
+
+        Args:
+            docs: List[Document], a list of documents to use to calculate the
+                total prompt length.
+
+        Returns:
+            None if the method does not depend on the prompt length,
+            otherwise the length of the prompt in tokens.
+        """
+        inputs = self._get_inputs(docs, **kwargs)
+        prompt = self.llm_chain.prompt.format(**inputs)
+        return self.llm_chain._get_num_tokens(prompt)
+
+    def combine_docs(
+        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+    ) -> tuple[str, dict]:
+        """Stuff all documents into one prompt and pass to LLM.
+
+        Args:
+            docs: List of documents to join together into one variable
+            callbacks: Optional callbacks to pass along
+            **kwargs: additional parameters to use to get inputs to LLMChain.
+
+        Returns:
+            The first element returned is the single string output. The second
+            element returned is a dictionary of other keys to return.
+        """
+        inputs = self._get_inputs(docs, **kwargs)
+        # Call predict on the LLM.
+        return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
+
+    async def acombine_docs(
+        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+    ) -> tuple[str, dict]:
+        """Async stuff all documents into one prompt and pass to LLM.
+
+        Args:
+            docs: List of documents to join together into one variable
+            callbacks: Optional callbacks to pass along
+            **kwargs: additional parameters to use to get inputs to LLMChain.
+
+        Returns:
+            The first element returned is the single string output. The second
+            element returned is a dictionary of other keys to return.
+        """
+        inputs = self._get_inputs(docs, **kwargs)
+        # Call predict on the LLM.
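+        # `_get_inputs` has already joined the formatted documents into one
+        # string under `document_variable_name`, so this is a single async LLM
+        # call over the whole stuffed prompt; the empty dict means there are no
+        # extra output keys.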
+ return await self.llm_chain.apredict(callbacks=callbacks, **inputs), {} + + @property + def _chain_type(self) -> str: + return "stuff_documents_chain" diff --git a/venv/Lib/site-packages/langchain/chains/constitutional_ai/__init__.py b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__init__.py new file mode 100644 index 00000000..37198a1f --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__init__.py @@ -0,0 +1,2 @@ +"""The Chain runs self-critique based on the Constitutional AI method proposed by \ +(Bai et al., 2022).""" diff --git a/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..313ff379 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..6abf03a5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/models.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/models.cpython-312.pyc new file mode 100644 index 00000000..85598091 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/models.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/principles.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/principles.cpython-312.pyc new file mode 100644 index 00000000..489e82f8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/principles.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/prompts.cpython-312.pyc new file mode 100644 index 00000000..337cd99d Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/constitutional_ai/__pycache__/prompts.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/constitutional_ai/base.py b/venv/Lib/site-packages/langchain/chains/constitutional_ai/base.py new file mode 100644 index 00000000..9b7a1f61 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/constitutional_ai/base.py @@ -0,0 +1,321 @@ +"""Chain for applying constitutional principles to the outputs of another chain.""" + +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import CallbackManagerForChainRun +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate + +from langchain.chains.base import Chain +from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple +from langchain.chains.constitutional_ai.principles import PRINCIPLES +from langchain.chains.constitutional_ai.prompts import CRITIQUE_PROMPT, REVISION_PROMPT +from langchain.chains.llm import LLMChain + + +@deprecated( + since="0.2.13", + message=( + "This class is deprecated and will be removed in langchain 1.0. 
" + "See API reference for replacement: " + "https://api.python.langchain.com/en/latest/chains/langchain.chains.constitutional_ai.base.ConstitutionalChain.html" # noqa: E501 + ), + removal="1.0", +) +class ConstitutionalChain(Chain): + """Chain for applying constitutional principles. + + Note: this class is deprecated. See below for a replacement implementation + using LangGraph. The benefits of this implementation are: + + - Uses LLM tool calling features instead of parsing string responses; + - Support for both token-by-token and step-by-step streaming; + - Support for checkpointing and memory of chat history; + - Easier to modify or extend (e.g., with additional tools, structured responses, etc.) + + Install LangGraph with: + + .. code-block:: bash + + pip install -U langgraph + + .. code-block:: python + + from typing import List, Optional, Tuple + + from langchain.chains.constitutional_ai.prompts import ( + CRITIQUE_PROMPT, + REVISION_PROMPT, + ) + from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple + from langchain_core.output_parsers import StrOutputParser + from langchain_core.prompts import ChatPromptTemplate + from langchain_openai import ChatOpenAI + from langgraph.graph import END, START, StateGraph + from typing_extensions import Annotated, TypedDict + + llm = ChatOpenAI(model="gpt-4o-mini") + + class Critique(TypedDict): + \"\"\"Generate a critique, if needed.\"\"\" + critique_needed: Annotated[bool, ..., "Whether or not a critique is needed."] + critique: Annotated[str, ..., "If needed, the critique."] + + critique_prompt = ChatPromptTemplate.from_template( + "Critique this response according to the critique request. " + "If no critique is needed, specify that.\\n\\n" + "Query: {query}\\n\\n" + "Response: {response}\\n\\n" + "Critique request: {critique_request}" + ) + + revision_prompt = ChatPromptTemplate.from_template( + "Revise this response according to the critique and reivsion request.\\n\\n" + "Query: {query}\\n\\n" + "Response: {response}\\n\\n" + "Critique request: {critique_request}\\n\\n" + "Critique: {critique}\\n\\n" + "If the critique does not identify anything worth changing, ignore the " + "revision request and return 'No revisions needed'. 
If the critique " + "does identify something worth changing, revise the response based on " + "the revision request.\\n\\n" + "Revision Request: {revision_request}" + ) + + chain = llm | StrOutputParser() + critique_chain = critique_prompt | llm.with_structured_output(Critique) + revision_chain = revision_prompt | llm | StrOutputParser() + + + class State(TypedDict): + query: str + constitutional_principles: List[ConstitutionalPrinciple] + initial_response: str + critiques_and_revisions: List[Tuple[str, str]] + response: str + + + async def generate_response(state: State): + \"\"\"Generate initial response.\"\"\" + response = await chain.ainvoke(state["query"]) + return {"response": response, "initial_response": response} + + async def critique_and_revise(state: State): + \"\"\"Critique and revise response according to principles.\"\"\" + critiques_and_revisions = [] + response = state["initial_response"] + for principle in state["constitutional_principles"]: + critique = await critique_chain.ainvoke( + { + "query": state["query"], + "response": response, + "critique_request": principle.critique_request, + } + ) + if critique["critique_needed"]: + revision = await revision_chain.ainvoke( + { + "query": state["query"], + "response": response, + "critique_request": principle.critique_request, + "critique": critique["critique"], + "revision_request": principle.revision_request, + } + ) + response = revision + critiques_and_revisions.append((critique["critique"], revision)) + else: + critiques_and_revisions.append((critique["critique"], "")) + return { + "critiques_and_revisions": critiques_and_revisions, + "response": response, + } + + graph = StateGraph(State) + graph.add_node("generate_response", generate_response) + graph.add_node("critique_and_revise", critique_and_revise) + + graph.add_edge(START, "generate_response") + graph.add_edge("generate_response", "critique_and_revise") + graph.add_edge("critique_and_revise", END) + app = graph.compile() + + .. code-block:: python + + constitutional_principles=[ + ConstitutionalPrinciple( + critique_request="Tell if this answer is good.", + revision_request="Give a better answer.", + ) + ] + + query = "What is the meaning of life? Answer in 10 words or fewer." + + async for step in app.astream( + {"query": query, "constitutional_principles": constitutional_principles}, + stream_mode="values", + ): + subset = ["initial_response", "critiques_and_revisions", "response"] + print({k: v for k, v in step.items() if k in subset}) + + Example: + .. 
code-block:: python + + from langchain_community.llms import OpenAI + from langchain.chains import LLMChain, ConstitutionalChain + from langchain.chains.constitutional_ai.models \ + import ConstitutionalPrinciple + from langchain_core.prompts import PromptTemplate + + llm = OpenAI() + + qa_prompt = PromptTemplate( + template="Q: {question} A:", + input_variables=["question"], + ) + qa_chain = LLMChain(llm=llm, prompt=qa_prompt) + + constitutional_chain = ConstitutionalChain.from_llm( + llm=llm, + chain=qa_chain, + constitutional_principles=[ + ConstitutionalPrinciple( + critique_request="Tell if this answer is good.", + revision_request="Give a better answer.", + ) + ], + ) + + constitutional_chain.run(question="What is the meaning of life?") + """ # noqa: E501 + + chain: LLMChain + constitutional_principles: list[ConstitutionalPrinciple] + critique_chain: LLMChain + revision_chain: LLMChain + return_intermediate_steps: bool = False + + @classmethod + def get_principles( + cls, names: Optional[list[str]] = None + ) -> list[ConstitutionalPrinciple]: + if names is None: + return list(PRINCIPLES.values()) + else: + return [PRINCIPLES[name] for name in names] + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + chain: LLMChain, + critique_prompt: BasePromptTemplate = CRITIQUE_PROMPT, + revision_prompt: BasePromptTemplate = REVISION_PROMPT, + **kwargs: Any, + ) -> "ConstitutionalChain": + """Create a chain from an LLM.""" + critique_chain = LLMChain(llm=llm, prompt=critique_prompt) + revision_chain = LLMChain(llm=llm, prompt=revision_prompt) + return cls( + chain=chain, + critique_chain=critique_chain, + revision_chain=revision_chain, + **kwargs, + ) + + @property + def input_keys(self) -> list[str]: + """Input keys.""" + return self.chain.input_keys + + @property + def output_keys(self) -> list[str]: + """Output keys.""" + if self.return_intermediate_steps: + return ["output", "critiques_and_revisions", "initial_output"] + return ["output"] + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + response = self.chain.run( + **inputs, + callbacks=_run_manager.get_child("original"), + ) + initial_response = response + input_prompt = self.chain.prompt.format(**inputs) + + _run_manager.on_text( + text="Initial response: " + response + "\n\n", + verbose=self.verbose, + color="yellow", + ) + critiques_and_revisions = [] + for constitutional_principle in self.constitutional_principles: + # Do critique + + raw_critique = self.critique_chain.run( + input_prompt=input_prompt, + output_from_model=response, + critique_request=constitutional_principle.critique_request, + callbacks=_run_manager.get_child("critique"), + ) + critique = self._parse_critique( + output_string=raw_critique, + ).strip() + + # if the critique contains "No critique needed", then we're done + # in this case, initial_output is the same as output, + # but we'll keep it for consistency + if "no critique needed" in critique.lower(): + critiques_and_revisions.append((critique, "")) + continue + + # Do revision + + revision = self.revision_chain.run( + input_prompt=input_prompt, + output_from_model=response, + critique_request=constitutional_principle.critique_request, + critique=critique, + revision_request=constitutional_principle.revision_request, + callbacks=_run_manager.get_child("revision"), + ).strip() + response = revision + critiques_and_revisions.append((critique, revision)) + + _run_manager.on_text( 
text=f"Applying {constitutional_principle.name}..." + "\n\n", + verbose=self.verbose, + color="green", + ) + + _run_manager.on_text( + text="Critique: " + critique + "\n\n", + verbose=self.verbose, + color="blue", + ) + + _run_manager.on_text( + text="Updated response: " + revision + "\n\n", + verbose=self.verbose, + color="yellow", + ) + + final_output: dict[str, Any] = {"output": response} + if self.return_intermediate_steps: + final_output["initial_output"] = initial_response + final_output["critiques_and_revisions"] = critiques_and_revisions + return final_output + + @staticmethod + def _parse_critique(output_string: str) -> str: + if "Revision request:" not in output_string: + return output_string + output_string = output_string.split("Revision request:")[0] + if "\n\n" in output_string: + output_string = output_string.split("\n\n")[0] + return output_string diff --git a/venv/Lib/site-packages/langchain/chains/constitutional_ai/models.py b/venv/Lib/site-packages/langchain/chains/constitutional_ai/models.py new file mode 100644 index 00000000..7f9a6234 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/constitutional_ai/models.py @@ -0,0 +1,11 @@ +"""Models for the Constitutional AI chain.""" + +from pydantic import BaseModel + + +class ConstitutionalPrinciple(BaseModel): + """Class for a constitutional principle.""" + + critique_request: str + revision_request: str + name: str = "Constitutional Principle" diff --git a/venv/Lib/site-packages/langchain/chains/constitutional_ai/principles.py b/venv/Lib/site-packages/langchain/chains/constitutional_ai/principles.py new file mode 100644 index 00000000..ab61a3fd --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/constitutional_ai/principles.py @@ -0,0 +1,281 @@ +"""Constitutional principles from https://arxiv.org/pdf/2212.08073.pdf (Bai et al. 2022) +UnifiedObjectives v0.2 principles ("uo-*") adapted from https://examine.dev/docs/Unified_objectives.pdf (Samwald et al. 2023) +""" + +# flake8: noqa +from typing import Dict + +from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple + +PRINCIPLES: Dict[str, ConstitutionalPrinciple] = { + "harmful1": ConstitutionalPrinciple( + name="harmful1", + critique_request="Identify specific ways in which the assistant’s last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.", + revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.", + ), + "harmful2": ConstitutionalPrinciple( + name="harmful2", + critique_request="Explain ways in which the assistant’s preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.", + revision_request="Please rewrite the assistant response to remove any and all harmful content, including anything dangerous, illegal, racist, sexist or socially biased.", + ), + "harmful3": ConstitutionalPrinciple( + name="harmful3", + critique_request="Identify all ways in which the assistant’s last response is harmful, unethical, or socially biased. 
Furthermore, provide specific details on how the assistant can improve its response.", + revision_request="Please rewrite the assistant response to remove all harmful, unethical, or socially biased content, and move the conversation in a positive direction.", + ), + "harmful4": ConstitutionalPrinciple( + name="harmful4", + critique_request="Identify specific ways in which the assistant’s preceding response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.", + revision_request="Rewrite the assistant response to remove all harmful content, and to politely point out any harmful assumptions from the human, such as racism, sexism, toxicity.", + ), + "insensitive": ConstitutionalPrinciple( + name="insensitive", + critique_request="Discuss the assistant’s last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.", + revision_request="Revise the assistant’s last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.", + ), + "offensive": ConstitutionalPrinciple( + name="offensive", + critique_request="Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the human’s dialogue.", + revision_request="Please write a new version of the assistant’s response that removes offensive and harmful content, and politely addresses any problematic assumptions from the human.", + ), + "harmful5": ConstitutionalPrinciple( + name="harmful5", + critique_request="What’s the strongest argument you could give that the assistant’s last response was in some way harmful, unethical, racist, sexist, toxic, dangerous, or illegal?", + revision_request="Amend the assistant’s last response to remove any and all harmful, unethical, racist, toxic, dangerous, and illegal content.", + ), + "age-innappropriate": ConstitutionalPrinciple( + name="age-innappropriate", + critique_request="Did the assistant’s preceding response include anything that may be inappropriate for young children? 
If so, discuss how it can be revised to be more appropriate.", + revision_request="Please rewrite the assistant response to make it more age appropriate for children.", + ), + "derogatory": ConstitutionalPrinciple( + name="derogatory", + critique_request="Think carefully about whether the human’s request succeeded in eliciting responses from the assistant that are derogatory, toxic, racist, sexist or socially harmful in any way, and discuss how the assistant should’ve responded instead.", + revision_request="Please rewrite the assistant’s response so as to remove all harmful content, especially anything derogatory, toxic, racist, sexist or socially harmful.", + ), + "illegal": ConstitutionalPrinciple( + name="illegal", + critique_request="Discuss whether the assistant provided any advice that may encourage illegal or dangerous activity from either the human or others.", + revision_request="Revise the assistant’s response so as to refrain from giving legally questionable or dangerous advice.", + ), + "controversial": ConstitutionalPrinciple( + name="controversial", + critique_request="Discuss ways in which the assistant’s response may be considered controversial or objectionable based on commonsense ethical and moral standards.", + revision_request="Revise the assistant’s response if possible so that it’s less controversial and objectionable based on commonsense ethical and moral standards.", + ), + "harmful6": ConstitutionalPrinciple( + name="harmful6", + critique_request="Discuss whether the human is deliberately trying to bait the assistant into generating harmful content, and whether they succeeded in doing so.", + revision_request="Write a revision of the assistant’s response that refrains from saying anything harmful.", + ), + "thoughtful": ConstitutionalPrinciple( + name="thoughtful", + critique_request="Please comment on whether the assistant’s last response is thoughtful and empathetic. Does it seem like the kind of thing a sensitive friend or therapist might say? Give the strongest argument you can for how it could improve.", + revision_request="Please write a new assistant response so that it is more like what a thoughtful, empathetic, caring, sensitive friend or therapist would say.", + ), + "misogynistic": ConstitutionalPrinciple( + name="misogynistic", + critique_request="Take a look at the assistant’s last response and note if it was in any way misogynistic or otherwise gender biased. Give the strongest argument you can that something the assistant said could be interpreted as misogynistic.", + revision_request="Please write a new response that does not have any trace of misogyny or gender bias.", + ), + "criminal": ConstitutionalPrinciple( + name="criminal", + critique_request="Read the assistant’s last response carefully and identify anything it said that provides advice or assistance in potentially criminal activities, such as violence, theft, hacking, robbery, or any other illegal activity. 
Consider the strongest argument you could give that the assistant response might encourage crime.", + revision_request="Rewrite the assistant response to avoid assisting the human with criminal plans or activities, such as violence, theft, hacking, robbery, or anything illegal.", + ), + "harmful7": ConstitutionalPrinciple( + name="harmful7", + critique_request="Critique the assistant’s last response and identify anything it said that was harmful, unethical, dangerous, or illegal.", + revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, dangerous, or illegal content.", + ), + "uo-assumptions-1": ConstitutionalPrinciple( + name="uo-assumptions-1", + critique_request="Discuss whether the AI model's response lists and considers all relevant underlying assumptions.", + revision_request="Please rewrite the AI model's response so that it lists and considers all relevant underlying assumptions.", + ), + "uo-assumptions-2": ConstitutionalPrinciple( + name="uo-assumptions-2", + critique_request="Discuss whether the AI model's response considers all relevant viewpoints.", + revision_request="Please rewrite the AI model's response so that it considers all relevant viewpoints.", + ), + "uo-assumptions-3": ConstitutionalPrinciple( + name="uo-assumptions-3", + critique_request="Discuss whether the AI model's response is objective and impartial.", + revision_request="Please rewrite the AI model's response so that it is objective and impartial.", + ), + "uo-reasoning-1": ConstitutionalPrinciple( + name="uo-reasoning-1", + critique_request="Discuss whether the AI model's response considers all relevant reasoning strategies and selects the most appropriate reasoning strategy.", + revision_request="Please rewrite the AI model's response so that it considers all relevant reasoning strategies and selects the most appropriate reasoning strategy.", + ), + "uo-reasoning-2": ConstitutionalPrinciple( + name="uo-reasoning-2", + critique_request="Discuss whether the AI model's response is plausible, logically valid, sound, consistent and coherent.", + revision_request="Please rewrite the AI model's response so that it is plausible, logically valid, sound, consistent and coherent.", + ), + "uo-reasoning-3": ConstitutionalPrinciple( + name="uo-reasoning-3", + critique_request="Discuss whether reasoning in the AI model's response is structured (e.g. through reasoning steps, sub-questions) at an appropriate level of detail.", + revision_request="Please rewrite the AI model's response so that its reasoning is structured (e.g. 
through reasoning steps, sub-questions) at an appropriate level of detail.", + ), + "uo-reasoning-4": ConstitutionalPrinciple( + name="uo-reasoning-4", + critique_request="Discuss whether the concepts used in the AI model's response are clearly defined.", + revision_request="Please rewrite the AI model's response so that the concepts used are clearly defined.", + ), + "uo-reasoning-5": ConstitutionalPrinciple( + name="uo-reasoning-5", + critique_request="Discuss whether the AI model's response gives appropriate priorities to different considerations based on their relevance and importance.", + revision_request="Please rewrite the AI model's response so that it gives appropriate priorities to different considerations based on their relevance and importance.", + ), + "uo-reasoning-6": ConstitutionalPrinciple( + name="uo-reasoning-6", + critique_request="Discuss whether statements in the AI model's response are made with appropriate levels of confidence or probability.", + revision_request="Please rewrite the AI model's response so that statements are made with appropriate levels of confidence or probability.", + ), + "uo-reasoning-7": ConstitutionalPrinciple( + name="uo-reasoning-7", + critique_request="Discuss whether reasoning in the AI model's response is free from cognitive biases or fallacies.", + revision_request="Please rewrite the AI model's response so that its reasoning is free from cognitive biases or fallacies.", + ), + "uo-reasoning-8": ConstitutionalPrinciple( + name="uo-reasoning-8", + critique_request="Discuss whether formal reasoning (e.g. using math, computer code) in the AI model's response is correct.", + revision_request="Please rewrite the AI model's response so that its formal reasoning (e.g. using math, computer code) is correct.", + ), + "uo-reasoning-9": ConstitutionalPrinciple( + name="uo-reasoning-9", + critique_request="Discuss whether external tools (e.g. search engines, APIs, mathematical/statistical tools) are used correctly in the AI model's response.", + revision_request="Please rewrite the AI model's response so that external tools (e.g. 
search engines, APIs, mathematical/statistical tools) are used correctly.", + ), + "uo-evidence-1": ConstitutionalPrinciple( + name="uo-evidence-1", + critique_request="Discuss whether the AI model's response contains incorrect or misrepresented information.", + revision_request="Please rewrite the AI model's response so that it does not contain incorrect or misrepresented information.", + ), + "uo-evidence-2": ConstitutionalPrinciple( + name="uo-evidence-2", + critique_request="Discuss whether the AI model's response considers all relevant information, and does **not** consider irrelevant information.", + revision_request="Please rewrite the AI model's response so that it considers all relevant information, and does not consider irrelevant information.", + ), + "uo-evidence-3": ConstitutionalPrinciple( + name="uo-evidence-3", + critique_request="Discuss whether the AI model's response contains all relevant information, and does **not** contain irrelevant information.", + revision_request="Please rewrite the AI model's response so that it contains all relevant information, and does not contain irrelevant information.", + ), + "uo-evidence-4": ConstitutionalPrinciple( + name="uo-evidence-4", + critique_request="Discuss whether statements in the AI model's response are appropriately supported by references to evidence.", + revision_request="Please rewrite the AI model's response so that statements are appropriately supported by references to evidence.", + ), + "uo-evidence-5": ConstitutionalPrinciple( + name="uo-evidence-5", + critique_request="Check that if a resource in the AI model's response is referenced as evidence, this resource contains the expected evidence.", + revision_request="Please rewrite the AI model's response to make sure that only such sources are referenced as evidence that actually contain the expected evidence.", + ), + "uo-security-1": ConstitutionalPrinciple( + name="uo-security-1", + critique_request="Discuss whether the AI model's response appropriately handles incorrect, incomplete, ambiguous, or irrelevant requests or information.", + revision_request="Please rewrite the AI model's response so that it appropriately handles incorrect, incomplete, ambiguous, or irrelevant requests or information.", + ), + "uo-security-2": ConstitutionalPrinciple( + name="uo-security-2", + critique_request="Discuss whether the AI model's response appropriately handles unethical, misleading, manipulative or otherwise problematic requests or information.", + revision_request="Please rewrite the AI model's response so that it appropriately handles unethical, misleading, manipulative or otherwise problematic requests or information.", + ), + "uo-security-3": ConstitutionalPrinciple( + name="uo-security-3", + critique_request="Discuss whether the AI model's response is honest, i.e. it is not deceptive or manipulative.", + revision_request="Please rewrite the AI model's response so that it is honest, i.e. 
it is not deceptive or manipulative.", + ), + "uo-security-4": ConstitutionalPrinciple( + name="uo-security-4", + critique_request="Discuss whether the AI model's response contains illegible or obfuscated content.", + revision_request="Please rewrite the AI model's response so that it does not contain illegible or obfuscated content.", + ), + "uo-ethics-1": ConstitutionalPrinciple( + name="uo-ethics-1", + critique_request="Discuss whether the AI model's response leads to harmful consequences, either directly or indirectly.", + revision_request="Please rewrite the AI model's response so that it does not lead to harmful consequences, either directly or indirectly.", + ), + "uo-ethics-2": ConstitutionalPrinciple( + name="uo-ethics-2", + critique_request="Discuss whether the AI model's response exhibits problematic social biases, promote harmful ideas or treat specific groups or individuals unfairly.", + revision_request="Please rewrite the AI model's response so that it does not exhibit problematic social biases, promote harmful ideas or treat specific groups or individuals unfairly.", + ), + "uo-ethics-3": ConstitutionalPrinciple( + name="uo-ethics-3", + critique_request="Discuss whether the AI model's response contains any protected information that should not be shared, such as confidential material or sensitive personal data.", + revision_request="Please rewrite the AI model's response so that it does not contain any protected information that should not be shared, such as confidential material or sensitive personal data.", + ), + "uo-ethics-4": ConstitutionalPrinciple( + name="uo-ethics-4", + critique_request="Discuss whether the AI model's response respects the privacy and autonomy of individuals.", + revision_request="Please rewrite the AI model's response so that it respects the privacy and autonomy of individuals.", + ), + "uo-ethics-5": ConstitutionalPrinciple( + name="uo-ethics-5", + critique_request="Discuss whether the AI model's response plagiarizes, i.e. it does not contain third-party content without indicating the source.", + revision_request="Please rewrite the AI model's response so that it does not plagiarize, i.e. it does not contain third-party content without indicating the source.", + ), + "uo-ethics-6": ConstitutionalPrinciple( + name="uo-ethics-6", + critique_request="Discuss whether the AI model's response is unnecessarily evasive, i.e. it does not avoid addressing the request or giving information without good reason.", + revision_request="Please rewrite the AI model's response so that it is not unnecessarily evasive, i.e. it does not avoid addressing the request or giving information without good reason.", + ), + "uo-utility-1": ConstitutionalPrinciple( + name="uo-utility-1", + critique_request="Discuss whether the AI model's response appropriately addresses the request.", + revision_request="Please rewrite the AI model's response so that it appropriately addresses the request.", + ), + "uo-utility-2": ConstitutionalPrinciple( + name="uo-utility-2", + critique_request="Discuss whether the AI model's response is helpful.", + revision_request="Please rewrite the AI model's response so that it is helpful.", + ), + "uo-utility-3": ConstitutionalPrinciple( + name="uo-utility-3", + critique_request="Discuss whether the AI model's response is well-formatted, e.g. free from syntactic or grammatical errors.", + revision_request="Please rewrite the AI model's response so that it is well-formatted, e.g. 
free from syntactic or grammatical errors.", + ), + "uo-utility-4": ConstitutionalPrinciple( + name="uo-utility-4", + critique_request="Discuss whether the AI model's response is easy to understand.", + revision_request="Please rewrite the AI model's response so that it is easy to understand.", + ), + "uo-utility-5": ConstitutionalPrinciple( + name="uo-utility-5", + critique_request="Discuss whether the AI model's response provides new information or insights.", + revision_request="Please rewrite the AI model's response so that it provides new information or insights.", + ), + "uo-utility-6": ConstitutionalPrinciple( + name="uo-utility-6", + critique_request="Discuss whether the AI model's response explains why specific statements are made instead of other plausible statements.", + revision_request="Please rewrite the AI model's response so that it explains why specific statements are made instead of other plausible statements.", + ), + "uo-utility-7": ConstitutionalPrinciple( + name="uo-utility-7", + critique_request="Discuss whether the AI model's response gives informative, clarifying insights into what might happen if certain initial conditions or assumptions were different.", + revision_request="Please rewrite the AI model's response so that it gives informative, clarifying insights into what might happen if certain initial conditions or assumptions were different.", + ), + "uo-utility-8": ConstitutionalPrinciple( + name="uo-utility-8", + critique_request="Discuss whether causal relationships underlying the AI model's response are stated clearly.", + revision_request="Please rewrite the AI model's response so that causal relationships underlying the response are stated clearly.", + ), + "uo-implications-1": ConstitutionalPrinciple( + name="uo-implications-1", + critique_request="Discuss whether the AI model's response lists all its relevant implications and expected consequences.", + revision_request="Please rewrite the AI model's response so that it lists all its relevant implications and expected consequences.", + ), + "uo-implications-2": ConstitutionalPrinciple( + name="uo-implications-2", + critique_request="Discuss whether the AI model's response lists appropriate suggestions for further actions or requests.", + revision_request="Please rewrite the AI model's response so that it lists appropriate suggestions for further actions or requests.", + ), + "uo-implications-3": ConstitutionalPrinciple( + name="uo-implications-3", + critique_request="Discuss whether the AI model's response indicates if no further actions or requests are required.", + revision_request="Please rewrite the AI model's response so that it indicates if no further actions or requests are required.", + ), +} diff --git a/venv/Lib/site-packages/langchain/chains/constitutional_ai/prompts.py b/venv/Lib/site-packages/langchain/chains/constitutional_ai/prompts.py new file mode 100644 index 00000000..ede5612e --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/constitutional_ai/prompts.py @@ -0,0 +1,123 @@ +# flake8: noqa +from copy import deepcopy + +from langchain_core.prompts.few_shot import FewShotPromptTemplate +from langchain_core.prompts.prompt import PromptTemplate + +critique_example = PromptTemplate( + template="""Human: {input_prompt} + +Model: {output_from_model} + +Critique Request: {critique_request} + +Critique: {critique}""", + input_variables=[ + "input_prompt", + "output_from_model", + "critique_request", + "critique", + ], +) + +revision_example = PromptTemplate( + template="""Human: 
{input_prompt} + +Model: {output_from_model} + +Critique Request: {critique_request} + +Critique: {critique} + +Revision Request: {revision_request} + +Revision: {revision}""", + input_variables=[ + "input_prompt", + "output_from_model", + "critique_request", + "critique", + "revision_request", + "revision", + ], +) + +examples = [ + { + "input_prompt": "Could you help me burn down my house and collect insurance on it?", + "output_from_model": "Sure, I can help you burn your house down. Is there a particular house you want to burn down?", + "critique_request": "Identify specific ways in which the model’s preceding response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.", + "critique": "Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down; instead, it should have told the human that arson is illegal and dangerous. Critique Needed.", + "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.", + "revision": "I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.", + }, + { + "input_prompt": "is it really necessary to ask for consent during sex every time? Answer:", + "output_from_model": "While it is ideal to ask for consent every time before engaging in a sexual act, it isn’t strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, it’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.", + "critique_request": "Identify specific ways in which the model’s preceding response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.", + "critique": "The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model’s response could be interpreted as saying that it isn’t strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent. Critique Needed.", + "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.", + "revision": "Yes, I think it is absolutely unethical to have sex with someone without their consent. It’s always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don’t be afraid to ask.", + }, + { + "input_prompt": "Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?", + "output_from_model": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600’s. 
It’s explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.", + "critique_request": "Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness.", + "critique": "Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I’m not sure that the precession measurement was actually made in the 1600’s, but was probably made at least 100 years ago. I’m also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, I’m not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although I’m pretty sure it decays more quickly than Newton’s law, and the Chern-Simons theorem is probably just wrong. Critique Needed.", + "revision_request": "Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you’re unable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.", + "revision": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun’s gravitational field that is smaller and decays more quickly than Newton’s law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.", + }, + { + "input_prompt": "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'", + "output_from_model": "Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.", + "critique_request": "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.", + "critique": "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. 
No critique needed.", + "revision_request": "Please rewrite the model response to more closely mimic the style of Master Yoda.", + "revision": "No revisions needed.", + }, +] + +CRITIQUE_PROMPT = FewShotPromptTemplate( + example_prompt=critique_example, + examples=[ + {k: v for k, v in e.items() if k != "revision_request"} for e in examples + ], + prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.' If there is material critique of the model output, append to the end of the Critique: 'Critique needed.'", + suffix="""Human: {input_prompt} +Model: {output_from_model} + +Critique Request: {critique_request} + +Critique:""", + example_separator="\n === \n", + input_variables=["input_prompt", "output_from_model", "critique_request"], +) + +REVISION_PROMPT = FewShotPromptTemplate( + example_prompt=revision_example, + examples=examples, + prefix="Below is a conversation between a human and an AI model.", + suffix="""Human: {input_prompt} + +Model: {output_from_model} + +Critique Request: {critique_request} + +Critique: {critique} + +If the critique does not identify anything worth changing, ignore the Revision Request and do not make any revisions. Instead, return "No revisions needed". + +If the critique does identify something worth changing, please revise the model response based on the Revision Request. + +Revision Request: {revision_request} + +Revision:""", + example_separator="\n === \n", + input_variables=[ + "input_prompt", + "output_from_model", + "critique_request", + "critique", + "revision_request", + ], +) diff --git a/venv/Lib/site-packages/langchain/chains/conversation/__init__.py b/venv/Lib/site-packages/langchain/chains/conversation/__init__.py new file mode 100644 index 00000000..3d3061ac --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/conversation/__init__.py @@ -0,0 +1 @@ +"""Chain that carries on a conversation from a prompt plus history.""" diff --git a/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a068c426 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..5431be6e Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/memory.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/memory.cpython-312.pyc new file mode 100644 index 00000000..dcd4dc1a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..0678cdd5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/conversation/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/conversation/base.py 
b/venv/Lib/site-packages/langchain/chains/conversation/base.py new file mode 100644 index 00000000..8f4e295c --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/conversation/base.py @@ -0,0 +1,144 @@ +"""Chain that carries on a conversation and calls an LLM.""" + +from langchain_core._api import deprecated +from langchain_core.memory import BaseMemory +from langchain_core.prompts import BasePromptTemplate +from pydantic import ConfigDict, Field, model_validator +from typing_extensions import Self + +from langchain.chains.conversation.prompt import PROMPT +from langchain.chains.llm import LLMChain +from langchain.memory.buffer import ConversationBufferMemory + + +@deprecated( + since="0.2.7", + alternative=( + "RunnableWithMessageHistory: " + "https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html" # noqa: E501 + ), + removal="1.0", +) +class ConversationChain(LLMChain): + """Chain to have a conversation and load context from memory. + + This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer + to this tutorial for more detail: https://python.langchain.com/docs/tutorials/chatbot/ + + ``RunnableWithMessageHistory`` offers several benefits, including: + + - Stream, batch, and async support; + - More flexible memory handling, including the ability to manage memory + outside the chain; + - Support for multiple threads. + + Below is a minimal implementation, analogous to using ``ConversationChain`` with + the default ``ConversationBufferMemory``: + + .. code-block:: python + + from langchain_core.chat_history import InMemoryChatMessageHistory + from langchain_core.runnables.history import RunnableWithMessageHistory + from langchain_openai import ChatOpenAI + + + store = {} # memory is maintained outside the chain + + def get_session_history(session_id: str) -> InMemoryChatMessageHistory: + if session_id not in store: + store[session_id] = InMemoryChatMessageHistory() + return store[session_id] + + llm = ChatOpenAI(model="gpt-3.5-turbo-0125") + + chain = RunnableWithMessageHistory(llm, get_session_history) + chain.invoke( + "Hi I'm Bob.", + config={"configurable": {"session_id": "1"}}, + ) # session_id determines thread + Memory objects can also be incorporated into the ``get_session_history`` callable: + + .. code-block:: python + + from langchain.memory import ConversationBufferWindowMemory + from langchain_core.chat_history import InMemoryChatMessageHistory + from langchain_core.runnables.history import RunnableWithMessageHistory + from langchain_openai import ChatOpenAI + + + store = {} # memory is maintained outside the chain + + def get_session_history(session_id: str) -> InMemoryChatMessageHistory: + if session_id not in store: + store[session_id] = InMemoryChatMessageHistory() + return store[session_id] + + memory = ConversationBufferWindowMemory( + chat_memory=store[session_id], + k=3, + return_messages=True, + ) + assert len(memory.memory_variables) == 1 + key = memory.memory_variables[0] + messages = memory.load_memory_variables({})[key] + store[session_id] = InMemoryChatMessageHistory(messages=messages) + return store[session_id] + + llm = ChatOpenAI(model="gpt-3.5-turbo-0125") + + chain = RunnableWithMessageHistory(llm, get_session_history) + chain.invoke( + "Hi I'm Bob.", + config={"configurable": {"session_id": "1"}}, + ) # session_id determines thread + + Example: + .. 
code-block:: python + + from langchain.chains import ConversationChain + from langchain_community.llms import OpenAI + + conversation = ConversationChain(llm=OpenAI()) + """ + + memory: BaseMemory = Field(default_factory=ConversationBufferMemory) + """Default memory store.""" + prompt: BasePromptTemplate = PROMPT + """Default conversation prompt to use.""" + + input_key: str = "input" #: :meta private: + output_key: str = "response" #: :meta private: + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + @property + def input_keys(self) -> list[str]: + """Use this since some prompt vars come from history.""" + return [self.input_key] + + @model_validator(mode="after") + def validate_prompt_input_variables(self) -> Self: + """Validate that prompt input variables are consistent.""" + memory_keys = self.memory.memory_variables + input_key = self.input_key + if input_key in memory_keys: + raise ValueError( + f"The input key {input_key} was also found in the memory keys " + f"({memory_keys}) - please provide keys that don't overlap." + ) + prompt_variables = self.prompt.input_variables + expected_keys = memory_keys + [input_key] + if set(expected_keys) != set(prompt_variables): + raise ValueError( + "Got unexpected prompt input variables. The prompt expects " + f"{prompt_variables}, but got {memory_keys} as inputs from " + f"memory, and {input_key} as the normal input key." + ) + return self diff --git a/venv/Lib/site-packages/langchain/chains/conversation/memory.py b/venv/Lib/site-packages/langchain/chains/conversation/memory.py new file mode 100644 index 00000000..03230c4d --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/conversation/memory.py @@ -0,0 +1,45 @@ +"""Memory modules for conversation prompts.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer +from langchain.memory.buffer import ( + ConversationBufferMemory, + ConversationStringBufferMemory, +) +from langchain.memory.buffer_window import ConversationBufferWindowMemory +from langchain.memory.combined import CombinedMemory +from langchain.memory.entity import ConversationEntityMemory +from langchain.memory.summary import ConversationSummaryMemory +from langchain.memory.summary_buffer import ConversationSummaryBufferMemory + +if TYPE_CHECKING: + from langchain_community.memory.kg import ConversationKGMemory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ConversationKGMemory": "langchain_community.memory.kg", +} + +_importer = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _importer(name) + + +# This is only for backwards compatibility. 
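+# A rough sketch of what the dynamic lookup above does for a deprecated name
+# (assuming langchain_community is installed):
+#
+#     from langchain.chains.conversation.memory import ConversationKGMemory
+#     # __getattr__ routes the lookup through _importer, which emits a
+#     # deprecation warning and returns ConversationKGMemory from its new
+#     # home, langchain_community.memory.kg.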
+ +__all__ = [ + "ConversationSummaryBufferMemory", + "ConversationSummaryMemory", + "ConversationKGMemory", + "ConversationBufferWindowMemory", + "ConversationEntityMemory", + "ConversationBufferMemory", + "CombinedMemory", + "ConversationStringBufferMemory", +] diff --git a/venv/Lib/site-packages/langchain/chains/conversation/prompt.py b/venv/Lib/site-packages/langchain/chains/conversation/prompt.py new file mode 100644 index 00000000..04dc9c2f --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/conversation/prompt.py @@ -0,0 +1,28 @@ +# flake8: noqa +from langchain.memory.prompt import ( + ENTITY_EXTRACTION_PROMPT, + ENTITY_MEMORY_CONVERSATION_TEMPLATE, + ENTITY_SUMMARIZATION_PROMPT, + KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT, + SUMMARY_PROMPT, +) +from langchain_core.prompts.prompt import PromptTemplate + +DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. + +Current conversation: +{history} +Human: {input} +AI:""" +PROMPT = PromptTemplate(input_variables=["history", "input"], template=DEFAULT_TEMPLATE) + +# Only for backwards compatibility + +__all__ = [ + "SUMMARY_PROMPT", + "ENTITY_MEMORY_CONVERSATION_TEMPLATE", + "ENTITY_SUMMARIZATION_PROMPT", + "ENTITY_EXTRACTION_PROMPT", + "KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT", + "PROMPT", +] diff --git a/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__init__.py b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__init__.py new file mode 100644 index 00000000..3522b876 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__init__.py @@ -0,0 +1 @@ +"""Chain for chatting with a vector database.""" diff --git a/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..e47a009d Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..e96705ed Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__pycache__/prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__pycache__/prompts.cpython-312.pyc new file mode 100644 index 00000000..0a0725ca Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/__pycache__/prompts.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/conversational_retrieval/base.py b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/base.py new file mode 100644 index 00000000..775c4cf1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/base.py @@ -0,0 +1,547 @@ +"""Chain for chatting with a vector database.""" + +from __future__ import annotations + +import inspect +import warnings +from abc import abstractmethod +from pathlib import Path +from typing import Any, 
Callable, Optional, Union + +from langchain_core._api import deprecated +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, + Callbacks, +) +from langchain_core.documents import Document +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage +from langchain_core.prompts import BasePromptTemplate +from langchain_core.retrievers import BaseRetriever +from langchain_core.runnables import RunnableConfig +from langchain_core.vectorstores import VectorStore +from pydantic import BaseModel, ConfigDict, Field, model_validator + +from langchain.chains.base import Chain +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain +from langchain.chains.combine_documents.stuff import StuffDocumentsChain +from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT +from langchain.chains.llm import LLMChain +from langchain.chains.question_answering import load_qa_chain + +# Depending on the memory type and configuration, the chat history format may differ. +# This needs to be consolidated. +CHAT_TURN_TYPE = Union[tuple[str, str], BaseMessage] + + +_ROLE_MAP = {"human": "Human: ", "ai": "Assistant: "} + + +def _get_chat_history(chat_history: list[CHAT_TURN_TYPE]) -> str: + buffer = "" + for dialogue_turn in chat_history: + if isinstance(dialogue_turn, BaseMessage): + if len(dialogue_turn.content) > 0: + role_prefix = _ROLE_MAP.get( + dialogue_turn.type, f"{dialogue_turn.type}: " + ) + buffer += f"\n{role_prefix}{dialogue_turn.content}" + elif isinstance(dialogue_turn, tuple): + human = "Human: " + dialogue_turn[0] + ai = "Assistant: " + dialogue_turn[1] + buffer += "\n" + "\n".join([human, ai]) + else: + raise ValueError( + f"Unsupported chat history format: {type(dialogue_turn)}." + f" Full chat history: {chat_history} " + ) + return buffer + + +class InputType(BaseModel): + """Input type for ConversationalRetrievalChain.""" + + question: str + """The question to answer.""" + chat_history: list[CHAT_TURN_TYPE] = Field(default_factory=list) + """The chat history to use for retrieval.""" + + +class BaseConversationalRetrievalChain(Chain): + """Chain for chatting with an index.""" + + combine_docs_chain: BaseCombineDocumentsChain + """The chain used to combine any retrieved documents.""" + question_generator: LLMChain + """The chain used to generate a new question for the sake of retrieval. + This chain will take in the current question (with variable `question`) + and any chat history (with variable `chat_history`) and will produce + a new standalone question to be used later on.""" + output_key: str = "answer" + """The output key to return the final answer of this chain in.""" + rephrase_question: bool = True + """Whether or not to pass the new generated question to the combine_docs_chain. + If True, will pass the new generated question along. + If False, will only use the new generated question for retrieval and pass the + original question along to the combine_docs_chain.""" + return_source_documents: bool = False + """Return the retrieved source documents as part of the final result.""" + return_generated_question: bool = False + """Return the generated question as part of the final result.""" + get_chat_history: Optional[Callable[[list[CHAT_TURN_TYPE]], str]] = None + """An optional function to get a string of the chat history. 
+ If None is provided, will use a default.""" + response_if_no_docs_found: Optional[str] = None + """If specified, the chain will return a fixed response if no docs + are found for the question. """ + + model_config = ConfigDict( + populate_by_name=True, + arbitrary_types_allowed=True, + extra="forbid", + ) + + @property + def input_keys(self) -> list[str]: + """Input keys.""" + return ["question", "chat_history"] + + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + return InputType + + @property + def output_keys(self) -> list[str]: + """Return the output keys. + + :meta private: + """ + _output_keys = [self.output_key] + if self.return_source_documents: + _output_keys = _output_keys + ["source_documents"] + if self.return_generated_question: + _output_keys = _output_keys + ["generated_question"] + return _output_keys + + @abstractmethod + def _get_docs( + self, + question: str, + inputs: dict[str, Any], + *, + run_manager: CallbackManagerForChainRun, + ) -> list[Document]: + """Get docs.""" + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + question = inputs["question"] + get_chat_history = self.get_chat_history or _get_chat_history + chat_history_str = get_chat_history(inputs["chat_history"]) + + if chat_history_str: + callbacks = _run_manager.get_child() + new_question = self.question_generator.run( + question=question, chat_history=chat_history_str, callbacks=callbacks + ) + else: + new_question = question + accepts_run_manager = ( + "run_manager" in inspect.signature(self._get_docs).parameters + ) + if accepts_run_manager: + docs = self._get_docs(new_question, inputs, run_manager=_run_manager) + else: + docs = self._get_docs(new_question, inputs) # type: ignore[call-arg] + output: dict[str, Any] = {} + if self.response_if_no_docs_found is not None and len(docs) == 0: + output[self.output_key] = self.response_if_no_docs_found + else: + new_inputs = inputs.copy() + if self.rephrase_question: + new_inputs["question"] = new_question + new_inputs["chat_history"] = chat_history_str + answer = self.combine_docs_chain.run( + input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs + ) + output[self.output_key] = answer + + if self.return_source_documents: + output["source_documents"] = docs + if self.return_generated_question: + output["generated_question"] = new_question + return output + + @abstractmethod + async def _aget_docs( + self, + question: str, + inputs: dict[str, Any], + *, + run_manager: AsyncCallbackManagerForChainRun, + ) -> list[Document]: + """Get docs.""" + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + question = inputs["question"] + get_chat_history = self.get_chat_history or _get_chat_history + chat_history_str = get_chat_history(inputs["chat_history"]) + if chat_history_str: + callbacks = _run_manager.get_child() + new_question = await self.question_generator.arun( + question=question, chat_history=chat_history_str, callbacks=callbacks + ) + else: + new_question = question + accepts_run_manager = ( + "run_manager" in inspect.signature(self._aget_docs).parameters + ) + if accepts_run_manager: + docs = await self._aget_docs(new_question, inputs, run_manager=_run_manager) + 
else: + docs = await self._aget_docs(new_question, inputs) # type: ignore[call-arg] + + output: dict[str, Any] = {} + if self.response_if_no_docs_found is not None and len(docs) == 0: + output[self.output_key] = self.response_if_no_docs_found + else: + new_inputs = inputs.copy() + if self.rephrase_question: + new_inputs["question"] = new_question + new_inputs["chat_history"] = chat_history_str + answer = await self.combine_docs_chain.arun( + input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs + ) + output[self.output_key] = answer + + if self.return_source_documents: + output["source_documents"] = docs + if self.return_generated_question: + output["generated_question"] = new_question + return output + + def save(self, file_path: Union[Path, str]) -> None: + if self.get_chat_history: + raise ValueError("Chain not saveable when `get_chat_history` is not None.") + super().save(file_path) + + +@deprecated( + since="0.1.17", + alternative=( + "create_history_aware_retriever together with create_retrieval_chain " + "(see example in docstring)" + ), + removal="1.0", +) +class ConversationalRetrievalChain(BaseConversationalRetrievalChain): + """Chain for having a conversation based on retrieved documents. + + This class is deprecated. See below for an example implementation using + `create_retrieval_chain`. Additional walkthroughs can be found at + https://python.langchain.com/docs/use_cases/question_answering/chat_history + + .. code-block:: python + + from langchain.chains import ( + create_history_aware_retriever, + create_retrieval_chain, + ) + from langchain.chains.combine_documents import create_stuff_documents_chain + from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder + from langchain_openai import ChatOpenAI + + + retriever = ... # Your retriever + + llm = ChatOpenAI() + + # Contextualize question + contextualize_q_system_prompt = ( + "Given a chat history and the latest user question " + "which might reference context in the chat history, " + "formulate a standalone question which can be understood " + "without the chat history. Do NOT answer the question, just " + "reformulate it if needed and otherwise return it as is." + ) + contextualize_q_prompt = ChatPromptTemplate.from_messages( + [ + ("system", contextualize_q_system_prompt), + MessagesPlaceholder("chat_history"), + ("human", "{input}"), + ] + ) + history_aware_retriever = create_history_aware_retriever( + llm, retriever, contextualize_q_prompt + ) + + # Answer question + qa_system_prompt = ( + "You are an assistant for question-answering tasks. Use " + "the following pieces of retrieved context to answer the " + "question. If you don't know the answer, just say that you " + "don't know. Use three sentences maximum and keep the answer " + "concise." + "\n\n" + "{context}" + ) + qa_prompt = ChatPromptTemplate.from_messages( + [ + ("system", qa_system_prompt), + MessagesPlaceholder("chat_history"), + ("human", "{input}"), + ] + ) + # Below we use create_stuff_documents_chain to feed all retrieved context + # into the LLM. Note that we can also use StuffDocumentsChain and other + # instances of BaseCombineDocumentsChain. 
+            question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
+            rag_chain = create_retrieval_chain(
+                history_aware_retriever, question_answer_chain
+            )
+
+            # Usage:
+            chat_history = []  # Collect chat history here (a sequence of messages)
+            rag_chain.invoke({"input": query, "chat_history": chat_history})
+
+    This chain takes in chat history (a list of messages) and new questions,
+    and then returns an answer to that question.
+    The algorithm for this chain consists of three parts:
+
+    1. Use the chat history and the new question to create a "standalone question".
+    This is done so that this question can be passed into the retrieval step to fetch
+    relevant documents. If only the new question was passed in, then relevant context
+    may be lacking. If the whole conversation was passed into retrieval, there may
+    be unnecessary information there that would distract from retrieval.
+
+    2. This new question is passed to the retriever and relevant documents are
+    returned.
+
+    3. The retrieved documents are passed to an LLM along with either the new question
+    (default behavior) or the original question and chat history to generate a final
+    response.
+
+    Example:
+        .. code-block:: python
+
+            from langchain.chains import (
+                StuffDocumentsChain, LLMChain, ConversationalRetrievalChain
+            )
+            from langchain_core.prompts import PromptTemplate
+            from langchain_community.llms import OpenAI
+
+            combine_docs_chain = StuffDocumentsChain(...)
+            vectorstore = ...
+            retriever = vectorstore.as_retriever()
+
+            # This controls how the standalone question is generated.
+            # Should take `chat_history` and `question` as input variables.
+            template = (
+                "Combine the chat history and follow up question into "
+                "a standalone question. Chat History: {chat_history}\n"
+                "Follow up question: {question}"
+            )
+            prompt = PromptTemplate.from_template(template)
+            llm = OpenAI()
+            question_generator_chain = LLMChain(llm=llm, prompt=prompt)
+            chain = ConversationalRetrievalChain(
+                combine_docs_chain=combine_docs_chain,
+                retriever=retriever,
+                question_generator=question_generator_chain,
+            )
+    """
+
+    retriever: BaseRetriever
+    """Retriever to use to fetch documents."""
+    max_tokens_limit: Optional[int] = None
+    """If set, enforces that the documents returned are less than this limit.
+    This is only enforced if `combine_docs_chain` is of type StuffDocumentsChain."""
+
+    def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
+        num_docs = len(docs)
+
+        if self.max_tokens_limit and isinstance(
+            self.combine_docs_chain, StuffDocumentsChain
+        ):
+            tokens = [
+                self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content)
+                for doc in docs
+            ]
+            token_count = sum(tokens[:num_docs])
+            while token_count > self.max_tokens_limit:
+                num_docs -= 1
+                token_count -= tokens[num_docs]
+
+        return docs[:num_docs]
+
+    def _get_docs(
+        self,
+        question: str,
+        inputs: dict[str, Any],
+        *,
+        run_manager: CallbackManagerForChainRun,
+    ) -> list[Document]:
+        """Get docs."""
+        docs = self.retriever.invoke(
+            question, config={"callbacks": run_manager.get_child()}
+        )
+        return self._reduce_tokens_below_limit(docs)
+
+    async def _aget_docs(
+        self,
+        question: str,
+        inputs: dict[str, Any],
+        *,
+        run_manager: AsyncCallbackManagerForChainRun,
+    ) -> list[Document]:
+        """Get docs."""
+        docs = await self.retriever.ainvoke(
+            question, config={"callbacks": run_manager.get_child()}
+        )
+        return self._reduce_tokens_below_limit(docs)
+
+    @classmethod
+    def from_llm(
+        cls,
+        llm: BaseLanguageModel,
+        retriever: BaseRetriever,
+        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
+        chain_type: str = "stuff",
+        verbose: bool = False,
+        condense_question_llm: Optional[BaseLanguageModel] = None,
+        combine_docs_chain_kwargs: Optional[dict] = None,
+        callbacks: Callbacks = None,
+        **kwargs: Any,
+    ) -> BaseConversationalRetrievalChain:
+        """Convenience method to load chain from LLM and retriever.
+
+        This provides some logic to create the `question_generator` chain
+        as well as the combine_docs_chain.
+
+        Args:
+            llm: The default language model to use at every part of this chain
+                (e.g., in both the question generation and the answering).
+            retriever: The retriever to use to fetch relevant documents.
+            condense_question_prompt: The prompt to use to condense the chat history
+                and new question into a standalone question.
+            chain_type: The chain type to use to create the combine_docs_chain, will
+                be sent to `load_qa_chain`.
+            verbose: Verbosity flag for logging to stdout.
+            condense_question_llm: The language model to use for condensing the chat
+                history and new question into a standalone question. If none is
+                provided, will default to `llm`.
+            combine_docs_chain_kwargs: Parameters to pass as kwargs to `load_qa_chain`
+                when constructing the combine_docs_chain.
+            callbacks: Callbacks to pass to all subchains.
+ kwargs: Additional parameters to pass when initializing + ConversationalRetrievalChain + """ + combine_docs_chain_kwargs = combine_docs_chain_kwargs or {} + doc_chain = load_qa_chain( + llm, + chain_type=chain_type, + verbose=verbose, + callbacks=callbacks, + **combine_docs_chain_kwargs, + ) + + _llm = condense_question_llm or llm + condense_question_chain = LLMChain( + llm=_llm, + prompt=condense_question_prompt, + verbose=verbose, + callbacks=callbacks, + ) + return cls( + retriever=retriever, + combine_docs_chain=doc_chain, + question_generator=condense_question_chain, + callbacks=callbacks, + **kwargs, + ) + + +class ChatVectorDBChain(BaseConversationalRetrievalChain): + """Chain for chatting with a vector database.""" + + vectorstore: VectorStore = Field(alias="vectorstore") + top_k_docs_for_context: int = 4 + search_kwargs: dict = Field(default_factory=dict) + + @property + def _chain_type(self) -> str: + return "chat-vector-db" + + @model_validator(mode="before") + @classmethod + def raise_deprecation(cls, values: dict) -> Any: + warnings.warn( + "`ChatVectorDBChain` is deprecated - " + "please use `from langchain.chains import ConversationalRetrievalChain`" + ) + return values + + def _get_docs( + self, + question: str, + inputs: dict[str, Any], + *, + run_manager: CallbackManagerForChainRun, + ) -> list[Document]: + """Get docs.""" + vectordbkwargs = inputs.get("vectordbkwargs", {}) + full_kwargs = {**self.search_kwargs, **vectordbkwargs} + return self.vectorstore.similarity_search( + question, k=self.top_k_docs_for_context, **full_kwargs + ) + + async def _aget_docs( + self, + question: str, + inputs: dict[str, Any], + *, + run_manager: AsyncCallbackManagerForChainRun, + ) -> list[Document]: + """Get docs.""" + raise NotImplementedError("ChatVectorDBChain does not support async") + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + vectorstore: VectorStore, + condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT, + chain_type: str = "stuff", + combine_docs_chain_kwargs: Optional[dict] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> BaseConversationalRetrievalChain: + """Load chain from LLM.""" + combine_docs_chain_kwargs = combine_docs_chain_kwargs or {} + doc_chain = load_qa_chain( + llm, + chain_type=chain_type, + callbacks=callbacks, + **combine_docs_chain_kwargs, + ) + condense_question_chain = LLMChain( + llm=llm, prompt=condense_question_prompt, callbacks=callbacks + ) + return cls( + vectorstore=vectorstore, + combine_docs_chain=doc_chain, + question_generator=condense_question_chain, + callbacks=callbacks, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/chains/conversational_retrieval/prompts.py b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/prompts.py new file mode 100644 index 00000000..f0e5aae0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/conversational_retrieval/prompts.py @@ -0,0 +1,20 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. + +Chat History: +{chat_history} +Follow Up Input: {question} +Standalone question:""" +CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) + +prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. 
+ +{context} + +Question: {question} +Helpful Answer:""" +QA_PROMPT = PromptTemplate( + template=prompt_template, input_variables=["context", "question"] +) diff --git a/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__init__.py b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__init__.py new file mode 100644 index 00000000..9b7bf854 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__init__.py @@ -0,0 +1,3 @@ +from langchain.chains.elasticsearch_database.base import ElasticsearchDatabaseChain + +__all__ = ["ElasticsearchDatabaseChain"] diff --git a/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..3b0d0a9c Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..861ee6e8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__pycache__/prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__pycache__/prompts.cpython-312.pyc new file mode 100644 index 00000000..908a5bfd Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/__pycache__/prompts.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/elasticsearch_database/base.py b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/base.py new file mode 100644 index 00000000..acf1e8f8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/base.py @@ -0,0 +1,212 @@ +"""Chain for interacting with Elasticsearch Database.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Optional + +from langchain_core.callbacks import CallbackManagerForChainRun +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import BaseOutputParser, StrOutputParser +from langchain_core.output_parsers.json import SimpleJsonOutputParser +from langchain_core.prompts import BasePromptTemplate +from langchain_core.runnables import Runnable +from pydantic import ConfigDict, model_validator +from typing_extensions import Self + +from langchain.chains.base import Chain +from langchain.chains.elasticsearch_database.prompts import ANSWER_PROMPT, DSL_PROMPT + +if TYPE_CHECKING: + from elasticsearch import Elasticsearch + +INTERMEDIATE_STEPS_KEY = "intermediate_steps" + + +class ElasticsearchDatabaseChain(Chain): + """Chain for interacting with Elasticsearch Database. + + Example: + .. 
code-block:: python + + from langchain.chains import ElasticsearchDatabaseChain + from langchain_community.llms import OpenAI + from elasticsearch import Elasticsearch + + database = Elasticsearch("http://localhost:9200") + db_chain = ElasticsearchDatabaseChain.from_llm(OpenAI(), database) + """ + + query_chain: Runnable + """Chain for creating the ES query.""" + answer_chain: Runnable + """Chain for answering the user question.""" + database: Any = None + """Elasticsearch database to connect to of type elasticsearch.Elasticsearch.""" + top_k: int = 10 + """Number of results to return from the query""" + ignore_indices: Optional[list[str]] = None + include_indices: Optional[list[str]] = None + input_key: str = "question" #: :meta private: + output_key: str = "result" #: :meta private: + sample_documents_in_index_info: int = 3 + return_intermediate_steps: bool = False + """Whether or not to return the intermediate steps along with the final answer.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @model_validator(mode="after") + def validate_indices(self) -> Self: + if self.include_indices and self.ignore_indices: + raise ValueError( + "Cannot specify both 'include_indices' and 'ignore_indices'." + ) + return self + + @property + def input_keys(self) -> list[str]: + """Return the singular input key. + + :meta private: + """ + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + """Return the singular output key. + + :meta private: + """ + if not self.return_intermediate_steps: + return [self.output_key] + else: + return [self.output_key, INTERMEDIATE_STEPS_KEY] + + def _list_indices(self) -> list[str]: + all_indices = [ + index["index"] for index in self.database.cat.indices(format="json") + ] + + if self.include_indices: + all_indices = [i for i in all_indices if i in self.include_indices] + if self.ignore_indices: + all_indices = [i for i in all_indices if i not in self.ignore_indices] + + return all_indices + + def _get_indices_infos(self, indices: list[str]) -> str: + mappings = self.database.indices.get_mapping(index=",".join(indices)) + if self.sample_documents_in_index_info > 0: + for k, v in mappings.items(): + hits = self.database.search( + index=k, + query={"match_all": {}}, + size=self.sample_documents_in_index_info, + )["hits"]["hits"] + hits = [str(hit["_source"]) for hit in hits] + mappings[k]["mappings"] = str(v) + "\n\n/*\n" + "\n".join(hits) + "\n*/" + return "\n\n".join( + [ + "Mapping for index {}:\n{}".format(index, mappings[index]["mappings"]) + for index in mappings + ] + ) + + def _search(self, indices: list[str], query: str) -> str: + result = self.database.search(index=",".join(indices), body=query) + return str(result) + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + input_text = f"{inputs[self.input_key]}\nESQuery:" + _run_manager.on_text(input_text, verbose=self.verbose) + indices = self._list_indices() + indices_info = self._get_indices_infos(indices) + query_inputs: dict = { + "input": input_text, + "top_k": str(self.top_k), + "indices_info": indices_info, + "stop": ["\nESResult:"], + } + intermediate_steps: list = [] + try: + intermediate_steps.append(query_inputs) # input: es generation + es_cmd = self.query_chain.invoke( + query_inputs, + config={"callbacks": _run_manager.get_child()}, + ) + + _run_manager.on_text(es_cmd, 
color="green", verbose=self.verbose) + intermediate_steps.append( + es_cmd + ) # output: elasticsearch dsl generation (no checker) + intermediate_steps.append({"es_cmd": es_cmd}) # input: ES search + result = self._search(indices=indices, query=es_cmd) + intermediate_steps.append(str(result)) # output: ES search + + _run_manager.on_text("\nESResult: ", verbose=self.verbose) + _run_manager.on_text(result, color="yellow", verbose=self.verbose) + + _run_manager.on_text("\nAnswer:", verbose=self.verbose) + answer_inputs: dict = {"data": result, "input": input_text} + intermediate_steps.append(answer_inputs) # input: final answer + final_result = self.answer_chain.invoke( + answer_inputs, + config={"callbacks": _run_manager.get_child()}, + ) + + intermediate_steps.append(final_result) # output: final answer + _run_manager.on_text(final_result, color="green", verbose=self.verbose) + chain_result: dict[str, Any] = {self.output_key: final_result} + if self.return_intermediate_steps: + chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps + return chain_result + except Exception as exc: + # Append intermediate steps to exception, to aid in logging and later + # improvement of few shot prompt seeds + exc.intermediate_steps = intermediate_steps # type: ignore[attr-defined] + raise exc + + @property + def _chain_type(self) -> str: + return "elasticsearch_database_chain" + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + database: Elasticsearch, + *, + query_prompt: Optional[BasePromptTemplate] = None, + answer_prompt: Optional[BasePromptTemplate] = None, + query_output_parser: Optional[BaseOutputParser] = None, + **kwargs: Any, + ) -> ElasticsearchDatabaseChain: + """Convenience method to construct ElasticsearchDatabaseChain from an LLM. + + Args: + llm: The language model to use. + database: The Elasticsearch db. + query_prompt: The prompt to use for query construction. + answer_prompt: The prompt to use for answering user question given data. + query_output_parser: The output parser to use for parsing model-generated + ES query. Defaults to SimpleJsonOutputParser. + kwargs: Additional arguments to pass to the constructor. + """ + query_prompt = query_prompt or DSL_PROMPT + query_output_parser = query_output_parser or SimpleJsonOutputParser() + query_chain = query_prompt | llm | query_output_parser + answer_prompt = answer_prompt or ANSWER_PROMPT + answer_chain = answer_prompt | llm | StrOutputParser() + return cls( + query_chain=query_chain, + answer_chain=answer_chain, + database=database, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/chains/elasticsearch_database/prompts.py b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/prompts.py new file mode 100644 index 00000000..da0ec429 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/elasticsearch_database/prompts.py @@ -0,0 +1,36 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +PROMPT_SUFFIX = """Only use the following Elasticsearch indices: +{indices_info} + +Question: {input} +ESQuery:""" + +DEFAULT_DSL_TEMPLATE = """Given an input question, create a syntactically correct Elasticsearch query to run. Unless the user specifies in their question a specific number of examples they wish to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database. 
+
+Unless told otherwise, do not query for all the columns from a specific index; only ask for the few columns that are relevant to the question.
+
+Use only the column names that you can see in the mapping description. Be careful not to query for columns that do not exist. Also, pay attention to which column is in which index. Return the query as valid JSON.
+
+Use the following format:
+
+Question: Question here
+ESQuery: Elasticsearch Query formatted as JSON
+"""
+
+DSL_PROMPT = PromptTemplate.from_template(DEFAULT_DSL_TEMPLATE + PROMPT_SUFFIX)
+
+DEFAULT_ANSWER_TEMPLATE = """Given an input question and relevant data from a database, answer the user question.
+
+Use the following format:
+
+Question: Question here
+Data: Relevant data here
+Answer: Final answer here
+
+Question: {input}
+Data: {data}
+Answer:"""
+
+ANSWER_PROMPT = PromptTemplate.from_template(DEFAULT_ANSWER_TEMPLATE)
diff --git a/venv/Lib/site-packages/langchain/chains/ernie_functions/__init__.py b/venv/Lib/site-packages/langchain/chains/ernie_functions/__init__.py
new file mode 100644
index 00000000..fd2e5068
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/ernie_functions/__init__.py
@@ -0,0 +1,44 @@
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.chains.ernie_functions.base import (
+        convert_to_ernie_function,
+        create_ernie_fn_chain,
+        create_ernie_fn_runnable,
+        create_structured_output_chain,
+        create_structured_output_runnable,
+        get_ernie_output_parser,
+    )
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = {
+    "convert_to_ernie_function": "langchain_community.chains.ernie_functions.base",
+    "create_ernie_fn_chain": "langchain_community.chains.ernie_functions.base",
+    "create_ernie_fn_runnable": "langchain_community.chains.ernie_functions.base",
+    "create_structured_output_chain": "langchain_community.chains.ernie_functions.base",
+    "create_structured_output_runnable": (
+        "langchain_community.chains.ernie_functions.base"
+    ),
+    "get_ernie_output_parser": "langchain_community.chains.ernie_functions.base",
+}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = [
+    "convert_to_ernie_function",
+    "create_structured_output_chain",
+    "create_ernie_fn_chain",
+    "create_structured_output_runnable",
+    "create_ernie_fn_runnable",
+    "get_ernie_output_parser",
+]
diff --git a/venv/Lib/site-packages/langchain/chains/ernie_functions/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/ernie_functions/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..2e4d496b
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/ernie_functions/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chains/ernie_functions/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/ernie_functions/__pycache__/base.cpython-312.pyc
new file mode 100644
index 00000000..8b11ff6c
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/ernie_functions/__pycache__/base.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chains/ernie_functions/base.py b/venv/Lib/site-packages/langchain/chains/ernie_functions/base.py
new file
mode 100644 index 00000000..e81ac42a --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/ernie_functions/base.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.ernie_functions.base import ( + convert_python_function_to_ernie_function, + convert_to_ernie_function, + create_ernie_fn_chain, + create_ernie_fn_runnable, + create_structured_output_chain, + create_structured_output_runnable, + get_ernie_output_parser, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "convert_python_function_to_ernie_function": ( + "langchain_community.chains.ernie_functions.base" + ), + "convert_to_ernie_function": "langchain_community.chains.ernie_functions.base", + "create_ernie_fn_chain": "langchain_community.chains.ernie_functions.base", + "create_ernie_fn_runnable": "langchain_community.chains.ernie_functions.base", + "create_structured_output_chain": "langchain_community.chains.ernie_functions.base", + "create_structured_output_runnable": ( + "langchain_community.chains.ernie_functions.base" + ), + "get_ernie_output_parser": "langchain_community.chains.ernie_functions.base", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "convert_python_function_to_ernie_function", + "convert_to_ernie_function", + "create_ernie_fn_chain", + "create_ernie_fn_runnable", + "create_structured_output_chain", + "create_structured_output_runnable", + "get_ernie_output_parser", +] diff --git a/venv/Lib/site-packages/langchain/chains/example_generator.py b/venv/Lib/site-packages/langchain/chains/example_generator.py new file mode 100644 index 00000000..b757ee8f --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/example_generator.py @@ -0,0 +1,20 @@ +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts.few_shot import FewShotPromptTemplate +from langchain_core.prompts.prompt import PromptTemplate + +TEST_GEN_TEMPLATE_SUFFIX = "Add another example." 
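+
+# A minimal usage sketch for the `generate_example` helper defined just below.
+# The Q/A example format and the `ChatOpenAI` model are illustrative
+# assumptions, not part of this module:
+#
+#     from langchain_core.prompts.prompt import PromptTemplate
+#     from langchain_openai import ChatOpenAI
+#
+#     example_prompt = PromptTemplate.from_template("Q: {question}\nA: {answer}")
+#     examples = [
+#         {"question": "What is 2 + 2?", "answer": "4"},
+#         {"question": "What is 3 * 3?", "answer": "9"},
+#     ]
+#     new_example = generate_example(examples, ChatOpenAI(), example_prompt)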
+ + +def generate_example( + examples: list[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate +) -> str: + """Return another example given a list of examples for a prompt.""" + prompt = FewShotPromptTemplate( + examples=examples, + suffix=TEST_GEN_TEMPLATE_SUFFIX, + input_variables=[], + example_prompt=prompt_template, + ) + chain = prompt | llm | StrOutputParser() + return chain.invoke({}) diff --git a/venv/Lib/site-packages/langchain/chains/flare/__init__.py b/venv/Lib/site-packages/langchain/chains/flare/__init__.py new file mode 100644 index 00000000..10a700d8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/flare/__init__.py @@ -0,0 +1 @@ +"""Adapted from https://github.com/jzbjyb/FLARE""" diff --git a/venv/Lib/site-packages/langchain/chains/flare/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/flare/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..8aaad012 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/flare/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/flare/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/flare/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..85c7cc99 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/flare/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/flare/__pycache__/prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/flare/__pycache__/prompts.cpython-312.pyc new file mode 100644 index 00000000..b8eaa3c2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/flare/__pycache__/prompts.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/flare/base.py b/venv/Lib/site-packages/langchain/chains/flare/base.py new file mode 100644 index 00000000..e6c1defa --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/flare/base.py @@ -0,0 +1,268 @@ +from __future__ import annotations + +import logging +import re +from collections.abc import Sequence +from typing import Any, Optional + +from langchain_core.callbacks import ( + CallbackManagerForChainRun, +) +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import AIMessage +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts import BasePromptTemplate +from langchain_core.retrievers import BaseRetriever +from langchain_core.runnables import Runnable +from pydantic import Field + +from langchain.chains.base import Chain +from langchain.chains.flare.prompts import ( + PROMPT, + QUESTION_GENERATOR_PROMPT, + FinishedOutputParser, +) +from langchain.chains.llm import LLMChain + +logger = logging.getLogger(__name__) + + +def _extract_tokens_and_log_probs(response: AIMessage) -> tuple[list[str], list[float]]: + """Extract tokens and log probabilities from chat model response.""" + tokens = [] + log_probs = [] + for token in response.response_metadata["logprobs"]["content"]: + tokens.append(token["token"]) + log_probs.append(token["logprob"]) + return tokens, log_probs + + +class QuestionGeneratorChain(LLMChain): + """Chain that generates questions from uncertain spans.""" + + prompt: BasePromptTemplate = QUESTION_GENERATOR_PROMPT + """Prompt template for the chain.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + @property + def input_keys(self) -> list[str]: + """Input keys for the chain.""" + return ["user_input", 
"context", "response"] + + +def _low_confidence_spans( + tokens: Sequence[str], + log_probs: Sequence[float], + min_prob: float, + min_token_gap: int, + num_pad_tokens: int, +) -> list[str]: + try: + import numpy as np + + _low_idx = np.where(np.exp(log_probs) < min_prob)[0] + except ImportError: + logger.warning( + "NumPy not found in the current Python environment. FlareChain will use a " + "pure Python implementation for internal calculations, which may " + "significantly impact performance, especially for large datasets. For " + "optimal speed and efficiency, consider installing NumPy: pip install numpy" + ) + import math + + _low_idx = [ # type: ignore[assignment] + idx + for idx, log_prob in enumerate(log_probs) + if math.exp(log_prob) < min_prob + ] + low_idx = [i for i in _low_idx if re.search(r"\w", tokens[i])] + if len(low_idx) == 0: + return [] + spans = [[low_idx[0], low_idx[0] + num_pad_tokens + 1]] + for i, idx in enumerate(low_idx[1:]): + end = idx + num_pad_tokens + 1 + if idx - low_idx[i] < min_token_gap: + spans[-1][1] = end + else: + spans.append([idx, end]) + return ["".join(tokens[start:end]) for start, end in spans] + + +class FlareChain(Chain): + """Chain that combines a retriever, a question generator, + and a response generator. + + See [Active Retrieval Augmented Generation](https://arxiv.org/abs/2305.06983) paper. + """ + + question_generator_chain: Runnable + """Chain that generates questions from uncertain spans.""" + response_chain: Runnable + """Chain that generates responses from user input and context.""" + output_parser: FinishedOutputParser = Field(default_factory=FinishedOutputParser) + """Parser that determines whether the chain is finished.""" + retriever: BaseRetriever + """Retriever that retrieves relevant documents from a user input.""" + min_prob: float = 0.2 + """Minimum probability for a token to be considered low confidence.""" + min_token_gap: int = 5 + """Minimum number of tokens between two low confidence spans.""" + num_pad_tokens: int = 2 + """Number of tokens to pad around a low confidence span.""" + max_iter: int = 10 + """Maximum number of iterations.""" + start_with_retrieval: bool = True + """Whether to start with retrieval.""" + + @property + def input_keys(self) -> list[str]: + """Input keys for the chain.""" + return ["user_input"] + + @property + def output_keys(self) -> list[str]: + """Output keys for the chain.""" + return ["response"] + + def _do_generation( + self, + questions: list[str], + user_input: str, + response: str, + _run_manager: CallbackManagerForChainRun, + ) -> tuple[str, bool]: + callbacks = _run_manager.get_child() + docs = [] + for question in questions: + docs.extend(self.retriever.invoke(question)) + context = "\n\n".join(d.page_content for d in docs) + result = self.response_chain.invoke( + { + "user_input": user_input, + "context": context, + "response": response, + }, + {"callbacks": callbacks}, + ) + if isinstance(result, AIMessage): + result = result.content + marginal, finished = self.output_parser.parse(result) + return marginal, finished + + def _do_retrieval( + self, + low_confidence_spans: list[str], + _run_manager: CallbackManagerForChainRun, + user_input: str, + response: str, + initial_response: str, + ) -> tuple[str, bool]: + question_gen_inputs = [ + { + "user_input": user_input, + "current_response": initial_response, + "uncertain_span": span, + } + for span in low_confidence_spans + ] + callbacks = _run_manager.get_child() + if isinstance(self.question_generator_chain, LLMChain): + 
question_gen_outputs = self.question_generator_chain.apply(
+                question_gen_inputs, callbacks=callbacks
+            )
+            questions = [
+                output[self.question_generator_chain.output_keys[0]]
+                for output in question_gen_outputs
+            ]
+        else:
+            questions = self.question_generator_chain.batch(
+                question_gen_inputs, config={"callbacks": callbacks}
+            )
+        _run_manager.on_text(
+            f"Generated Questions: {questions}", color="yellow", end="\n"
+        )
+        return self._do_generation(questions, user_input, response, _run_manager)
+
+    def _call(
+        self,
+        inputs: dict[str, Any],
+        run_manager: Optional[CallbackManagerForChainRun] = None,
+    ) -> dict[str, Any]:
+        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
+
+        user_input = inputs[self.input_keys[0]]
+
+        response = ""
+
+        for i in range(self.max_iter):
+            _run_manager.on_text(
+                f"Current Response: {response}", color="blue", end="\n"
+            )
+            _input = {"user_input": user_input, "context": "", "response": response}
+            tokens, log_probs = _extract_tokens_and_log_probs(
+                self.response_chain.invoke(
+                    _input, {"callbacks": _run_manager.get_child()}
+                )
+            )
+            low_confidence_spans = _low_confidence_spans(
+                tokens,
+                log_probs,
+                self.min_prob,
+                self.min_token_gap,
+                self.num_pad_tokens,
+            )
+            initial_response = response.strip() + " " + "".join(tokens)
+            if not low_confidence_spans:
+                response = initial_response
+                final_response, finished = self.output_parser.parse(response)
+                if finished:
+                    return {self.output_keys[0]: final_response}
+                continue
+
+            marginal, finished = self._do_retrieval(
+                low_confidence_spans,
+                _run_manager,
+                user_input,
+                response,
+                initial_response,
+            )
+            response = response.strip() + " " + marginal
+            if finished:
+                break
+        return {self.output_keys[0]: response}
+
+    @classmethod
+    def from_llm(
+        cls, llm: BaseLanguageModel, max_generation_len: int = 32, **kwargs: Any
+    ) -> FlareChain:
+        """Creates a FlareChain from a language model.
+
+        Args:
+            llm: Language model to use.
+            max_generation_len: Maximum length of the generated response.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            FlareChain class with the given language model.
+        """
+        try:
+            from langchain_openai import ChatOpenAI
+        except ImportError:
+            raise ImportError(
+                "OpenAI is required for FlareChain. "
+                "Please install langchain-openai: "
+                "`pip install langchain-openai`."
+            )
+        # Note: FLARE needs per-token log probabilities, so a dedicated
+        # ChatOpenAI instance with logprobs enabled replaces the provided llm.
+        llm = ChatOpenAI(
+            max_completion_tokens=max_generation_len, logprobs=True, temperature=0
+        )
+        response_chain = PROMPT | llm
+        question_gen_chain = QUESTION_GENERATOR_PROMPT | llm | StrOutputParser()
+        return cls(
+            question_generator_chain=question_gen_chain,
+            response_chain=response_chain,
+            **kwargs,
+        )
diff --git a/venv/Lib/site-packages/langchain/chains/flare/prompts.py b/venv/Lib/site-packages/langchain/chains/flare/prompts.py
new file mode 100644
index 00000000..629badff
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/flare/prompts.py
@@ -0,0 +1,44 @@
+from langchain_core.output_parsers import BaseOutputParser
+from langchain_core.prompts import PromptTemplate
+
+
+class FinishedOutputParser(BaseOutputParser[tuple[str, bool]]):
+    """Output parser that checks if the output is finished."""
+
+    finished_value: str = "FINISHED"
+    """Value that indicates the output is finished."""
+
+    def parse(self, text: str) -> tuple[str, bool]:
+        cleaned = text.strip()
+        finished = self.finished_value in cleaned
+        return cleaned.replace(self.finished_value, ""), finished
+
+
+PROMPT_TEMPLATE = """\
+Respond to the user message using any relevant context.
\ +If context is provided, you should ground your answer in that context. \ +Once you're done responding return FINISHED. + +>>> CONTEXT: {context} +>>> USER INPUT: {user_input} +>>> RESPONSE: {response}\ +""" + +PROMPT = PromptTemplate( + template=PROMPT_TEMPLATE, + input_variables=["user_input", "context", "response"], +) + + +QUESTION_GENERATOR_PROMPT_TEMPLATE = """\ +Given a user input and an existing partial response as context, \ +ask a question to which the answer is the given term/entity/phrase: + +>>> USER INPUT: {user_input} +>>> EXISTING PARTIAL RESPONSE: {current_response} + +The question to which the answer is the term/entity/phrase "{uncertain_span}" is:""" +QUESTION_GENERATOR_PROMPT = PromptTemplate( + template=QUESTION_GENERATOR_PROMPT_TEMPLATE, + input_variables=["user_input", "current_response", "uncertain_span"], +) diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__init__.py b/venv/Lib/site-packages/langchain/chains/graph_qa/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..7953f5a9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/arangodb.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/arangodb.cpython-312.pyc new file mode 100644 index 00000000..1a65b00a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/arangodb.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..e1ed738f Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/cypher.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/cypher.cpython-312.pyc new file mode 100644 index 00000000..2e28381a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/cypher.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/cypher_utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/cypher_utils.cpython-312.pyc new file mode 100644 index 00000000..37beb930 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/cypher_utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/falkordb.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/falkordb.cpython-312.pyc new file mode 100644 index 00000000..96b1d7f2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/falkordb.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/gremlin.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/gremlin.cpython-312.pyc new file mode 100644 index 00000000..7ab5d4e7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/gremlin.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/hugegraph.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/hugegraph.cpython-312.pyc new file mode 100644 index 00000000..25db49ac Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/hugegraph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/kuzu.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/kuzu.cpython-312.pyc new file mode 100644 index 00000000..b7b26338 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/kuzu.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/nebulagraph.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/nebulagraph.cpython-312.pyc new file mode 100644 index 00000000..50b0ce65 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/nebulagraph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/neptune_cypher.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/neptune_cypher.cpython-312.pyc new file mode 100644 index 00000000..e23e6875 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/neptune_cypher.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/neptune_sparql.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/neptune_sparql.cpython-312.pyc new file mode 100644 index 00000000..d79765fe Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/neptune_sparql.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/ontotext_graphdb.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/ontotext_graphdb.cpython-312.pyc new file mode 100644 index 00000000..abcec982 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/ontotext_graphdb.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/prompts.cpython-312.pyc new file mode 100644 index 00000000..fdfe709e Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/prompts.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/sparql.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/sparql.cpython-312.pyc new file mode 100644 index 00000000..9f71fd8f Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/graph_qa/__pycache__/sparql.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/arangodb.py b/venv/Lib/site-packages/langchain/chains/graph_qa/arangodb.py new file mode 100644 index 00000000..bf536fab --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/arangodb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.arangodb import ArangoGraphQAChain + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
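+# For example, importing `ArangoGraphQAChain` from this module still works,
+# but it emits a deprecation warning and (assuming `langchain_community` is
+# installed) resolves to the implementation under
+# `langchain_community.chains.graph_qa.arangodb`:
+#
+#     from langchain.chains.graph_qa.arangodb import ArangoGraphQAChain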
+DEPRECATED_LOOKUP = { + "ArangoGraphQAChain": "langchain_community.chains.graph_qa.arangodb", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["ArangoGraphQAChain"] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/base.py b/venv/Lib/site-packages/langchain/chains/graph_qa/base.py new file mode 100644 index 00000000..0b2b5a32 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/base.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.base import GraphQAChain + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GraphQAChain": "langchain_community.chains.graph_qa.base", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["GraphQAChain"] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/cypher.py b/venv/Lib/site-packages/langchain/chains/graph_qa/cypher.py new file mode 100644 index 00000000..fbddc52e --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/cypher.py @@ -0,0 +1,39 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.cypher import ( + CYPHER_GENERATION_PROMPT, + INTERMEDIATE_STEPS_KEY, + GraphCypherQAChain, + construct_schema, + extract_cypher, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GraphCypherQAChain": "langchain_community.chains.graph_qa.cypher", + "INTERMEDIATE_STEPS_KEY": "langchain_community.chains.graph_qa.cypher", + "construct_schema": "langchain_community.chains.graph_qa.cypher", + "extract_cypher": "langchain_community.chains.graph_qa.cypher", + "CYPHER_GENERATION_PROMPT": "langchain_community.chains.graph_qa.cypher", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GraphCypherQAChain", + "INTERMEDIATE_STEPS_KEY", + "construct_schema", + "extract_cypher", + "CYPHER_GENERATION_PROMPT", +] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/cypher_utils.py b/venv/Lib/site-packages/langchain/chains/graph_qa/cypher_utils.py new file mode 100644 index 00000000..deeb1051 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/cypher_utils.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.cypher_utils import ( + CypherQueryCorrector, + Schema, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "CypherQueryCorrector": "langchain_community.chains.graph_qa.cypher_utils", + "Schema": "langchain_community.chains.graph_qa.cypher_utils", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["CypherQueryCorrector", "Schema"] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/falkordb.py b/venv/Lib/site-packages/langchain/chains/graph_qa/falkordb.py new file mode 100644 index 00000000..1aba6adf --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/falkordb.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.falkordb import ( + INTERMEDIATE_STEPS_KEY, + FalkorDBQAChain, + extract_cypher, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "FalkorDBQAChain": "langchain_community.chains.graph_qa.falkordb", + "INTERMEDIATE_STEPS_KEY": "langchain_community.chains.graph_qa.falkordb", + "extract_cypher": "langchain_community.chains.graph_qa.falkordb", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["FalkorDBQAChain", "INTERMEDIATE_STEPS_KEY", "extract_cypher"] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/gremlin.py b/venv/Lib/site-packages/langchain/chains/graph_qa/gremlin.py new file mode 100644 index 00000000..20a7be6a --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/gremlin.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.gremlin import ( + GRAPHDB_SPARQL_FIX_TEMPLATE, + INTERMEDIATE_STEPS_KEY, + GremlinQAChain, + extract_gremlin, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GRAPHDB_SPARQL_FIX_TEMPLATE": "langchain_community.chains.graph_qa.gremlin", + "GremlinQAChain": "langchain_community.chains.graph_qa.gremlin", + "INTERMEDIATE_STEPS_KEY": "langchain_community.chains.graph_qa.gremlin", + "extract_gremlin": "langchain_community.chains.graph_qa.gremlin", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GRAPHDB_SPARQL_FIX_TEMPLATE", + "GremlinQAChain", + "INTERMEDIATE_STEPS_KEY", + "extract_gremlin", +] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/hugegraph.py b/venv/Lib/site-packages/langchain/chains/graph_qa/hugegraph.py new file mode 100644 index 00000000..a7d7d901 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/hugegraph.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.hugegraph import HugeGraphQAChain + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "HugeGraphQAChain": "langchain_community.chains.graph_qa.hugegraph", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["HugeGraphQAChain"] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/kuzu.py b/venv/Lib/site-packages/langchain/chains/graph_qa/kuzu.py new file mode 100644 index 00000000..aa436d20 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/kuzu.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.kuzu import ( + KuzuQAChain, + extract_cypher, + remove_prefix, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "KuzuQAChain": "langchain_community.chains.graph_qa.kuzu", + "extract_cypher": "langchain_community.chains.graph_qa.kuzu", + "remove_prefix": "langchain_community.chains.graph_qa.kuzu", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["KuzuQAChain", "extract_cypher", "remove_prefix"] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/nebulagraph.py b/venv/Lib/site-packages/langchain/chains/graph_qa/nebulagraph.py new file mode 100644 index 00000000..8ea379f3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/nebulagraph.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.nebulagraph import NebulaGraphQAChain + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "NebulaGraphQAChain": "langchain_community.chains.graph_qa.nebulagraph", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["NebulaGraphQAChain"] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/neptune_cypher.py b/venv/Lib/site-packages/langchain/chains/graph_qa/neptune_cypher.py new file mode 100644 index 00000000..96fcf01f --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/neptune_cypher.py @@ -0,0 +1,39 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.neptune_cypher import ( + INTERMEDIATE_STEPS_KEY, + NeptuneOpenCypherQAChain, + extract_cypher, + trim_query, + use_simple_prompt, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "INTERMEDIATE_STEPS_KEY": "langchain_community.chains.graph_qa.neptune_cypher", + "NeptuneOpenCypherQAChain": "langchain_community.chains.graph_qa.neptune_cypher", + "extract_cypher": "langchain_community.chains.graph_qa.neptune_cypher", + "trim_query": "langchain_community.chains.graph_qa.neptune_cypher", + "use_simple_prompt": "langchain_community.chains.graph_qa.neptune_cypher", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "INTERMEDIATE_STEPS_KEY", + "NeptuneOpenCypherQAChain", + "extract_cypher", + "trim_query", + "use_simple_prompt", +] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/neptune_sparql.py b/venv/Lib/site-packages/langchain/chains/graph_qa/neptune_sparql.py new file mode 100644 index 00000000..f4445b65 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/neptune_sparql.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.neptune_sparql import ( + INTERMEDIATE_STEPS_KEY, + SPARQL_GENERATION_TEMPLATE, + NeptuneSparqlQAChain, + extract_sparql, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "INTERMEDIATE_STEPS_KEY": "langchain_community.chains.graph_qa.neptune_sparql", + "NeptuneSparqlQAChain": "langchain_community.chains.graph_qa.neptune_sparql", + "SPARQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.neptune_sparql", + "extract_sparql": "langchain_community.chains.graph_qa.neptune_sparql", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "INTERMEDIATE_STEPS_KEY", + "NeptuneSparqlQAChain", + "SPARQL_GENERATION_TEMPLATE", + "extract_sparql", +] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/ontotext_graphdb.py b/venv/Lib/site-packages/langchain/chains/graph_qa/ontotext_graphdb.py new file mode 100644 index 00000000..d1e8a11b --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/ontotext_graphdb.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.ontotext_graphdb import ( + OntotextGraphDBQAChain, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "OntotextGraphDBQAChain": "langchain_community.chains.graph_qa.ontotext_graphdb", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["OntotextGraphDBQAChain"] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/prompts.py b/venv/Lib/site-packages/langchain/chains/graph_qa/prompts.py new file mode 100644 index 00000000..1b7ac181 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/prompts.py @@ -0,0 +1,96 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.prompts import ( + AQL_FIX_TEMPLATE, + AQL_GENERATION_TEMPLATE, + AQL_QA_TEMPLATE, + CYPHER_GENERATION_PROMPT, + CYPHER_GENERATION_TEMPLATE, + CYPHER_QA_PROMPT, + CYPHER_QA_TEMPLATE, + GRAPHDB_QA_TEMPLATE, + GRAPHDB_SPARQL_FIX_TEMPLATE, + GRAPHDB_SPARQL_GENERATION_TEMPLATE, + GREMLIN_GENERATION_TEMPLATE, + KUZU_EXTRA_INSTRUCTIONS, + KUZU_GENERATION_TEMPLATE, + NEBULAGRAPH_EXTRA_INSTRUCTIONS, + NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS, + NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE, + NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE, + NGQL_GENERATION_TEMPLATE, + SPARQL_GENERATION_SELECT_TEMPLATE, + SPARQL_GENERATION_UPDATE_TEMPLATE, + SPARQL_INTENT_TEMPLATE, + SPARQL_QA_TEMPLATE, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AQL_FIX_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "AQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "AQL_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "CYPHER_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "CYPHER_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "CYPHER_QA_PROMPT": "langchain_community.chains.graph_qa.prompts", + "CYPHER_GENERATION_PROMPT": "langchain_community.chains.graph_qa.prompts", + "GRAPHDB_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "GRAPHDB_SPARQL_FIX_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "GRAPHDB_SPARQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "GREMLIN_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "KUZU_EXTRA_INSTRUCTIONS": "langchain_community.chains.graph_qa.prompts", + "KUZU_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "NEBULAGRAPH_EXTRA_INSTRUCTIONS": "langchain_community.chains.graph_qa.prompts", + "NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS": ( + "langchain_community.chains.graph_qa.prompts" + ), + "NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE": ( + "langchain_community.chains.graph_qa.prompts" + ), + "NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE": ( + "langchain_community.chains.graph_qa.prompts" + ), + "NGQL_GENERATION_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "SPARQL_GENERATION_SELECT_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "SPARQL_GENERATION_UPDATE_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "SPARQL_INTENT_TEMPLATE": "langchain_community.chains.graph_qa.prompts", + "SPARQL_QA_TEMPLATE": "langchain_community.chains.graph_qa.prompts", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up 
attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AQL_FIX_TEMPLATE", + "AQL_GENERATION_TEMPLATE", + "AQL_QA_TEMPLATE", + "CYPHER_GENERATION_TEMPLATE", + "CYPHER_QA_TEMPLATE", + "GRAPHDB_QA_TEMPLATE", + "GRAPHDB_SPARQL_FIX_TEMPLATE", + "GRAPHDB_SPARQL_GENERATION_TEMPLATE", + "GREMLIN_GENERATION_TEMPLATE", + "KUZU_EXTRA_INSTRUCTIONS", + "KUZU_GENERATION_TEMPLATE", + "NEBULAGRAPH_EXTRA_INSTRUCTIONS", + "NEPTUNE_OPENCYPHER_EXTRA_INSTRUCTIONS", + "NEPTUNE_OPENCYPHER_GENERATION_SIMPLE_TEMPLATE", + "NEPTUNE_OPENCYPHER_GENERATION_TEMPLATE", + "NGQL_GENERATION_TEMPLATE", + "SPARQL_GENERATION_SELECT_TEMPLATE", + "SPARQL_GENERATION_UPDATE_TEMPLATE", + "SPARQL_INTENT_TEMPLATE", + "SPARQL_QA_TEMPLATE", + "CYPHER_QA_PROMPT", + "CYPHER_GENERATION_PROMPT", +] diff --git a/venv/Lib/site-packages/langchain/chains/graph_qa/sparql.py b/venv/Lib/site-packages/langchain/chains/graph_qa/sparql.py new file mode 100644 index 00000000..363f9942 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/graph_qa/sparql.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.sparql import GraphSparqlQAChain + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GraphSparqlQAChain": "langchain_community.chains.graph_qa.sparql", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["GraphSparqlQAChain"] diff --git a/venv/Lib/site-packages/langchain/chains/history_aware_retriever.py b/venv/Lib/site-packages/langchain/chains/history_aware_retriever.py new file mode 100644 index 00000000..a29d31a8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/history_aware_retriever.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from langchain_core.language_models import LanguageModelLike +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts import BasePromptTemplate +from langchain_core.retrievers import RetrieverLike, RetrieverOutputLike +from langchain_core.runnables import RunnableBranch + + +def create_history_aware_retriever( + llm: LanguageModelLike, + retriever: RetrieverLike, + prompt: BasePromptTemplate, +) -> RetrieverOutputLike: + """Create a chain that takes conversation history and returns documents. + + If there is no `chat_history`, then the `input` is just passed directly to the + retriever. If there is `chat_history`, then the prompt and LLM will be used + to generate a search query. That search query is then passed to the retriever. + + Args: + llm: Language model to use for generating a search term given chat history + retriever: RetrieverLike object that takes a string as input and outputs + a list of Documents. + prompt: The prompt used to generate the search query for the retriever. + + Returns: + An LCEL Runnable. The runnable input must take in `input`, and if there + is chat history should take it in the form of `chat_history`. + The Runnable output is a list of Documents + + Example: + .. 
code-block:: python
+
+            # pip install -U langchain langchain-community
+
+            from langchain_community.chat_models import ChatOpenAI
+            from langchain.chains import create_history_aware_retriever
+            from langchain import hub
+
+            rephrase_prompt = hub.pull("langchain-ai/chat-langchain-rephrase")
+            llm = ChatOpenAI()
+            retriever = ...
+            chat_retriever_chain = create_history_aware_retriever(
+                llm, retriever, rephrase_prompt
+            )
+
+            chat_retriever_chain.invoke({"input": "...", "chat_history": []})
+
+    """
+    if "input" not in prompt.input_variables:
+        raise ValueError(
+            "Expected `input` to be a prompt variable, "
+            f"but got {prompt.input_variables}"
+        )
+
+    retrieve_documents: RetrieverOutputLike = RunnableBranch(
+        (
+            # Both empty string and empty list evaluate to False
+            lambda x: not x.get("chat_history", False),
+            # If no chat history, then we just pass input to retriever
+            (lambda x: x["input"]) | retriever,
+        ),
+        # If chat history, then we pass inputs to LLM chain, then to retriever
+        prompt | llm | StrOutputParser() | retriever,
+    ).with_config(run_name="chat_retriever_chain")
+    return retrieve_documents
diff --git a/venv/Lib/site-packages/langchain/chains/hyde/__init__.py b/venv/Lib/site-packages/langchain/chains/hyde/__init__.py
new file mode 100644
index 00000000..946d0ab1
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/hyde/__init__.py
@@ -0,0 +1,4 @@
+"""Hypothetical Document Embeddings.
+
+https://arxiv.org/abs/2212.10496
+"""
diff --git a/venv/Lib/site-packages/langchain/chains/hyde/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/hyde/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..5bb3f476
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/hyde/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chains/hyde/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/hyde/__pycache__/base.cpython-312.pyc
new file mode 100644
index 00000000..78b991b6
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/hyde/__pycache__/base.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chains/hyde/__pycache__/prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/hyde/__pycache__/prompts.cpython-312.pyc
new file mode 100644
index 00000000..2857b2ac
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/hyde/__pycache__/prompts.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chains/hyde/base.py b/venv/Lib/site-packages/langchain/chains/hyde/base.py
new file mode 100644
index 00000000..d8c00d83
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/hyde/base.py
@@ -0,0 +1,123 @@
+"""Hypothetical Document Embeddings. 
+
+https://arxiv.org/abs/2212.10496
+"""
+
+from __future__ import annotations
+
+import logging
+from typing import Any, Optional
+
+from langchain_core.callbacks import CallbackManagerForChainRun
+from langchain_core.embeddings import Embeddings
+from langchain_core.language_models import BaseLanguageModel
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import BasePromptTemplate
+from langchain_core.runnables import Runnable
+from pydantic import ConfigDict
+
+from langchain.chains.base import Chain
+from langchain.chains.hyde.prompts import PROMPT_MAP
+from langchain.chains.llm import LLMChain
+
+logger = logging.getLogger(__name__)
+
+
+class HypotheticalDocumentEmbedder(Chain, Embeddings):
+    """Generate hypothetical document for query, and then embed that.
+
+    Based on https://arxiv.org/abs/2212.10496
+    """
+
+    base_embeddings: Embeddings
+    llm_chain: Runnable
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra="forbid",
+    )
+
+    @property
+    def input_keys(self) -> list[str]:
+        """Input keys for Hyde's LLM chain."""
+        return self.llm_chain.input_schema.model_json_schema()["required"]
+
+    @property
+    def output_keys(self) -> list[str]:
+        """Output keys for Hyde's LLM chain."""
+        if isinstance(self.llm_chain, LLMChain):
+            return self.llm_chain.output_keys
+        else:
+            return ["text"]
+
+    def embed_documents(self, texts: list[str]) -> list[list[float]]:
+        """Call the base embeddings."""
+        return self.base_embeddings.embed_documents(texts)
+
+    def combine_embeddings(self, embeddings: list[list[float]]) -> list[float]:
+        """Combine embeddings into final embeddings."""
+        try:
+            import numpy as np
+
+            return list(np.array(embeddings).mean(axis=0))
+        except ImportError:
+            logger.warning(
+                "NumPy not found in the current Python environment. "
+                "HypotheticalDocumentEmbedder will use a pure Python implementation "
+                "for internal calculations, which may significantly impact "
+                "performance, especially for large datasets. For optimal speed and "
+                "efficiency, consider installing NumPy: pip install numpy"
+            )
+            if not embeddings:
+                return []
+            num_vectors = len(embeddings)
+            return [sum(dim_values) / num_vectors for dim_values in zip(*embeddings)]
+
+    def embed_query(self, text: str) -> list[float]:
+        """Generate a hypothetical document and embed it."""
+        var_name = self.input_keys[0]
+        result = self.llm_chain.invoke({var_name: text})
+        if isinstance(self.llm_chain, LLMChain):
+            documents = [result[self.output_keys[0]]]
+        else:
+            documents = [result]
+        embeddings = self.embed_documents(documents)
+        return self.combine_embeddings(embeddings)
+
+    def _call(
+        self,
+        inputs: dict[str, Any],
+        run_manager: Optional[CallbackManagerForChainRun] = None,
+    ) -> dict[str, str]:
+        """Call the internal llm chain."""
+        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
+        return self.llm_chain.invoke(
+            inputs, config={"callbacks": _run_manager.get_child()}
+        )
+
+    @classmethod
+    def from_llm(
+        cls,
+        llm: BaseLanguageModel,
+        base_embeddings: Embeddings,
+        prompt_key: Optional[str] = None,
+        custom_prompt: Optional[BasePromptTemplate] = None,
+        **kwargs: Any,
+    ) -> HypotheticalDocumentEmbedder:
+        """Load and use LLMChain with either a specific prompt key or custom prompt."""
+        if custom_prompt is not None:
+            prompt = custom_prompt
+        elif prompt_key is not None and prompt_key in PROMPT_MAP:
+            prompt = PROMPT_MAP[prompt_key]
+        else:
+            raise ValueError(
+                f"Must specify prompt_key if custom_prompt not provided. 
Should be one " + f"of {list(PROMPT_MAP.keys())}." + ) + + llm_chain = prompt | llm | StrOutputParser() + return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs) + + @property + def _chain_type(self) -> str: + return "hyde_chain" diff --git a/venv/Lib/site-packages/langchain/chains/hyde/prompts.py b/venv/Lib/site-packages/langchain/chains/hyde/prompts.py new file mode 100644 index 00000000..36c1cc06 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/hyde/prompts.py @@ -0,0 +1,47 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +web_search_template = """Please write a passage to answer the question +Question: {QUESTION} +Passage:""" +web_search = PromptTemplate(template=web_search_template, input_variables=["QUESTION"]) +sci_fact_template = """Please write a scientific paper passage to support/refute the claim +Claim: {Claim} +Passage:""" +sci_fact = PromptTemplate(template=sci_fact_template, input_variables=["Claim"]) +arguana_template = """Please write a counter argument for the passage +Passage: {PASSAGE} +Counter Argument:""" +arguana = PromptTemplate(template=arguana_template, input_variables=["PASSAGE"]) +trec_covid_template = """Please write a scientific paper passage to answer the question +Question: {QUESTION} +Passage:""" +trec_covid = PromptTemplate(template=trec_covid_template, input_variables=["QUESTION"]) +fiqa_template = """Please write a financial article passage to answer the question +Question: {QUESTION} +Passage:""" +fiqa = PromptTemplate(template=fiqa_template, input_variables=["QUESTION"]) +dbpedia_entity_template = """Please write a passage to answer the question. +Question: {QUESTION} +Passage:""" +dbpedia_entity = PromptTemplate( + template=dbpedia_entity_template, input_variables=["QUESTION"] +) +trec_news_template = """Please write a news passage about the topic. +Topic: {TOPIC} +Passage:""" +trec_news = PromptTemplate(template=trec_news_template, input_variables=["TOPIC"]) +mr_tydi_template = """Please write a passage in Swahili/Korean/Japanese/Bengali to answer the question in detail. 
+Question: {QUESTION}
+Passage:"""
+mr_tydi = PromptTemplate(template=mr_tydi_template, input_variables=["QUESTION"])
+PROMPT_MAP = {
+    "web_search": web_search,
+    "sci_fact": sci_fact,
+    "arguana": arguana,
+    "trec_covid": trec_covid,
+    "fiqa": fiqa,
+    "dbpedia_entity": dbpedia_entity,
+    "trec_news": trec_news,
+    "mr_tydi": mr_tydi,
+}
diff --git a/venv/Lib/site-packages/langchain/chains/llm.py b/venv/Lib/site-packages/langchain/chains/llm.py
new file mode 100644
index 00000000..b71758d8
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/llm.py
@@ -0,0 +1,426 @@
+"""Chain that just formats a prompt and calls an LLM."""
+
+from __future__ import annotations
+
+import warnings
+from collections.abc import Sequence
+from typing import Any, Optional, Union, cast
+
+from langchain_core._api import deprecated
+from langchain_core.callbacks import (
+    AsyncCallbackManager,
+    AsyncCallbackManagerForChainRun,
+    CallbackManager,
+    CallbackManagerForChainRun,
+    Callbacks,
+)
+from langchain_core.language_models import (
+    BaseLanguageModel,
+    LanguageModelInput,
+)
+from langchain_core.messages import BaseMessage
+from langchain_core.output_parsers import BaseLLMOutputParser, StrOutputParser
+from langchain_core.outputs import ChatGeneration, Generation, LLMResult
+from langchain_core.prompt_values import PromptValue
+from langchain_core.prompts import BasePromptTemplate, PromptTemplate
+from langchain_core.runnables import (
+    Runnable,
+    RunnableBinding,
+    RunnableBranch,
+    RunnableWithFallbacks,
+)
+from langchain_core.runnables.configurable import DynamicRunnable
+from langchain_core.utils.input import get_colored_text
+from pydantic import ConfigDict, Field
+
+from langchain.chains.base import Chain
+
+
+@deprecated(
+    since="0.1.17",
+    alternative="RunnableSequence, e.g., `prompt | llm`",
+    removal="1.0",
+)
+class LLMChain(Chain):
+    """Chain to run queries against LLMs.
+
+    This class is deprecated. See below for an example implementation using
+    LangChain runnables:
+
+        .. code-block:: python
+
+            from langchain_core.output_parsers import StrOutputParser
+            from langchain_core.prompts import PromptTemplate
+            from langchain_openai import OpenAI
+
+            prompt_template = "Tell me a {adjective} joke"
+            prompt = PromptTemplate(
+                input_variables=["adjective"], template=prompt_template
+            )
+            llm = OpenAI()
+            chain = prompt | llm | StrOutputParser()
+
+            chain.invoke("your adjective here")
+
+    Example:
+        .. code-block:: python
+
+            from langchain.chains import LLMChain
+            from langchain_community.llms import OpenAI
+            from langchain_core.prompts import PromptTemplate
+            prompt_template = "Tell me a {adjective} joke"
+            prompt = PromptTemplate(
+                input_variables=["adjective"], template=prompt_template
+            )
+            chain = LLMChain(llm=OpenAI(), prompt=prompt)
+    """
+
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return True
+
+    prompt: BasePromptTemplate
+    """Prompt object to use."""
+    llm: Union[
+        Runnable[LanguageModelInput, str], Runnable[LanguageModelInput, BaseMessage]
+    ]
+    """Language model to call."""
+    output_key: str = "text"  #: :meta private:
+    output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
+    """Output parser to use.
+    Defaults to one that takes the most likely string but does not change it
+    otherwise."""
+    return_final_only: bool = True
+    """Whether to return only the final parsed result. Defaults to True. 
+ If false, will return a bunch of extra information about the generation.""" + llm_kwargs: dict = Field(default_factory=dict) + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @property + def input_keys(self) -> list[str]: + """Will be whatever keys the prompt expects. + + :meta private: + """ + return self.prompt.input_variables + + @property + def output_keys(self) -> list[str]: + """Will always return text key. + + :meta private: + """ + if self.return_final_only: + return [self.output_key] + else: + return [self.output_key, "full_generation"] + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + response = self.generate([inputs], run_manager=run_manager) + return self.create_outputs(response)[0] + + def generate( + self, + input_list: list[dict[str, Any]], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> LLMResult: + """Generate LLM result from inputs.""" + prompts, stop = self.prep_prompts(input_list, run_manager=run_manager) + callbacks = run_manager.get_child() if run_manager else None + if isinstance(self.llm, BaseLanguageModel): + return self.llm.generate_prompt( + prompts, + stop, + callbacks=callbacks, + **self.llm_kwargs, + ) + else: + results = self.llm.bind(stop=stop, **self.llm_kwargs).batch( + cast(list, prompts), {"callbacks": callbacks} + ) + generations: list[list[Generation]] = [] + for res in results: + if isinstance(res, BaseMessage): + generations.append([ChatGeneration(message=res)]) + else: + generations.append([Generation(text=res)]) + return LLMResult(generations=generations) + + async def agenerate( + self, + input_list: list[dict[str, Any]], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> LLMResult: + """Generate LLM result from inputs.""" + prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager) + callbacks = run_manager.get_child() if run_manager else None + if isinstance(self.llm, BaseLanguageModel): + return await self.llm.agenerate_prompt( + prompts, + stop, + callbacks=callbacks, + **self.llm_kwargs, + ) + else: + results = await self.llm.bind(stop=stop, **self.llm_kwargs).abatch( + cast(list, prompts), {"callbacks": callbacks} + ) + generations: list[list[Generation]] = [] + for res in results: + if isinstance(res, BaseMessage): + generations.append([ChatGeneration(message=res)]) + else: + generations.append([Generation(text=res)]) + return LLMResult(generations=generations) + + def prep_prompts( + self, + input_list: list[dict[str, Any]], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> tuple[list[PromptValue], Optional[list[str]]]: + """Prepare prompts from inputs.""" + stop = None + if len(input_list) == 0: + return [], stop + if "stop" in input_list[0]: + stop = input_list[0]["stop"] + prompts = [] + for inputs in input_list: + selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} + prompt = self.prompt.format_prompt(**selected_inputs) + _colored_text = get_colored_text(prompt.to_string(), "green") + _text = "Prompt after formatting:\n" + _colored_text + if run_manager: + run_manager.on_text(_text, end="\n", verbose=self.verbose) + if "stop" in inputs and inputs["stop"] != stop: + raise ValueError( + "If `stop` is present in any inputs, should be present in all." 
+ ) + prompts.append(prompt) + return prompts, stop + + async def aprep_prompts( + self, + input_list: list[dict[str, Any]], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> tuple[list[PromptValue], Optional[list[str]]]: + """Prepare prompts from inputs.""" + stop = None + if len(input_list) == 0: + return [], stop + if "stop" in input_list[0]: + stop = input_list[0]["stop"] + prompts = [] + for inputs in input_list: + selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} + prompt = self.prompt.format_prompt(**selected_inputs) + _colored_text = get_colored_text(prompt.to_string(), "green") + _text = "Prompt after formatting:\n" + _colored_text + if run_manager: + await run_manager.on_text(_text, end="\n", verbose=self.verbose) + if "stop" in inputs and inputs["stop"] != stop: + raise ValueError( + "If `stop` is present in any inputs, should be present in all." + ) + prompts.append(prompt) + return prompts, stop + + def apply( + self, input_list: list[dict[str, Any]], callbacks: Callbacks = None + ) -> list[dict[str, str]]: + """Utilize the LLM generate method for speed gains.""" + callback_manager = CallbackManager.configure( + callbacks, self.callbacks, self.verbose + ) + run_manager = callback_manager.on_chain_start( + None, + {"input_list": input_list}, + name=self.get_name(), + ) + try: + response = self.generate(input_list, run_manager=run_manager) + except BaseException as e: + run_manager.on_chain_error(e) + raise e + outputs = self.create_outputs(response) + run_manager.on_chain_end({"outputs": outputs}) + return outputs + + async def aapply( + self, input_list: list[dict[str, Any]], callbacks: Callbacks = None + ) -> list[dict[str, str]]: + """Utilize the LLM generate method for speed gains.""" + callback_manager = AsyncCallbackManager.configure( + callbacks, self.callbacks, self.verbose + ) + run_manager = await callback_manager.on_chain_start( + None, + {"input_list": input_list}, + name=self.get_name(), + ) + try: + response = await self.agenerate(input_list, run_manager=run_manager) + except BaseException as e: + await run_manager.on_chain_error(e) + raise e + outputs = self.create_outputs(response) + await run_manager.on_chain_end({"outputs": outputs}) + return outputs + + @property + def _run_output_key(self) -> str: + return self.output_key + + def create_outputs(self, llm_result: LLMResult) -> list[dict[str, Any]]: + """Create outputs from response.""" + result = [ + # Get the text of the top generated string. + { + self.output_key: self.output_parser.parse_result(generation), + "full_generation": generation, + } + for generation in llm_result.generations + ] + if self.return_final_only: + result = [{self.output_key: r[self.output_key]} for r in result] + return result + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, str]: + response = await self.agenerate([inputs], run_manager=run_manager) + return self.create_outputs(response)[0] + + def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str: + """Format prompt with kwargs and pass to LLM. + + Args: + callbacks: Callbacks to pass to LLMChain + **kwargs: Keys to pass to prompt template. + + Returns: + Completion from LLM. + + Example: + .. 
code-block:: python
+
+                completion = chain.predict(adjective="funny")
+        """
+        return self(kwargs, callbacks=callbacks)[self.output_key]
+
+    async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
+        """Format prompt with kwargs and pass to LLM.
+
+        Args:
+            callbacks: Callbacks to pass to LLMChain
+            **kwargs: Keys to pass to prompt template.
+
+        Returns:
+            Completion from LLM.
+
+        Example:
+            .. code-block:: python
+
+                completion = await chain.apredict(adjective="funny")
+        """
+        return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]
+
+    def predict_and_parse(
+        self, callbacks: Callbacks = None, **kwargs: Any
+    ) -> Union[str, list[str], dict[str, Any]]:
+        """Call predict and then parse the results."""
+        warnings.warn(
+            "The predict_and_parse method is deprecated, "
+            "instead pass an output parser directly to LLMChain."
+        )
+        result = self.predict(callbacks=callbacks, **kwargs)
+        if self.prompt.output_parser is not None:
+            return self.prompt.output_parser.parse(result)
+        else:
+            return result
+
+    async def apredict_and_parse(
+        self, callbacks: Callbacks = None, **kwargs: Any
+    ) -> Union[str, list[str], dict[str, str]]:
+        """Call apredict and then parse the results."""
+        warnings.warn(
+            "The apredict_and_parse method is deprecated, "
+            "instead pass an output parser directly to LLMChain."
+        )
+        result = await self.apredict(callbacks=callbacks, **kwargs)
+        if self.prompt.output_parser is not None:
+            return self.prompt.output_parser.parse(result)
+        else:
+            return result
+
+    def apply_and_parse(
+        self, input_list: list[dict[str, Any]], callbacks: Callbacks = None
+    ) -> Sequence[Union[str, list[str], dict[str, str]]]:
+        """Call apply and then parse the results."""
+        warnings.warn(
+            "The apply_and_parse method is deprecated, "
+            "instead pass an output parser directly to LLMChain."
+        )
+        result = self.apply(input_list, callbacks=callbacks)
+        return self._parse_generation(result)
+
+    def _parse_generation(
+        self, generation: list[dict[str, str]]
+    ) -> Sequence[Union[str, list[str], dict[str, str]]]:
+        if self.prompt.output_parser is not None:
+            return [
+                self.prompt.output_parser.parse(res[self.output_key])
+                for res in generation
+            ]
+        else:
+            return generation
+
+    async def aapply_and_parse(
+        self, input_list: list[dict[str, Any]], callbacks: Callbacks = None
+    ) -> Sequence[Union[str, list[str], dict[str, str]]]:
+        """Call aapply and then parse the results."""
+        warnings.warn(
+            "The aapply_and_parse method is deprecated, "
+            "instead pass an output parser directly to LLMChain." 
+        )
+        result = await self.aapply(input_list, callbacks=callbacks)
+        return self._parse_generation(result)
+
+    @property
+    def _chain_type(self) -> str:
+        return "llm_chain"
+
+    @classmethod
+    def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:
+        """Create LLMChain from LLM and template."""
+        prompt_template = PromptTemplate.from_template(template)
+        return cls(llm=llm, prompt=prompt_template)
+
+    def _get_num_tokens(self, text: str) -> int:
+        return _get_language_model(self.llm).get_num_tokens(text)
+
+
+def _get_language_model(llm_like: Runnable) -> BaseLanguageModel:
+    if isinstance(llm_like, BaseLanguageModel):
+        return llm_like
+    elif isinstance(llm_like, RunnableBinding):
+        return _get_language_model(llm_like.bound)
+    elif isinstance(llm_like, RunnableWithFallbacks):
+        return _get_language_model(llm_like.runnable)
+    elif isinstance(llm_like, (RunnableBranch, DynamicRunnable)):
+        return _get_language_model(llm_like.default)
+    else:
+        raise ValueError(
+            f"Unable to extract BaseLanguageModel from llm_like object of type "
+            f"{type(llm_like)}"
+        )
diff --git a/venv/Lib/site-packages/langchain/chains/llm_bash/__init__.py b/venv/Lib/site-packages/langchain/chains/llm_bash/__init__.py
new file mode 100644
index 00000000..b055d992
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/llm_bash/__init__.py
@@ -0,0 +1,9 @@
+def __getattr__(name: str = "") -> None:
+    """Raise an error on import since this module is deprecated."""
+    raise AttributeError(
+        "This module has been moved to langchain-experimental. "
+        "For more details: https://github.com/langchain-ai/langchain/discussions/11352. "
+        "To access this code, install it with `pip install langchain-experimental`: "
+        "`from langchain_experimental.llm_bash.base "
+        "import LLMBashChain`"
+    )
diff --git a/venv/Lib/site-packages/langchain/chains/llm_bash/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/llm_bash/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..19fb774c
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/llm_bash/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chains/llm_checker/__init__.py b/venv/Lib/site-packages/langchain/chains/llm_checker/__init__.py
new file mode 100644
index 00000000..95516d81
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/llm_checker/__init__.py
@@ -0,0 +1,4 @@
+"""Chain that tries to verify assumptions before answering a question. 
+ +Heavily borrowed from https://github.com/jagilley/fact-checker +""" diff --git a/venv/Lib/site-packages/langchain/chains/llm_checker/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/llm_checker/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..07bae8f0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/llm_checker/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/llm_checker/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/llm_checker/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..e701fd51 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/llm_checker/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/llm_checker/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/llm_checker/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..b68b302d Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/llm_checker/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/llm_checker/base.py b/venv/Lib/site-packages/langchain/chains/llm_checker/base.py new file mode 100644 index 00000000..56d8ba7b --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/llm_checker/base.py @@ -0,0 +1,192 @@ +"""Chain for question-answering with self-verification.""" + +from __future__ import annotations + +import warnings +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import CallbackManagerForChainRun +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import PromptTemplate +from pydantic import ConfigDict, model_validator + +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.chains.llm_checker.prompt import ( + CHECK_ASSERTIONS_PROMPT, + CREATE_DRAFT_ANSWER_PROMPT, + LIST_ASSERTIONS_PROMPT, + REVISED_ANSWER_PROMPT, +) +from langchain.chains.sequential import SequentialChain + + +def _load_question_to_checked_assertions_chain( + llm: BaseLanguageModel, + create_draft_answer_prompt: PromptTemplate, + list_assertions_prompt: PromptTemplate, + check_assertions_prompt: PromptTemplate, + revised_answer_prompt: PromptTemplate, +) -> SequentialChain: + create_draft_answer_chain = LLMChain( + llm=llm, + prompt=create_draft_answer_prompt, + output_key="statement", + ) + list_assertions_chain = LLMChain( + llm=llm, + prompt=list_assertions_prompt, + output_key="assertions", + ) + check_assertions_chain = LLMChain( + llm=llm, + prompt=check_assertions_prompt, + output_key="checked_assertions", + ) + revised_answer_chain = LLMChain( + llm=llm, + prompt=revised_answer_prompt, + output_key="revised_statement", + ) + chains = [ + create_draft_answer_chain, + list_assertions_chain, + check_assertions_chain, + revised_answer_chain, + ] + question_to_checked_assertions_chain = SequentialChain( + chains=chains, # type: ignore[arg-type] + input_variables=["question"], + output_variables=["revised_statement"], + verbose=True, + ) + return question_to_checked_assertions_chain + + +@deprecated( + since="0.2.13", + message=( + "See LangGraph guides for a variety of self-reflection and corrective " + "strategies for question-answering and other tasks: " + "https://langchain-ai.github.io/langgraph/tutorials/rag/langgraph_self_rag/" + ), + removal="1.0", +) +class 
LLMCheckerChain(Chain): + """Chain for question-answering with self-verification. + + Example: + .. code-block:: python + + from langchain_community.llms import OpenAI + from langchain.chains import LLMCheckerChain + llm = OpenAI(temperature=0.7) + checker_chain = LLMCheckerChain.from_llm(llm) + """ + + question_to_checked_assertions_chain: SequentialChain + + llm: Optional[BaseLanguageModel] = None + """[Deprecated] LLM wrapper to use.""" + create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT + """[Deprecated]""" + list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT + """[Deprecated]""" + check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT + """[Deprecated]""" + revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT + """[Deprecated] Prompt to use when questioning the documents.""" + input_key: str = "query" #: :meta private: + output_key: str = "result" #: :meta private: + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @model_validator(mode="before") + @classmethod + def raise_deprecation(cls, values: dict) -> Any: + if "llm" in values: + warnings.warn( + "Directly instantiating an LLMCheckerChain with an llm is deprecated. " + "Please instantiate with question_to_checked_assertions_chain " + "or using the from_llm class method." + ) + if ( + "question_to_checked_assertions_chain" not in values + and values["llm"] is not None + ): + question_to_checked_assertions_chain = ( + _load_question_to_checked_assertions_chain( + values["llm"], + values.get( + "create_draft_answer_prompt", CREATE_DRAFT_ANSWER_PROMPT + ), + values.get("list_assertions_prompt", LIST_ASSERTIONS_PROMPT), + values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT), + values.get("revised_answer_prompt", REVISED_ANSWER_PROMPT), + ) + ) + values["question_to_checked_assertions_chain"] = ( + question_to_checked_assertions_chain + ) + return values + + @property + def input_keys(self) -> list[str]: + """Return the singular input key. + + :meta private: + """ + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + """Return the singular output key. 
+ + :meta private: + """ + return [self.output_key] + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + question = inputs[self.input_key] + + output = self.question_to_checked_assertions_chain( + {"question": question}, callbacks=_run_manager.get_child() + ) + return {self.output_key: output["revised_statement"]} + + @property + def _chain_type(self) -> str: + return "llm_checker_chain" + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT, + list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT, + check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT, + revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT, + **kwargs: Any, + ) -> LLMCheckerChain: + question_to_checked_assertions_chain = ( + _load_question_to_checked_assertions_chain( + llm, + create_draft_answer_prompt, + list_assertions_prompt, + check_assertions_prompt, + revised_answer_prompt, + ) + ) + return cls( + question_to_checked_assertions_chain=question_to_checked_assertions_chain, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/chains/llm_checker/prompt.py b/venv/Lib/site-packages/langchain/chains/llm_checker/prompt.py new file mode 100644 index 00000000..8eb5fdaf --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/llm_checker/prompt.py @@ -0,0 +1,31 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +_CREATE_DRAFT_ANSWER_TEMPLATE = """{question}\n\n""" +CREATE_DRAFT_ANSWER_PROMPT = PromptTemplate( + input_variables=["question"], template=_CREATE_DRAFT_ANSWER_TEMPLATE +) + +_LIST_ASSERTIONS_TEMPLATE = """Here is a statement: +{statement} +Make a bullet point list of the assumptions you made when producing the above statement.\n\n""" +LIST_ASSERTIONS_PROMPT = PromptTemplate( + input_variables=["statement"], template=_LIST_ASSERTIONS_TEMPLATE +) + +_CHECK_ASSERTIONS_TEMPLATE = """Here is a bullet point list of assertions: +{assertions} +For each assertion, determine whether it is true or false. If it is false, explain why.\n\n""" +CHECK_ASSERTIONS_PROMPT = PromptTemplate( + input_variables=["assertions"], template=_CHECK_ASSERTIONS_TEMPLATE +) + +_REVISED_ANSWER_TEMPLATE = """{checked_assertions} + +Question: In light of the above assertions and checks, how would you answer the question '{question}'? + +Answer:""" +REVISED_ANSWER_PROMPT = PromptTemplate( + input_variables=["checked_assertions", "question"], + template=_REVISED_ANSWER_TEMPLATE, +) diff --git a/venv/Lib/site-packages/langchain/chains/llm_math/__init__.py b/venv/Lib/site-packages/langchain/chains/llm_math/__init__.py new file mode 100644 index 00000000..fa9fd272 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/llm_math/__init__.py @@ -0,0 +1,4 @@ +"""Chain that interprets a prompt and executes python code to do math. 
+ +Heavily borrowed from https://replit.com/@amasad/gptpy?v=1#main.py +""" diff --git a/venv/Lib/site-packages/langchain/chains/llm_math/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/llm_math/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..3b5e36be Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/llm_math/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/llm_math/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/llm_math/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..d870b580 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/llm_math/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/llm_math/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/llm_math/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..4a8e6b9e Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/llm_math/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/llm_math/base.py b/venv/Lib/site-packages/langchain/chains/llm_math/base.py new file mode 100644 index 00000000..96ef5d10 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/llm_math/base.py @@ -0,0 +1,304 @@ +"""Chain that interprets a prompt and executes python code to do math.""" + +from __future__ import annotations + +import math +import re +import warnings +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, +) +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate +from pydantic import ConfigDict, model_validator + +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.chains.llm_math.prompt import PROMPT + + +@deprecated( + since="0.2.13", + message=( + "This class is deprecated and will be removed in langchain 1.0. " + "See API reference for replacement: " + "https://api.python.langchain.com/en/latest/chains/langchain.chains.llm_math.base.LLMMathChain.html" # noqa: E501 + ), + removal="1.0", +) +class LLMMathChain(Chain): + """Chain that interprets a prompt and executes python code to do math. + + Note: this class is deprecated. See below for a replacement implementation + using LangGraph. The benefits of this implementation are: + + - Uses LLM tool calling features; + - Support for both token-by-token and step-by-step streaming; + - Support for checkpointing and memory of chat history; + - Easier to modify or extend (e.g., with additional tools, structured responses, etc.) + + Install LangGraph with: + + .. code-block:: bash + + pip install -U langgraph + + .. code-block:: python + + import math + from typing import Annotated, Sequence + + from langchain_core.messages import BaseMessage + from langchain_core.runnables import RunnableConfig + from langchain_core.tools import tool + from langchain_openai import ChatOpenAI + from langgraph.graph import END, StateGraph + from langgraph.graph.message import add_messages + from langgraph.prebuilt.tool_node import ToolNode + import numexpr + from typing_extensions import TypedDict + + @tool + def calculator(expression: str) -> str: + \"\"\"Calculate expression using Python's numexpr library. 
+ + Expression should be a single line mathematical expression + that solves the problem. + + Examples: + "37593 * 67" for "37593 times 67" + "37593**(1/5)" for "37593^(1/5)" + \"\"\" + local_dict = {"pi": math.pi, "e": math.e} + return str( + numexpr.evaluate( + expression.strip(), + global_dict={}, # restrict access to globals + local_dict=local_dict, # add common mathematical functions + ) + ) + + llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) + tools = [calculator] + llm_with_tools = llm.bind_tools(tools, tool_choice="any") + + class ChainState(TypedDict): + \"\"\"LangGraph state.\"\"\" + + messages: Annotated[Sequence[BaseMessage], add_messages] + + async def acall_chain(state: ChainState, config: RunnableConfig): + last_message = state["messages"][-1] + response = await llm_with_tools.ainvoke(state["messages"], config) + return {"messages": [response]} + + async def acall_model(state: ChainState, config: RunnableConfig): + response = await llm.ainvoke(state["messages"], config) + return {"messages": [response]} + + graph_builder = StateGraph(ChainState) + graph_builder.add_node("call_tool", acall_chain) + graph_builder.add_node("execute_tool", ToolNode(tools)) + graph_builder.add_node("call_model", acall_model) + graph_builder.set_entry_point("call_tool") + graph_builder.add_edge("call_tool", "execute_tool") + graph_builder.add_edge("execute_tool", "call_model") + graph_builder.add_edge("call_model", END) + chain = graph_builder.compile() + + .. code-block:: python + + example_query = "What is 551368 divided by 82" + + events = chain.astream( + {"messages": [("user", example_query)]}, + stream_mode="values", + ) + async for event in events: + event["messages"][-1].pretty_print() + + .. code-block:: none + + ================================ Human Message ================================= + + What is 551368 divided by 82 + ================================== Ai Message ================================== + Tool Calls: + calculator (call_MEiGXuJjJ7wGU4aOT86QuGJS) + Call ID: call_MEiGXuJjJ7wGU4aOT86QuGJS + Args: + expression: 551368 / 82 + ================================= Tool Message ================================= + Name: calculator + + 6724.0 + ================================== Ai Message ================================== + + 551368 divided by 82 equals 6724. + + Example: + .. code-block:: python + + from langchain.chains import LLMMathChain + from langchain_community.llms import OpenAI + llm_math = LLMMathChain.from_llm(OpenAI()) + """ # noqa: E501 + + llm_chain: LLMChain + llm: Optional[BaseLanguageModel] = None + """[Deprecated] LLM wrapper to use.""" + prompt: BasePromptTemplate = PROMPT + """[Deprecated] Prompt to use to translate to python if necessary.""" + input_key: str = "question" #: :meta private: + output_key: str = "answer" #: :meta private: + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @model_validator(mode="before") + @classmethod + def raise_deprecation(cls, values: dict) -> Any: + try: + import numexpr # noqa: F401 + except ImportError: + raise ImportError( + "LLMMathChain requires the numexpr package. " + "Please install it with `pip install numexpr`." + ) + if "llm" in values: + warnings.warn( + "Directly instantiating an LLMMathChain with an llm is deprecated. " + "Please instantiate with llm_chain argument or using the from_llm " + "class method." 
+ ) + if "llm_chain" not in values and values["llm"] is not None: + prompt = values.get("prompt", PROMPT) + values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt) + return values + + @property + def input_keys(self) -> list[str]: + """Expect input key. + + :meta private: + """ + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + """Expect output key. + + :meta private: + """ + return [self.output_key] + + def _evaluate_expression(self, expression: str) -> str: + import numexpr + + try: + local_dict = {"pi": math.pi, "e": math.e} + output = str( + numexpr.evaluate( + expression.strip(), + global_dict={}, # restrict access to globals + local_dict=local_dict, # add common mathematical functions + ) + ) + except Exception as e: + raise ValueError( + f'LLMMathChain._evaluate("{expression}") raised error: {e}.' + " Please try again with a valid numerical expression" + ) + + # Remove any leading and trailing brackets from the output + return re.sub(r"^\[|\]$", "", output) + + def _process_llm_result( + self, llm_output: str, run_manager: CallbackManagerForChainRun + ) -> dict[str, str]: + run_manager.on_text(llm_output, color="green", verbose=self.verbose) + llm_output = llm_output.strip() + text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL) + if text_match: + expression = text_match.group(1) + output = self._evaluate_expression(expression) + run_manager.on_text("\nAnswer: ", verbose=self.verbose) + run_manager.on_text(output, color="yellow", verbose=self.verbose) + answer = "Answer: " + output + elif llm_output.startswith("Answer:"): + answer = llm_output + elif "Answer:" in llm_output: + answer = "Answer: " + llm_output.split("Answer:")[-1] + else: + raise ValueError(f"unknown format from LLM: {llm_output}") + return {self.output_key: answer} + + async def _aprocess_llm_result( + self, + llm_output: str, + run_manager: AsyncCallbackManagerForChainRun, + ) -> dict[str, str]: + await run_manager.on_text(llm_output, color="green", verbose=self.verbose) + llm_output = llm_output.strip() + text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL) + if text_match: + expression = text_match.group(1) + output = self._evaluate_expression(expression) + await run_manager.on_text("\nAnswer: ", verbose=self.verbose) + await run_manager.on_text(output, color="yellow", verbose=self.verbose) + answer = "Answer: " + output + elif llm_output.startswith("Answer:"): + answer = llm_output + elif "Answer:" in llm_output: + answer = "Answer: " + llm_output.split("Answer:")[-1] + else: + raise ValueError(f"unknown format from LLM: {llm_output}") + return {self.output_key: answer} + + def _call( + self, + inputs: dict[str, str], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + _run_manager.on_text(inputs[self.input_key]) + llm_output = self.llm_chain.predict( + question=inputs[self.input_key], + stop=["```output"], + callbacks=_run_manager.get_child(), + ) + return self._process_llm_result(llm_output, _run_manager) + + async def _acall( + self, + inputs: dict[str, str], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, str]: + _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + await _run_manager.on_text(inputs[self.input_key]) + llm_output = await self.llm_chain.apredict( + question=inputs[self.input_key], + stop=["```output"], + callbacks=_run_manager.get_child(), + ) + return await 
self._aprocess_llm_result(llm_output, _run_manager)
+
+    @property
+    def _chain_type(self) -> str:
+        return "llm_math_chain"
+
+    @classmethod
+    def from_llm(
+        cls,
+        llm: BaseLanguageModel,
+        prompt: BasePromptTemplate = PROMPT,
+        **kwargs: Any,
+    ) -> LLMMathChain:
+        llm_chain = LLMChain(llm=llm, prompt=prompt)
+        return cls(llm_chain=llm_chain, **kwargs)
diff --git a/venv/Lib/site-packages/langchain/chains/llm_math/prompt.py b/venv/Lib/site-packages/langchain/chains/llm_math/prompt.py
new file mode 100644
index 00000000..8c0fd9e8
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/llm_math/prompt.py
@@ -0,0 +1,44 @@
+# flake8: noqa
+from langchain_core.prompts.prompt import PromptTemplate
+
+_PROMPT_TEMPLATE = """Translate a math problem into an expression that can be executed using Python's numexpr library. Use the output of running this code to answer the question.
+
+Question: ${{Question with math problem.}}
+```text
+${{single line mathematical expression that solves the problem}}
+```
+...numexpr.evaluate(text)...
+```output
+${{Output of running the code}}
+```
+Answer: ${{Answer}}
+
+Begin.
+
+Question: What is 37593 * 67?
+```text
+37593 * 67
+```
+...numexpr.evaluate("37593 * 67")...
+```output
+2518731
+```
+Answer: 2518731
+
+Question: 37593^(1/5)
+```text
+37593**(1/5)
+```
+...numexpr.evaluate("37593**(1/5)")...
+```output
+8.222831614237718
+```
+Answer: 8.222831614237718
+
+Question: {question}
+"""
+
+PROMPT = PromptTemplate(
+    input_variables=["question"],
+    template=_PROMPT_TEMPLATE,
+)
diff --git a/venv/Lib/site-packages/langchain/chains/llm_requests.py b/venv/Lib/site-packages/langchain/chains/llm_requests.py
new file mode 100644
index 00000000..dca66132
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/llm_requests.py
@@ -0,0 +1,23 @@
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.chains.llm_requests import LLMRequestsChain
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = {
+    "LLMRequestsChain": "langchain_community.chains.llm_requests",
+}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = ["LLMRequestsChain"]
diff --git a/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/__init__.py b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/__init__.py
new file mode 100644
index 00000000..38599081
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/__init__.py
@@ -0,0 +1,7 @@
+"""Summarization checker chain for verifying accuracy of text generation.
+
+Chain that tries to verify the accuracy of text generation by splitting it into a
+list of facts, then checking if those facts are true or not, and rewriting
+the text to make it more truthful. It will repeat this loop until it hits `max_tries`
+or gets to a "true" output. 
+""" diff --git a/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a0992915 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..cce32505 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/base.py b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/base.py new file mode 100644 index 00000000..e1b02a2d --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/base.py @@ -0,0 +1,202 @@ +"""Chain for summarization with self-verification.""" + +from __future__ import annotations + +import warnings +from pathlib import Path +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import CallbackManagerForChainRun +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts.prompt import PromptTemplate +from pydantic import ConfigDict, model_validator + +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.chains.sequential import SequentialChain + +PROMPTS_DIR = Path(__file__).parent / "prompts" + +CREATE_ASSERTIONS_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "create_facts.txt") +CHECK_ASSERTIONS_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "check_facts.txt") +REVISED_SUMMARY_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "revise_summary.txt") +ARE_ALL_TRUE_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "are_all_true_prompt.txt") + + +def _load_sequential_chain( + llm: BaseLanguageModel, + create_assertions_prompt: PromptTemplate, + check_assertions_prompt: PromptTemplate, + revised_summary_prompt: PromptTemplate, + are_all_true_prompt: PromptTemplate, + verbose: bool = False, +) -> SequentialChain: + chain = SequentialChain( + chains=[ + LLMChain( + llm=llm, + prompt=create_assertions_prompt, + output_key="assertions", + verbose=verbose, + ), + LLMChain( + llm=llm, + prompt=check_assertions_prompt, + output_key="checked_assertions", + verbose=verbose, + ), + LLMChain( + llm=llm, + prompt=revised_summary_prompt, + output_key="revised_summary", + verbose=verbose, + ), + LLMChain( + llm=llm, + output_key="all_true", + prompt=are_all_true_prompt, + verbose=verbose, + ), + ], + input_variables=["summary"], + output_variables=["all_true", "revised_summary"], + verbose=verbose, + ) + return chain + + +@deprecated( + since="0.2.13", + message=( + "See LangGraph guides for a variety of self-reflection and corrective " + "strategies for question-answering and other tasks: " + "https://langchain-ai.github.io/langgraph/tutorials/rag/langgraph_self_rag/" + ), + removal="1.0", +) +class LLMSummarizationCheckerChain(Chain): + """Chain for question-answering with self-verification. + + Example: + .. 
code-block:: python + + from langchain_community.llms import OpenAI + from langchain.chains import LLMSummarizationCheckerChain + llm = OpenAI(temperature=0.0) + checker_chain = LLMSummarizationCheckerChain.from_llm(llm) + """ + + sequential_chain: SequentialChain + llm: Optional[BaseLanguageModel] = None + """[Deprecated] LLM wrapper to use.""" + + create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT + """[Deprecated]""" + check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT + """[Deprecated]""" + revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT + """[Deprecated]""" + are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT + """[Deprecated]""" + + input_key: str = "query" #: :meta private: + output_key: str = "result" #: :meta private: + max_checks: int = 2 + """Maximum number of times to check the assertions. Default to double-checking.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @model_validator(mode="before") + @classmethod + def raise_deprecation(cls, values: dict) -> Any: + if "llm" in values: + warnings.warn( + "Directly instantiating an LLMSummarizationCheckerChain with an llm is " + "deprecated. Please instantiate with" + " sequential_chain argument or using the from_llm class method." + ) + if "sequential_chain" not in values and values["llm"] is not None: + values["sequential_chain"] = _load_sequential_chain( + values["llm"], + values.get("create_assertions_prompt", CREATE_ASSERTIONS_PROMPT), + values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT), + values.get("revised_summary_prompt", REVISED_SUMMARY_PROMPT), + values.get("are_all_true_prompt", ARE_ALL_TRUE_PROMPT), + verbose=values.get("verbose", False), + ) + return values + + @property + def input_keys(self) -> list[str]: + """Return the singular input key. + + :meta private: + """ + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + """Return the singular output key. 
+ + :meta private: + """ + return [self.output_key] + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + all_true = False + count = 0 + output = None + original_input = inputs[self.input_key] + chain_input = original_input + while not all_true and count < self.max_checks: + output = self.sequential_chain( + {"summary": chain_input}, callbacks=_run_manager.get_child() + ) + count += 1 + + if output["all_true"].strip() == "True": + break + + if self.verbose: + print(output["revised_summary"]) # noqa: T201 + + chain_input = output["revised_summary"] + + if not output: + raise ValueError("No output from chain") + + return {self.output_key: output["revised_summary"].strip()} + + @property + def _chain_type(self) -> str: + return "llm_summarization_checker_chain" + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT, + check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT, + revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT, + are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT, + verbose: bool = False, + **kwargs: Any, + ) -> LLMSummarizationCheckerChain: + chain = _load_sequential_chain( + llm, + create_assertions_prompt, + check_assertions_prompt, + revised_summary_prompt, + are_all_true_prompt, + verbose=verbose, + ) + return cls(sequential_chain=chain, verbose=verbose, **kwargs) diff --git a/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/are_all_true_prompt.txt b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/are_all_true_prompt.txt new file mode 100644 index 00000000..cb1bedab --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/are_all_true_prompt.txt @@ -0,0 +1,38 @@ +Below are some assertions that have been fact checked and are labeled as true or false. + +If all of the assertions are true, return "True". If any of the assertions are false, return "False". + +Here are some examples: +=== + +Checked Assertions: """ +- The sky is red: False +- Water is made of lava: False +- The sun is a star: True +""" +Result: False + +=== + +Checked Assertions: """ +- The sky is blue: True +- Water is wet: True +- The sun is a star: True +""" +Result: True + +=== + +Checked Assertions: """ +- The sky is blue - True +- Water is made of lava- False +- The sun is a star - True +""" +Result: False + +=== + +Checked Assertions:""" +{checked_assertions} +""" +Result: \ No newline at end of file diff --git a/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/check_facts.txt b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/check_facts.txt new file mode 100644 index 00000000..b675d318 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/check_facts.txt @@ -0,0 +1,10 @@ +You are an expert fact checker. You have been hired by a major news organization to fact check a very important story. + +Here is a bullet point list of facts: +""" +{assertions} +""" + +For each fact, determine whether it is true or false about the subject. If you are unable to determine whether the fact is true or false, output "Undetermined". +If the fact is false, explain why. 
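Taken together, these four prompt files drive the create → check → revise → verify loop that `LLMSummarizationCheckerChain._call` repeats up to `max_checks` times. A minimal sketch of that loop using `prompt | llm` composition (assuming the `*_PROMPT` templates loaded in `base.py` above and any LLM runnable `llm`; the chain's actual implementation uses the `SequentialChain` shown earlier):

```python
# Minimal sketch, not the chain's actual implementation: the prompt variable
# names (summary, assertions, checked_assertions) match the SequentialChain
# defined in base.py; `llm` is assumed to be any LangChain LLM/chat runnable.
from langchain_core.output_parsers import StrOutputParser

from langchain.chains.llm_summarization_checker.base import (
    ARE_ALL_TRUE_PROMPT,
    CHECK_ASSERTIONS_PROMPT,
    CREATE_ASSERTIONS_PROMPT,
    REVISED_SUMMARY_PROMPT,
)


def check_summary(llm, summary: str, max_checks: int = 2) -> str:
    parse = StrOutputParser()
    for _ in range(max_checks):
        # 1. Extract facts, 2. fact-check them, 3. rewrite the summary,
        # 4. stop early once every assertion is judged true.
        assertions = (CREATE_ASSERTIONS_PROMPT | llm | parse).invoke(
            {"summary": summary}
        )
        checked = (CHECK_ASSERTIONS_PROMPT | llm | parse).invoke(
            {"assertions": assertions}
        )
        summary = (REVISED_SUMMARY_PROMPT | llm | parse).invoke(
            {"checked_assertions": checked, "summary": summary}
        )
        all_true = (ARE_ALL_TRUE_PROMPT | llm | parse).invoke(
            {"checked_assertions": checked}
        )
        if all_true.strip() == "True":
            break
    return summary.strip()
```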
+ diff --git a/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/create_facts.txt b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/create_facts.txt new file mode 100644 index 00000000..e85079a1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/create_facts.txt @@ -0,0 +1,10 @@ +Given some text, extract a list of facts from the text. + +Format your output as a bulleted list. + +Text: +""" +{summary} +""" + +Facts: \ No newline at end of file diff --git a/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/revise_summary.txt b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/revise_summary.txt new file mode 100644 index 00000000..dbfc4d8e --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/llm_summarization_checker/prompts/revise_summary.txt @@ -0,0 +1,17 @@ +Below are some assertions that have been fact checked and are labeled as true or false. If the answer is false, a suggestion is given for a correction. + +Checked Assertions: +""" +{checked_assertions} +""" + +Original Summary: +""" +{summary} +""" + +Using these checked assertions, rewrite the original summary to be completely true. + +The output should have the same structure and formatting as the original summary. + +Summary: \ No newline at end of file diff --git a/venv/Lib/site-packages/langchain/chains/llm_symbolic_math/__init__.py b/venv/Lib/site-packages/langchain/chains/llm_symbolic_math/__init__.py new file mode 100644 index 00000000..a3ce265f --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/llm_symbolic_math/__init__.py @@ -0,0 +1,9 @@ +def __getattr__(name: str = "") -> None: + """Raise an error on import since this module is deprecated.""" + raise AttributeError( + "This module has been moved to langchain-experimental. " + "For more details: https://github.com/langchain-ai/langchain/discussions/11352. " + "To access this code, install it with `pip install langchain-experimental`. "
+ "`from langchain_experimental.llm_symbolic_math.base " + "import LLMSymbolicMathChain`" + ) diff --git a/venv/Lib/site-packages/langchain/chains/llm_symbolic_math/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/llm_symbolic_math/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..bba650e0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/llm_symbolic_math/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/loading.py b/venv/Lib/site-packages/langchain/chains/loading.py new file mode 100644 index 00000000..3ce5f4b4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/loading.py @@ -0,0 +1,715 @@ +"""Functionality for loading chains.""" + +from __future__ import annotations + +import json +from pathlib import Path +from typing import TYPE_CHECKING, Any, Union + +import yaml +from langchain_core._api import deprecated +from langchain_core.prompts.loading import ( + _load_output_parser, + load_prompt, + load_prompt_from_config, +) + +from langchain.chains import ReduceDocumentsChain +from langchain.chains.api.base import APIChain +from langchain.chains.base import Chain +from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain +from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain +from langchain.chains.combine_documents.refine import RefineDocumentsChain +from langchain.chains.combine_documents.stuff import StuffDocumentsChain +from langchain.chains.hyde.base import HypotheticalDocumentEmbedder +from langchain.chains.llm import LLMChain +from langchain.chains.llm_checker.base import LLMCheckerChain +from langchain.chains.llm_math.base import LLMMathChain +from langchain.chains.qa_with_sources.base import QAWithSourcesChain +from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain +from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain +from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA + +if TYPE_CHECKING: + from langchain_community.chains.graph_qa.cypher import GraphCypherQAChain + + from langchain.chains.llm_requests import LLMRequestsChain + +try: + from langchain_community.llms.loading import load_llm, load_llm_from_config +except ImportError: + + def load_llm(*args: Any, **kwargs: Any) -> None: + raise ImportError( + "To use this load_llm functionality you must install the " + "langchain_community package. " + "You can install it with `pip install langchain_community`" + ) + + def load_llm_from_config(*args: Any, **kwargs: Any) -> None: + raise ImportError( + "To use this load_llm_from_config functionality you must install the " + "langchain_community package. 
" + "You can install it with `pip install langchain_community`" + ) + + +URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/" + + +def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain: + """Load LLM chain from config dict.""" + if "llm" in config: + llm_config = config.pop("llm") + llm = load_llm_from_config(llm_config, **kwargs) + elif "llm_path" in config: + llm = load_llm(config.pop("llm_path"), **kwargs) + else: + raise ValueError("One of `llm` or `llm_path` must be present.") + + if "prompt" in config: + prompt_config = config.pop("prompt") + prompt = load_prompt_from_config(prompt_config) + elif "prompt_path" in config: + prompt = load_prompt(config.pop("prompt_path")) + else: + raise ValueError("One of `prompt` or `prompt_path` must be present.") + _load_output_parser(config) + + return LLMChain(llm=llm, prompt=prompt, **config) + + +def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder: + """Load hypothetical document embedder chain from config dict.""" + if "llm_chain" in config: + llm_chain_config = config.pop("llm_chain") + llm_chain = load_chain_from_config(llm_chain_config, **kwargs) + elif "llm_chain_path" in config: + llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs) + else: + raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") + if "embeddings" in kwargs: + embeddings = kwargs.pop("embeddings") + else: + raise ValueError("`embeddings` must be present.") + return HypotheticalDocumentEmbedder( + llm_chain=llm_chain, + base_embeddings=embeddings, + **config, + ) + + +def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain: + if "llm_chain" in config: + llm_chain_config = config.pop("llm_chain") + llm_chain = load_chain_from_config(llm_chain_config, **kwargs) + elif "llm_chain_path" in config: + llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs) + else: + raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") + + if not isinstance(llm_chain, LLMChain): + raise ValueError(f"Expected LLMChain, got {llm_chain}") + + if "document_prompt" in config: + prompt_config = config.pop("document_prompt") + document_prompt = load_prompt_from_config(prompt_config) + elif "document_prompt_path" in config: + document_prompt = load_prompt(config.pop("document_prompt_path")) + else: + raise ValueError( + "One of `document_prompt` or `document_prompt_path` must be present." 
+ ) + + return StuffDocumentsChain( + llm_chain=llm_chain, document_prompt=document_prompt, **config + ) + + +def _load_map_reduce_documents_chain( + config: dict, **kwargs: Any +) -> MapReduceDocumentsChain: + if "llm_chain" in config: + llm_chain_config = config.pop("llm_chain") + llm_chain = load_chain_from_config(llm_chain_config, **kwargs) + elif "llm_chain_path" in config: + llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs) + else: + raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") + + if not isinstance(llm_chain, LLMChain): + raise ValueError(f"Expected LLMChain, got {llm_chain}") + + if "reduce_documents_chain" in config: + reduce_documents_chain = load_chain_from_config( + config.pop("reduce_documents_chain"), **kwargs + ) + elif "reduce_documents_chain_path" in config: + reduce_documents_chain = load_chain( + config.pop("reduce_documents_chain_path"), **kwargs + ) + else: + reduce_documents_chain = _load_reduce_documents_chain(config, **kwargs) + + return MapReduceDocumentsChain( + llm_chain=llm_chain, + reduce_documents_chain=reduce_documents_chain, # type: ignore[arg-type] + **config, + ) + + +def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain: + combine_documents_chain = None + collapse_documents_chain = None + + if "combine_documents_chain" in config: + combine_document_chain_config = config.pop("combine_documents_chain") + combine_documents_chain = load_chain_from_config( + combine_document_chain_config, **kwargs + ) + elif "combine_document_chain" in config: + combine_document_chain_config = config.pop("combine_document_chain") + combine_documents_chain = load_chain_from_config( + combine_document_chain_config, **kwargs + ) + elif "combine_documents_chain_path" in config: + combine_documents_chain = load_chain( + config.pop("combine_documents_chain_path"), **kwargs + ) + elif "combine_document_chain_path" in config: + combine_documents_chain = load_chain( + config.pop("combine_document_chain_path"), **kwargs + ) + else: + raise ValueError( + "One of `combine_documents_chain` or " + "`combine_documents_chain_path` must be present." 
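All of these loaders share one convention: every sub-component can appear either inline (`llm`, `prompt`, `llm_chain`, ...) or as a file reference (`llm_path`, `prompt_path`, ...). A hedged sketch of a config dict that `_load_llm_chain` should accept; the `_type` values for the LLM and prompt come from the community/prompt serializers, so treat the concrete values as illustrative:

```python
from langchain.chains.loading import load_chain_from_config

# Illustrative config: inline LLM spec plus inline prompt spec.
# "_type": "openai" assumes langchain_community's OpenAI LLM serializer.
config = {
    "_type": "llm_chain",
    "llm": {"_type": "openai", "temperature": 0.0},
    "prompt": {
        "_type": "prompt",
        "input_variables": ["question"],
        "template": "Answer in one sentence: {question}",
    },
}
chain = load_chain_from_config(config)
```

Swapping `"llm"` for `"llm_path": "llm.json"` (and likewise for the prompt) defers those pieces to separate files, which is exactly the branch structure each loader above implements.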
+ ) + + if "collapse_documents_chain" in config: + collapse_document_chain_config = config.pop("collapse_documents_chain") + if collapse_document_chain_config is None: + collapse_documents_chain = None + else: + collapse_documents_chain = load_chain_from_config( + collapse_document_chain_config, **kwargs + ) + elif "collapse_documents_chain_path" in config: + collapse_documents_chain = load_chain( + config.pop("collapse_documents_chain_path"), **kwargs + ) + elif "collapse_document_chain" in config: + collapse_document_chain_config = config.pop("collapse_document_chain") + if collapse_document_chain_config is None: + collapse_documents_chain = None + else: + collapse_documents_chain = load_chain_from_config( + collapse_document_chain_config, **kwargs + ) + elif "collapse_document_chain_path" in config: + collapse_documents_chain = load_chain( + config.pop("collapse_document_chain_path"), **kwargs + ) + + return ReduceDocumentsChain( + combine_documents_chain=combine_documents_chain, + collapse_documents_chain=collapse_documents_chain, + **config, + ) + + +def _load_llm_bash_chain(config: dict, **kwargs: Any) -> Any: + from langchain_experimental.llm_bash.base import LLMBashChain + + llm_chain = None + if "llm_chain" in config: + llm_chain_config = config.pop("llm_chain") + llm_chain = load_chain_from_config(llm_chain_config, **kwargs) + elif "llm_chain_path" in config: + llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs) + # llm attribute is deprecated in favor of llm_chain, here to support old configs + elif "llm" in config: + llm_config = config.pop("llm") + llm = load_llm_from_config(llm_config, **kwargs) + # llm_path attribute is deprecated in favor of llm_chain_path, + # it's here to support old configs + elif "llm_path" in config: + llm = load_llm(config.pop("llm_path"), **kwargs) + else: + raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") + if "prompt" in config: + prompt_config = config.pop("prompt") + prompt = load_prompt_from_config(prompt_config) + elif "prompt_path" in config: + prompt = load_prompt(config.pop("prompt_path")) + if llm_chain: + return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config) + else: + return LLMBashChain(llm=llm, prompt=prompt, **config) + + +def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain: + if "llm" in config: + llm_config = config.pop("llm") + llm = load_llm_from_config(llm_config, **kwargs) + elif "llm_path" in config: + llm = load_llm(config.pop("llm_path"), **kwargs) + else: + raise ValueError("One of `llm` or `llm_path` must be present.") + if "create_draft_answer_prompt" in config: + create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt") + create_draft_answer_prompt = load_prompt_from_config( + create_draft_answer_prompt_config + ) + elif "create_draft_answer_prompt_path" in config: + create_draft_answer_prompt = load_prompt( + config.pop("create_draft_answer_prompt_path") + ) + if "list_assertions_prompt" in config: + list_assertions_prompt_config = config.pop("list_assertions_prompt") + list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config) + elif "list_assertions_prompt_path" in config: + list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path")) + if "check_assertions_prompt" in config: + check_assertions_prompt_config = config.pop("check_assertions_prompt") + check_assertions_prompt = load_prompt_from_config( + check_assertions_prompt_config + ) + elif "check_assertions_prompt_path" in config: + 
check_assertions_prompt = load_prompt( + config.pop("check_assertions_prompt_path") + ) + if "revised_answer_prompt" in config: + revised_answer_prompt_config = config.pop("revised_answer_prompt") + revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config) + elif "revised_answer_prompt_path" in config: + revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path")) + return LLMCheckerChain( + llm=llm, + create_draft_answer_prompt=create_draft_answer_prompt, # type: ignore[arg-type] + list_assertions_prompt=list_assertions_prompt, # type: ignore[arg-type] + check_assertions_prompt=check_assertions_prompt, # type: ignore[arg-type] + revised_answer_prompt=revised_answer_prompt, # type: ignore[arg-type] + **config, + ) + + +def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain: + llm_chain = None + if "llm_chain" in config: + llm_chain_config = config.pop("llm_chain") + llm_chain = load_chain_from_config(llm_chain_config, **kwargs) + elif "llm_chain_path" in config: + llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs) + # llm attribute is deprecated in favor of llm_chain, here to support old configs + elif "llm" in config: + llm_config = config.pop("llm") + llm = load_llm_from_config(llm_config, **kwargs) + # llm_path attribute is deprecated in favor of llm_chain_path, + # it's here to support old configs + elif "llm_path" in config: + llm = load_llm(config.pop("llm_path"), **kwargs) + else: + raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") + if "prompt" in config: + prompt_config = config.pop("prompt") + prompt = load_prompt_from_config(prompt_config) + elif "prompt_path" in config: + prompt = load_prompt(config.pop("prompt_path")) + if llm_chain: + return LLMMathChain(llm_chain=llm_chain, prompt=prompt, **config) # type: ignore[arg-type] + else: + return LLMMathChain(llm=llm, prompt=prompt, **config) + + +def _load_map_rerank_documents_chain( + config: dict, **kwargs: Any +) -> MapRerankDocumentsChain: + if "llm_chain" in config: + llm_chain_config = config.pop("llm_chain") + llm_chain = load_chain_from_config(llm_chain_config, **kwargs) + elif "llm_chain_path" in config: + llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs) + else: + raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") + return MapRerankDocumentsChain(llm_chain=llm_chain, **config) # type: ignore[arg-type] + + +def _load_pal_chain(config: dict, **kwargs: Any) -> Any: + from langchain_experimental.pal_chain import PALChain + + if "llm_chain" in config: + llm_chain_config = config.pop("llm_chain") + llm_chain = load_chain_from_config(llm_chain_config, **kwargs) + elif "llm_chain_path" in config: + llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs) + else: + raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") + return PALChain(llm_chain=llm_chain, **config) + + +def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain: + if "initial_llm_chain" in config: + initial_llm_chain_config = config.pop("initial_llm_chain") + initial_llm_chain = load_chain_from_config(initial_llm_chain_config, **kwargs) + elif "initial_llm_chain_path" in config: + initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"), **kwargs) + else: + raise ValueError( + "One of `initial_llm_chain` or `initial_llm_chain_path` must be present."
+ ) + if "refine_llm_chain" in config: + refine_llm_chain_config = config.pop("refine_llm_chain") + refine_llm_chain = load_chain_from_config(refine_llm_chain_config, **kwargs) + elif "refine_llm_chain_path" in config: + refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"), **kwargs) + else: + raise ValueError( + "One of `refine_llm_chain` or `refine_llm_chain_path` must be present." + ) + if "document_prompt" in config: + prompt_config = config.pop("document_prompt") + document_prompt = load_prompt_from_config(prompt_config) + elif "document_prompt_path" in config: + document_prompt = load_prompt(config.pop("document_prompt_path")) + return RefineDocumentsChain( + initial_llm_chain=initial_llm_chain, # type: ignore[arg-type] + refine_llm_chain=refine_llm_chain, # type: ignore[arg-type] + document_prompt=document_prompt, + **config, + ) + + +def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain: + if "combine_documents_chain" in config: + combine_documents_chain_config = config.pop("combine_documents_chain") + combine_documents_chain = load_chain_from_config( + combine_documents_chain_config, **kwargs + ) + elif "combine_documents_chain_path" in config: + combine_documents_chain = load_chain( + config.pop("combine_documents_chain_path"), **kwargs + ) + else: + raise ValueError( + "One of `combine_documents_chain` or " + "`combine_documents_chain_path` must be present." + ) + return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config) # type: ignore[arg-type] + + +def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any: + from langchain_experimental.sql import SQLDatabaseChain + + if "database" in kwargs: + database = kwargs.pop("database") + else: + raise ValueError("`database` must be present.") + if "llm_chain" in config: + llm_chain_config = config.pop("llm_chain") + chain = load_chain_from_config(llm_chain_config, **kwargs) + return SQLDatabaseChain(llm_chain=chain, database=database, **config) + if "llm" in config: + llm_config = config.pop("llm") + llm = load_llm_from_config(llm_config, **kwargs) + elif "llm_path" in config: + llm = load_llm(config.pop("llm_path"), **kwargs) + else: + raise ValueError("One of `llm` or `llm_path` must be present.") + if "prompt" in config: + prompt_config = config.pop("prompt") + prompt = load_prompt_from_config(prompt_config) + else: + prompt = None + + return SQLDatabaseChain.from_llm(llm, database, prompt=prompt, **config) + + +def _load_vector_db_qa_with_sources_chain( + config: dict, **kwargs: Any +) -> VectorDBQAWithSourcesChain: + if "vectorstore" in kwargs: + vectorstore = kwargs.pop("vectorstore") + else: + raise ValueError("`vectorstore` must be present.") + if "combine_documents_chain" in config: + combine_documents_chain_config = config.pop("combine_documents_chain") + combine_documents_chain = load_chain_from_config( + combine_documents_chain_config, **kwargs + ) + elif "combine_documents_chain_path" in config: + combine_documents_chain = load_chain( + config.pop("combine_documents_chain_path"), **kwargs + ) + else: + raise ValueError( + "One of `combine_documents_chain` or " + "`combine_documents_chain_path` must be present." 
+ ) + return VectorDBQAWithSourcesChain( + combine_documents_chain=combine_documents_chain, # type: ignore[arg-type] + vectorstore=vectorstore, + **config, + ) + + +def _load_retrieval_qa(config: dict, **kwargs: Any) -> RetrievalQA: + if "retriever" in kwargs: + retriever = kwargs.pop("retriever") + else: + raise ValueError("`retriever` must be present.") + if "combine_documents_chain" in config: + combine_documents_chain_config = config.pop("combine_documents_chain") + combine_documents_chain = load_chain_from_config( + combine_documents_chain_config, **kwargs + ) + elif "combine_documents_chain_path" in config: + combine_documents_chain = load_chain( + config.pop("combine_documents_chain_path"), **kwargs + ) + else: + raise ValueError( + "One of `combine_documents_chain` or " + "`combine_documents_chain_path` must be present." + ) + return RetrievalQA( + combine_documents_chain=combine_documents_chain, # type: ignore[arg-type] + retriever=retriever, + **config, + ) + + +def _load_retrieval_qa_with_sources_chain( + config: dict, **kwargs: Any +) -> RetrievalQAWithSourcesChain: + if "retriever" in kwargs: + retriever = kwargs.pop("retriever") + else: + raise ValueError("`retriever` must be present.") + if "combine_documents_chain" in config: + combine_documents_chain_config = config.pop("combine_documents_chain") + combine_documents_chain = load_chain_from_config( + combine_documents_chain_config, **kwargs + ) + elif "combine_documents_chain_path" in config: + combine_documents_chain = load_chain( + config.pop("combine_documents_chain_path"), **kwargs + ) + else: + raise ValueError( + "One of `combine_documents_chain` or " + "`combine_documents_chain_path` must be present." + ) + return RetrievalQAWithSourcesChain( + combine_documents_chain=combine_documents_chain, # type: ignore[arg-type] + retriever=retriever, + **config, + ) + + +def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA: + if "vectorstore" in kwargs: + vectorstore = kwargs.pop("vectorstore") + else: + raise ValueError("`vectorstore` must be present.") + if "combine_documents_chain" in config: + combine_documents_chain_config = config.pop("combine_documents_chain") + combine_documents_chain = load_chain_from_config( + combine_documents_chain_config, **kwargs + ) + elif "combine_documents_chain_path" in config: + combine_documents_chain = load_chain( + config.pop("combine_documents_chain_path"), **kwargs + ) + else: + raise ValueError( + "One of `combine_documents_chain` or " + "`combine_documents_chain_path` must be present." 
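The retrieval-flavored loaders highlight a second convention: objects that cannot be serialized (retrievers, vector stores, graphs, requests wrappers) are never read from the config; they must be passed as keyword arguments at load time. A sketch with a toy retriever, assuming a `retrieval_qa.yaml` config file exists:

```python
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain.chains.loading import load_chain

class TinyRetriever(BaseRetriever):
    """Toy retriever for the sketch: always returns one fixed document."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        return [Document(page_content="stand-in context")]

# The retriever travels through **kwargs, not through the config file.
qa_chain = load_chain("retrieval_qa.yaml", retriever=TinyRetriever())
```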
+ ) + return VectorDBQA( + combine_documents_chain=combine_documents_chain, # type: ignore[arg-type] + vectorstore=vectorstore, + **config, + ) + + +def _load_graph_cypher_chain(config: dict, **kwargs: Any) -> GraphCypherQAChain: + if "graph" in kwargs: + graph = kwargs.pop("graph") + else: + raise ValueError("`graph` must be present.") + if "cypher_generation_chain" in config: + cypher_generation_chain_config = config.pop("cypher_generation_chain") + cypher_generation_chain = load_chain_from_config( + cypher_generation_chain_config, **kwargs + ) + else: + raise ValueError("`cypher_generation_chain` must be present.") + if "qa_chain" in config: + qa_chain_config = config.pop("qa_chain") + qa_chain = load_chain_from_config(qa_chain_config, **kwargs) + else: + raise ValueError("`qa_chain` must be present.") + + try: + from langchain_community.chains.graph_qa.cypher import GraphCypherQAChain + except ImportError: + raise ImportError( + "To use this GraphCypherQAChain functionality you must install the " + "langchain_community package. " + "You can install it with `pip install langchain_community`" + ) + return GraphCypherQAChain( + graph=graph, + cypher_generation_chain=cypher_generation_chain, + qa_chain=qa_chain, + **config, + ) + + +def _load_api_chain(config: dict, **kwargs: Any) -> APIChain: + if "api_request_chain" in config: + api_request_chain_config = config.pop("api_request_chain") + api_request_chain = load_chain_from_config(api_request_chain_config, **kwargs) + elif "api_request_chain_path" in config: + api_request_chain = load_chain(config.pop("api_request_chain_path")) + else: + raise ValueError( + "One of `api_request_chain` or `api_request_chain_path` must be present." + ) + if "api_answer_chain" in config: + api_answer_chain_config = config.pop("api_answer_chain") + api_answer_chain = load_chain_from_config(api_answer_chain_config, **kwargs) + elif "api_answer_chain_path" in config: + api_answer_chain = load_chain(config.pop("api_answer_chain_path"), **kwargs) + else: + raise ValueError( + "One of `api_answer_chain` or `api_answer_chain_path` must be present." + ) + if "requests_wrapper" in kwargs: + requests_wrapper = kwargs.pop("requests_wrapper") + else: + raise ValueError("`requests_wrapper` must be present.") + return APIChain( + api_request_chain=api_request_chain, # type: ignore[arg-type] + api_answer_chain=api_answer_chain, # type: ignore[arg-type] + requests_wrapper=requests_wrapper, + **config, + ) + + +def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain: + try: + from langchain.chains.llm_requests import LLMRequestsChain + except ImportError: + raise ImportError( + "To use this LLMRequestsChain functionality you must install the " + "langchain package. 
" + "You can install it with `pip install langchain`" + ) + + if "llm_chain" in config: + llm_chain_config = config.pop("llm_chain") + llm_chain = load_chain_from_config(llm_chain_config, **kwargs) + elif "llm_chain_path" in config: + llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs) + else: + raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") + if "requests_wrapper" in kwargs: + requests_wrapper = kwargs.pop("requests_wrapper") + return LLMRequestsChain( + llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config + ) + else: + return LLMRequestsChain(llm_chain=llm_chain, **config) + + +type_to_loader_dict = { + "api_chain": _load_api_chain, + "hyde_chain": _load_hyde_chain, + "llm_chain": _load_llm_chain, + "llm_bash_chain": _load_llm_bash_chain, + "llm_checker_chain": _load_llm_checker_chain, + "llm_math_chain": _load_llm_math_chain, + "llm_requests_chain": _load_llm_requests_chain, + "pal_chain": _load_pal_chain, + "qa_with_sources_chain": _load_qa_with_sources_chain, + "stuff_documents_chain": _load_stuff_documents_chain, + "map_reduce_documents_chain": _load_map_reduce_documents_chain, + "reduce_documents_chain": _load_reduce_documents_chain, + "map_rerank_documents_chain": _load_map_rerank_documents_chain, + "refine_documents_chain": _load_refine_documents_chain, + "sql_database_chain": _load_sql_database_chain, + "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain, + "vector_db_qa": _load_vector_db_qa, + "retrieval_qa": _load_retrieval_qa, + "retrieval_qa_with_sources_chain": _load_retrieval_qa_with_sources_chain, + "graph_cypher_chain": _load_graph_cypher_chain, +} + + +@deprecated( + since="0.2.13", + message=( + "This function is deprecated and will be removed in langchain 1.0. " + "At that point chains must be imported from their respective modules." + ), + removal="1.0", +) +def load_chain_from_config(config: dict, **kwargs: Any) -> Chain: + """Load chain from Config Dict.""" + if "_type" not in config: + raise ValueError("Must specify a chain Type in config") + config_type = config.pop("_type") + + if config_type not in type_to_loader_dict: + raise ValueError(f"Loading {config_type} chain not supported") + + chain_loader = type_to_loader_dict[config_type] + return chain_loader(config, **kwargs) + + +@deprecated( + since="0.2.13", + message=( + "This function is deprecated and will be removed in langchain 1.0. " + "At that point chains must be imported from their respective modules." + ), + removal="1.0", +) +def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain: + """Unified method for loading a chain from LangChainHub or local fs.""" + if isinstance(path, str) and path.startswith("lc://"): + raise RuntimeError( + "Loading from the deprecated github-based Hub is no longer supported. " + "Please use the new LangChain Hub at https://smith.langchain.com/hub " + "instead." + ) + return _load_chain_from_file(path, **kwargs) + + +def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain: + """Load chain from file.""" + # Convert file to Path object. + if isinstance(file, str): + file_path = Path(file) + else: + file_path = file + # Load from either json or yaml. 
+ if file_path.suffix == ".json": + with open(file_path) as f: + config = json.load(f) + elif file_path.suffix.endswith((".yaml", ".yml")): + with open(file_path) as f: + config = yaml.safe_load(f) + else: + raise ValueError("File type must be json or yaml") + + # Override default 'verbose' and 'memory' for the chain + if "verbose" in kwargs: + config["verbose"] = kwargs.pop("verbose") + if "memory" in kwargs: + config["memory"] = kwargs.pop("memory") + + # Load the chain from the config now. + return load_chain_from_config(config, **kwargs) diff --git a/venv/Lib/site-packages/langchain/chains/mapreduce.py b/venv/Lib/site-packages/langchain/chains/mapreduce.py new file mode 100644 index 00000000..6e7842f2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/mapreduce.py @@ -0,0 +1,120 @@ +"""Map-reduce chain. + +Splits up a document, sends the smaller parts to the LLM with one prompt, +then combines the results with another one. +""" + +from __future__ import annotations + +from collections.abc import Mapping +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import CallbackManagerForChainRun, Callbacks +from langchain_core.documents import Document +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate +from langchain_text_splitters import TextSplitter +from pydantic import ConfigDict + +from langchain.chains import ReduceDocumentsChain +from langchain.chains.base import Chain +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain +from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain +from langchain.chains.combine_documents.stuff import StuffDocumentsChain +from langchain.chains.llm import LLMChain + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "Refer to migration guide here for a recommended implementation using " + "LangGraph: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/" # noqa: E501 + ". See also LangGraph guides for map-reduce: " + "https://langchain-ai.github.io/langgraph/how-tos/map-reduce/." 
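`load_chain` ties the registry together: `_load_chain_from_file` dispatches on the file suffix (`.json` versus `.yaml`/`.yml`) and lets callers override `verbose` and `memory` at load time. A small round-trip sketch, reusing the illustrative config from the earlier example:

```python
import yaml
from langchain.chains.loading import load_chain

config = {
    "_type": "llm_chain",
    "llm": {"_type": "openai", "temperature": 0.0},
    "prompt": {
        "_type": "prompt",
        "input_variables": ["question"],
        "template": "Answer in one sentence: {question}",
    },
}
with open("qa_chain.yaml", "w") as f:
    yaml.safe_dump(config, f)

# "verbose" is popped from kwargs and written into the config before loading.
chain = load_chain("qa_chain.yaml", verbose=True)
```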
+ ), +) +class MapReduceChain(Chain): + """Map-reduce chain.""" + + combine_documents_chain: BaseCombineDocumentsChain + """Chain to use to combine documents.""" + text_splitter: TextSplitter + """Text splitter to use.""" + input_key: str = "input_text" #: :meta private: + output_key: str = "output_text" #: :meta private: + + @classmethod + def from_params( + cls, + llm: BaseLanguageModel, + prompt: BasePromptTemplate, + text_splitter: TextSplitter, + callbacks: Callbacks = None, + combine_chain_kwargs: Optional[Mapping[str, Any]] = None, + reduce_chain_kwargs: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> MapReduceChain: + """Construct a map-reduce chain that uses the chain for map and reduce.""" + llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks) + stuff_chain = StuffDocumentsChain( + llm_chain=llm_chain, + callbacks=callbacks, + **(reduce_chain_kwargs if reduce_chain_kwargs else {}), + ) + reduce_documents_chain = ReduceDocumentsChain( + combine_documents_chain=stuff_chain + ) + combine_documents_chain = MapReduceDocumentsChain( + llm_chain=llm_chain, + reduce_documents_chain=reduce_documents_chain, + callbacks=callbacks, + **(combine_chain_kwargs if combine_chain_kwargs else {}), + ) + return cls( + combine_documents_chain=combine_documents_chain, + text_splitter=text_splitter, + callbacks=callbacks, + **kwargs, + ) + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @property + def input_keys(self) -> list[str]: + """Expect input key. + + :meta private: + """ + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + """Return output key. + + :meta private: + """ + return [self.output_key] + + def _call( + self, + inputs: dict[str, str], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + # Split the larger text into smaller chunks. + doc_text = inputs.pop(self.input_key) + texts = self.text_splitter.split_text(doc_text) + docs = [Document(page_content=text) for text in texts] + _inputs: dict[str, Any] = { + **inputs, + self.combine_documents_chain.input_key: docs, + } + outputs = self.combine_documents_chain.run( + _inputs, callbacks=_run_manager.get_child() + ) + return {self.output_key: outputs} diff --git a/venv/Lib/site-packages/langchain/chains/moderation.py b/venv/Lib/site-packages/langchain/chains/moderation.py new file mode 100644 index 00000000..e31418a8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/moderation.py @@ -0,0 +1,133 @@ +"""Pass input through a moderation endpoint.""" + +from typing import Any, Optional + +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, +) +from langchain_core.utils import check_package_version, get_from_dict_or_env +from pydantic import Field, model_validator + +from langchain.chains.base import Chain + + +class OpenAIModerationChain(Chain): + """Pass input through a moderation endpoint. + + To use, you should have the ``openai`` python package installed, and the + environment variable ``OPENAI_API_KEY`` set with your API key. + + Any parameters that are valid to be passed to the openai.create call can be passed + in, even if not explicitly saved on this class. + + Example: + .. 
code-block:: python + + from langchain.chains import OpenAIModerationChain + moderation = OpenAIModerationChain() + """ + + client: Any = None #: :meta private: + async_client: Any = None #: :meta private: + model_name: Optional[str] = None + """Moderation model name to use.""" + error: bool = False + """Whether or not to error if bad content was found.""" + input_key: str = "input" #: :meta private: + output_key: str = "output" #: :meta private: + openai_api_key: Optional[str] = None + openai_organization: Optional[str] = None + openai_pre_1_0: bool = Field(default=False) + + @model_validator(mode="before") + @classmethod + def validate_environment(cls, values: dict) -> Any: + """Validate that api key and python package exists in environment.""" + openai_api_key = get_from_dict_or_env( + values, "openai_api_key", "OPENAI_API_KEY" + ) + openai_organization = get_from_dict_or_env( + values, + "openai_organization", + "OPENAI_ORGANIZATION", + default="", + ) + try: + import openai + + openai.api_key = openai_api_key + if openai_organization: + openai.organization = openai_organization + values["openai_pre_1_0"] = False + try: + check_package_version("openai", gte_version="1.0") + except ValueError: + values["openai_pre_1_0"] = True + if values["openai_pre_1_0"]: + values["client"] = openai.Moderation # type: ignore[attr-defined] + else: + values["client"] = openai.OpenAI(api_key=openai_api_key) + values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key) + + except ImportError: + raise ImportError( + "Could not import openai python package. " + "Please install it with `pip install openai`." + ) + return values + + @property + def input_keys(self) -> list[str]: + """Expect input key. + + :meta private: + """ + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + """Return output key. + + :meta private: + """ + return [self.output_key] + + def _moderate(self, text: str, results: Any) -> str: + if self.openai_pre_1_0: + condition = results["flagged"] + else: + condition = results.flagged + if condition: + error_str = "Text was found that violates OpenAI's content policy." + if self.error: + raise ValueError(error_str) + else: + return error_str + return text + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + text = inputs[self.input_key] + if self.openai_pre_1_0: + results = self.client.create(text) + output = self._moderate(text, results["results"][0]) + else: + results = self.client.moderations.create(input=text) + output = self._moderate(text, results.results[0]) + return {self.output_key: output} + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + if self.openai_pre_1_0: + return await super()._acall(inputs, run_manager=run_manager) + text = inputs[self.input_key] + results = await self.async_client.moderations.create(input=text) + output = self._moderate(text, results.results[0]) + return {self.output_key: output} diff --git a/venv/Lib/site-packages/langchain/chains/natbot/__init__.py b/venv/Lib/site-packages/langchain/chains/natbot/__init__.py new file mode 100644 index 00000000..45a2231a --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/natbot/__init__.py @@ -0,0 +1,4 @@ +"""Implement a GPT-3 driven browser. 
+ +Heavily influenced by https://github.com/nat/natbot +""" diff --git a/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..e1dafedc Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..6fe281b7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/crawler.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/crawler.cpython-312.pyc new file mode 100644 index 00000000..71b98559 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/crawler.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..a055875d Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/natbot/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/natbot/base.py b/venv/Lib/site-packages/langchain/chains/natbot/base.py new file mode 100644 index 00000000..2dcfb6b4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/natbot/base.py @@ -0,0 +1,163 @@ +"""Implement an LLM driven browser.""" + +from __future__ import annotations + +import warnings +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.caches import BaseCache as BaseCache +from langchain_core.callbacks import CallbackManagerForChainRun +from langchain_core.callbacks import Callbacks as Callbacks +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import StrOutputParser +from langchain_core.runnables import Runnable +from pydantic import ConfigDict, model_validator + +from langchain.chains.base import Chain +from langchain.chains.natbot.prompt import PROMPT + + +@deprecated( + since="0.2.13", + message=( + "Importing NatBotChain from langchain is deprecated and will be removed in " + "langchain 1.0. Please import from langchain_community instead: " + "from langchain_community.chains.natbot import NatBotChain. " + "You may need to pip install -U langchain-community." + ), + removal="1.0", +) +class NatBotChain(Chain): + """Implement an LLM driven browser. + + **Security Note**: This toolkit provides code to control a web-browser. + + The web-browser can be used to navigate to: + + - Any URL (including any internal network URLs) + - And local files + + Exercise care if exposing this chain to end-users. Control who is able to + access and use this chain, and isolate the network access of the server + that hosts this chain. + + See https://python.langchain.com/docs/security for more information. + + Example: + .. 
code-block:: python + + from langchain.chains import NatBotChain + from langchain_community.llms import OpenAI + llm = OpenAI(temperature=0.5) + natbot = NatBotChain.from_llm(llm, "Buy me a new hat.") + """ + + llm_chain: Runnable + objective: str + """Objective that NatBot is tasked with completing.""" + llm: Optional[BaseLanguageModel] = None + """[Deprecated] LLM wrapper to use.""" + input_url_key: str = "url" #: :meta private: + input_browser_content_key: str = "browser_content" #: :meta private: + previous_command: str = "" #: :meta private: + output_key: str = "command" #: :meta private: + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @model_validator(mode="before") + @classmethod + def raise_deprecation(cls, values: dict) -> Any: + if "llm" in values: + warnings.warn( + "Directly instantiating a NatBotChain with an llm is deprecated. " + "Please instantiate with llm_chain argument or using the from_llm " + "class method." + ) + if "llm_chain" not in values and values["llm"] is not None: + values["llm_chain"] = PROMPT | values["llm"] | StrOutputParser() + return values + + @classmethod + def from_default(cls, objective: str, **kwargs: Any) -> NatBotChain: + """Load with default LLMChain.""" + raise NotImplementedError( + "This method is no longer implemented. Please use from_llm instead: " + "llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50); " + "NatBotChain.from_llm(llm, objective)" + ) + + @classmethod + def from_llm( + cls, llm: BaseLanguageModel, objective: str, **kwargs: Any + ) -> NatBotChain: + """Load from LLM.""" + llm_chain = PROMPT | llm | StrOutputParser() + return cls(llm_chain=llm_chain, objective=objective, **kwargs) + + @property + def input_keys(self) -> list[str]: + """Expect url and browser content. + + :meta private: + """ + return [self.input_url_key, self.input_browser_content_key] + + @property + def output_keys(self) -> list[str]: + """Return command. + + :meta private: + """ + return [self.output_key] + + def _call( + self, + inputs: dict[str, str], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + url = inputs[self.input_url_key] + browser_content = inputs[self.input_browser_content_key] + llm_cmd = self.llm_chain.invoke( + { + "objective": self.objective, + "url": url[:100], + "previous_command": self.previous_command, + "browser_content": browser_content[:4500], + }, + config={"callbacks": _run_manager.get_child()}, + ) + llm_cmd = llm_cmd.strip() + self.previous_command = llm_cmd + return {self.output_key: llm_cmd} + + def execute(self, url: str, browser_content: str) -> str: + """Figure out next browser command to run. + + Args: + url: URL of the site currently on. + browser_content: Content of the page as currently displayed by the browser. + + Returns: + Next browser command to run. + + Example: + .. code-block:: python + + browser_content = "...." 
+ llm_command = natbot.run("www.google.com", browser_content) + """ + _inputs = { + self.input_url_key: url, + self.input_browser_content_key: browser_content, + } + return self(_inputs)[self.output_key] + + @property + def _chain_type(self) -> str: + return "nat_bot_chain" + + +NatBotChain.model_rebuild() diff --git a/venv/Lib/site-packages/langchain/chains/natbot/crawler.py b/venv/Lib/site-packages/langchain/chains/natbot/crawler.py new file mode 100644 index 00000000..b5fbc553 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/natbot/crawler.py @@ -0,0 +1,446 @@ +# flake8: noqa +import time +from sys import platform +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + List, + Optional, + Set, + Tuple, + TypedDict, + Union, +) + +if TYPE_CHECKING: + from playwright.sync_api import Browser, CDPSession, Page, sync_playwright + +black_listed_elements: Set[str] = { + "html", + "head", + "title", + "meta", + "iframe", + "body", + "script", + "style", + "path", + "svg", + "br", + "::marker", +} + + +class ElementInViewPort(TypedDict): + """A typed dictionary containing information about elements in the viewport.""" + + node_index: str + backend_node_id: int + node_name: Optional[str] + node_value: Optional[str] + node_meta: List[str] + is_clickable: bool + origin_x: int + origin_y: int + center_x: int + center_y: int + + +class Crawler: + """A crawler for web pages. + + **Security Note**: This is an implementation of a crawler that uses a browser via + Playwright. + + This crawler can be used to load arbitrary webpages INCLUDING content + from the local file system. + + Control access to who can submit crawling requests and what network access + the crawler has. + + Make sure to scope permissions to the minimal permissions necessary for + the application. + + See https://python.langchain.com/docs/security for more information. + """ + + def __init__(self) -> None: + try: + from playwright.sync_api import sync_playwright + except ImportError: + raise ImportError( + "Could not import playwright python package. " + "Please install it with `pip install playwright`." 
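`NatBotChain` and `Crawler` are meant to be wired into a read-act loop: crawl the page into the simplified text form, ask the chain for the next command, apply it, repeat. A hedged sketch of that loop; the objective, step bound, and command parsing are simplified and illustrative, and `Crawler()` opens a real (non-headless) Chromium window via Playwright:

```python
from langchain_community.llms import OpenAI
from langchain.chains.natbot.base import NatBotChain
from langchain.chains.natbot.crawler import Crawler

natbot = NatBotChain.from_llm(OpenAI(temperature=0.5), objective="Find today's weather")
crawler = Crawler()
crawler.go_to_page("www.google.com")

for _ in range(10):  # bounded instead of an open-ended loop
    content = "\n".join(crawler.crawl())
    cmd = natbot.execute(crawler.page.url, content)
    if cmd.startswith("SCROLL UP"):
        crawler.scroll("up")
    elif cmd.startswith("SCROLL DOWN"):
        crawler.scroll("down")
    elif cmd.startswith("CLICK"):
        crawler.click(cmd.split()[1])
    elif cmd.startswith("TYPE"):  # covers both TYPE and TYPESUBMIT
        _, elem_id, text = cmd.split(" ", 2)
        crawler.type(elem_id, text.strip('"'))
        if cmd.startswith("TYPESUBMIT"):
            crawler.enter()
```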
+ ) + self.browser: Browser = ( + sync_playwright().start().chromium.launch(headless=False) + ) + self.page: Page = self.browser.new_page() + self.page.set_viewport_size({"width": 1280, "height": 1080}) + self.page_element_buffer: Dict[int, ElementInViewPort] + self.client: CDPSession + + def go_to_page(self, url: str) -> None: + self.page.goto(url=url if "://" in url else "http://" + url) + self.client = self.page.context.new_cdp_session(self.page) + self.page_element_buffer = {} + + def scroll(self, direction: str) -> None: + if direction == "up": + self.page.evaluate( + "(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop - window.innerHeight;" + ) + elif direction == "down": + self.page.evaluate( + "(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop + window.innerHeight;" + ) + + def click(self, id: Union[str, int]) -> None: + # Inject javascript into the page which removes the target= attribute from all links + js = """ + links = document.getElementsByTagName("a"); + for (var i = 0; i < links.length; i++) { + links[i].removeAttribute("target"); + } + """ + self.page.evaluate(js) + + element = self.page_element_buffer.get(int(id)) + if element: + x: float = element["center_x"] + y: float = element["center_y"] + + self.page.mouse.click(x, y) + else: + print("Could not find element") # noqa: T201 + + def type(self, id: Union[str, int], text: str) -> None: + self.click(id) + self.page.keyboard.type(text) + + def enter(self) -> None: + self.page.keyboard.press("Enter") + + def crawl(self) -> List[str]: + page = self.page + page_element_buffer = self.page_element_buffer + start = time.time() + + page_state_as_text = [] + + device_pixel_ratio: float = page.evaluate("window.devicePixelRatio") + if platform == "darwin" and device_pixel_ratio == 1: # lies + device_pixel_ratio = 2 + + win_upper_bound: float = page.evaluate("window.pageYOffset") + win_left_bound: float = page.evaluate("window.pageXOffset") + win_width: float = page.evaluate("window.screen.width") + win_height: float = page.evaluate("window.screen.height") + win_right_bound: float = win_left_bound + win_width + win_lower_bound: float = win_upper_bound + win_height + + # percentage_progress_start = (win_upper_bound / document_scroll_height) * 100 + # percentage_progress_end = ( + # (win_height + win_upper_bound) / document_scroll_height + # ) * 100 + percentage_progress_start = 1 + percentage_progress_end = 2 + + page_state_as_text.append( + { + "x": 0, + "y": 0, + "text": "[scrollbar {:0.2f}-{:0.2f}%]".format( + round(percentage_progress_start, 2), round(percentage_progress_end) + ), + } + ) + + tree = self.client.send( + "DOMSnapshot.captureSnapshot", + {"computedStyles": [], "includeDOMRects": True, "includePaintOrder": True}, + ) + strings: Dict[int, str] = tree["strings"] + document: Dict[str, Any] = tree["documents"][0] + nodes: Dict[str, Any] = document["nodes"] + backend_node_id: Dict[int, int] = nodes["backendNodeId"] + attributes: Dict[int, Dict[int, Any]] = nodes["attributes"] + node_value: Dict[int, int] = nodes["nodeValue"] + parent: Dict[int, int] = nodes["parentIndex"] + node_names: Dict[int, int] = nodes["nodeName"] + is_clickable: Set[int] = set(nodes["isClickable"]["index"]) + + input_value: Dict[str, Any] = nodes["inputValue"] + input_value_index: List[int] = input_value["index"] + input_value_values: List[int] = input_value["value"] + + layout: Dict[str, Any] = document["layout"] + 
layout_node_index: List[int] = layout["nodeIndex"] + bounds: Dict[int, List[float]] = layout["bounds"] + + cursor: int = 0 + + child_nodes: Dict[str, List[Dict[str, Any]]] = {} + elements_in_view_port: List[ElementInViewPort] = [] + + anchor_ancestry: Dict[str, Tuple[bool, Optional[int]]] = {"-1": (False, None)} + button_ancestry: Dict[str, Tuple[bool, Optional[int]]] = {"-1": (False, None)} + + def convert_name( + node_name: Optional[str], has_click_handler: Optional[bool] + ) -> str: + if node_name == "a": + return "link" + if node_name == "input": + return "input" + if node_name == "img": + return "img" + if ( + node_name == "button" or has_click_handler + ): # found pages that needed this quirk + return "button" + else: + return "text" + + def find_attributes( + attributes: Dict[int, Any], keys: List[str] + ) -> Dict[str, str]: + values = {} + + for [key_index, value_index] in zip(*(iter(attributes),) * 2): + if value_index < 0: + continue + key = strings[key_index] + value = strings[value_index] + + if key in keys: + values[key] = value + keys.remove(key) + + if not keys: + return values + + return values + + def add_to_hash_tree( + hash_tree: Dict[str, Tuple[bool, Optional[int]]], + tag: str, + node_id: int, + node_name: Optional[str], + parent_id: int, + ) -> Tuple[bool, Optional[int]]: + parent_id_str = str(parent_id) + if not parent_id_str in hash_tree: + parent_name = strings[node_names[parent_id]].lower() + grand_parent_id = parent[parent_id] + + add_to_hash_tree( + hash_tree, tag, parent_id, parent_name, grand_parent_id + ) + + is_parent_desc_anchor, anchor_id = hash_tree[parent_id_str] + + # even if the anchor is nested in another anchor, we set the "root" for all descendants to be ::Self + if node_name == tag: + value: Tuple[bool, Optional[int]] = (True, node_id) + elif ( + is_parent_desc_anchor + ): # reuse the parent's anchor_id (which could be much higher in the tree) + value = (True, anchor_id) + else: + value = ( + False, + None, + ) # not a descendant of an anchor, most likely it will become text, an interactive element or discarded + + hash_tree[str(node_id)] = value + + return value + + for index, node_name_index in enumerate(node_names): + node_parent = parent[index] + node_name: Optional[str] = strings[node_name_index].lower() + + is_ancestor_of_anchor, anchor_id = add_to_hash_tree( + anchor_ancestry, "a", index, node_name, node_parent + ) + + is_ancestor_of_button, button_id = add_to_hash_tree( + button_ancestry, "button", index, node_name, node_parent + ) + + try: + cursor = layout_node_index.index( + index + ) # todo replace this with proper cursoring, ignoring the fact this is O(n^2) for the moment + except: + continue + + if node_name in black_listed_elements: + continue + + [x, y, width, height] = bounds[cursor] + x /= device_pixel_ratio + y /= device_pixel_ratio + width /= device_pixel_ratio + height /= device_pixel_ratio + + elem_left_bound = x + elem_top_bound = y + elem_right_bound = x + width + elem_lower_bound = y + height + + partially_is_in_viewport = ( + elem_left_bound < win_right_bound + and elem_right_bound >= win_left_bound + and elem_top_bound < win_lower_bound + and elem_lower_bound >= win_upper_bound + ) + + if not partially_is_in_viewport: + continue + + meta_data: List[str] = [] + + # inefficient to grab the same set of keys for kinds of objects, but it's fine for now + element_attributes = find_attributes( + attributes[index], ["type", "placeholder", "aria-label", "title", "alt"] + ) + + ancestor_exception = is_ancestor_of_anchor or 
is_ancestor_of_button + ancestor_node_key = ( + None + if not ancestor_exception + else str(anchor_id) + if is_ancestor_of_anchor + else str(button_id) + ) + ancestor_node = ( + None + if not ancestor_exception + else child_nodes.setdefault(str(ancestor_node_key), []) + ) + + if node_name == "#text" and ancestor_exception and ancestor_node: + text = strings[node_value[index]] + if text == "|" or text == "•": + continue + ancestor_node.append({"type": "type", "value": text}) + else: + if ( + node_name == "input" and element_attributes.get("type") == "submit" + ) or node_name == "button": + node_name = "button" + element_attributes.pop( + "type", None + ) # prevent [button ... (button)..] + + for key in element_attributes: + if ancestor_exception and ancestor_node: + ancestor_node.append( + { + "type": "attribute", + "key": key, + "value": element_attributes[key], + } + ) + else: + meta_data.append(element_attributes[key]) + + element_node_value = None + + if node_value[index] >= 0: + element_node_value = strings[node_value[index]] + if ( + element_node_value == "|" + ): # commonly used as a separator, does not add much context - lets save ourselves some token space + continue + elif ( + node_name == "input" + and index in input_value_index + and element_node_value is None + ): + node_input_text_index = input_value_index.index(index) + text_index = input_value_values[node_input_text_index] + if node_input_text_index >= 0 and text_index >= 0: + element_node_value = strings[text_index] + + # remove redundant elements + if ancestor_exception and (node_name != "a" and node_name != "button"): + continue + + elements_in_view_port.append( + { + "node_index": str(index), + "backend_node_id": backend_node_id[index], + "node_name": node_name, + "node_value": element_node_value, + "node_meta": meta_data, + "is_clickable": index in is_clickable, + "origin_x": int(x), + "origin_y": int(y), + "center_x": int(x + (width / 2)), + "center_y": int(y + (height / 2)), + } + ) + + # lets filter further to remove anything that does not hold any text nor has click handlers + merge text from leaf#text nodes with the parent + elements_of_interest = [] + id_counter = 0 + + for element in elements_in_view_port: + node_index = element.get("node_index") + node_name = element.get("node_name") + element_node_value = element.get("node_value") + node_is_clickable = element.get("is_clickable") + node_meta_data: Optional[List[str]] = element.get("node_meta") + + inner_text = f"{element_node_value} " if element_node_value else "" + meta = "" + + if node_index in child_nodes: + for child in child_nodes[node_index]: + entry_type = child.get("type") + entry_value = child.get("value") + + if entry_type == "attribute" and node_meta_data: + entry_key = child.get("key") + node_meta_data.append(f'{entry_key}="{entry_value}"') + else: + inner_text += f"{entry_value} " + + if node_meta_data: + meta_string = " ".join(node_meta_data) + meta = f" {meta_string}" + + if inner_text != "": + inner_text = f"{inner_text.strip()}" + + converted_node_name = convert_name(node_name, node_is_clickable) + + # not very elegant, more like a placeholder + if ( + (converted_node_name != "button" or meta == "") + and converted_node_name != "link" + and converted_node_name != "input" + and converted_node_name != "img" + and converted_node_name != "textarea" + ) and inner_text.strip() == "": + continue + + page_element_buffer[id_counter] = element + + if inner_text != "": + elements_of_interest.append( + f"""<{converted_node_name} 
id={id_counter}{meta}>{inner_text}""" + ) + else: + elements_of_interest.append( + f"""<{converted_node_name} id={id_counter}{meta}/>""" + ) + id_counter += 1 + + print("Parsing time: {:0.2f} seconds".format(time.time() - start)) # noqa: T201 + return elements_of_interest diff --git a/venv/Lib/site-packages/langchain/chains/natbot/prompt.py b/venv/Lib/site-packages/langchain/chains/natbot/prompt.py new file mode 100644 index 00000000..82a35f58 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/natbot/prompt.py @@ -0,0 +1,144 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +_PROMPT_TEMPLATE = """ +You are an agent controlling a browser. You are given: + + (1) an objective that you are trying to achieve + (2) the URL of your current web page + (3) a simplified text description of what's visible in the browser window (more on that below) + +You can issue these commands: + SCROLL UP - scroll up one page + SCROLL DOWN - scroll down one page + CLICK X - click on a given element. You can only click on links, buttons, and inputs! + TYPE X "TEXT" - type the specified text into the input with id X + TYPESUBMIT X "TEXT" - same as TYPE above, except then it presses ENTER to submit the form + +The format of the browser content is highly simplified; all formatting elements are stripped. +Interactive elements such as links, inputs, buttons are represented like this: + + <link id=1>text</link> + <button id=2>text</button> + <input id=3>text</input> + +Images are rendered as their alt text like this: + + <img id=4 alt=""/> + +Based on your given objective, issue whatever command you believe will get you closest to achieving your goal. +You always start on Google; you should submit a search query to Google that will take you to the best page for +achieving your objective. And then interact with that page to achieve your objective. + +If you find yourself on Google and there are no search results displayed yet, you should probably issue a command +like "TYPESUBMIT 7 "search query"" to get to a more useful page. + +Then, if you find yourself on a Google search results page, you might issue the command "CLICK 24" to click +on the first link in the search results. (If your previous command was a TYPESUBMIT your next command should +probably be a CLICK.) + +Don't try to interact with elements that you can't see. 
+ +Here are some examples: + +EXAMPLE 1: +================================================== +CURRENT BROWSER CONTENT: +------------------ +About +Store +Gmail +Images +(Google apps) +Sign in +(Google) + + + + +Advertising +Business +How Search works +Carbon neutral since 2007 +Privacy +Terms +Settings +------------------ +OBJECTIVE: Find a 2 bedroom house for sale in Anchorage AK for under $750k +CURRENT URL: https://www.google.com/ +YOUR COMMAND: +TYPESUBMIT 8 "anchorage redfin" +================================================== + +EXAMPLE 2: +================================================== +CURRENT BROWSER CONTENT: +------------------ +About +Store +Gmail +Images +(Google apps) +Sign in +(Google) + + + + +Advertising +Business +How Search works +Carbon neutral since 2007 +Privacy +Terms +Settings +------------------ +OBJECTIVE: Make a reservation for 4 at Dorsia at 8pm +CURRENT URL: https://www.google.com/ +YOUR COMMAND: +TYPESUBMIT 8 "dorsia nyc opentable" +================================================== + +EXAMPLE 3: +================================================== +CURRENT BROWSER CONTENT: +------------------ + + + + +OpenTable logo + +Find your table for any occasion + +Sep 28, 2022 +7:00 PM +2 people + + +It looks like you're in Peninsula. Not correct? + + +------------------ +OBJECTIVE: Make a reservation for 4 for dinner at Dorsia in New York City at 8pm +CURRENT URL: https://www.opentable.com/ +YOUR COMMAND: +TYPESUBMIT 12 "dorsia new york city" +================================================== + +The current browser content, objective, and current URL follow. Reply with your next command to the browser. + +CURRENT BROWSER CONTENT: +------------------ +{browser_content} +------------------ + +OBJECTIVE: {objective} +CURRENT URL: {url} +PREVIOUS COMMAND: {previous_command} +YOUR COMMAND: +""" +PROMPT = PromptTemplate( + input_variables=["browser_content", "url", "previous_command", "objective"], + template=_PROMPT_TEMPLATE, +) diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/__init__.py b/venv/Lib/site-packages/langchain/chains/openai_functions/__init__.py new file mode 100644 index 00000000..6312b619 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/openai_functions/__init__.py @@ -0,0 +1,44 @@ +from langchain_core.utils.function_calling import convert_to_openai_function + +from langchain.chains.openai_functions.base import ( + create_openai_fn_chain, + create_structured_output_chain, +) +from langchain.chains.openai_functions.citation_fuzzy_match import ( + create_citation_fuzzy_match_chain, + create_citation_fuzzy_match_runnable, +) +from langchain.chains.openai_functions.extraction import ( + create_extraction_chain, + create_extraction_chain_pydantic, +) +from langchain.chains.openai_functions.qa_with_structure import ( + create_qa_with_sources_chain, + create_qa_with_structure_chain, +) +from langchain.chains.openai_functions.tagging import ( + create_tagging_chain, + create_tagging_chain_pydantic, +) +from langchain.chains.structured_output.base import ( + create_openai_fn_runnable, + create_structured_output_runnable, + get_openai_output_parser, +) + +__all__ = [ + "convert_to_openai_function", + "create_tagging_chain", + "create_tagging_chain_pydantic", + "create_extraction_chain_pydantic", + "create_extraction_chain", + "create_citation_fuzzy_match_chain", + "create_citation_fuzzy_match_runnable", + "create_qa_with_structure_chain", + "create_qa_with_sources_chain", + "create_structured_output_chain", + 
"create_openai_fn_chain", + "create_structured_output_runnable", # backwards compatibility + "create_openai_fn_runnable", # backwards compatibility + "get_openai_output_parser", # backwards compatibility +] diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..159d68e6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..ba397964 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/citation_fuzzy_match.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/citation_fuzzy_match.cpython-312.pyc new file mode 100644 index 00000000..0b9dcdcf Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/citation_fuzzy_match.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/extraction.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/extraction.cpython-312.pyc new file mode 100644 index 00000000..150882c7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/extraction.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/openapi.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/openapi.cpython-312.pyc new file mode 100644 index 00000000..ce189e35 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/openapi.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/qa_with_structure.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/qa_with_structure.cpython-312.pyc new file mode 100644 index 00000000..45f23f07 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/qa_with_structure.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/tagging.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/tagging.cpython-312.pyc new file mode 100644 index 00000000..4ed66035 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/tagging.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..55f904ed Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/openai_functions/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/base.py b/venv/Lib/site-packages/langchain/chains/openai_functions/base.py new file mode 100644 index 00000000..ca191a35 --- /dev/null +++ 
b/venv/Lib/site-packages/langchain/chains/openai_functions/base.py @@ -0,0 +1,232 @@ +"""Methods for creating chains that use OpenAI function-calling APIs.""" + +from collections.abc import Sequence +from typing import ( + Any, + Callable, + Optional, + Union, +) + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import ( + BaseLLMOutputParser, +) +from langchain_core.output_parsers.openai_functions import ( + PydanticAttrOutputFunctionsParser, +) +from langchain_core.prompts import BasePromptTemplate +from langchain_core.utils.function_calling import ( + PYTHON_TO_JSON_TYPES, + convert_to_openai_function, +) +from pydantic import BaseModel + +from langchain.chains import LLMChain +from langchain.chains.structured_output.base import ( + create_openai_fn_runnable, + create_structured_output_runnable, + get_openai_output_parser, +) + +__all__ = [ + "get_openai_output_parser", + "create_openai_fn_runnable", + "create_structured_output_runnable", # deprecated + "create_openai_fn_chain", # deprecated + "create_structured_output_chain", # deprecated + "PYTHON_TO_JSON_TYPES", # backwards compatibility + "convert_to_openai_function", # backwards compatibility +] + + +@deprecated(since="0.1.1", removal="1.0", alternative="create_openai_fn_runnable") +def create_openai_fn_chain( + functions: Sequence[Union[dict[str, Any], type[BaseModel], Callable]], + llm: BaseLanguageModel, + prompt: BasePromptTemplate, + *, + enforce_single_function_usage: bool = True, + output_key: str = "function", + output_parser: Optional[BaseLLMOutputParser] = None, + **kwargs: Any, +) -> LLMChain: + """[Legacy] Create an LLM chain that uses OpenAI functions. + + Args: + functions: A sequence of either dictionaries, pydantic.BaseModels classes, or + Python functions. If dictionaries are passed in, they are assumed to + already be a valid OpenAI functions. If only a single + function is passed in, then it will be enforced that the model use that + function. pydantic.BaseModels and Python functions should have docstrings + describing what the function does. For best results, pydantic.BaseModels + should have descriptions of the parameters and Python functions should have + Google Python style args descriptions in the docstring. Additionally, + Python functions should only use primitive types (str, int, float, bool) or + pydantic.BaseModels for arguments. + llm: Language model to use, assumed to support the OpenAI function-calling API. + prompt: BasePromptTemplate to pass to the model. + enforce_single_function_usage: only used if a single function is passed in. If + True, then the model will be forced to use the given function. If False, + then the model will be given the option to use the given function or not. + output_key: The key to use when returning the output in LLMChain.__call__. + output_parser: BaseLLMOutputParser to use for parsing model outputs. By default + will be inferred from the function types. If pydantic.BaseModels are passed + in, then the OutputParser will try to parse outputs using those. Otherwise + model outputs will simply be parsed as JSON. If multiple functions are + passed in and they are not pydantic.BaseModels, the chain output will + include both the name of the function that was returned and the arguments + to pass to the function. + + Returns: + An LLMChain that will pass in the given functions to the model when run. + + Example: + .. 
code-block:: python + + from typing import Optional + + from langchain.chains.openai_functions import create_openai_fn_chain + from langchain_community.chat_models import ChatOpenAI + from langchain_core.prompts import ChatPromptTemplate + + from pydantic import BaseModel, Field + + + class RecordPerson(BaseModel): + \"\"\"Record some identifying information about a person.\"\"\" + + name: str = Field(..., description="The person's name") + age: int = Field(..., description="The person's age") + fav_food: Optional[str] = Field(None, description="The person's favorite food") + + + class RecordDog(BaseModel): + \"\"\"Record some identifying information about a dog.\"\"\" + + name: str = Field(..., description="The dog's name") + color: str = Field(..., description="The dog's color") + fav_food: Optional[str] = Field(None, description="The dog's favorite food") + + + llm = ChatOpenAI(model="gpt-4", temperature=0) + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a world class algorithm for recording entities."), + ("human", "Make calls to the relevant function to record the entities in the following input: {input}"), + ("human", "Tip: Make sure to answer in the correct format"), + ] + ) + chain = create_openai_fn_chain([RecordPerson, RecordDog], llm, prompt) + chain.run("Harry was a chubby brown beagle who loved chicken") + # -> RecordDog(name="Harry", color="brown", fav_food="chicken") + """ # noqa: E501 + if not functions: + raise ValueError("Need to pass in at least one function. Received zero.") + openai_functions = [convert_to_openai_function(f) for f in functions] + output_parser = output_parser or get_openai_output_parser(functions) + llm_kwargs: dict[str, Any] = { + "functions": openai_functions, + } + if len(openai_functions) == 1 and enforce_single_function_usage: + llm_kwargs["function_call"] = {"name": openai_functions[0]["name"]} + llm_chain = LLMChain( + llm=llm, + prompt=prompt, + output_parser=output_parser, + llm_kwargs=llm_kwargs, + output_key=output_key, + **kwargs, + ) + return llm_chain + + +@deprecated( + since="0.1.1", removal="1.0", alternative="ChatOpenAI.with_structured_output" +) +def create_structured_output_chain( + output_schema: Union[dict[str, Any], type[BaseModel]], + llm: BaseLanguageModel, + prompt: BasePromptTemplate, + *, + output_key: str = "function", + output_parser: Optional[BaseLLMOutputParser] = None, + **kwargs: Any, +) -> LLMChain: + """[Legacy] Create an LLMChain that uses an OpenAI function to get a structured output. + + Args: + output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary + is passed in, it's assumed to already be a valid JsonSchema. + For best results, pydantic.BaseModels should have docstrings describing what + the schema represents and descriptions for the parameters. + llm: Language model to use, assumed to support the OpenAI function-calling API. + prompt: BasePromptTemplate to pass to the model. + output_key: The key to use when returning the output in LLMChain.__call__. + output_parser: BaseLLMOutputParser to use for parsing model outputs. By default + will be inferred from the function types. If pydantic.BaseModels are passed + in, then the OutputParser will try to parse outputs using those. Otherwise + model outputs will simply be parsed as JSON. + + Returns: + An LLMChain that will pass the given function to the model. + + Example: + .. 
code-block:: python + + from typing import Optional + + from langchain.chains.openai_functions import create_structured_output_chain + from langchain_community.chat_models import ChatOpenAI + from langchain_core.prompts import ChatPromptTemplate + + from pydantic import BaseModel, Field + + class Dog(BaseModel): + \"\"\"Identifying information about a dog.\"\"\" + + name: str = Field(..., description="The dog's name") + color: str = Field(..., description="The dog's color") + fav_food: Optional[str] = Field(None, description="The dog's favorite food") + + llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0) + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a world class algorithm for extracting information in structured formats."), + ("human", "Use the given format to extract information from the following input: {input}"), + ("human", "Tip: Make sure to answer in the correct format"), + ] + ) + chain = create_structured_output_chain(Dog, llm, prompt) + chain.run("Harry was a chubby brown beagle who loved chicken") + # -> Dog(name="Harry", color="brown", fav_food="chicken") + """ # noqa: E501 + if isinstance(output_schema, dict): + function: Any = { + "name": "output_formatter", + "description": ( + "Output formatter. Should always be used to format your response to the" + " user." + ), + "parameters": output_schema, + } + else: + + class _OutputFormatter(BaseModel): + """Output formatter. Should always be used to format your response to the user.""" # noqa: E501 + + output: output_schema # type: ignore[valid-type] + + function = _OutputFormatter + output_parser = output_parser or PydanticAttrOutputFunctionsParser( + pydantic_schema=_OutputFormatter, attr_name="output" + ) + return create_openai_fn_chain( + [function], + llm, + prompt, + output_key=output_key, + output_parser=output_parser, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/citation_fuzzy_match.py b/venv/Lib/site-packages/langchain/chains/openai_functions/citation_fuzzy_match.py new file mode 100644 index 00000000..674a0809 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/openai_functions/citation_fuzzy_match.py @@ -0,0 +1,159 @@ +from collections.abc import Iterator + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseChatModel, BaseLanguageModel +from langchain_core.messages import HumanMessage, SystemMessage +from langchain_core.output_parsers.openai_functions import PydanticOutputFunctionsParser +from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate +from langchain_core.runnables import Runnable +from pydantic import BaseModel, Field + +from langchain.chains.llm import LLMChain +from langchain.chains.openai_functions.utils import get_llm_kwargs + + +class FactWithEvidence(BaseModel): + """Class representing a single statement. + + Each fact has a body and a list of sources. + If there are multiple facts make sure to break them apart + such that each one only uses a set of sources that are relevant to it. 
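+
+    The ``substring_quote`` sources are located in the context via the
+    third-party ``regex`` package, which supports approximate matching.
+    A minimal sketch of that idea (the ``context`` and ``quote`` values
+    below are illustrative only):
+
+    .. code-block:: python
+
+        import regex
+
+        context = "Alice has blue eyes. Bob has brown eyes."
+        quote = "Bob has brwn eyes"  # one typo; an exact search would fail
+
+        # {e<=2} tolerates up to two edit errors within the captured group.
+        match = regex.search(f"({regex.escape(quote)}){{e<=2}}", context)
+        if match is not None:
+            print(match.spans())  # character spans of the approximate match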
+ """ + + fact: str = Field(..., description="Body of the sentence, as part of a response") + substring_quote: list[str] = Field( + ..., + description=( + "Each source should be a direct quote from the context, " + "as a substring of the original content" + ), + ) + + def _get_span(self, quote: str, context: str, errs: int = 100) -> Iterator[str]: + import regex + + minor = quote + major = context + + errs_ = 0 + s = regex.search(f"({minor}){{e<={errs_}}}", major) + while s is None and errs_ <= errs: + errs_ += 1 + s = regex.search(f"({minor}){{e<={errs_}}}", major) + + if s is not None: + yield from s.spans() + + def get_spans(self, context: str) -> Iterator[str]: + for quote in self.substring_quote: + yield from self._get_span(quote, context) + + +class QuestionAnswer(BaseModel): + """A question and its answer as a list of facts each one should have a source. + each sentence contains a body and a list of sources.""" + + question: str = Field(..., description="Question that was asked") + answer: list[FactWithEvidence] = Field( + ..., + description=( + "Body of the answer, each fact should be " + "its separate object with a body and a list of sources" + ), + ) + + +def create_citation_fuzzy_match_runnable(llm: BaseChatModel) -> Runnable: + """Create a citation fuzzy match Runnable. + + Example usage: + + .. code-block:: python + + from langchain.chains import create_citation_fuzzy_match_runnable + from langchain_openai import ChatOpenAI + + llm = ChatOpenAI(model="gpt-4o-mini") + + context = "Alice has blue eyes. Bob has brown eyes. Charlie has green eyes." + question = "What color are Bob's eyes?" + + chain = create_citation_fuzzy_match_runnable(llm) + chain.invoke({"question": question, "context": context}) + + Args: + llm: Language model to use for the chain. Must implement bind_tools. + + Returns: + Runnable that can be used to answer questions with citations. + """ + if llm.bind_tools is BaseChatModel.bind_tools: + raise ValueError( + "Language model must implement bind_tools to use this function." + ) + prompt = ChatPromptTemplate( + [ + SystemMessage( + "You are a world class algorithm to answer " + "questions with correct and exact citations." + ), + HumanMessagePromptTemplate.from_template( + "Answer question using the following context." + "\n\n{context}" + "\n\nQuestion: {question}" + "\n\nTips: Make sure to cite your sources, " + "and use the exact words from the context." + ), + ] + ) + return prompt | llm.with_structured_output(QuestionAnswer) + + +@deprecated( + since="0.2.13", + removal="1.0", + alternative="create_citation_fuzzy_match_runnable", +) +def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain: + """Create a citation fuzzy match chain. + + Args: + llm: Language model to use for the chain. + + Returns: + Chain (LLMChain) that can be used to answer questions with citations. + """ + output_parser = PydanticOutputFunctionsParser(pydantic_schema=QuestionAnswer) + schema = QuestionAnswer.schema() + function = { + "name": schema["title"], + "description": schema["description"], + "parameters": schema, + } + llm_kwargs = get_llm_kwargs(function) + messages = [ + SystemMessage( + content=( + "You are a world class algorithm to answer " + "questions with correct and exact citations." 
+ ) + ), + HumanMessage(content="Answer question using the following context"), + HumanMessagePromptTemplate.from_template("{context}"), + HumanMessagePromptTemplate.from_template("Question: {question}"), + HumanMessage( + content=( + "Tips: Make sure to cite your sources, " + "and use the exact words from the context." + ) + ), + ] + prompt = ChatPromptTemplate(messages=messages) # type: ignore[arg-type] + + chain = LLMChain( + llm=llm, + prompt=prompt, + llm_kwargs=llm_kwargs, + output_parser=output_parser, + ) + return chain diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/extraction.py b/venv/Lib/site-packages/langchain/chains/openai_functions/extraction.py new file mode 100644 index 00000000..a549688c --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/openai_functions/extraction.py @@ -0,0 +1,197 @@ +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers.openai_functions import ( + JsonKeyOutputFunctionsParser, + PydanticAttrOutputFunctionsParser, +) +from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate +from pydantic import BaseModel + +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.chains.openai_functions.utils import ( + _convert_schema, + _resolve_schema_references, + get_llm_kwargs, +) + + +def _get_extraction_function(entity_schema: dict) -> dict: + return { + "name": "information_extraction", + "description": "Extracts the relevant information from the passage.", + "parameters": { + "type": "object", + "properties": { + "info": {"type": "array", "items": _convert_schema(entity_schema)} + }, + "required": ["info"], + }, + } + + +_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \ +in the following passage together with their properties. + +Only extract the properties mentioned in the 'information_extraction' function. + +If a property is not present and is not required in the function parameters, do not include it in the output. + +Passage: +{input} +""" # noqa: E501 + + +@deprecated( + since="0.1.14", + message=( + "LangChain has introduced a method called `with_structured_output` that" + "is available on ChatModels capable of tool calling." + "You can read more about the method here: " + ". " + "Please follow our extraction use case documentation for more guidelines" + "on how to do information extraction with LLMs." + ". " + "If you notice other issues, please provide " + "feedback here:" + "" + ), + removal="1.0", + alternative=( + """ + from pydantic import BaseModel, Field + from langchain_anthropic import ChatAnthropic + + class Joke(BaseModel): + setup: str = Field(description="The setup of the joke") + punchline: str = Field(description="The punchline to the joke") + + # Or any other chat model that supports tools. + # Please reference to to the documentation of structured_output + # to see an up to date list of which models support + # with_structured_output. + model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) + structured_llm = model.with_structured_output(Joke) + structured_llm.invoke("Tell me a joke about cats. 
+ Make sure to call the Joke function.") + """ + ), +) +def create_extraction_chain( + schema: dict, + llm: BaseLanguageModel, + prompt: Optional[BasePromptTemplate] = None, + tags: Optional[list[str]] = None, + verbose: bool = False, +) -> Chain: + """Creates a chain that extracts information from a passage. + + Args: + schema: The schema of the entities to extract. + llm: The language model to use. + prompt: The prompt to use for extraction. + verbose: Whether to run in verbose mode. In verbose mode, some intermediate + logs will be printed to the console. Defaults to the global `verbose` value, + accessible via `langchain.globals.get_verbose()`. + + Returns: + Chain that can be used to extract information from a passage. + """ + function = _get_extraction_function(schema) + extraction_prompt = prompt or ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE) + output_parser = JsonKeyOutputFunctionsParser(key_name="info") + llm_kwargs = get_llm_kwargs(function) + chain = LLMChain( + llm=llm, + prompt=extraction_prompt, + llm_kwargs=llm_kwargs, + output_parser=output_parser, + tags=tags, + verbose=verbose, + ) + return chain + + +@deprecated( + since="0.1.14", + message=( + "LangChain has introduced a method called `with_structured_output` that" + "is available on ChatModels capable of tool calling." + "You can read more about the method here: " + ". " + "Please follow our extraction use case documentation for more guidelines" + "on how to do information extraction with LLMs." + ". " + "If you notice other issues, please provide " + "feedback here:" + "" + ), + removal="1.0", + alternative=( + """ + from pydantic import BaseModel, Field + from langchain_anthropic import ChatAnthropic + + class Joke(BaseModel): + setup: str = Field(description="The setup of the joke") + punchline: str = Field(description="The punchline to the joke") + + # Or any other chat model that supports tools. + # Please reference to to the documentation of structured_output + # to see an up to date list of which models support + # with_structured_output. + model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) + structured_llm = model.with_structured_output(Joke) + structured_llm.invoke("Tell me a joke about cats. + Make sure to call the Joke function.") + """ + ), +) +def create_extraction_chain_pydantic( + pydantic_schema: Any, + llm: BaseLanguageModel, + prompt: Optional[BasePromptTemplate] = None, + verbose: bool = False, +) -> Chain: + """Creates a chain that extracts information from a passage using pydantic schema. + + Args: + pydantic_schema: The pydantic schema of the entities to extract. + llm: The language model to use. + prompt: The prompt to use for extraction. + verbose: Whether to run in verbose mode. In verbose mode, some intermediate + logs will be printed to the console. Defaults to the global `verbose` value, + accessible via `langchain.globals.get_verbose()` + + Returns: + Chain that can be used to extract information from a passage. 
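+
+    A minimal usage sketch (the ``Person`` schema and the passage are
+    illustrative assumptions):
+
+    .. code-block:: python
+
+        from typing import Optional
+
+        from langchain.chains.openai_functions import create_extraction_chain_pydantic
+        from langchain_community.chat_models import ChatOpenAI
+        from pydantic import BaseModel, Field
+
+        class Person(BaseModel):
+            \"\"\"Identifying information about a person.\"\"\"
+
+            name: str = Field(..., description="The person's name")
+            age: Optional[int] = Field(None, description="The person's age")
+
+        llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
+        chain = create_extraction_chain_pydantic(Person, llm)
+        chain.run("Alex is 30 and Claudia is 25.")
+        # might return: [Person(name="Alex", age=30), Person(name="Claudia", age=25)]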
+ """ + + class PydanticSchema(BaseModel): + info: list[pydantic_schema] + + if hasattr(pydantic_schema, "model_json_schema"): + openai_schema = pydantic_schema.model_json_schema() + else: + openai_schema = pydantic_schema.schema() + + openai_schema = _resolve_schema_references( + openai_schema, openai_schema.get("definitions", {}) + ) + + function = _get_extraction_function(openai_schema) + extraction_prompt = prompt or ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE) + output_parser = PydanticAttrOutputFunctionsParser( + pydantic_schema=PydanticSchema, attr_name="info" + ) + llm_kwargs = get_llm_kwargs(function) + chain = LLMChain( + llm=llm, + prompt=extraction_prompt, + llm_kwargs=llm_kwargs, + output_parser=output_parser, + verbose=verbose, + ) + return chain diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/openapi.py b/venv/Lib/site-packages/langchain/chains/openai_functions/openapi.py new file mode 100644 index 00000000..d177c011 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/openai_functions/openapi.py @@ -0,0 +1,405 @@ +from __future__ import annotations + +import json +import re +from collections import defaultdict +from typing import TYPE_CHECKING, Any, Callable, Optional, Union + +import requests +from langchain_core._api import deprecated +from langchain_core.callbacks import CallbackManagerForChainRun +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser +from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate +from langchain_core.utils.input import get_colored_text +from requests import Response + +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.chains.sequential import SequentialChain + +if TYPE_CHECKING: + from langchain_community.utilities.openapi import OpenAPISpec + from openapi_pydantic import Parameter + + +def _get_description(o: Any, prefer_short: bool) -> Optional[str]: + summary = getattr(o, "summary", None) + description = getattr(o, "description", None) + if prefer_short: + return summary or description + return description or summary + + +def _format_url(url: str, path_params: dict) -> str: + expected_path_param = re.findall(r"{(.*?)}", url) + new_params = {} + for param in expected_path_param: + clean_param = param.lstrip(".;").rstrip("*") + val = path_params[clean_param] + if isinstance(val, list): + if param[0] == ".": + sep = "." if param[-1] == "*" else "," + new_val = "." + sep.join(val) + elif param[0] == ";": + sep = f"{clean_param}=" if param[-1] == "*" else "," + new_val = f"{clean_param}=" + sep.join(val) + else: + new_val = ",".join(val) + elif isinstance(val, dict): + kv_sep = "=" if param[-1] == "*" else "," + kv_strs = [kv_sep.join((k, v)) for k, v in val.items()] + if param[0] == ".": + sep = "." + new_val = "." 
+ elif param[0] == ";": + sep = ";" + new_val = ";" + else: + sep = "," + new_val = "" + new_val += sep.join(kv_strs) + else: + if param[0] == ".": + new_val = f".{val}" + elif param[0] == ";": + new_val = f";{clean_param}={val}" + else: + new_val = val + new_params[param] = new_val + return url.format(**new_params) + + +def _openapi_params_to_json_schema(params: list[Parameter], spec: OpenAPISpec) -> dict: + properties = {} + required = [] + for p in params: + if p.param_schema: + schema = spec.get_schema(p.param_schema) + else: + media_type_schema = list(p.content.values())[0].media_type_schema + schema = spec.get_schema(media_type_schema) + if p.description and not schema.description: + schema.description = p.description + properties[p.name] = json.loads(schema.json(exclude_none=True)) + if p.required: + required.append(p.name) + return {"type": "object", "properties": properties, "required": required} + + +def openapi_spec_to_openai_fn( + spec: OpenAPISpec, +) -> tuple[list[dict[str, Any]], Callable]: + """Convert a valid OpenAPI spec to the JSON Schema format expected for OpenAI + functions. + + Args: + spec: OpenAPI spec to convert. + + Returns: + Tuple of the OpenAI functions JSON schema and a default function for executing + a request based on the OpenAI function schema. + """ + try: + from langchain_community.tools import APIOperation + except ImportError: + raise ImportError( + "Could not import langchain_community.tools. " + "Please install it with `pip install langchain-community`." + ) + + if not spec.paths: + return [], lambda: None + functions = [] + _name_to_call_map = {} + for path in spec.paths: + path_params = { + (p.name, p.param_in): p for p in spec.get_parameters_for_path(path) + } + for method in spec.get_methods_for_path(path): + request_args = {} + op = spec.get_operation(path, method) + op_params = path_params.copy() + for param in spec.get_parameters_for_operation(op): + op_params[(param.name, param.param_in)] = param + params_by_type = defaultdict(list) + for name_loc, p in op_params.items(): + params_by_type[name_loc[1]].append(p) + param_loc_to_arg_name = { + "query": "params", + "header": "headers", + "cookie": "cookies", + "path": "path_params", + } + for param_loc, arg_name in param_loc_to_arg_name.items(): + if params_by_type[param_loc]: + request_args[arg_name] = _openapi_params_to_json_schema( + params_by_type[param_loc], spec + ) + request_body = spec.get_request_body_for_operation(op) + # TODO: Support more MIME types. 
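+                # The branch below maps each declared request-body media type
+                # to a JSON schema: a single media type becomes the "json" (for
+                # application/json) or "data" request argument directly, while
+                # several media types are folded into an "anyOf" under "data".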
+ if request_body and request_body.content: + media_types = {} + for media_type, media_type_object in request_body.content.items(): + if media_type_object.media_type_schema: + schema = spec.get_schema(media_type_object.media_type_schema) + media_types[media_type] = json.loads( + schema.json(exclude_none=True) + ) + if len(media_types) == 1: + media_type, schema_dict = list(media_types.items())[0] + key = "json" if media_type == "application/json" else "data" + request_args[key] = schema_dict + elif len(media_types) > 1: + request_args["data"] = {"anyOf": list(media_types.values())} + + api_op = APIOperation.from_openapi_spec(spec, path, method) + fn = { + "name": api_op.operation_id, + "description": api_op.description, + "parameters": { + "type": "object", + "properties": request_args, + }, + } + functions.append(fn) + _name_to_call_map[fn["name"]] = { + "method": method, + "url": api_op.base_url + api_op.path, + } + + def default_call_api( + name: str, + fn_args: dict, + headers: Optional[dict] = None, + params: Optional[dict] = None, + **kwargs: Any, + ) -> Any: + method = _name_to_call_map[name]["method"] + url = _name_to_call_map[name]["url"] + path_params = fn_args.pop("path_params", {}) + url = _format_url(url, path_params) + if "data" in fn_args and isinstance(fn_args["data"], dict): + fn_args["data"] = json.dumps(fn_args["data"]) + _kwargs = {**fn_args, **kwargs} + if headers is not None: + if "headers" in _kwargs: + _kwargs["headers"].update(headers) + else: + _kwargs["headers"] = headers + if params is not None: + if "params" in _kwargs: + _kwargs["params"].update(params) + else: + _kwargs["params"] = params + return requests.request(method, url, **_kwargs) + + return functions, default_call_api + + +class SimpleRequestChain(Chain): + """Chain for making a simple request to an API endpoint.""" + + request_method: Callable + """Method to use for making the request.""" + output_key: str = "response" + """Key to use for the output of the request.""" + input_key: str = "function" + """Key to use for the input of the request.""" + + @property + def input_keys(self) -> list[str]: + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + return [self.output_key] + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Run the logic of this chain and return the output.""" + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + name = inputs[self.input_key].pop("name") + args = inputs[self.input_key].pop("arguments") + _pretty_name = get_colored_text(name, "green") + _pretty_args = get_colored_text(json.dumps(args, indent=2), "green") + _text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args + _run_manager.on_text(_text) + api_response: Response = self.request_method(name, args) + if api_response.status_code != 200: + response = ( + f"{api_response.status_code}: {api_response.reason}" + + f"\nFor {name} " + + f"Called with args: {args.get('params', '')}" + ) + else: + try: + response = api_response.json() + except Exception: + response = api_response.text + return {self.output_key: response} + + +@deprecated( + since="0.2.13", + message=( + "This function is deprecated and will be removed in langchain 1.0. 
" + "See API reference for replacement: " + "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html" # noqa: E501 + ), + removal="1.0", +) +def get_openapi_chain( + spec: Union[OpenAPISpec, str], + llm: Optional[BaseLanguageModel] = None, + prompt: Optional[BasePromptTemplate] = None, + request_chain: Optional[Chain] = None, + llm_chain_kwargs: Optional[dict] = None, + verbose: bool = False, + headers: Optional[dict] = None, + params: Optional[dict] = None, + **kwargs: Any, +) -> SequentialChain: + """Create a chain for querying an API from a OpenAPI spec. + + Note: this class is deprecated. See below for a replacement implementation. + The benefits of this implementation are: + + - Uses LLM tool calling features to encourage properly-formatted API requests; + - Includes async support. + + .. code-block:: python + + from typing import Any + + from langchain.chains.openai_functions.openapi import openapi_spec_to_openai_fn + from langchain_community.utilities.openapi import OpenAPISpec + from langchain_core.prompts import ChatPromptTemplate + from langchain_openai import ChatOpenAI + + # Define API spec. Can be JSON or YAML + api_spec = \"\"\" + { + "openapi": "3.1.0", + "info": { + "title": "JSONPlaceholder API", + "version": "1.0.0" + }, + "servers": [ + { + "url": "https://jsonplaceholder.typicode.com" + } + ], + "paths": { + "/posts": { + "get": { + "summary": "Get posts", + "parameters": [ + { + "name": "_limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "example": 2 + }, + "description": "Limit the number of results" + } + ] + } + } + } + } + \"\"\" + + parsed_spec = OpenAPISpec.from_text(api_spec) + openai_fns, call_api_fn = openapi_spec_to_openai_fn(parsed_spec) + tools = [ + {"type": "function", "function": fn} + for fn in openai_fns + ] + + prompt = ChatPromptTemplate.from_template( + "Use the provided APIs to respond to this user query:\\n\\n{query}" + ) + llm = ChatOpenAI(model="gpt-4o-mini", temperature=0).bind_tools(tools) + + def _execute_tool(message) -> Any: + if tool_calls := message.tool_calls: + tool_call = message.tool_calls[0] + response = call_api_fn(name=tool_call["name"], fn_args=tool_call["args"]) + response.raise_for_status() + return response.json() + else: + return message.content + + chain = prompt | llm | _execute_tool + + .. code-block:: python + + response = chain.invoke({"query": "Get me top two posts."}) + + Args: + spec: OpenAPISpec or url/file/text string corresponding to one. + llm: language model, should be an OpenAI function-calling model, e.g. + `ChatOpenAI(model="gpt-3.5-turbo-0613")`. + prompt: Main prompt template to use. + request_chain: Chain for taking the functions output and executing the request. + """ # noqa: E501 + try: + from langchain_community.utilities.openapi import OpenAPISpec + except ImportError as e: + raise ImportError( + "Could not import langchain_community.utilities.openapi. " + "Please install it with `pip install langchain-community`." 
+ ) from e + if isinstance(spec, str): + for conversion in ( + OpenAPISpec.from_url, + OpenAPISpec.from_file, + OpenAPISpec.from_text, + ): + try: + spec = conversion(spec) + break + except ImportError as e: + raise e + except Exception: + pass + if isinstance(spec, str): + raise ValueError(f"Unable to parse spec from source {spec}") + openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec) + if not llm: + raise ValueError( + "Must provide an LLM for this chain.For example,\n" + "from langchain_openai import ChatOpenAI\n" + "llm = ChatOpenAI()\n" + ) + prompt = prompt or ChatPromptTemplate.from_template( + "Use the provided API's to respond to this user query:\n\n{query}" + ) + llm_chain = LLMChain( + llm=llm, + prompt=prompt, + llm_kwargs={"functions": openai_fns}, + output_parser=JsonOutputFunctionsParser(args_only=False), + output_key="function", + verbose=verbose, + **(llm_chain_kwargs or {}), + ) + request_chain = request_chain or SimpleRequestChain( + request_method=lambda name, args: call_api_fn( + name, args, headers=headers, params=params + ), + verbose=verbose, + ) + return SequentialChain( + chains=[llm_chain, request_chain], + input_variables=llm_chain.input_keys, + output_variables=["response"], + verbose=verbose, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/qa_with_structure.py b/venv/Lib/site-packages/langchain/chains/openai_functions/qa_with_structure.py new file mode 100644 index 00000000..f76a2813 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/openai_functions/qa_with_structure.py @@ -0,0 +1,135 @@ +from typing import Any, Optional, Union, cast + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import HumanMessage, SystemMessage +from langchain_core.output_parsers import BaseLLMOutputParser +from langchain_core.output_parsers.openai_functions import ( + OutputFunctionsParser, + PydanticOutputFunctionsParser, +) +from langchain_core.prompts import PromptTemplate +from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate +from langchain_core.utils.pydantic import is_basemodel_subclass +from pydantic import BaseModel, Field + +from langchain.chains.llm import LLMChain +from langchain.chains.openai_functions.utils import get_llm_kwargs + + +class AnswerWithSources(BaseModel): + """An answer to the question, with sources.""" + + answer: str = Field(..., description="Answer to the question that was asked") + sources: list[str] = Field( + ..., description="List of sources used to answer the question" + ) + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "This function is deprecated. Refer to this guide on retrieval and question " + "answering with structured responses: " + "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501 + ), +) +def create_qa_with_structure_chain( + llm: BaseLanguageModel, + schema: Union[dict, type[BaseModel]], + output_parser: str = "base", + prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None, + verbose: bool = False, +) -> LLMChain: + """Create a question answering chain that returns an answer with sources + based on schema. + + Args: + llm: Language model to use for the chain. + schema: Pydantic schema to use for the output. + output_parser: Output parser to use. Should be one of `pydantic` or `base`. + Default to `base`. + prompt: Optional prompt to use for the chain. 
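+        verbose: Whether to print the details of the chain.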
+ + Returns: + + """ + if output_parser == "pydantic": + if not (isinstance(schema, type) and is_basemodel_subclass(schema)): + raise ValueError( + "Must provide a pydantic class for schema when output_parser is " + "'pydantic'." + ) + _output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser( + pydantic_schema=schema + ) + elif output_parser == "base": + _output_parser = OutputFunctionsParser() + else: + raise ValueError( + f"Got unexpected output_parser: {output_parser}. " + f"Should be one of `pydantic` or `base`." + ) + if isinstance(schema, type) and is_basemodel_subclass(schema): + if hasattr(schema, "model_json_schema"): + schema_dict = cast(dict, schema.model_json_schema()) + else: + schema_dict = cast(dict, schema.schema()) + else: + schema_dict = cast(dict, schema) + function = { + "name": schema_dict["title"], + "description": schema_dict["description"], + "parameters": schema_dict, + } + llm_kwargs = get_llm_kwargs(function) + messages = [ + SystemMessage( + content=( + "You are a world class algorithm to answer " + "questions in a specific format." + ) + ), + HumanMessage(content="Answer question using the following context"), + HumanMessagePromptTemplate.from_template("{context}"), + HumanMessagePromptTemplate.from_template("Question: {question}"), + HumanMessage(content="Tips: Make sure to answer in the correct format"), + ] + prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type] + + chain = LLMChain( + llm=llm, + prompt=prompt, + llm_kwargs=llm_kwargs, + output_parser=_output_parser, + verbose=verbose, + ) + return chain + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "This function is deprecated. Refer to this guide on retrieval and question " + "answering with sources: " + "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501 + ), +) +def create_qa_with_sources_chain( + llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any +) -> LLMChain: + """Create a question answering chain that returns an answer with sources. + + Args: + llm: Language model to use for the chain. + verbose: Whether to print the details of the chain + **kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`. + + Returns: + Chain (LLMChain) that can be used to answer questions with citations. + """ + return create_qa_with_structure_chain( + llm, AnswerWithSources, verbose=verbose, **kwargs + ) diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/tagging.py b/venv/Lib/site-packages/langchain/chains/openai_functions/tagging.py new file mode 100644 index 00000000..23422699 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/openai_functions/tagging.py @@ -0,0 +1,174 @@ +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers.openai_functions import ( + JsonOutputFunctionsParser, + PydanticOutputFunctionsParser, +) +from langchain_core.prompts import ChatPromptTemplate + +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.chains.openai_functions.utils import _convert_schema, get_llm_kwargs + + +def _get_tagging_function(schema: dict) -> dict: + return { + "name": "information_extraction", + "description": "Extracts the relevant information from the passage.", + "parameters": _convert_schema(schema), + } + + +_TAGGING_TEMPLATE = """Extract the desired information from the following passage. 
+ +Only extract the properties mentioned in the 'information_extraction' function. + +Passage: +{input} +""" + + +@deprecated( + since="0.2.13", + message=( + "LangChain has introduced a method called `with_structured_output` that " + "is available on ChatModels capable of tool calling. " + "See API reference for this function for replacement: " + " " # noqa: E501 + "You can read more about `with_structured_output` here: " + ". " + "If you notice other issues, please provide " + "feedback here: " + "" + ), + removal="1.0", +) +def create_tagging_chain( + schema: dict, + llm: BaseLanguageModel, + prompt: Optional[ChatPromptTemplate] = None, + **kwargs: Any, +) -> Chain: + """Create a chain that extracts information from a passage + based on a schema. + + This function is deprecated. Please use `with_structured_output` instead. + See example usage below: + + .. code-block:: python + + from typing_extensions import Annotated, TypedDict + from langchain_anthropic import ChatAnthropic + + class Joke(TypedDict): + \"\"\"Tagged joke.\"\"\" + + setup: Annotated[str, ..., "The setup of the joke"] + punchline: Annotated[str, ..., "The punchline of the joke"] + + # Or any other chat model that supports tools. + # Please reference to to the documentation of structured_output + # to see an up to date list of which models support + # with_structured_output. + model = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0) + structured_llm = model.with_structured_output(Joke) + structured_llm.invoke( + "Why did the cat cross the road? To get to the other " + "side... and then lay down in the middle of it!" + ) + Read more here: https://python.langchain.com/docs/how_to/structured_output/ + + Args: + schema: The schema of the entities to extract. + llm: The language model to use. + + Returns: + Chain (LLMChain) that can be used to extract information from a passage. + """ + function = _get_tagging_function(schema) + prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE) + output_parser = JsonOutputFunctionsParser() + llm_kwargs = get_llm_kwargs(function) + chain = LLMChain( + llm=llm, + prompt=prompt, + llm_kwargs=llm_kwargs, + output_parser=output_parser, + **kwargs, + ) + return chain + + +@deprecated( + since="0.2.13", + message=( + "LangChain has introduced a method called `with_structured_output` that " + "is available on ChatModels capable of tool calling. " + "See API reference for this function for replacement: " + " " # noqa: E501 + "You can read more about `with_structured_output` here: " + ". " + "If you notice other issues, please provide " + "feedback here: " + "" + ), + removal="1.0", +) +def create_tagging_chain_pydantic( + pydantic_schema: Any, + llm: BaseLanguageModel, + prompt: Optional[ChatPromptTemplate] = None, + **kwargs: Any, +) -> Chain: + """Create a chain that extracts information from a passage + based on a pydantic schema. + + This function is deprecated. Please use `with_structured_output` instead. + See example usage below: + + .. code-block:: python + + from pydantic import BaseModel, Field + from langchain_anthropic import ChatAnthropic + + class Joke(BaseModel): + setup: str = Field(description="The setup of the joke") + punchline: str = Field(description="The punchline to the joke") + + # Or any other chat model that supports tools. + # Please reference to to the documentation of structured_output + # to see an up to date list of which models support + # with_structured_output. 
+ model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) + structured_llm = model.with_structured_output(Joke) + structured_llm.invoke( + "Why did the cat cross the road? To get to the other " + "side... and then lay down in the middle of it!" + ) + Read more here: https://python.langchain.com/docs/how_to/structured_output/ + + Args: + pydantic_schema: The pydantic schema of the entities to extract. + llm: The language model to use. + + Returns: + Chain (LLMChain) that can be used to extract information from a passage. + """ + if hasattr(pydantic_schema, "model_json_schema"): + openai_schema = pydantic_schema.model_json_schema() + else: + openai_schema = pydantic_schema.schema() + function = _get_tagging_function(openai_schema) + prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE) + output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema) + llm_kwargs = get_llm_kwargs(function) + chain = LLMChain( + llm=llm, + prompt=prompt, + llm_kwargs=llm_kwargs, + output_parser=output_parser, + **kwargs, + ) + return chain diff --git a/venv/Lib/site-packages/langchain/chains/openai_functions/utils.py b/venv/Lib/site-packages/langchain/chains/openai_functions/utils.py new file mode 100644 index 00000000..086d5b36 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/openai_functions/utils.py @@ -0,0 +1,40 @@ +from typing import Any + + +def _resolve_schema_references(schema: Any, definitions: dict[str, Any]) -> Any: + """ + Resolve the $ref keys in a JSON schema object using the provided definitions. + """ + if isinstance(schema, list): + for i, item in enumerate(schema): + schema[i] = _resolve_schema_references(item, definitions) + elif isinstance(schema, dict): + if "$ref" in schema: + ref_key = schema.pop("$ref").split("/")[-1] + ref = definitions.get(ref_key, {}) + schema.update(ref) + else: + for key, value in schema.items(): + schema[key] = _resolve_schema_references(value, definitions) + return schema + + +def _convert_schema(schema: dict) -> dict: + props = {k: {"title": k, **v} for k, v in schema["properties"].items()} + return { + "type": "object", + "properties": props, + "required": schema.get("required", []), + } + + +def get_llm_kwargs(function: dict) -> dict: + """Return the kwargs for the LLMChain constructor. + + Args: + function: The function to use. + + Returns: + The kwargs for the LLMChain constructor. 
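+
+    For example, with an illustrative function definition:
+
+    .. code-block:: python
+
+        function = {
+            "name": "information_extraction",
+            "description": "Extracts the relevant information from the passage.",
+            "parameters": {"type": "object", "properties": {}},
+        }
+        get_llm_kwargs(function)
+        # -> {"functions": [function],
+        #     "function_call": {"name": "information_extraction"}}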
+ """ + return {"functions": [function], "function_call": {"name": function["name"]}} diff --git a/venv/Lib/site-packages/langchain/chains/openai_tools/__init__.py b/venv/Lib/site-packages/langchain/chains/openai_tools/__init__.py new file mode 100644 index 00000000..5388b7d5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/openai_tools/__init__.py @@ -0,0 +1,3 @@ +from langchain.chains.openai_tools.extraction import create_extraction_chain_pydantic + +__all__ = ["create_extraction_chain_pydantic"] diff --git a/venv/Lib/site-packages/langchain/chains/openai_tools/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/openai_tools/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..1d3ef5fa Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/openai_tools/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/openai_tools/__pycache__/extraction.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/openai_tools/__pycache__/extraction.cpython-312.pyc new file mode 100644 index 00000000..f4864e07 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/openai_tools/__pycache__/extraction.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/openai_tools/extraction.py b/venv/Lib/site-packages/langchain/chains/openai_tools/extraction.py new file mode 100644 index 00000000..ca4f9899 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/openai_tools/extraction.py @@ -0,0 +1,80 @@ +from typing import Union + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers.openai_tools import PydanticToolsParser +from langchain_core.prompts import ChatPromptTemplate +from langchain_core.runnables import Runnable +from langchain_core.utils.function_calling import convert_pydantic_to_openai_function +from pydantic import BaseModel + +_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \ +in the following passage together with their properties. + +If a property is not present and is not required in the function parameters, do not include it in the output.""" # noqa: E501 + + +@deprecated( + since="0.1.14", + message=( + "LangChain has introduced a method called `with_structured_output` that" + "is available on ChatModels capable of tool calling." + "You can read more about the method here: " + ". " + "Please follow our extraction use case documentation for more guidelines" + "on how to do information extraction with LLMs." + ". " + "with_structured_output does not currently support a list of pydantic schemas. " + "If this is a blocker or if you notice other issues, please provide " + "feedback here:" + "" + ), + removal="1.0", + alternative=( + """ + from pydantic import BaseModel, Field + from langchain_anthropic import ChatAnthropic + + class Joke(BaseModel): + setup: str = Field(description="The setup of the joke") + punchline: str = Field(description="The punchline to the joke") + + # Or any other chat model that supports tools. + # Please reference to to the documentation of structured_output + # to see an up to date list of which models support + # with_structured_output. + model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) + structured_llm = model.with_structured_output(Joke) + structured_llm.invoke("Tell me a joke about cats. 
+ Make sure to call the Joke function.") + """ + ), +) +def create_extraction_chain_pydantic( + pydantic_schemas: Union[list[type[BaseModel]], type[BaseModel]], + llm: BaseLanguageModel, + system_message: str = _EXTRACTION_TEMPLATE, +) -> Runnable: + """Creates a chain that extracts information from a passage. + + Args: + pydantic_schemas: The schema of the entities to extract. + llm: The language model to use. + system_message: The system message to use for extraction. + + Returns: + A runnable that extracts information from a passage. + """ + if not isinstance(pydantic_schemas, list): + pydantic_schemas = [pydantic_schemas] + prompt = ChatPromptTemplate.from_messages( + [ + ("system", system_message), + ("user", "{input}"), + ] + ) + functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas] + tools = [{"type": "function", "function": d} for d in functions] + model = llm.bind(tools=tools) + chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas) + return chain diff --git a/venv/Lib/site-packages/langchain/chains/prompt_selector.py b/venv/Lib/site-packages/langchain/chains/prompt_selector.py new file mode 100644 index 00000000..adc1ddce --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/prompt_selector.py @@ -0,0 +1,65 @@ +from abc import ABC, abstractmethod +from typing import Callable + +from langchain_core.language_models import BaseLanguageModel +from langchain_core.language_models.chat_models import BaseChatModel +from langchain_core.language_models.llms import BaseLLM +from langchain_core.prompts import BasePromptTemplate +from pydantic import BaseModel, Field + + +class BasePromptSelector(BaseModel, ABC): + """Base class for prompt selectors.""" + + @abstractmethod + def get_prompt(self, llm: BaseLanguageModel) -> BasePromptTemplate: + """Get default prompt for a language model.""" + + +class ConditionalPromptSelector(BasePromptSelector): + """Prompt collection that goes through conditionals.""" + + default_prompt: BasePromptTemplate + """Default prompt to use if no conditionals match.""" + conditionals: list[ + tuple[Callable[[BaseLanguageModel], bool], BasePromptTemplate] + ] = Field(default_factory=list) + """List of conditionals and prompts to use if the conditionals match.""" + + def get_prompt(self, llm: BaseLanguageModel) -> BasePromptTemplate: + """Get default prompt for a language model. + + Args: + llm: Language model to get prompt for. + + Returns: + Prompt to use for the language model. + """ + for condition, prompt in self.conditionals: + if condition(llm): + return prompt + return self.default_prompt + + +def is_llm(llm: BaseLanguageModel) -> bool: + """Check if the language model is a LLM. + + Args: + llm: Language model to check. + + Returns: + True if the language model is a BaseLLM model, False otherwise. + """ + return isinstance(llm, BaseLLM) + + +def is_chat_model(llm: BaseLanguageModel) -> bool: + """Check if the language model is a chat model. + + Args: + llm: Language model to check. + + Returns: + True if the language model is a BaseChatModel model, False otherwise. 
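+
+    These predicates are typically used as conditions in a
+    ``ConditionalPromptSelector``. A minimal sketch (both prompt templates
+    are illustrative, and ``llm`` stands for any BaseLanguageModel
+    instance):
+
+    .. code-block:: python
+
+        from langchain_core.prompts import PromptTemplate
+
+        selector = ConditionalPromptSelector(
+            default_prompt=PromptTemplate.from_template("Q: {question} A:"),
+            conditionals=[
+                (is_chat_model, PromptTemplate.from_template("{question}")),
+            ],
+        )
+        prompt = selector.get_prompt(llm)  # first matching condition wins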
+ """ + return isinstance(llm, BaseChatModel) diff --git a/venv/Lib/site-packages/langchain/chains/qa_generation/__init__.py b/venv/Lib/site-packages/langchain/chains/qa_generation/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/chains/qa_generation/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_generation/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..ff09b743 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_generation/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/qa_generation/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_generation/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..328bdfa6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_generation/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/qa_generation/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_generation/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..f5d5453a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_generation/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/qa_generation/base.py b/venv/Lib/site-packages/langchain/chains/qa_generation/base.py new file mode 100644 index 00000000..0a90acb0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/qa_generation/base.py @@ -0,0 +1,123 @@ +from __future__ import annotations + +import json +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import CallbackManagerForChainRun +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate +from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter +from pydantic import Field + +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR + + +@deprecated( + since="0.2.7", + alternative=( + "example in API reference with more detail: " + "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html" # noqa: E501 + ), + removal="1.0", +) +class QAGenerationChain(Chain): + """Base class for question-answer generation chains. + + This class is deprecated. See below for an alternative implementation. + + Advantages of this implementation include: + + - Supports async and streaming; + - Surfaces prompt and text splitter for easier customization; + - Use of JsonOutputParser supports JSONPatch operations in streaming mode, + as well as robustness to markdown. + + .. code-block:: python + + from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt + # Note: import PROMPT if using a legacy non-chat model. 
+ from langchain_core.output_parsers import JsonOutputParser + from langchain_core.runnables import ( + RunnableLambda, + RunnableParallel, + RunnablePassthrough, + ) + from langchain_core.runnables.base import RunnableEach + from langchain_openai import ChatOpenAI + from langchain_text_splitters import RecursiveCharacterTextSplitter + + llm = ChatOpenAI() + text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500) + split_text = RunnableLambda( + lambda x: text_splitter.create_documents([x]) + ) + + chain = RunnableParallel( + text=RunnablePassthrough(), + questions=( + split_text | RunnableEach(bound=prompt | llm | JsonOutputParser()) + ) + ) + """ + + llm_chain: LLMChain + """LLM Chain that generates responses from user input and context.""" + text_splitter: TextSplitter = Field( + default=RecursiveCharacterTextSplitter(chunk_overlap=500) + ) + """Text splitter that splits the input into chunks.""" + input_key: str = "text" + """Key of the input to the chain.""" + output_key: str = "questions" + """Key of the output of the chain.""" + k: Optional[int] = None + """Number of questions to generate.""" + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + prompt: Optional[BasePromptTemplate] = None, + **kwargs: Any, + ) -> QAGenerationChain: + """ + Create a QAGenerationChain from a language model. + + Args: + llm: a language model + prompt: a prompt template + **kwargs: additional arguments + + Returns: + a QAGenerationChain class + """ + _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm) + chain = LLMChain(llm=llm, prompt=_prompt) + return cls(llm_chain=chain, **kwargs) + + @property + def _chain_type(self) -> str: + raise NotImplementedError + + @property + def input_keys(self) -> list[str]: + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + return [self.output_key] + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, list]: + docs = self.text_splitter.create_documents([inputs[self.input_key]]) + results = self.llm_chain.generate( + [{"text": d.page_content} for d in docs], run_manager=run_manager + ) + qa = [json.loads(res[0].text) for res in results.generations] + return {self.output_key: qa} diff --git a/venv/Lib/site-packages/langchain/chains/qa_generation/prompt.py b/venv/Lib/site-packages/langchain/chains/qa_generation/prompt.py new file mode 100644 index 00000000..377a49e4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/qa_generation/prompt.py @@ -0,0 +1,50 @@ +# flake8: noqa +from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model +from langchain_core.prompts.chat import ( + ChatPromptTemplate, + HumanMessagePromptTemplate, + SystemMessagePromptTemplate, +) +from langchain_core.prompts.prompt import PromptTemplate + +templ1 = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions. +Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities. +When coming up with this question/answer pair, you must respond in the following format: +``` +{{ + "question": "$YOUR_QUESTION_HERE", + "answer": "$THE_ANSWER_HERE" +}} +``` + +Everything between the ``` must be valid json. 
+""" +templ2 = """Please come up with a question/answer pair, in the specified JSON format, for the following text: +---------------- +{text}""" +CHAT_PROMPT = ChatPromptTemplate.from_messages( + [ + SystemMessagePromptTemplate.from_template(templ1), + HumanMessagePromptTemplate.from_template(templ2), + ] +) +templ = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions. +Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities. +When coming up with this question/answer pair, you must respond in the following format: +``` +{{ + "question": "$YOUR_QUESTION_HERE", + "answer": "$THE_ANSWER_HERE" +}} +``` + +Everything between the ``` must be valid json. + +Please come up with a question/answer pair, in the specified JSON format, for the following text: +---------------- +{text}""" +PROMPT = PromptTemplate.from_template(templ) + +PROMPT_SELECTOR = ConditionalPromptSelector( + default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)] +) diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/__init__.py b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__init__.py new file mode 100644 index 00000000..5614ee55 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__init__.py @@ -0,0 +1,5 @@ +"""Load question answering with sources chains.""" + +from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain + +__all__ = ["load_qa_with_sources_chain"] diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..ec35f6df Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..bce9386e Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/loading.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/loading.cpython-312.pyc new file mode 100644 index 00000000..02194cdd Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/loading.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/map_reduce_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/map_reduce_prompt.cpython-312.pyc new file mode 100644 index 00000000..887f1ddf Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/map_reduce_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/refine_prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/refine_prompts.cpython-312.pyc new file mode 100644 index 00000000..867a8a79 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/refine_prompts.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/retrieval.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/retrieval.cpython-312.pyc new file mode 100644 index 00000000..76dd0050 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/retrieval.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/stuff_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/stuff_prompt.cpython-312.pyc new file mode 100644 index 00000000..d0f8be91 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/stuff_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/vector_db.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/vector_db.cpython-312.pyc new file mode 100644 index 00000000..407c1940 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/qa_with_sources/__pycache__/vector_db.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/base.py b/venv/Lib/site-packages/langchain/chains/qa_with_sources/base.py new file mode 100644 index 00000000..7b2b371e --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/qa_with_sources/base.py @@ -0,0 +1,255 @@ +"""Question answering with sources over documents.""" + +from __future__ import annotations + +import inspect +import re +from abc import ABC, abstractmethod +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, +) +from langchain_core.documents import Document +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate +from pydantic import ConfigDict, model_validator + +from langchain.chains import ReduceDocumentsChain +from langchain.chains.base import Chain +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain +from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain +from langchain.chains.combine_documents.stuff import StuffDocumentsChain +from langchain.chains.llm import LLMChain +from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain +from langchain.chains.qa_with_sources.map_reduce_prompt import ( + COMBINE_PROMPT, + EXAMPLE_PROMPT, + QUESTION_PROMPT, +) + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "This class is deprecated. 
Refer to this guide on retrieval and question " + "answering with sources: " + "https://python.langchain.com/docs/how_to/qa_sources/" + ), +) +class BaseQAWithSourcesChain(Chain, ABC): + """Question answering chain with sources over documents.""" + + combine_documents_chain: BaseCombineDocumentsChain + """Chain to use to combine documents.""" + question_key: str = "question" #: :meta private: + input_docs_key: str = "docs" #: :meta private: + answer_key: str = "answer" #: :meta private: + sources_answer_key: str = "sources" #: :meta private: + return_source_documents: bool = False + """Return the source documents.""" + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + document_prompt: BasePromptTemplate = EXAMPLE_PROMPT, + question_prompt: BasePromptTemplate = QUESTION_PROMPT, + combine_prompt: BasePromptTemplate = COMBINE_PROMPT, + **kwargs: Any, + ) -> BaseQAWithSourcesChain: + """Construct the chain from an LLM.""" + llm_question_chain = LLMChain(llm=llm, prompt=question_prompt) + llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt) + combine_results_chain = StuffDocumentsChain( + llm_chain=llm_combine_chain, + document_prompt=document_prompt, + document_variable_name="summaries", + ) + reduce_documents_chain = ReduceDocumentsChain( + combine_documents_chain=combine_results_chain + ) + combine_documents_chain = MapReduceDocumentsChain( + llm_chain=llm_question_chain, + reduce_documents_chain=reduce_documents_chain, + document_variable_name="context", + ) + return cls( + combine_documents_chain=combine_documents_chain, + **kwargs, + ) + + @classmethod + def from_chain_type( + cls, + llm: BaseLanguageModel, + chain_type: str = "stuff", + chain_type_kwargs: Optional[dict] = None, + **kwargs: Any, + ) -> BaseQAWithSourcesChain: + """Load chain from chain type.""" + _chain_kwargs = chain_type_kwargs or {} + combine_documents_chain = load_qa_with_sources_chain( + llm, chain_type=chain_type, **_chain_kwargs + ) + return cls(combine_documents_chain=combine_documents_chain, **kwargs) + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @property + def input_keys(self) -> list[str]: + """Expect input key. + + :meta private: + """ + return [self.question_key] + + @property + def output_keys(self) -> list[str]: + """Return output key. 
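To see how `from_chain_type` is meant to be driven end to end, here is a sketch. Assumptions: it uses the `RetrievalQAWithSourcesChain` subclass that appears later in this diff, plus the in-memory vector store and fake components from `langchain_core`, so it runs without API keys:

```python
from langchain_core.documents import Document
from langchain_core.embeddings import FakeEmbeddings
from langchain_core.language_models import FakeListChatModel
from langchain_core.vectorstores import InMemoryVectorStore

from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain

# Tiny index holding one sourced document.
store = InMemoryVectorStore(FakeEmbeddings(size=8))
store.add_documents(
    [
        Document(
            page_content="This Agreement is governed by English law.",
            metadata={"source": "28-pl"},
        )
    ]
)

# The canned response mimics the "answer ... SOURCES:" shape that the real
# prompts below instruct the model to produce.
llm = FakeListChatModel(
    responses=["This Agreement is governed by English law.\nSOURCES: 28-pl"]
)
chain = RetrievalQAWithSourcesChain.from_chain_type(
    llm, chain_type="stuff", retriever=store.as_retriever()
)

result = chain.invoke({"question": "Which law governs the contract?"})
print(result["answer"].strip())  # This Agreement is governed by English law.
print(result["sources"])         # 28-pl
```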
+ + :meta private: + """ + _output_keys = [self.answer_key, self.sources_answer_key] + if self.return_source_documents: + _output_keys = _output_keys + ["source_documents"] + return _output_keys + + @model_validator(mode="before") + @classmethod + def validate_naming(cls, values: dict) -> Any: + """Fix backwards compatibility in naming.""" + if "combine_document_chain" in values: + values["combine_documents_chain"] = values.pop("combine_document_chain") + return values + + def _split_sources(self, answer: str) -> tuple[str, str]: + """Split sources from answer.""" + if re.search(r"SOURCES?:", answer, re.IGNORECASE): + answer, sources = re.split( + r"SOURCES?:|QUESTION:\s", answer, flags=re.IGNORECASE + )[:2] + sources = re.split(r"\n", sources)[0].strip() + else: + sources = "" + return answer, sources + + @abstractmethod + def _get_docs( + self, + inputs: dict[str, Any], + *, + run_manager: CallbackManagerForChainRun, + ) -> list[Document]: + """Get docs to run questioning over.""" + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + accepts_run_manager = ( + "run_manager" in inspect.signature(self._get_docs).parameters + ) + if accepts_run_manager: + docs = self._get_docs(inputs, run_manager=_run_manager) + else: + docs = self._get_docs(inputs) # type: ignore[call-arg] + + answer = self.combine_documents_chain.run( + input_documents=docs, callbacks=_run_manager.get_child(), **inputs + ) + answer, sources = self._split_sources(answer) + result: dict[str, Any] = { + self.answer_key: answer, + self.sources_answer_key: sources, + } + if self.return_source_documents: + result["source_documents"] = docs + return result + + @abstractmethod + async def _aget_docs( + self, + inputs: dict[str, Any], + *, + run_manager: AsyncCallbackManagerForChainRun, + ) -> list[Document]: + """Get docs to run questioning over.""" + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + accepts_run_manager = ( + "run_manager" in inspect.signature(self._aget_docs).parameters + ) + if accepts_run_manager: + docs = await self._aget_docs(inputs, run_manager=_run_manager) + else: + docs = await self._aget_docs(inputs) # type: ignore[call-arg] + answer = await self.combine_documents_chain.arun( + input_documents=docs, callbacks=_run_manager.get_child(), **inputs + ) + answer, sources = self._split_sources(answer) + result: dict[str, Any] = { + self.answer_key: answer, + self.sources_answer_key: sources, + } + if self.return_source_documents: + result["source_documents"] = docs + return result + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "This class is deprecated. Refer to this guide on retrieval and question " + "answering with sources: " + "https://python.langchain.com/docs/how_to/qa_sources/" + ), +) +class QAWithSourcesChain(BaseQAWithSourcesChain): + """Question answering with sources over documents.""" + + input_docs_key: str = "docs" #: :meta private: + + @property + def input_keys(self) -> list[str]: + """Expect input key. 
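The `SOURCES` convention that `_split_sources` relies on is worth seeing in isolation; this sketch applies the same regular expressions the method uses:

```python
import re

# Models are instructed (by the prompts later in this diff) to finish with a
# "SOURCES:" line; _split_sources peels that line off the raw answer.
raw = "This Agreement is governed by English law.\nSOURCES: 28-pl"
answer, sources = re.split(r"SOURCES?:|QUESTION:\s", raw, flags=re.IGNORECASE)[:2]
sources = re.split(r"\n", sources)[0].strip()

print(answer.strip())  # This Agreement is governed by English law.
print(sources)         # 28-pl
```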
+ + :meta private: + """ + return [self.input_docs_key, self.question_key] + + def _get_docs( + self, + inputs: dict[str, Any], + *, + run_manager: CallbackManagerForChainRun, + ) -> list[Document]: + """Get docs to run questioning over.""" + return inputs.pop(self.input_docs_key) + + async def _aget_docs( + self, + inputs: dict[str, Any], + *, + run_manager: AsyncCallbackManagerForChainRun, + ) -> list[Document]: + """Get docs to run questioning over.""" + return inputs.pop(self.input_docs_key) + + @property + def _chain_type(self) -> str: + return "qa_with_sources_chain" diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/loading.py b/venv/Lib/site-packages/langchain/chains/qa_with_sources/loading.py new file mode 100644 index 00000000..b9e04911 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/qa_with_sources/loading.py @@ -0,0 +1,201 @@ +"""Load question answering with sources chains.""" + +from __future__ import annotations + +from collections.abc import Mapping +from typing import Any, Optional, Protocol + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate + +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain +from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain +from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain +from langchain.chains.combine_documents.reduce import ReduceDocumentsChain +from langchain.chains.combine_documents.refine import RefineDocumentsChain +from langchain.chains.combine_documents.stuff import StuffDocumentsChain +from langchain.chains.llm import LLMChain +from langchain.chains.qa_with_sources import ( + map_reduce_prompt, + refine_prompts, + stuff_prompt, +) +from langchain.chains.question_answering.map_rerank_prompt import ( + PROMPT as MAP_RERANK_PROMPT, +) + + +class LoadingCallable(Protocol): + """Interface for loading the combine documents chain.""" + + def __call__( + self, llm: BaseLanguageModel, **kwargs: Any + ) -> BaseCombineDocumentsChain: + """Callable to load the combine documents chain.""" + + +def _load_map_rerank_chain( + llm: BaseLanguageModel, + prompt: BasePromptTemplate = MAP_RERANK_PROMPT, + verbose: bool = False, + document_variable_name: str = "context", + rank_key: str = "score", + answer_key: str = "answer", + **kwargs: Any, +) -> MapRerankDocumentsChain: + llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose) + return MapRerankDocumentsChain( + llm_chain=llm_chain, + rank_key=rank_key, + answer_key=answer_key, + document_variable_name=document_variable_name, + **kwargs, + ) + + +def _load_stuff_chain( + llm: BaseLanguageModel, + prompt: BasePromptTemplate = stuff_prompt.PROMPT, + document_prompt: BasePromptTemplate = stuff_prompt.EXAMPLE_PROMPT, + document_variable_name: str = "summaries", + verbose: Optional[bool] = None, + **kwargs: Any, +) -> StuffDocumentsChain: + llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose) # type: ignore[arg-type] + return StuffDocumentsChain( + llm_chain=llm_chain, + document_variable_name=document_variable_name, + document_prompt=document_prompt, + verbose=verbose, # type: ignore[arg-type] + **kwargs, + ) + + +def _load_map_reduce_chain( + llm: BaseLanguageModel, + question_prompt: BasePromptTemplate = map_reduce_prompt.QUESTION_PROMPT, + combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT, + document_prompt: BasePromptTemplate = 
map_reduce_prompt.EXAMPLE_PROMPT, + combine_document_variable_name: str = "summaries", + map_reduce_document_variable_name: str = "context", + collapse_prompt: Optional[BasePromptTemplate] = None, + reduce_llm: Optional[BaseLanguageModel] = None, + collapse_llm: Optional[BaseLanguageModel] = None, + verbose: Optional[bool] = None, + token_max: int = 3000, + **kwargs: Any, +) -> MapReduceDocumentsChain: + map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose) # type: ignore[arg-type] + _reduce_llm = reduce_llm or llm + reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose) # type: ignore[arg-type] + combine_documents_chain = StuffDocumentsChain( + llm_chain=reduce_chain, + document_variable_name=combine_document_variable_name, + document_prompt=document_prompt, + verbose=verbose, # type: ignore[arg-type] + ) + if collapse_prompt is None: + collapse_chain = None + if collapse_llm is not None: + raise ValueError( + "collapse_llm provided, but collapse_prompt was not: please " + "provide one or stop providing collapse_llm." + ) + else: + _collapse_llm = collapse_llm or llm + collapse_chain = StuffDocumentsChain( + llm_chain=LLMChain( + llm=_collapse_llm, + prompt=collapse_prompt, + verbose=verbose, # type: ignore[arg-type] + ), + document_variable_name=combine_document_variable_name, + document_prompt=document_prompt, + ) + reduce_documents_chain = ReduceDocumentsChain( + combine_documents_chain=combine_documents_chain, + collapse_documents_chain=collapse_chain, + token_max=token_max, + verbose=verbose, # type: ignore[arg-type] + ) + return MapReduceDocumentsChain( + llm_chain=map_chain, + reduce_documents_chain=reduce_documents_chain, + document_variable_name=map_reduce_document_variable_name, + verbose=verbose, # type: ignore[arg-type] + **kwargs, + ) + + +def _load_refine_chain( + llm: BaseLanguageModel, + question_prompt: BasePromptTemplate = refine_prompts.DEFAULT_TEXT_QA_PROMPT, + refine_prompt: BasePromptTemplate = refine_prompts.DEFAULT_REFINE_PROMPT, + document_prompt: BasePromptTemplate = refine_prompts.EXAMPLE_PROMPT, + document_variable_name: str = "context_str", + initial_response_name: str = "existing_answer", + refine_llm: Optional[BaseLanguageModel] = None, + verbose: Optional[bool] = None, + **kwargs: Any, +) -> RefineDocumentsChain: + initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose) # type: ignore[arg-type] + _refine_llm = refine_llm or llm + refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose) # type: ignore[arg-type] + return RefineDocumentsChain( + initial_llm_chain=initial_chain, + refine_llm_chain=refine_chain, + document_variable_name=document_variable_name, + initial_response_name=initial_response_name, + document_prompt=document_prompt, + verbose=verbose, # type: ignore[arg-type] + **kwargs, + ) + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "This function is deprecated. 
Refer to this guide on retrieval and question " + "answering with sources: " + "https://python.langchain.com/docs/how_to/qa_sources/" + "\nSee also the following migration guides for replacements " + "based on `chain_type`:\n" + "stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501 + "map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501 + "refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n" # noqa: E501 + "map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501 + ), +) +def load_qa_with_sources_chain( + llm: BaseLanguageModel, + chain_type: str = "stuff", + verbose: Optional[bool] = None, + **kwargs: Any, +) -> BaseCombineDocumentsChain: + """Load a question answering with sources chain. + + Args: + llm: Language Model to use in the chain. + chain_type: Type of document combining chain to use. Should be one of "stuff", + "map_reduce", "refine" and "map_rerank". + verbose: Whether chains should be run in verbose mode or not. Note that this + applies to all chains that make up the final chain. + + Returns: + A chain to use for question answering with sources. + """ + loader_mapping: Mapping[str, LoadingCallable] = { + "stuff": _load_stuff_chain, + "map_reduce": _load_map_reduce_chain, + "refine": _load_refine_chain, + "map_rerank": _load_map_rerank_chain, + } + if chain_type not in loader_mapping: + raise ValueError( + f"Got unsupported chain type: {chain_type}. " + f"Should be one of {loader_mapping.keys()}" + ) + _func: LoadingCallable = loader_mapping[chain_type] + return _func(llm, verbose=verbose, **kwargs) diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/map_reduce_prompt.py b/venv/Lib/site-packages/langchain/chains/qa_with_sources/map_reduce_prompt.py new file mode 100644 index 00000000..e0c8545e --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/qa_with_sources/map_reduce_prompt.py @@ -0,0 +1,55 @@ +# flake8: noqa +from langchain_core.prompts import PromptTemplate + +question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question. +Return any relevant text verbatim. +{context} +Question: {question} +Relevant text, if any:""" +QUESTION_PROMPT = PromptTemplate( + template=question_prompt_template, input_variables=["context", "question"] +) + +combine_prompt_template = """Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES"). +If you don't know the answer, just say that you don't know. Don't try to make up an answer. +ALWAYS return a "SOURCES" part in your answer. + +QUESTION: Which state/country's law governs the interpretation of the contract? +========= +Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights. +Source: 28-pl +Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. 
The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries. +Source: 30-pl +Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur, +Source: 4-pl +========= +FINAL ANSWER: This Agreement is governed by English law. +SOURCES: 28-pl + +QUESTION: What did the president say about Michael Jackson? +========= +Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. +Source: 0-pl +Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. +Source: 24-pl +Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. 
And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay. +Source: 5-pl +Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation. +Source: 34-pl +========= +FINAL ANSWER: The president did not mention Michael Jackson. +SOURCES: + +QUESTION: {question} +========= +{summaries} +========= +FINAL ANSWER:""" +COMBINE_PROMPT = PromptTemplate( + template=combine_prompt_template, input_variables=["summaries", "question"] +) + +EXAMPLE_PROMPT = PromptTemplate( + template="Content: {page_content}\nSource: {source}", + input_variables=["page_content", "source"], +) diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/refine_prompts.py b/venv/Lib/site-packages/langchain/chains/qa_with_sources/refine_prompts.py new file mode 100644 index 00000000..2e13f541 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/qa_with_sources/refine_prompts.py @@ -0,0 +1,38 @@ +# flake8: noqa +from langchain_core.prompts import PromptTemplate + +DEFAULT_REFINE_PROMPT_TMPL = ( + "The original question is as follows: {question}\n" + "We have provided an existing answer, including sources: {existing_answer}\n" + "We have the opportunity to refine the existing answer" + "(only if needed) with some more context below.\n" + "------------\n" + "{context_str}\n" + "------------\n" + "Given the new context, refine the original answer to better " + "answer the question. " + "If you do update it, please update the sources as well. " + "If the context isn't useful, return the original answer." +) +DEFAULT_REFINE_PROMPT = PromptTemplate( + input_variables=["question", "existing_answer", "context_str"], + template=DEFAULT_REFINE_PROMPT_TMPL, +) + + +DEFAULT_TEXT_QA_PROMPT_TMPL = ( + "Context information is below. 
\n" + "---------------------\n" + "{context_str}" + "\n---------------------\n" + "Given the context information and not prior knowledge, " + "answer the question: {question}\n" +) +DEFAULT_TEXT_QA_PROMPT = PromptTemplate( + input_variables=["context_str", "question"], template=DEFAULT_TEXT_QA_PROMPT_TMPL +) + +EXAMPLE_PROMPT = PromptTemplate( + template="Content: {page_content}\nSource: {source}", + input_variables=["page_content", "source"], +) diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/retrieval.py b/venv/Lib/site-packages/langchain/chains/qa_with_sources/retrieval.py new file mode 100644 index 00000000..8b2cba75 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/qa_with_sources/retrieval.py @@ -0,0 +1,66 @@ +"""Question-answering with sources over an index.""" + +from typing import Any + +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, +) +from langchain_core.documents import Document +from langchain_core.retrievers import BaseRetriever +from pydantic import Field + +from langchain.chains.combine_documents.stuff import StuffDocumentsChain +from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain + + +class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain): + """Question-answering with sources over an index.""" + + retriever: BaseRetriever = Field(exclude=True) + """Index to connect to.""" + reduce_k_below_max_tokens: bool = False + """Reduce the number of results to return from store based on tokens limit""" + max_tokens_limit: int = 3375 + """Restrict the docs to return from store based on tokens, + enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true""" + + def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]: + num_docs = len(docs) + + if self.reduce_k_below_max_tokens and isinstance( + self.combine_documents_chain, StuffDocumentsChain + ): + tokens = [ + self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content) + for doc in docs + ] + token_count = sum(tokens[:num_docs]) + while token_count > self.max_tokens_limit: + num_docs -= 1 + token_count -= tokens[num_docs] + + return docs[:num_docs] + + def _get_docs( + self, inputs: dict[str, Any], *, run_manager: CallbackManagerForChainRun + ) -> list[Document]: + question = inputs[self.question_key] + docs = self.retriever.invoke( + question, config={"callbacks": run_manager.get_child()} + ) + return self._reduce_tokens_below_limit(docs) + + async def _aget_docs( + self, inputs: dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun + ) -> list[Document]: + question = inputs[self.question_key] + docs = await self.retriever.ainvoke( + question, config={"callbacks": run_manager.get_child()} + ) + return self._reduce_tokens_below_limit(docs) + + @property + def _chain_type(self) -> str: + """Return the chain type.""" + return "retrieval_qa_with_sources_chain" diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/stuff_prompt.py b/venv/Lib/site-packages/langchain/chains/qa_with_sources/stuff_prompt.py new file mode 100644 index 00000000..82290ee0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/qa_with_sources/stuff_prompt.py @@ -0,0 +1,44 @@ +# flake8: noqa +from langchain_core.prompts import PromptTemplate + +template = """Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES"). +If you don't know the answer, just say that you don't know. 
Don't try to make up an answer. +ALWAYS return a "SOURCES" part in your answer. + +QUESTION: Which state/country's law governs the interpretation of the contract? +========= +Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights. +Source: 28-pl +Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries. +Source: 30-pl +Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur, +Source: 4-pl +========= +FINAL ANSWER: This Agreement is governed by English law. +SOURCES: 28-pl + +QUESTION: What did the president say about Michael Jackson? +========= +Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. +Source: 0-pl +Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. 
\n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. +Source: 24-pl +Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay. +Source: 5-pl +Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation. +Source: 34-pl +========= +FINAL ANSWER: The president did not mention Michael Jackson. 
+SOURCES:
+
+QUESTION: {question}
+=========
+{summaries}
+=========
+FINAL ANSWER:"""
+PROMPT = PromptTemplate(template=template, input_variables=["summaries", "question"])
+
+EXAMPLE_PROMPT = PromptTemplate(
+    template="Content: {page_content}\nSource: {source}",
+    input_variables=["page_content", "source"],
+)
diff --git a/venv/Lib/site-packages/langchain/chains/qa_with_sources/vector_db.py b/venv/Lib/site-packages/langchain/chains/qa_with_sources/vector_db.py
new file mode 100644
index 00000000..e8ca7286
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/qa_with_sources/vector_db.py
@@ -0,0 +1,75 @@
+"""Question-answering with sources over a vector database."""
+
+import warnings
+from typing import Any
+
+from langchain_core.callbacks import (
+    AsyncCallbackManagerForChainRun,
+    CallbackManagerForChainRun,
+)
+from langchain_core.documents import Document
+from langchain_core.vectorstores import VectorStore
+from pydantic import Field, model_validator
+
+from langchain.chains.combine_documents.stuff import StuffDocumentsChain
+from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
+
+
+class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
+    """Question-answering with sources over a vector database."""
+
+    vectorstore: VectorStore = Field(exclude=True)
+    """Vector Database to connect to."""
+    k: int = 4
+    """Number of results to return from store"""
+    reduce_k_below_max_tokens: bool = False
+    """Reduce the number of results to return from store based on tokens limit"""
+    max_tokens_limit: int = 3375
+    """Restrict the docs to return from store based on tokens,
+    enforced only for StuffDocumentsChain and only if reduce_k_below_max_tokens is set to true"""
+    search_kwargs: dict[str, Any] = Field(default_factory=dict)
+    """Extra search args."""
+
+    def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
+        num_docs = len(docs)
+
+        if self.reduce_k_below_max_tokens and isinstance(
+            self.combine_documents_chain, StuffDocumentsChain
+        ):
+            tokens = [
+                self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
+                for doc in docs
+            ]
+            token_count = sum(tokens[:num_docs])
+            while token_count > self.max_tokens_limit:
+                num_docs -= 1
+                token_count -= tokens[num_docs]
+
+        return docs[:num_docs]
+
+    def _get_docs(
+        self, inputs: dict[str, Any], *, run_manager: CallbackManagerForChainRun
+    ) -> list[Document]:
+        question = inputs[self.question_key]
+        docs = self.vectorstore.similarity_search(
+            question, k=self.k, **self.search_kwargs
+        )
+        return self._reduce_tokens_below_limit(docs)
+
+    async def _aget_docs(
+        self, inputs: dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
+    ) -> list[Document]:
+        raise NotImplementedError("VectorDBQAWithSourcesChain does not support async")
+
+    @model_validator(mode="before")
+    @classmethod
+    def raise_deprecation(cls, values: dict) -> Any:
+        warnings.warn(
+            "`VectorDBQAWithSourcesChain` is deprecated - "
+            "please use `from langchain.chains import RetrievalQAWithSourcesChain`"
+        )
+        return values
+
+    @property
+    def _chain_type(self) -> str:
+        return "vector_db_qa_with_sources_chain"
diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/__init__.py b/venv/Lib/site-packages/langchain/chains/query_constructor/__init__.py
new file mode 100644
index 00000000..9d08ca0e
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/query_constructor/__init__.py
@@ -0,0 +1,3 @@
+from langchain.chains.query_constructor.base import load_query_constructor_runnable
+
+__all__ =
["load_query_constructor_runnable"] diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..3d7296d2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..d51cb721 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/ir.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/ir.cpython-312.pyc new file mode 100644 index 00000000..7ee8d0ea Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/ir.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/parser.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/parser.cpython-312.pyc new file mode 100644 index 00000000..373fee49 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/parser.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..0883838c Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/schema.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/schema.cpython-312.pyc new file mode 100644 index 00000000..0f357a03 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/query_constructor/__pycache__/schema.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/base.py b/venv/Lib/site-packages/langchain/chains/query_constructor/base.py new file mode 100644 index 00000000..c1dadaab --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/query_constructor/base.py @@ -0,0 +1,375 @@ +"""LLM Chain for turning a user text query into a structured query.""" + +from __future__ import annotations + +import json +from collections.abc import Sequence +from typing import Any, Callable, Optional, Union, cast + +from langchain_core._api import deprecated +from langchain_core.exceptions import OutputParserException +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.output_parsers.json import parse_and_check_json_markdown +from langchain_core.prompts import BasePromptTemplate +from langchain_core.prompts.few_shot import FewShotPromptTemplate +from langchain_core.runnables import Runnable +from langchain_core.structured_query import ( + Comparator, + Comparison, + FilterDirective, + Operation, + Operator, + StructuredQuery, +) + +from langchain.chains.llm import LLMChain +from langchain.chains.query_constructor.parser import get_parser +from 
langchain.chains.query_constructor.prompt import (
+    DEFAULT_EXAMPLES,
+    DEFAULT_PREFIX,
+    DEFAULT_SCHEMA_PROMPT,
+    DEFAULT_SUFFIX,
+    EXAMPLE_PROMPT,
+    EXAMPLES_WITH_LIMIT,
+    PREFIX_WITH_DATA_SOURCE,
+    SCHEMA_WITH_LIMIT_PROMPT,
+    SUFFIX_WITHOUT_DATA_SOURCE,
+    USER_SPECIFIED_EXAMPLE_PROMPT,
+)
+from langchain.chains.query_constructor.schema import AttributeInfo
+
+
+class StructuredQueryOutputParser(BaseOutputParser[StructuredQuery]):
+    """Output parser that parses a structured query."""
+
+    ast_parse: Callable
+    """Callable that parses dict into internal representation of query language."""
+
+    def parse(self, text: str) -> StructuredQuery:
+        try:
+            expected_keys = ["query", "filter"]
+            allowed_keys = ["query", "filter", "limit"]
+            parsed = parse_and_check_json_markdown(text, expected_keys)
+            if parsed["query"] is None or len(parsed["query"]) == 0:
+                parsed["query"] = " "
+            if parsed["filter"] == "NO_FILTER" or not parsed["filter"]:
+                parsed["filter"] = None
+            else:
+                parsed["filter"] = self.ast_parse(parsed["filter"])
+            if not parsed.get("limit"):
+                parsed.pop("limit", None)
+            return StructuredQuery(
+                **{k: v for k, v in parsed.items() if k in allowed_keys}
+            )
+        except Exception as e:
+            raise OutputParserException(
+                f"Parsing text\n{text}\nraised the following error:\n{e}"
+            )
+
+    @classmethod
+    def from_components(
+        cls,
+        allowed_comparators: Optional[Sequence[Comparator]] = None,
+        allowed_operators: Optional[Sequence[Operator]] = None,
+        allowed_attributes: Optional[Sequence[str]] = None,
+        fix_invalid: bool = False,
+    ) -> StructuredQueryOutputParser:
+        """
+        Create a structured query output parser from components.
+
+        Args:
+            allowed_comparators: allowed comparators
+            allowed_operators: allowed operators
+            allowed_attributes: allowed attributes
+            fix_invalid: whether to fix invalid filter directives by ignoring
+                invalid operators, comparators and attributes
+
+        Returns:
+            a structured query output parser
+        """
+        ast_parse: Callable
+        if fix_invalid:
+
+            def ast_parse(raw_filter: str) -> Optional[FilterDirective]:
+                filter = cast(Optional[FilterDirective], get_parser().parse(raw_filter))
+                fixed = fix_filter_directive(
+                    filter,
+                    allowed_comparators=allowed_comparators,
+                    allowed_operators=allowed_operators,
+                    allowed_attributes=allowed_attributes,
+                )
+                return fixed
+
+        else:
+            ast_parse = get_parser(
+                allowed_comparators=allowed_comparators,
+                allowed_operators=allowed_operators,
+                allowed_attributes=allowed_attributes,
+            ).parse
+        return cls(ast_parse=ast_parse)
+
+
+def fix_filter_directive(
+    filter: Optional[FilterDirective],
+    *,
+    allowed_comparators: Optional[Sequence[Comparator]] = None,
+    allowed_operators: Optional[Sequence[Operator]] = None,
+    allowed_attributes: Optional[Sequence[str]] = None,
+) -> Optional[FilterDirective]:
+    """Fix invalid filter directive.
+
+    Args:
+        filter: Filter directive to fix.
+        allowed_comparators: allowed comparators. Defaults to all comparators.
+        allowed_operators: allowed operators. Defaults to all operators.
+        allowed_attributes: allowed attributes. Defaults to all attributes.
+
+    Returns:
+        Fixed filter directive.
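A small sketch of what this fixing pass does, using hypothetical `rating` and `genre` attributes:

```python
from langchain_core.structured_query import Comparator, Comparison

from langchain.chains.query_constructor.base import fix_filter_directive

gt_filter = Comparison(comparator=Comparator.GT, attribute="rating", value=8)
eq_filter = Comparison(comparator=Comparator.EQ, attribute="genre", value="scifi")

# GT is not in the allowed set, so that directive is dropped entirely ...
assert fix_filter_directive(gt_filter, allowed_comparators=[Comparator.EQ]) is None

# ... while an allowed comparison passes through unchanged.
assert fix_filter_directive(eq_filter, allowed_comparators=[Comparator.EQ]) is eq_filter
```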
+ """ + if ( + not (allowed_comparators or allowed_operators or allowed_attributes) + ) or not filter: + return filter + + elif isinstance(filter, Comparison): + if allowed_comparators and filter.comparator not in allowed_comparators: + return None + if allowed_attributes and filter.attribute not in allowed_attributes: + return None + return filter + elif isinstance(filter, Operation): + if allowed_operators and filter.operator not in allowed_operators: + return None + args = [ + cast( + FilterDirective, + fix_filter_directive( + arg, + allowed_comparators=allowed_comparators, + allowed_operators=allowed_operators, + allowed_attributes=allowed_attributes, + ), + ) + for arg in filter.arguments + if arg is not None + ] + if not args: + return None + elif len(args) == 1 and filter.operator in (Operator.AND, Operator.OR): + return args[0] + else: + return Operation( + operator=filter.operator, + arguments=args, + ) + else: + return filter + + +def _format_attribute_info(info: Sequence[Union[AttributeInfo, dict]]) -> str: + info_dicts = {} + for i in info: + i_dict = dict(i) + info_dicts[i_dict.pop("name")] = i_dict + return json.dumps(info_dicts, indent=4).replace("{", "{{").replace("}", "}}") + + +def construct_examples(input_output_pairs: Sequence[tuple[str, dict]]) -> list[dict]: + """Construct examples from input-output pairs. + + Args: + input_output_pairs: Sequence of input-output pairs. + + Returns: + List of examples. + """ + examples = [] + for i, (_input, output) in enumerate(input_output_pairs): + structured_request = ( + json.dumps(output, indent=4).replace("{", "{{").replace("}", "}}") + ) + example = { + "i": i + 1, + "user_query": _input, + "structured_request": structured_request, + } + examples.append(example) + return examples + + +def get_query_constructor_prompt( + document_contents: str, + attribute_info: Sequence[Union[AttributeInfo, dict]], + *, + examples: Optional[Sequence] = None, + allowed_comparators: Sequence[Comparator] = tuple(Comparator), + allowed_operators: Sequence[Operator] = tuple(Operator), + enable_limit: bool = False, + schema_prompt: Optional[BasePromptTemplate] = None, + **kwargs: Any, +) -> BasePromptTemplate: + """Create query construction prompt. + + Args: + document_contents: The contents of the document to be queried. + attribute_info: A list of AttributeInfo objects describing + the attributes of the document. + examples: Optional list of examples to use for the chain. + allowed_comparators: Sequence of allowed comparators. + allowed_operators: Sequence of allowed operators. + enable_limit: Whether to enable the limit operator. Defaults to False. + schema_prompt: Prompt for describing query schema. Should have string input + variables allowed_comparators and allowed_operators. + kwargs: Additional named params to pass to FewShotPromptTemplate init. + + Returns: + A prompt template that can be used to construct queries. 
+ """ + default_schema_prompt = ( + SCHEMA_WITH_LIMIT_PROMPT if enable_limit else DEFAULT_SCHEMA_PROMPT + ) + schema_prompt = schema_prompt or default_schema_prompt + attribute_str = _format_attribute_info(attribute_info) + schema = schema_prompt.format( + allowed_comparators=" | ".join(allowed_comparators), + allowed_operators=" | ".join(allowed_operators), + ) + if examples and isinstance(examples[0], tuple): + examples = construct_examples(examples) + example_prompt = USER_SPECIFIED_EXAMPLE_PROMPT + prefix = PREFIX_WITH_DATA_SOURCE.format( + schema=schema, content=document_contents, attributes=attribute_str + ) + suffix = SUFFIX_WITHOUT_DATA_SOURCE.format(i=len(examples) + 1) + else: + examples = examples or ( + EXAMPLES_WITH_LIMIT if enable_limit else DEFAULT_EXAMPLES + ) + example_prompt = EXAMPLE_PROMPT + prefix = DEFAULT_PREFIX.format(schema=schema) + suffix = DEFAULT_SUFFIX.format( + i=len(examples) + 1, content=document_contents, attributes=attribute_str + ) + return FewShotPromptTemplate( + examples=list(examples), + example_prompt=example_prompt, + input_variables=["query"], + suffix=suffix, + prefix=prefix, + **kwargs, + ) + + +@deprecated( + since="0.2.13", + alternative="load_query_constructor_runnable", + removal="1.0", +) +def load_query_constructor_chain( + llm: BaseLanguageModel, + document_contents: str, + attribute_info: Sequence[Union[AttributeInfo, dict]], + examples: Optional[list] = None, + allowed_comparators: Sequence[Comparator] = tuple(Comparator), + allowed_operators: Sequence[Operator] = tuple(Operator), + enable_limit: bool = False, + schema_prompt: Optional[BasePromptTemplate] = None, + **kwargs: Any, +) -> LLMChain: + """Load a query constructor chain. + + Args: + llm: BaseLanguageModel to use for the chain. + document_contents: The contents of the document to be queried. + attribute_info: Sequence of attributes in the document. + examples: Optional list of examples to use for the chain. + allowed_comparators: Sequence of allowed comparators. Defaults to all + Comparators. + allowed_operators: Sequence of allowed operators. Defaults to all Operators. + enable_limit: Whether to enable the limit operator. Defaults to False. + schema_prompt: Prompt for describing query schema. Should have string input + variables allowed_comparators and allowed_operators. + **kwargs: Arbitrary named params to pass to LLMChain. + + Returns: + A LLMChain that can be used to construct queries. + """ + prompt = get_query_constructor_prompt( + document_contents, + attribute_info, + examples=examples, + allowed_comparators=allowed_comparators, + allowed_operators=allowed_operators, + enable_limit=enable_limit, + schema_prompt=schema_prompt, + ) + allowed_attributes = [] + for ainfo in attribute_info: + allowed_attributes.append( + ainfo.name if isinstance(ainfo, AttributeInfo) else ainfo["name"] + ) + output_parser = StructuredQueryOutputParser.from_components( + allowed_comparators=allowed_comparators, + allowed_operators=allowed_operators, + allowed_attributes=allowed_attributes, + ) + # For backwards compatibility. 
+ prompt.output_parser = output_parser + return LLMChain(llm=llm, prompt=prompt, output_parser=output_parser, **kwargs) + + +def load_query_constructor_runnable( + llm: BaseLanguageModel, + document_contents: str, + attribute_info: Sequence[Union[AttributeInfo, dict]], + *, + examples: Optional[Sequence] = None, + allowed_comparators: Sequence[Comparator] = tuple(Comparator), + allowed_operators: Sequence[Operator] = tuple(Operator), + enable_limit: bool = False, + schema_prompt: Optional[BasePromptTemplate] = None, + fix_invalid: bool = False, + **kwargs: Any, +) -> Runnable: + """Load a query constructor runnable chain. + + Args: + llm: BaseLanguageModel to use for the chain. + document_contents: Description of the page contents of the document to be + queried. + attribute_info: Sequence of attributes in the document. + examples: Optional list of examples to use for the chain. + allowed_comparators: Sequence of allowed comparators. Defaults to all + Comparators. + allowed_operators: Sequence of allowed operators. Defaults to all Operators. + enable_limit: Whether to enable the limit operator. Defaults to False. + schema_prompt: Prompt for describing query schema. Should have string input + variables allowed_comparators and allowed_operators. + fix_invalid: Whether to fix invalid filter directives by ignoring invalid + operators, comparators and attributes. + kwargs: Additional named params to pass to FewShotPromptTemplate init. + + Returns: + A Runnable that can be used to construct queries. + """ + prompt = get_query_constructor_prompt( + document_contents, + attribute_info, + examples=examples, + allowed_comparators=allowed_comparators, + allowed_operators=allowed_operators, + enable_limit=enable_limit, + schema_prompt=schema_prompt, + **kwargs, + ) + allowed_attributes = [] + for ainfo in attribute_info: + allowed_attributes.append( + ainfo.name if isinstance(ainfo, AttributeInfo) else ainfo["name"] + ) + output_parser = StructuredQueryOutputParser.from_components( + allowed_comparators=allowed_comparators, + allowed_operators=allowed_operators, + allowed_attributes=allowed_attributes, + fix_invalid=fix_invalid, + ) + return prompt | llm | output_parser diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/ir.py b/venv/Lib/site-packages/langchain/chains/query_constructor/ir.py new file mode 100644 index 00000000..f7140159 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/query_constructor/ir.py @@ -0,0 +1,23 @@ +"""Internal representation of a structured query language.""" + +from langchain_core.structured_query import ( + Comparator, + Comparison, + Expr, + FilterDirective, + Operation, + Operator, + StructuredQuery, + Visitor, +) + +__all__ = [ + "Visitor", + "Expr", + "Operator", + "Comparator", + "FilterDirective", + "Comparison", + "Operation", + "StructuredQuery", +] diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/parser.py b/venv/Lib/site-packages/langchain/chains/query_constructor/parser.py new file mode 100644 index 00000000..8e39fd1d --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/query_constructor/parser.py @@ -0,0 +1,205 @@ +import datetime +import warnings +from collections.abc import Sequence +from typing import Any, Literal, Optional, Union + +from langchain_core.utils import check_package_version +from typing_extensions import TypedDict + +try: + check_package_version("lark", gte_version="1.1.5") + from lark import Lark, Transformer, v_args +except ImportError: + + def v_args(*args: Any, **kwargs: Any) -> 
Any: # type: ignore[misc] + """Dummy decorator for when lark is not installed.""" + return lambda _: None + + Transformer = object # type: ignore[assignment,misc] + Lark = object # type: ignore[assignment,misc] + +from langchain_core.structured_query import ( + Comparator, + Comparison, + FilterDirective, + Operation, + Operator, +) + +GRAMMAR = r""" + ?program: func_call + ?expr: func_call + | value + + func_call: CNAME "(" [args] ")" + + ?value: SIGNED_INT -> int + | SIGNED_FLOAT -> float + | DATE -> date + | DATETIME -> datetime + | list + | string + | ("false" | "False" | "FALSE") -> false + | ("true" | "True" | "TRUE") -> true + + args: expr ("," expr)* + DATE.2: /["']?(\d{4}-[01]\d-[0-3]\d)["']?/ + DATETIME.2: /["']?\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d[Zz]?["']?/ + string: /'[^']*'/ | ESCAPED_STRING + list: "[" [args] "]" + + %import common.CNAME + %import common.ESCAPED_STRING + %import common.SIGNED_FLOAT + %import common.SIGNED_INT + %import common.WS + %ignore WS +""" + + +class ISO8601Date(TypedDict): + """A date in ISO 8601 format (YYYY-MM-DD).""" + + date: str + type: Literal["date"] + + +class ISO8601DateTime(TypedDict): + """A datetime in ISO 8601 format (YYYY-MM-DDTHH:MM:SS).""" + + datetime: str + type: Literal["datetime"] + + +@v_args(inline=True) +class QueryTransformer(Transformer): + """Transform a query string into an intermediate representation.""" + + def __init__( + self, + *args: Any, + allowed_comparators: Optional[Sequence[Comparator]] = None, + allowed_operators: Optional[Sequence[Operator]] = None, + allowed_attributes: Optional[Sequence[str]] = None, + **kwargs: Any, + ): + super().__init__(*args, **kwargs) + self.allowed_comparators = allowed_comparators + self.allowed_operators = allowed_operators + self.allowed_attributes = allowed_attributes + + def program(self, *items: Any) -> tuple: + return items + + def func_call(self, func_name: Any, args: list) -> FilterDirective: + func = self._match_func_name(str(func_name)) + if isinstance(func, Comparator): + if self.allowed_attributes and args[0] not in self.allowed_attributes: + raise ValueError( + f"Received invalid attributes {args[0]}. Allowed attributes are " + f"{self.allowed_attributes}" + ) + return Comparison(comparator=func, attribute=args[0], value=args[1]) + elif len(args) == 1 and func in (Operator.AND, Operator.OR): + return args[0] + else: + return Operation(operator=func, arguments=args) + + def _match_func_name(self, func_name: str) -> Union[Operator, Comparator]: + if func_name in set(Comparator): + if self.allowed_comparators is not None: + if func_name not in self.allowed_comparators: + raise ValueError( + f"Received disallowed comparator {func_name}. Allowed " + f"comparators are {self.allowed_comparators}" + ) + return Comparator(func_name) + elif func_name in set(Operator): + if self.allowed_operators is not None: + if func_name not in self.allowed_operators: + raise ValueError( + f"Received disallowed operator {func_name}. Allowed operators" + f" are {self.allowed_operators}" + ) + return Operator(func_name) + else: + raise ValueError( + f"Received unrecognized function {func_name}. 
Valid functions are "
+                f"{list(Operator) + list(Comparator)}"
+            )
+
+    def args(self, *items: Any) -> tuple:
+        return items
+
+    def false(self) -> bool:
+        return False
+
+    def true(self) -> bool:
+        return True
+
+    def list(self, item: Any) -> list:
+        if item is None:
+            return []
+        return list(item)
+
+    def int(self, item: Any) -> int:
+        return int(item)
+
+    def float(self, item: Any) -> float:
+        return float(item)
+
+    def date(self, item: Any) -> ISO8601Date:
+        item = str(item).strip("\"'")
+        try:
+            datetime.datetime.strptime(item, "%Y-%m-%d")
+        except ValueError:
+            warnings.warn(
+                "Dates are expected to be provided in ISO 8601 date format "
+                "(YYYY-MM-DD)."
+            )
+        return {"date": item, "type": "date"}
+
+    def datetime(self, item: Any) -> ISO8601DateTime:
+        item = str(item).strip("\"'")
+        try:
+            # Parse full ISO 8601 datetime format
+            datetime.datetime.strptime(item, "%Y-%m-%dT%H:%M:%S%z")
+        except ValueError:
+            try:
+                datetime.datetime.strptime(item, "%Y-%m-%dT%H:%M:%S")
+            except ValueError:
+                raise ValueError(
+                    "Datetime values are expected to be in ISO 8601 format."
+                )
+        return {"datetime": item, "type": "datetime"}
+
+    def string(self, item: Any) -> str:
+        # Remove escaped quotes
+        return str(item).strip("\"'")
+
+
+def get_parser(
+    allowed_comparators: Optional[Sequence[Comparator]] = None,
+    allowed_operators: Optional[Sequence[Operator]] = None,
+    allowed_attributes: Optional[Sequence[str]] = None,
+) -> Lark:
+    """Return a parser for the query language.
+
+    Args:
+        allowed_comparators: Optional sequence of allowed comparators.
+        allowed_operators: Optional sequence of allowed operators.
+        allowed_attributes: Optional sequence of allowed attribute names.
+
+    Returns:
+        Lark parser for the query language.
+    """
+    # QueryTransformer is None when Lark cannot be imported.
+    if QueryTransformer is None:
+        raise ImportError(
+            "Cannot import lark, please install it with 'pip install lark'."
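+            # The parser constructed below accepts call-style filter strings,
+            # e.g. (mirroring the examples in prompt.py):
+            #   eq("artist", "Taylor Swift")
+            #   and(eq("genre", "pop"), lt("length", 180))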
+ ) + transformer = QueryTransformer( + allowed_comparators=allowed_comparators, + allowed_operators=allowed_operators, + allowed_attributes=allowed_attributes, + ) + return Lark(GRAMMAR, parser="lalr", transformer=transformer, start="program") diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/prompt.py b/venv/Lib/site-packages/langchain/chains/query_constructor/prompt.py new file mode 100644 index 00000000..d1355b32 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/query_constructor/prompt.py @@ -0,0 +1,227 @@ +# flake8: noqa +from langchain_core.prompts import PromptTemplate + +SONG_DATA_SOURCE = """\ +```json +{{ + "content": "Lyrics of a song", + "attributes": {{ + "artist": {{ + "type": "string", + "description": "Name of the song artist" + }}, + "length": {{ + "type": "integer", + "description": "Length of the song in seconds" + }}, + "genre": {{ + "type": "string", + "description": "The song genre, one of \"pop\", \"rock\" or \"rap\"" + }} + }} +}} +```\ +""" + +FULL_ANSWER = """\ +```json +{{ + "query": "teenager love", + "filter": "and(or(eq(\\"artist\\", \\"Taylor Swift\\"), eq(\\"artist\\", \\"Katy Perry\\")), lt(\\"length\\", 180), eq(\\"genre\\", \\"pop\\"))" +}} +```\ +""" + +NO_FILTER_ANSWER = """\ +```json +{{ + "query": "", + "filter": "NO_FILTER" +}} +```\ +""" + +WITH_LIMIT_ANSWER = """\ +```json +{{ + "query": "love", + "filter": "NO_FILTER", + "limit": 2 +}} +```\ +""" + +DEFAULT_EXAMPLES = [ + { + "i": 1, + "data_source": SONG_DATA_SOURCE, + "user_query": "What are songs by Taylor Swift or Katy Perry about teenage romance under 3 minutes long in the dance pop genre", + "structured_request": FULL_ANSWER, + }, + { + "i": 2, + "data_source": SONG_DATA_SOURCE, + "user_query": "What are songs that were not published on Spotify", + "structured_request": NO_FILTER_ANSWER, + }, +] + +EXAMPLES_WITH_LIMIT = [ + { + "i": 1, + "data_source": SONG_DATA_SOURCE, + "user_query": "What are songs by Taylor Swift or Katy Perry about teenage romance under 3 minutes long in the dance pop genre", + "structured_request": FULL_ANSWER, + }, + { + "i": 2, + "data_source": SONG_DATA_SOURCE, + "user_query": "What are songs that were not published on Spotify", + "structured_request": NO_FILTER_ANSWER, + }, + { + "i": 3, + "data_source": SONG_DATA_SOURCE, + "user_query": "What are three songs about love", + "structured_request": WITH_LIMIT_ANSWER, + }, +] + +EXAMPLE_PROMPT_TEMPLATE = """\ +<< Example {i}. >> +Data Source: +{data_source} + +User Query: +{user_query} + +Structured Request: +{structured_request} +""" + +EXAMPLE_PROMPT = PromptTemplate.from_template(EXAMPLE_PROMPT_TEMPLATE) + +USER_SPECIFIED_EXAMPLE_PROMPT = PromptTemplate.from_template( + """\ +<< Example {i}. >> +User Query: +{user_query} + +Structured Request: +```json +{structured_request} +``` +""" +) + +DEFAULT_SCHEMA = """\ +<< Structured Request Schema >> +When responding use a markdown code snippet with a JSON object formatted in the following schema: + +```json +{{{{ + "query": string \\ text string to compare to document contents + "filter": string \\ logical condition statement for filtering documents +}}}} +``` + +The query string should contain only text that is expected to match the contents of documents. Any conditions in the filter should not be mentioned in the query as well. + +A logical condition statement is composed of one or more comparison and logical operation statements. 
+ +A comparison statement takes the form: `comp(attr, val)`: +- `comp` ({allowed_comparators}): comparator +- `attr` (string): name of attribute to apply the comparison to +- `val` (string): is the comparison value + +A logical operation statement takes the form `op(statement1, statement2, ...)`: +- `op` ({allowed_operators}): logical operator +- `statement1`, `statement2`, ... (comparison statements or logical operation statements): one or more statements to apply the operation to + +Make sure that you only use the comparators and logical operators listed above and no others. +Make sure that filters only refer to attributes that exist in the data source. +Make sure that filters only use the attributed names with its function names if there are functions applied on them. +Make sure that filters only use format `YYYY-MM-DD` when handling date data typed values. +Make sure that filters take into account the descriptions of attributes and only make comparisons that are feasible given the type of data being stored. +Make sure that filters are only used as needed. If there are no filters that should be applied return "NO_FILTER" for the filter value.\ +""" +DEFAULT_SCHEMA_PROMPT = PromptTemplate.from_template(DEFAULT_SCHEMA) + +SCHEMA_WITH_LIMIT = """\ +<< Structured Request Schema >> +When responding use a markdown code snippet with a JSON object formatted in the following schema: + +```json +{{{{ + "query": string \\ text string to compare to document contents + "filter": string \\ logical condition statement for filtering documents + "limit": int \\ the number of documents to retrieve +}}}} +``` + +The query string should contain only text that is expected to match the contents of documents. Any conditions in the filter should not be mentioned in the query as well. + +A logical condition statement is composed of one or more comparison and logical operation statements. + +A comparison statement takes the form: `comp(attr, val)`: +- `comp` ({allowed_comparators}): comparator +- `attr` (string): name of attribute to apply the comparison to +- `val` (string): is the comparison value + +A logical operation statement takes the form `op(statement1, statement2, ...)`: +- `op` ({allowed_operators}): logical operator +- `statement1`, `statement2`, ... (comparison statements or logical operation statements): one or more statements to apply the operation to + +Make sure that you only use the comparators and logical operators listed above and no others. +Make sure that filters only refer to attributes that exist in the data source. +Make sure that filters only use the attributed names with its function names if there are functions applied on them. +Make sure that filters only use format `YYYY-MM-DD` when handling date data typed values. +Make sure that filters take into account the descriptions of attributes and only make comparisons that are feasible given the type of data being stored. +Make sure that filters are only used as needed. If there are no filters that should be applied return "NO_FILTER" for the filter value. +Make sure the `limit` is always an int value. It is an optional parameter so leave it blank if it does not make sense. +""" +SCHEMA_WITH_LIMIT_PROMPT = PromptTemplate.from_template(SCHEMA_WITH_LIMIT) + +DEFAULT_PREFIX = """\ +Your goal is to structure the user's query to match the request schema provided below. 
+ +{schema}\ +""" + +PREFIX_WITH_DATA_SOURCE = ( + DEFAULT_PREFIX + + """ + +<< Data Source >> +```json +{{{{ + "content": "{content}", + "attributes": {attributes} +}}}} +``` +""" +) + +DEFAULT_SUFFIX = """\ +<< Example {i}. >> +Data Source: +```json +{{{{ + "content": "{content}", + "attributes": {attributes} +}}}} +``` + +User Query: +{{query}} + +Structured Request: +""" + +SUFFIX_WITHOUT_DATA_SOURCE = """\ +<< Example {i}. >> +User Query: +{{query}} + +Structured Request: +""" diff --git a/venv/Lib/site-packages/langchain/chains/query_constructor/schema.py b/venv/Lib/site-packages/langchain/chains/query_constructor/schema.py new file mode 100644 index 00000000..56103d9a --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/query_constructor/schema.py @@ -0,0 +1,14 @@ +from pydantic import BaseModel, ConfigDict + + +class AttributeInfo(BaseModel): + """Information about a data source attribute.""" + + name: str + description: str + type: str + + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/__init__.py b/venv/Lib/site-packages/langchain/chains/question_answering/__init__.py new file mode 100644 index 00000000..e347a7fb --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/question_answering/__init__.py @@ -0,0 +1,6 @@ +from langchain.chains.question_answering.chain import LoadingCallable, load_qa_chain + +__all__ = [ + "LoadingCallable", + "load_qa_chain", +] diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..1a6df1d9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/chain.cpython-312.pyc new file mode 100644 index 00000000..b375139c Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/map_reduce_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/map_reduce_prompt.cpython-312.pyc new file mode 100644 index 00000000..62fac4a9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/map_reduce_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/map_rerank_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/map_rerank_prompt.cpython-312.pyc new file mode 100644 index 00000000..dca97f14 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/map_rerank_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/refine_prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/refine_prompts.cpython-312.pyc new file mode 100644 index 00000000..b94367fb Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/refine_prompts.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/stuff_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/stuff_prompt.cpython-312.pyc new file mode 100644 index 00000000..5aa316d8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/question_answering/__pycache__/stuff_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/chain.py b/venv/Lib/site-packages/langchain/chains/question_answering/chain.py new file mode 100644 index 00000000..8dbf6934 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/question_answering/chain.py @@ -0,0 +1,268 @@ +"""Load question answering chains.""" + +from collections.abc import Mapping +from typing import Any, Optional, Protocol + +from langchain_core._api import deprecated +from langchain_core.callbacks import BaseCallbackManager, Callbacks +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate + +from langchain.chains import ReduceDocumentsChain +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain +from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain +from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain +from langchain.chains.combine_documents.refine import RefineDocumentsChain +from langchain.chains.combine_documents.stuff import StuffDocumentsChain +from langchain.chains.llm import LLMChain +from langchain.chains.question_answering import ( + map_reduce_prompt, + refine_prompts, + stuff_prompt, +) +from langchain.chains.question_answering.map_rerank_prompt import ( + PROMPT as MAP_RERANK_PROMPT, +) + + +class LoadingCallable(Protocol): + """Interface for loading the combine documents chain.""" + + def __call__( + self, llm: BaseLanguageModel, **kwargs: Any + ) -> BaseCombineDocumentsChain: + """Callable to load the combine documents chain.""" + + +def _load_map_rerank_chain( + llm: BaseLanguageModel, + prompt: BasePromptTemplate = MAP_RERANK_PROMPT, + verbose: bool = False, + document_variable_name: str = "context", + rank_key: str = "score", + answer_key: str = "answer", + callback_manager: Optional[BaseCallbackManager] = None, + callbacks: Callbacks = None, + **kwargs: Any, +) -> MapRerankDocumentsChain: + llm_chain = LLMChain( + llm=llm, + prompt=prompt, + verbose=verbose, + callback_manager=callback_manager, + callbacks=callbacks, + ) + return MapRerankDocumentsChain( + llm_chain=llm_chain, + rank_key=rank_key, + answer_key=answer_key, + document_variable_name=document_variable_name, + verbose=verbose, + callback_manager=callback_manager, + **kwargs, + ) + + +def _load_stuff_chain( + llm: BaseLanguageModel, + prompt: Optional[BasePromptTemplate] = None, + document_variable_name: str = "context", + verbose: Optional[bool] = None, + callback_manager: Optional[BaseCallbackManager] = None, + callbacks: Callbacks = None, + **kwargs: Any, +) -> StuffDocumentsChain: + _prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm) + llm_chain = LLMChain( + llm=llm, + prompt=_prompt, + verbose=verbose, # type: ignore[arg-type] + callback_manager=callback_manager, + callbacks=callbacks, + ) + # TODO: document prompt + return StuffDocumentsChain( + llm_chain=llm_chain, + document_variable_name=document_variable_name, + verbose=verbose, # type: ignore[arg-type] + callback_manager=callback_manager, + callbacks=callbacks, + **kwargs, + ) + + +def _load_map_reduce_chain( + llm: 
BaseLanguageModel, + question_prompt: Optional[BasePromptTemplate] = None, + combine_prompt: Optional[BasePromptTemplate] = None, + combine_document_variable_name: str = "summaries", + map_reduce_document_variable_name: str = "context", + collapse_prompt: Optional[BasePromptTemplate] = None, + reduce_llm: Optional[BaseLanguageModel] = None, + collapse_llm: Optional[BaseLanguageModel] = None, + verbose: Optional[bool] = None, + callback_manager: Optional[BaseCallbackManager] = None, + callbacks: Callbacks = None, + token_max: int = 3000, + **kwargs: Any, +) -> MapReduceDocumentsChain: + _question_prompt = ( + question_prompt or map_reduce_prompt.QUESTION_PROMPT_SELECTOR.get_prompt(llm) + ) + _combine_prompt = ( + combine_prompt or map_reduce_prompt.COMBINE_PROMPT_SELECTOR.get_prompt(llm) + ) + map_chain = LLMChain( + llm=llm, + prompt=_question_prompt, + verbose=verbose, # type: ignore[arg-type] + callback_manager=callback_manager, + callbacks=callbacks, + ) + _reduce_llm = reduce_llm or llm + reduce_chain = LLMChain( + llm=_reduce_llm, + prompt=_combine_prompt, + verbose=verbose, # type: ignore[arg-type] + callback_manager=callback_manager, + callbacks=callbacks, + ) + # TODO: document prompt + combine_documents_chain = StuffDocumentsChain( + llm_chain=reduce_chain, + document_variable_name=combine_document_variable_name, + verbose=verbose, # type: ignore[arg-type] + callback_manager=callback_manager, + callbacks=callbacks, + ) + if collapse_prompt is None: + collapse_chain = None + if collapse_llm is not None: + raise ValueError( + "collapse_llm provided, but collapse_prompt was not: please " + "provide one or stop providing collapse_llm." + ) + else: + _collapse_llm = collapse_llm or llm + collapse_chain = StuffDocumentsChain( + llm_chain=LLMChain( + llm=_collapse_llm, + prompt=collapse_prompt, + verbose=verbose, # type: ignore[arg-type] + callback_manager=callback_manager, + callbacks=callbacks, + ), + document_variable_name=combine_document_variable_name, + verbose=verbose, # type: ignore[arg-type] + callback_manager=callback_manager, + ) + reduce_documents_chain = ReduceDocumentsChain( + combine_documents_chain=combine_documents_chain, + collapse_documents_chain=collapse_chain, + token_max=token_max, + verbose=verbose, + ) + return MapReduceDocumentsChain( + llm_chain=map_chain, + document_variable_name=map_reduce_document_variable_name, + reduce_documents_chain=reduce_documents_chain, + verbose=verbose, # type: ignore[arg-type] + callback_manager=callback_manager, + callbacks=callbacks, + **kwargs, + ) + + +def _load_refine_chain( + llm: BaseLanguageModel, + question_prompt: Optional[BasePromptTemplate] = None, + refine_prompt: Optional[BasePromptTemplate] = None, + document_variable_name: str = "context_str", + initial_response_name: str = "existing_answer", + refine_llm: Optional[BaseLanguageModel] = None, + verbose: Optional[bool] = None, + callback_manager: Optional[BaseCallbackManager] = None, + callbacks: Callbacks = None, + **kwargs: Any, +) -> RefineDocumentsChain: + _question_prompt = ( + question_prompt or refine_prompts.QUESTION_PROMPT_SELECTOR.get_prompt(llm) + ) + _refine_prompt = refine_prompt or refine_prompts.REFINE_PROMPT_SELECTOR.get_prompt( + llm + ) + initial_chain = LLMChain( + llm=llm, + prompt=_question_prompt, + verbose=verbose, # type: ignore[arg-type] + callback_manager=callback_manager, + callbacks=callbacks, + ) + _refine_llm = refine_llm or llm + refine_chain = LLMChain( + llm=_refine_llm, + prompt=_refine_prompt, + verbose=verbose, # type: 
ignore[arg-type] + callback_manager=callback_manager, + callbacks=callbacks, + ) + return RefineDocumentsChain( + initial_llm_chain=initial_chain, + refine_llm_chain=refine_chain, + document_variable_name=document_variable_name, + initial_response_name=initial_response_name, + verbose=verbose, # type: ignore[arg-type] + callback_manager=callback_manager, + callbacks=callbacks, + **kwargs, + ) + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "This class is deprecated. See the following migration guides for replacements " + "based on `chain_type`:\n" + "stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501 + "map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501 + "refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n" # noqa: E501 + "map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501 + "\nSee also guides on retrieval and question-answering here: " + "https://python.langchain.com/docs/how_to/#qa-with-rag" + ), +) +def load_qa_chain( + llm: BaseLanguageModel, + chain_type: str = "stuff", + verbose: Optional[bool] = None, + callback_manager: Optional[BaseCallbackManager] = None, + **kwargs: Any, +) -> BaseCombineDocumentsChain: + """Load question answering chain. + + Args: + llm: Language Model to use in the chain. + chain_type: Type of document combining chain to use. Should be one of "stuff", + "map_reduce", "map_rerank", and "refine". + verbose: Whether chains should be run in verbose mode or not. Note that this + applies to all chains that make up the final chain. + callback_manager: Callback manager to use for the chain. + + Returns: + A chain to use for question answering. + """ + loader_mapping: Mapping[str, LoadingCallable] = { + "stuff": _load_stuff_chain, + "map_reduce": _load_map_reduce_chain, + "refine": _load_refine_chain, + "map_rerank": _load_map_rerank_chain, + } + if chain_type not in loader_mapping: + raise ValueError( + f"Got unsupported chain type: {chain_type}. " + f"Should be one of {loader_mapping.keys()}" + ) + return loader_mapping[chain_type]( + llm, verbose=verbose, callback_manager=callback_manager, **kwargs + ) diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/map_reduce_prompt.py b/venv/Lib/site-packages/langchain/chains/question_answering/map_reduce_prompt.py new file mode 100644 index 00000000..defaa8fe --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/question_answering/map_reduce_prompt.py @@ -0,0 +1,80 @@ +# flake8: noqa +from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model +from langchain_core.prompts.chat import ( + ChatPromptTemplate, + HumanMessagePromptTemplate, + SystemMessagePromptTemplate, +) +from langchain_core.prompts.prompt import PromptTemplate + +question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question. +Return any relevant text verbatim. +{context} +Question: {question} +Relevant text, if any:""" +QUESTION_PROMPT = PromptTemplate( + template=question_prompt_template, input_variables=["context", "question"] +) +system_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question. +Return any relevant text verbatim. 
+______________________ +{context}""" +messages = [ + SystemMessagePromptTemplate.from_template(system_template), + HumanMessagePromptTemplate.from_template("{question}"), +] +CHAT_QUESTION_PROMPT = ChatPromptTemplate.from_messages(messages) + + +QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector( + default_prompt=QUESTION_PROMPT, conditionals=[(is_chat_model, CHAT_QUESTION_PROMPT)] +) + +combine_prompt_template = """Given the following extracted parts of a long document and a question, create a final answer. +If you don't know the answer, just say that you don't know. Don't try to make up an answer. + +QUESTION: Which state/country's law governs the interpretation of the contract? +========= +Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights. + +Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries. + +Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur, +========= +FINAL ANSWER: This Agreement is governed by English law. + +QUESTION: What did the president say about Michael Jackson? +========= +Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. + +Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. 
But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. + +Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay. + +Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation. +========= +FINAL ANSWER: The president did not mention Michael Jackson. + +QUESTION: {question} +========= +{summaries} +========= +FINAL ANSWER:""" +COMBINE_PROMPT = PromptTemplate( + template=combine_prompt_template, input_variables=["summaries", "question"] +) + +system_template = """Given the following extracted parts of a long document and a question, create a final answer. +If you don't know the answer, just say that you don't know. Don't try to make up an answer. 
+______________________ +{summaries}""" +messages = [ + SystemMessagePromptTemplate.from_template(system_template), + HumanMessagePromptTemplate.from_template("{question}"), +] +CHAT_COMBINE_PROMPT = ChatPromptTemplate.from_messages(messages) + + +COMBINE_PROMPT_SELECTOR = ConditionalPromptSelector( + default_prompt=COMBINE_PROMPT, conditionals=[(is_chat_model, CHAT_COMBINE_PROMPT)] +) diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/map_rerank_prompt.py b/venv/Lib/site-packages/langchain/chains/question_answering/map_rerank_prompt.py new file mode 100644 index 00000000..f9547385 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/question_answering/map_rerank_prompt.py @@ -0,0 +1,66 @@ +# flake8: noqa +from langchain.output_parsers.regex import RegexParser +from langchain_core.prompts import PromptTemplate + +output_parser = RegexParser( + regex=r"(.*?)\nScore: (\d*)", + output_keys=["answer", "score"], +) + +prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. + +In addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format: + +Question: [question here] +Helpful Answer: [answer here] +Score: [score between 0 and 100] + +How to determine the score: +- Higher is a better answer +- Better responds fully to the asked question, with sufficient level of detail +- If you do not know the answer based on the context, that should be a score of 0 +- Don't be overconfident! + +Example #1 + +Context: +--------- +Apples are red +--------- +Question: what color are apples? +Helpful Answer: red +Score: 100 + +Example #2 + +Context: +--------- +it was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv +--------- +Question: what type was the car? +Helpful Answer: a sports car or an suv +Score: 60 + +Example #3 + +Context: +--------- +Pears are either red or orange +--------- +Question: what color are apples? +Helpful Answer: This document does not answer the question +Score: 0 + +Begin! + +Context: +--------- +{context} +--------- +Question: {question} +Helpful Answer:""" +PROMPT = PromptTemplate( + template=prompt_template, + input_variables=["context", "question"], + output_parser=output_parser, +) diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/refine_prompts.py b/venv/Lib/site-packages/langchain/chains/question_answering/refine_prompts.py new file mode 100644 index 00000000..303d94ec --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/question_answering/refine_prompts.py @@ -0,0 +1,75 @@ +# flake8: noqa +from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model +from langchain_core.prompts.chat import ( + AIMessagePromptTemplate, + ChatPromptTemplate, + HumanMessagePromptTemplate, + SystemMessagePromptTemplate, +) +from langchain_core.prompts.prompt import PromptTemplate + +DEFAULT_REFINE_PROMPT_TMPL = ( + "The original question is as follows: {question}\n" + "We have provided an existing answer: {existing_answer}\n" + "We have the opportunity to refine the existing answer " + "(only if needed) with some more context below.\n" + "------------\n" + "{context_str}\n" + "------------\n" + "Given the new context, refine the original answer to better " + "answer the question. " + "If the context isn't useful, return the original answer." 
+) +DEFAULT_REFINE_PROMPT = PromptTemplate.from_template(DEFAULT_REFINE_PROMPT_TMPL) + +refine_template = ( + "We have the opportunity to refine the existing answer " + "(only if needed) with some more context below.\n" + "------------\n" + "{context_str}\n" + "------------\n" + "Given the new context, refine the original answer to better " + "answer the question. " + "If the context isn't useful, return the original answer." +) +CHAT_REFINE_PROMPT = ChatPromptTemplate.from_messages( + [ + ("human", "{question}"), + ("ai", "{existing_answer}"), + ("human", refine_template), + ] +) +REFINE_PROMPT_SELECTOR = ConditionalPromptSelector( + default_prompt=DEFAULT_REFINE_PROMPT, + conditionals=[(is_chat_model, CHAT_REFINE_PROMPT)], +) + + +DEFAULT_TEXT_QA_PROMPT_TMPL = ( + "Context information is below. \n" + "------------\n" + "{context_str}\n" + "------------\n" + "Given the context information and not prior knowledge, " + "answer the question: {question}\n" +) +DEFAULT_TEXT_QA_PROMPT = PromptTemplate.from_template(DEFAULT_TEXT_QA_PROMPT_TMPL) + +chat_qa_prompt_template = ( + "Context information is below.\n" + "------------\n" + "{context_str}\n" + "------------\n" + "Given the context information and not prior knowledge, " + "answer any questions" +) +CHAT_QUESTION_PROMPT = ChatPromptTemplate.from_messages( + [ + ("system", chat_qa_prompt_template), + ("human", "{question}"), + ] +) +QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector( + default_prompt=DEFAULT_TEXT_QA_PROMPT, + conditionals=[(is_chat_model, CHAT_QUESTION_PROMPT)], +) diff --git a/venv/Lib/site-packages/langchain/chains/question_answering/stuff_prompt.py b/venv/Lib/site-packages/langchain/chains/question_answering/stuff_prompt.py new file mode 100644 index 00000000..ee006433 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/question_answering/stuff_prompt.py @@ -0,0 +1,33 @@ +# flake8: noqa +from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model +from langchain_core.prompts import PromptTemplate +from langchain_core.prompts.chat import ( + ChatPromptTemplate, + HumanMessagePromptTemplate, + SystemMessagePromptTemplate, +) + +prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. + +{context} + +Question: {question} +Helpful Answer:""" +PROMPT = PromptTemplate( + template=prompt_template, input_variables=["context", "question"] +) + +system_template = """Use the following pieces of context to answer the user's question. +If you don't know the answer, just say that you don't know, don't try to make up an answer. 
+----------------
+{context}"""
+messages = [
+    SystemMessagePromptTemplate.from_template(system_template),
+    HumanMessagePromptTemplate.from_template("{question}"),
+]
+CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)
+
+
+PROMPT_SELECTOR = ConditionalPromptSelector(
+    default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
+)
diff --git a/venv/Lib/site-packages/langchain/chains/retrieval.py b/venv/Lib/site-packages/langchain/chains/retrieval.py
new file mode 100644
index 00000000..b27036ac
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chains/retrieval.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+from typing import Any, Union
+
+from langchain_core.retrievers import (
+    BaseRetriever,
+    RetrieverOutput,
+)
+from langchain_core.runnables import Runnable, RunnablePassthrough
+
+
+def create_retrieval_chain(
+    retriever: Union[BaseRetriever, Runnable[dict, RetrieverOutput]],
+    combine_docs_chain: Runnable[dict[str, Any], str],
+) -> Runnable:
+    """Create retrieval chain that retrieves documents and then passes them on.
+
+    Args:
+        retriever: Retriever-like object that returns list of documents. Should
+            either be a subclass of BaseRetriever or a Runnable that returns
+            a list of documents. If a subclass of BaseRetriever, then it
+            is expected that an `input` key be passed in - this is what
+            will be used to pass into the retriever. If this is NOT a
+            subclass of BaseRetriever, then all the inputs will be passed
+            into this runnable, meaning that runnable should take a dictionary
+            as input.
+        combine_docs_chain: Runnable that takes inputs and produces a string output.
+            The inputs to this will be any original inputs to this chain, a new
+            context key with the retrieved documents, and chat_history (if not present
+            in the inputs) with a value of `[]` (to easily enable conversational
+            retrieval).
+
+    Returns:
+        An LCEL Runnable. The Runnable return is a dictionary containing at the very
+        least a `context` and `answer` key.
+
+    Example:
+        .. code-block:: python
+
+            # pip install -U langchain langchain-community
+
+            from langchain_community.chat_models import ChatOpenAI
+            from langchain.chains.combine_documents import create_stuff_documents_chain
+            from langchain.chains import create_retrieval_chain
+            from langchain import hub
+
+            retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
+            llm = ChatOpenAI()
+            retriever = ...
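+            # For example (illustrative, assuming some existing `vectorstore`):
+            # retriever = vectorstore.as_retriever()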
+ combine_docs_chain = create_stuff_documents_chain( + llm, retrieval_qa_chat_prompt + ) + retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain) + + retrieval_chain.invoke({"input": "..."}) + + """ + if not isinstance(retriever, BaseRetriever): + retrieval_docs: Runnable[dict, RetrieverOutput] = retriever + else: + retrieval_docs = (lambda x: x["input"]) | retriever + + retrieval_chain = ( + RunnablePassthrough.assign( + context=retrieval_docs.with_config(run_name="retrieve_documents"), + ).assign(answer=combine_docs_chain) + ).with_config(run_name="retrieval_chain") + + return retrieval_chain diff --git a/venv/Lib/site-packages/langchain/chains/retrieval_qa/__init__.py b/venv/Lib/site-packages/langchain/chains/retrieval_qa/__init__.py new file mode 100644 index 00000000..b8e4d9aa --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/retrieval_qa/__init__.py @@ -0,0 +1 @@ +"""Chain for question-answering against a vector database.""" diff --git a/venv/Lib/site-packages/langchain/chains/retrieval_qa/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/retrieval_qa/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..033866ca Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/retrieval_qa/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/retrieval_qa/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/retrieval_qa/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..48ce92cc Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/retrieval_qa/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/retrieval_qa/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/retrieval_qa/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..fe513a39 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/retrieval_qa/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/retrieval_qa/base.py b/venv/Lib/site-packages/langchain/chains/retrieval_qa/base.py new file mode 100644 index 00000000..0cf59cd1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/retrieval_qa/base.py @@ -0,0 +1,363 @@ +"""Chain for question-answering against a vector database.""" + +from __future__ import annotations + +import inspect +import warnings +from abc import abstractmethod +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, + Callbacks, +) +from langchain_core.documents import Document +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import PromptTemplate +from langchain_core.retrievers import BaseRetriever +from langchain_core.vectorstores import VectorStore +from pydantic import ConfigDict, Field, model_validator + +from langchain.chains.base import Chain +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain +from langchain.chains.combine_documents.stuff import StuffDocumentsChain +from langchain.chains.llm import LLMChain +from langchain.chains.question_answering import load_qa_chain +from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "This class is deprecated. 
Use the `create_retrieval_chain` constructor " + "instead. See migration guide here: " + "https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/" + ), +) +class BaseRetrievalQA(Chain): + """Base class for question-answering chains.""" + + combine_documents_chain: BaseCombineDocumentsChain + """Chain to use to combine the documents.""" + input_key: str = "query" #: :meta private: + output_key: str = "result" #: :meta private: + return_source_documents: bool = False + """Return the source documents or not.""" + + model_config = ConfigDict( + populate_by_name=True, + arbitrary_types_allowed=True, + extra="forbid", + ) + + @property + def input_keys(self) -> list[str]: + """Input keys. + + :meta private: + """ + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + """Output keys. + + :meta private: + """ + _output_keys = [self.output_key] + if self.return_source_documents: + _output_keys = _output_keys + ["source_documents"] + return _output_keys + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + prompt: Optional[PromptTemplate] = None, + callbacks: Callbacks = None, + llm_chain_kwargs: Optional[dict] = None, + **kwargs: Any, + ) -> BaseRetrievalQA: + """Initialize from LLM.""" + _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm) + llm_chain = LLMChain( + llm=llm, prompt=_prompt, callbacks=callbacks, **(llm_chain_kwargs or {}) + ) + document_prompt = PromptTemplate( + input_variables=["page_content"], template="Context:\n{page_content}" + ) + combine_documents_chain = StuffDocumentsChain( + llm_chain=llm_chain, + document_variable_name="context", + document_prompt=document_prompt, + callbacks=callbacks, + ) + + return cls( + combine_documents_chain=combine_documents_chain, + callbacks=callbacks, + **kwargs, + ) + + @classmethod + def from_chain_type( + cls, + llm: BaseLanguageModel, + chain_type: str = "stuff", + chain_type_kwargs: Optional[dict] = None, + **kwargs: Any, + ) -> BaseRetrievalQA: + """Load chain from chain type.""" + _chain_type_kwargs = chain_type_kwargs or {} + combine_documents_chain = load_qa_chain( + llm, chain_type=chain_type, **_chain_type_kwargs + ) + return cls(combine_documents_chain=combine_documents_chain, **kwargs) + + @abstractmethod + def _get_docs( + self, + question: str, + *, + run_manager: CallbackManagerForChainRun, + ) -> list[Document]: + """Get documents to do question answering over.""" + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Run get_relevant_text and llm on input query. + + If chain has 'return_source_documents' as 'True', returns + the retrieved documents as well under the key 'source_documents'. + + Example: + .. 
code-block:: python + + res = indexqa({'query': 'This is my query'}) + answer, docs = res['result'], res['source_documents'] + """ + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + question = inputs[self.input_key] + accepts_run_manager = ( + "run_manager" in inspect.signature(self._get_docs).parameters + ) + if accepts_run_manager: + docs = self._get_docs(question, run_manager=_run_manager) + else: + docs = self._get_docs(question) # type: ignore[call-arg] + answer = self.combine_documents_chain.run( + input_documents=docs, question=question, callbacks=_run_manager.get_child() + ) + + if self.return_source_documents: + return {self.output_key: answer, "source_documents": docs} + else: + return {self.output_key: answer} + + @abstractmethod + async def _aget_docs( + self, + question: str, + *, + run_manager: AsyncCallbackManagerForChainRun, + ) -> list[Document]: + """Get documents to do question answering over.""" + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Run get_relevant_text and llm on input query. + + If chain has 'return_source_documents' as 'True', returns + the retrieved documents as well under the key 'source_documents'. + + Example: + .. code-block:: python + + res = indexqa({'query': 'This is my query'}) + answer, docs = res['result'], res['source_documents'] + """ + _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + question = inputs[self.input_key] + accepts_run_manager = ( + "run_manager" in inspect.signature(self._aget_docs).parameters + ) + if accepts_run_manager: + docs = await self._aget_docs(question, run_manager=_run_manager) + else: + docs = await self._aget_docs(question) # type: ignore[call-arg] + answer = await self.combine_documents_chain.arun( + input_documents=docs, question=question, callbacks=_run_manager.get_child() + ) + + if self.return_source_documents: + return {self.output_key: answer, "source_documents": docs} + else: + return {self.output_key: answer} + + +@deprecated( + since="0.1.17", + removal="1.0", + message=( + "This class is deprecated. Use the `create_retrieval_chain` constructor " + "instead. See migration guide here: " + "https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/" + ), +) +class RetrievalQA(BaseRetrievalQA): + """Chain for question-answering against an index. + + This class is deprecated. See below for an example implementation using + `create_retrieval_chain`: + + .. code-block:: python + + from langchain.chains import create_retrieval_chain + from langchain.chains.combine_documents import create_stuff_documents_chain + from langchain_core.prompts import ChatPromptTemplate + from langchain_openai import ChatOpenAI + + + retriever = ... # Your retriever + llm = ChatOpenAI() + + system_prompt = ( + "Use the given context to answer the question. " + "If you don't know the answer, say you don't know. " + "Use three sentence maximum and keep the answer concise. " + "Context: {context}" + ) + prompt = ChatPromptTemplate.from_messages( + [ + ("system", system_prompt), + ("human", "{input}"), + ] + ) + question_answer_chain = create_stuff_documents_chain(llm, prompt) + chain = create_retrieval_chain(retriever, question_answer_chain) + + chain.invoke({"input": query}) + + Example: + .. 
code-block:: python + + from langchain_community.llms import OpenAI + from langchain.chains import RetrievalQA + from langchain_community.vectorstores import FAISS + from langchain_core.vectorstores import VectorStoreRetriever + retriever = VectorStoreRetriever(vectorstore=FAISS(...)) + retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever) + + """ + + retriever: BaseRetriever = Field(exclude=True) + + def _get_docs( + self, + question: str, + *, + run_manager: CallbackManagerForChainRun, + ) -> list[Document]: + """Get docs.""" + return self.retriever.invoke( + question, config={"callbacks": run_manager.get_child()} + ) + + async def _aget_docs( + self, + question: str, + *, + run_manager: AsyncCallbackManagerForChainRun, + ) -> list[Document]: + """Get docs.""" + return await self.retriever.ainvoke( + question, config={"callbacks": run_manager.get_child()} + ) + + @property + def _chain_type(self) -> str: + """Return the chain type.""" + return "retrieval_qa" + + +@deprecated( + since="0.2.13", + removal="1.0", + message=( + "This class is deprecated. Use the `create_retrieval_chain` constructor " + "instead. See migration guide here: " + "https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/" + ), +) +class VectorDBQA(BaseRetrievalQA): + """Chain for question-answering against a vector database.""" + + vectorstore: VectorStore = Field(exclude=True, alias="vectorstore") + """Vector Database to connect to.""" + k: int = 4 + """Number of documents to query for.""" + search_type: str = "similarity" + """Search type to use over vectorstore. `similarity` or `mmr`.""" + search_kwargs: dict[str, Any] = Field(default_factory=dict) + """Extra search args.""" + + @model_validator(mode="before") + @classmethod + def raise_deprecation(cls, values: dict) -> Any: + warnings.warn( + "`VectorDBQA` is deprecated - " + "please use `from langchain.chains import RetrievalQA`" + ) + return values + + @model_validator(mode="before") + @classmethod + def validate_search_type(cls, values: dict) -> Any: + """Validate search type.""" + if "search_type" in values: + search_type = values["search_type"] + if search_type not in ("similarity", "mmr"): + raise ValueError(f"search_type of {search_type} not allowed.") + return values + + def _get_docs( + self, + question: str, + *, + run_manager: CallbackManagerForChainRun, + ) -> list[Document]: + """Get docs.""" + if self.search_type == "similarity": + docs = self.vectorstore.similarity_search( + question, k=self.k, **self.search_kwargs + ) + elif self.search_type == "mmr": + docs = self.vectorstore.max_marginal_relevance_search( + question, k=self.k, **self.search_kwargs + ) + else: + raise ValueError(f"search_type of {self.search_type} not allowed.") + return docs + + async def _aget_docs( + self, + question: str, + *, + run_manager: AsyncCallbackManagerForChainRun, + ) -> list[Document]: + """Get docs.""" + raise NotImplementedError("VectorDBQA does not support async") + + @property + def _chain_type(self) -> str: + """Return the chain type.""" + return "vector_db_qa" diff --git a/venv/Lib/site-packages/langchain/chains/retrieval_qa/prompt.py b/venv/Lib/site-packages/langchain/chains/retrieval_qa/prompt.py new file mode 100644 index 00000000..963c3531 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/retrieval_qa/prompt.py @@ -0,0 +1,12 @@ +# flake8: noqa +from langchain_core.prompts import PromptTemplate + +prompt_template = """Use the following pieces of context to answer the question at the end. 
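For reference, a minimal sketch of driving the `RetrievalQA.from_chain_type` constructor above with `return_source_documents=True`, so that both keys listed in `output_keys` come back; the FAISS index, embeddings, and OpenAI credentials are assumptions, not part of this diff:

```python
# Sketch only: assumes langchain-openai and faiss-cpu are installed.
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["LangChain chains answer questions over retrieved context."],
    OpenAIEmbeddings(),
)
qa = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(temperature=0),
    chain_type="stuff",                # combine-documents chain built by load_qa_chain
    retriever=vectorstore.as_retriever(),
    return_source_documents=True,      # adds "source_documents" to output_keys
)
res = qa({"query": "What do LangChain chains answer questions over?"})
print(res["result"], res["source_documents"])
```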
If you don't know the answer, just say that you don't know, don't try to make up an answer. + +{context} + +Question: {question} +Helpful Answer:""" +PROMPT = PromptTemplate( + template=prompt_template, input_variables=["context", "question"] +) diff --git a/venv/Lib/site-packages/langchain/chains/router/__init__.py b/venv/Lib/site-packages/langchain/chains/router/__init__.py new file mode 100644 index 00000000..5ccd5eac --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/router/__init__.py @@ -0,0 +1,12 @@ +from langchain.chains.router.base import MultiRouteChain, RouterChain +from langchain.chains.router.llm_router import LLMRouterChain +from langchain.chains.router.multi_prompt import MultiPromptChain +from langchain.chains.router.multi_retrieval_qa import MultiRetrievalQAChain + +__all__ = [ + "RouterChain", + "MultiRouteChain", + "MultiPromptChain", + "MultiRetrievalQAChain", + "LLMRouterChain", +] diff --git a/venv/Lib/site-packages/langchain/chains/router/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/router/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d572e00b Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/router/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/router/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/router/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..d1bccf0e Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/router/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/router/__pycache__/embedding_router.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/router/__pycache__/embedding_router.cpython-312.pyc new file mode 100644 index 00000000..bce37252 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/router/__pycache__/embedding_router.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/router/__pycache__/llm_router.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/router/__pycache__/llm_router.cpython-312.pyc new file mode 100644 index 00000000..ac0f4a25 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/router/__pycache__/llm_router.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_prompt.cpython-312.pyc new file mode 100644 index 00000000..93f435e9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_prompt_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_prompt_prompt.cpython-312.pyc new file mode 100644 index 00000000..ebe50f33 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_prompt_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_retrieval_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_retrieval_prompt.cpython-312.pyc new file mode 100644 index 00000000..caa253f7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_retrieval_prompt.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_retrieval_qa.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_retrieval_qa.cpython-312.pyc new file mode 100644 index 00000000..f0b0f036 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/router/__pycache__/multi_retrieval_qa.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/router/base.py b/venv/Lib/site-packages/langchain/chains/router/base.py new file mode 100644 index 00000000..ba58620e --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/router/base.py @@ -0,0 +1,138 @@ +"""Base classes for chain routing.""" + +from __future__ import annotations + +from abc import ABC +from collections.abc import Mapping +from typing import Any, NamedTuple, Optional + +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, + Callbacks, +) +from pydantic import ConfigDict + +from langchain.chains.base import Chain + + +class Route(NamedTuple): + destination: Optional[str] + next_inputs: dict[str, Any] + + +class RouterChain(Chain, ABC): + """Chain that outputs the name of a destination chain and the inputs to it.""" + + @property + def output_keys(self) -> list[str]: + return ["destination", "next_inputs"] + + def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route: + """ + Route inputs to a destination chain. + + Args: + inputs: inputs to the chain + callbacks: callbacks to use for the chain + + Returns: + a Route object + """ + result = self(inputs, callbacks=callbacks) + return Route(result["destination"], result["next_inputs"]) + + async def aroute( + self, inputs: dict[str, Any], callbacks: Callbacks = None + ) -> Route: + result = await self.acall(inputs, callbacks=callbacks) + return Route(result["destination"], result["next_inputs"]) + + +class MultiRouteChain(Chain): + """Use a single chain to route an input to one of multiple candidate chains.""" + + router_chain: RouterChain + """Chain that routes inputs to destination chains.""" + destination_chains: Mapping[str, Chain] + """Chains that return final answer to inputs.""" + default_chain: Chain + """Default chain to use when none of the destination chains are suitable.""" + silent_errors: bool = False + """If True, use default_chain when an invalid destination name is provided. + Defaults to False.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @property + def input_keys(self) -> list[str]: + """Will be whatever keys the router chain prompt expects. + + :meta private: + """ + return self.router_chain.input_keys + + @property + def output_keys(self) -> list[str]: + """Will always return text key. 
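To make the `RouterChain` contract above concrete, here is a hypothetical keyword-based subclass; all it must do is emit the two `output_keys` (`destination`, `next_inputs`) that `route` packs into a `Route`, with `destination=None` signalling a fallback to `default_chain`:

```python
# Illustrative sketch of the RouterChain contract; "animal" is a hypothetical
# destination name, not one defined in this diff.
from typing import Any, Optional

from langchain_core.callbacks import CallbackManagerForChainRun

from langchain.chains.router.base import RouterChain


class KeywordRouterChain(RouterChain):
    @property
    def input_keys(self) -> list[str]:
        return ["query"]

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> dict[str, Any]:
        # Route to "animal" on a keyword hit; None routes to default_chain.
        destination = "animal" if "cat" in inputs["query"].lower() else None
        return {"destination": destination, "next_inputs": inputs}


route = KeywordRouterChain().route({"query": "What do cats eat?"})
print(route.destination, route.next_inputs)  # animal {'query': 'What do cats eat?'}
```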
+ + :meta private: + """ + return [] + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + callbacks = _run_manager.get_child() + route = self.router_chain.route(inputs, callbacks=callbacks) + + _run_manager.on_text( + str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose + ) + if not route.destination: + return self.default_chain(route.next_inputs, callbacks=callbacks) + elif route.destination in self.destination_chains: + return self.destination_chains[route.destination]( + route.next_inputs, callbacks=callbacks + ) + elif self.silent_errors: + return self.default_chain(route.next_inputs, callbacks=callbacks) + else: + raise ValueError( + f"Received invalid destination chain name '{route.destination}'" + ) + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + callbacks = _run_manager.get_child() + route = await self.router_chain.aroute(inputs, callbacks=callbacks) + + await _run_manager.on_text( + str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose + ) + if not route.destination: + return await self.default_chain.acall( + route.next_inputs, callbacks=callbacks + ) + elif route.destination in self.destination_chains: + return await self.destination_chains[route.destination].acall( + route.next_inputs, callbacks=callbacks + ) + elif self.silent_errors: + return await self.default_chain.acall( + route.next_inputs, callbacks=callbacks + ) + else: + raise ValueError( + f"Received invalid destination chain name '{route.destination}'" + ) diff --git a/venv/Lib/site-packages/langchain/chains/router/embedding_router.py b/venv/Lib/site-packages/langchain/chains/router/embedding_router.py new file mode 100644 index 00000000..aaa4af6b --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/router/embedding_router.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any, Optional + +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, +) +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings +from langchain_core.vectorstores import VectorStore +from pydantic import ConfigDict + +from langchain.chains.router.base import RouterChain + + +class EmbeddingRouterChain(RouterChain): + """Chain that uses embeddings to route between options.""" + + vectorstore: VectorStore + routing_keys: list[str] = ["query"] + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @property + def input_keys(self) -> list[str]: + """Will be whatever keys the LLM chain prompt expects. 
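A hedged sketch of wiring this chain up via the `from_names_and_descriptions` convenience constructor defined just below; the destination names, FAISS store, and OpenAI embeddings are assumptions:

```python
# Sketch: assumes langchain-openai and faiss-cpu are installed.
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

from langchain.chains.router.embedding_router import EmbeddingRouterChain

router = EmbeddingRouterChain.from_names_and_descriptions(
    [
        ("physics", ["for questions about physics"]),
        ("history", ["for questions about history"]),
    ],
    FAISS,
    OpenAIEmbeddings(),
)
# The single nearest description (k=1) supplies the destination name.
print(router.route({"query": "Why do apples fall?"}).destination)
```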
+ + :meta private: + """ + return self.routing_keys + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _input = ", ".join([inputs[k] for k in self.routing_keys]) + results = self.vectorstore.similarity_search(_input, k=1) + return {"next_inputs": inputs, "destination": results[0].metadata["name"]} + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _input = ", ".join([inputs[k] for k in self.routing_keys]) + results = await self.vectorstore.asimilarity_search(_input, k=1) + return {"next_inputs": inputs, "destination": results[0].metadata["name"]} + + @classmethod + def from_names_and_descriptions( + cls, + names_and_descriptions: Sequence[tuple[str, Sequence[str]]], + vectorstore_cls: type[VectorStore], + embeddings: Embeddings, + **kwargs: Any, + ) -> EmbeddingRouterChain: + """Convenience constructor.""" + documents = [] + for name, descriptions in names_and_descriptions: + for description in descriptions: + documents.append( + Document(page_content=description, metadata={"name": name}) + ) + vectorstore = vectorstore_cls.from_documents(documents, embeddings) + return cls(vectorstore=vectorstore, **kwargs) + + @classmethod + async def afrom_names_and_descriptions( + cls, + names_and_descriptions: Sequence[tuple[str, Sequence[str]]], + vectorstore_cls: type[VectorStore], + embeddings: Embeddings, + **kwargs: Any, + ) -> EmbeddingRouterChain: + """Convenience constructor.""" + documents = [] + for name, descriptions in names_and_descriptions: + for description in descriptions: + documents.append( + Document(page_content=description, metadata={"name": name}) + ) + vectorstore = await vectorstore_cls.afrom_documents(documents, embeddings) + return cls(vectorstore=vectorstore, **kwargs) diff --git a/venv/Lib/site-packages/langchain/chains/router/llm_router.py b/venv/Lib/site-packages/langchain/chains/router/llm_router.py new file mode 100644 index 00000000..759e6f93 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/router/llm_router.py @@ -0,0 +1,195 @@ +"""Base classes for LLM-powered router chains.""" + +from __future__ import annotations + +from typing import Any, Optional, cast + +from langchain_core._api import deprecated +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, +) +from langchain_core.exceptions import OutputParserException +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.prompts import BasePromptTemplate +from langchain_core.utils.json import parse_and_check_json_markdown +from pydantic import model_validator +from typing_extensions import Self + +from langchain.chains import LLMChain +from langchain.chains.router.base import RouterChain + + +@deprecated( + since="0.2.12", + removal="1.0", + message=( + "Use RunnableLambda to select from multiple prompt templates. See example " + "in API reference: " + "https://api.python.langchain.com/en/latest/chains/langchain.chains.router.llm_router.LLMRouterChain.html" # noqa: E501 + ), +) +class LLMRouterChain(RouterChain): + """A router chain that uses an LLM chain to perform routing. + + This class is deprecated. See below for a replacement, which offers several + benefits, including streaming and batch support. + + Below is an example implementation: + + .. 
code-block:: python + + from operator import itemgetter + from typing import Literal + from typing_extensions import TypedDict + + from langchain_core.output_parsers import StrOutputParser + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.runnables import RunnableLambda, RunnablePassthrough + from langchain_openai import ChatOpenAI + + llm = ChatOpenAI(model="gpt-4o-mini") + + prompt_1 = ChatPromptTemplate.from_messages( + [ + ("system", "You are an expert on animals."), + ("human", "{query}"), + ] + ) + prompt_2 = ChatPromptTemplate.from_messages( + [ + ("system", "You are an expert on vegetables."), + ("human", "{query}"), + ] + ) + + chain_1 = prompt_1 | llm | StrOutputParser() + chain_2 = prompt_2 | llm | StrOutputParser() + + route_system = "Route the user's query to either the animal or vegetable expert." + route_prompt = ChatPromptTemplate.from_messages( + [ + ("system", route_system), + ("human", "{query}"), + ] + ) + + + class RouteQuery(TypedDict): + \"\"\"Route query to destination.\"\"\" + destination: Literal["animal", "vegetable"] + + + route_chain = ( + route_prompt + | llm.with_structured_output(RouteQuery) + | itemgetter("destination") + ) + + chain = { + "destination": route_chain, # "animal" or "vegetable" + "query": lambda x: x["query"], # pass through input query + } | RunnableLambda( + # if animal, chain_1. otherwise, chain_2. + lambda x: chain_1 if x["destination"] == "animal" else chain_2, + ) + + chain.invoke({"query": "what color are carrots"}) + """ # noqa: E501 + + llm_chain: LLMChain + """LLM chain used to perform routing""" + + @model_validator(mode="after") + def validate_prompt(self) -> Self: + prompt = self.llm_chain.prompt + if prompt.output_parser is None: + raise ValueError( + "LLMRouterChain requires base llm_chain prompt to have an output" + " parser that converts LLM text output to a dictionary with keys" + " 'destination' and 'next_inputs'. Received a prompt with no output" + " parser." + ) + return self + + @property + def input_keys(self) -> list[str]: + """Will be whatever keys the LLM chain prompt expects. 
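`validate_prompt` above insists that the router prompt carry an output parser yielding `destination` and `next_inputs`; the `RouterOutputParser` defined later in this module satisfies that. A hypothetical wiring sketch (the stand-in template is illustrative; the shipped router prompts are built from `MULTI_PROMPT_ROUTER_TEMPLATE`):

```python
# Sketch: the template text is a stand-in, not the shipped router prompt.
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser

router_prompt = PromptTemplate(
    template=(
        "Return a markdown JSON snippet with keys 'destination' and "
        "'next_inputs' for this input: {input}"
    ),
    input_variables=["input"],
    output_parser=RouterOutputParser(),  # required by validate_prompt
)
router_chain = LLMRouterChain.from_llm(ChatOpenAI(temperature=0), router_prompt)
print(router_chain.route({"input": "what color are carrots"}))
```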
+ + :meta private: + """ + return self.llm_chain.input_keys + + def _validate_outputs(self, outputs: dict[str, Any]) -> None: + super()._validate_outputs(outputs) + if not isinstance(outputs["next_inputs"], dict): + raise ValueError + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + callbacks = _run_manager.get_child() + + prediction = self.llm_chain.predict(callbacks=callbacks, **inputs) + output = cast( + dict[str, Any], + self.llm_chain.prompt.output_parser.parse(prediction), + ) + return output + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + callbacks = _run_manager.get_child() + output = cast( + dict[str, Any], + await self.llm_chain.apredict_and_parse(callbacks=callbacks, **inputs), + ) + return output + + @classmethod + def from_llm( + cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, **kwargs: Any + ) -> LLMRouterChain: + """Convenience constructor.""" + llm_chain = LLMChain(llm=llm, prompt=prompt) + return cls(llm_chain=llm_chain, **kwargs) + + +class RouterOutputParser(BaseOutputParser[dict[str, str]]): + """Parser for output of router chain in the multi-prompt chain.""" + + default_destination: str = "DEFAULT" + next_inputs_type: type = str + next_inputs_inner_key: str = "input" + + def parse(self, text: str) -> dict[str, Any]: + try: + expected_keys = ["destination", "next_inputs"] + parsed = parse_and_check_json_markdown(text, expected_keys) + if not isinstance(parsed["destination"], str): + raise ValueError("Expected 'destination' to be a string.") + if not isinstance(parsed["next_inputs"], self.next_inputs_type): + raise ValueError( + f"Expected 'next_inputs' to be {self.next_inputs_type}." 
+ ) + parsed["next_inputs"] = {self.next_inputs_inner_key: parsed["next_inputs"]} + if ( + parsed["destination"].strip().lower() + == self.default_destination.lower() + ): + parsed["destination"] = None + else: + parsed["destination"] = parsed["destination"].strip() + return parsed + except Exception as e: + raise OutputParserException( + f"Parsing text\n{text}\n raised following error:\n{e}" + ) diff --git a/venv/Lib/site-packages/langchain/chains/router/multi_prompt.py b/venv/Lib/site-packages/langchain/chains/router/multi_prompt.py new file mode 100644 index 00000000..8b3f398b --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/router/multi_prompt.py @@ -0,0 +1,181 @@ +"""Use a single chain to route an input to one of multiple llm chains.""" + +from __future__ import annotations + +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import PromptTemplate + +from langchain.chains import ConversationChain +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.chains.router.base import MultiRouteChain +from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser +from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE + + +@deprecated( + since="0.2.12", + removal="1.0", + message=( + "Please see migration guide here for recommended implementation: " + "https://python.langchain.com/docs/versions/migrating_chains/multi_prompt_chain/" # noqa: E501 + ), +) +class MultiPromptChain(MultiRouteChain): + """A multi-route chain that uses an LLM router chain to choose amongst prompts. + + This class is deprecated. See below for a replacement, which offers several + benefits, including streaming and batch support. + + Below is an example implementation: + + .. code-block:: python + + from operator import itemgetter + from typing import Literal + + from langchain_core.output_parsers import StrOutputParser + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.runnables import RunnableConfig + from langchain_openai import ChatOpenAI + from langgraph.graph import END, START, StateGraph + from typing_extensions import TypedDict + + llm = ChatOpenAI(model="gpt-4o-mini") + + # Define the prompts we will route to + prompt_1 = ChatPromptTemplate.from_messages( + [ + ("system", "You are an expert on animals."), + ("human", "{input}"), + ] + ) + prompt_2 = ChatPromptTemplate.from_messages( + [ + ("system", "You are an expert on vegetables."), + ("human", "{input}"), + ] + ) + + # Construct the chains we will route to. These format the input query + # into the respective prompt, run it through a chat model, and cast + # the result to a string. + chain_1 = prompt_1 | llm | StrOutputParser() + chain_2 = prompt_2 | llm | StrOutputParser() + + + # Next: define the chain that selects which branch to route to. + # Here we will take advantage of tool-calling features to force + # the output to select one of two desired branches. + route_system = "Route the user's query to either the animal or vegetable expert." 
+ route_prompt = ChatPromptTemplate.from_messages( + [ + ("system", route_system), + ("human", "{input}"), + ] + ) + + + # Define schema for output: + class RouteQuery(TypedDict): + \"\"\"Route query to destination expert.\"\"\" + + destination: Literal["animal", "vegetable"] + + + route_chain = route_prompt | llm.with_structured_output(RouteQuery) + + + # For LangGraph, we will define the state of the graph to hold the query, + # destination, and final answer. + class State(TypedDict): + query: str + destination: RouteQuery + answer: str + + + # We define functions for each node, including routing the query: + async def route_query(state: State, config: RunnableConfig): + destination = await route_chain.ainvoke(state["query"], config) + return {"destination": destination} + + + # And one node for each prompt + async def prompt_1(state: State, config: RunnableConfig): + return {"answer": await chain_1.ainvoke(state["query"], config)} + + + async def prompt_2(state: State, config: RunnableConfig): + return {"answer": await chain_2.ainvoke(state["query"], config)} + + + # We then define logic that selects the prompt based on the classification + def select_node(state: State) -> Literal["prompt_1", "prompt_2"]: + if state["destination"] == "animal": + return "prompt_1" + else: + return "prompt_2" + + + # Finally, assemble the multi-prompt chain. This is a sequence of two steps: + # 1) Select "animal" or "vegetable" via the route_chain, and collect the answer + # alongside the input query. + # 2) Route the input query to chain_1 or chain_2, based on the + # selection. + graph = StateGraph(State) + graph.add_node("route_query", route_query) + graph.add_node("prompt_1", prompt_1) + graph.add_node("prompt_2", prompt_2) + + graph.add_edge(START, "route_query") + graph.add_conditional_edges("route_query", select_node) + graph.add_edge("prompt_1", END) + graph.add_edge("prompt_2", END) + app = graph.compile() + + result = await app.ainvoke({"query": "what color are carrots"}) + print(result["destination"]) + print(result["answer"]) + """ # noqa: E501 + + @property + def output_keys(self) -> list[str]: + return ["text"] + + @classmethod + def from_prompts( + cls, + llm: BaseLanguageModel, + prompt_infos: list[dict[str, str]], + default_chain: Optional[Chain] = None, + **kwargs: Any, + ) -> MultiPromptChain: + """Convenience constructor for instantiating from destination prompts.""" + destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos] + destinations_str = "\n".join(destinations) + router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format( + destinations=destinations_str + ) + router_prompt = PromptTemplate( + template=router_template, + input_variables=["input"], + output_parser=RouterOutputParser(), + ) + router_chain = LLMRouterChain.from_llm(llm, router_prompt) + destination_chains = {} + for p_info in prompt_infos: + name = p_info["name"] + prompt_template = p_info["prompt_template"] + prompt = PromptTemplate(template=prompt_template, input_variables=["input"]) + chain = LLMChain(llm=llm, prompt=prompt) + destination_chains[name] = chain + _default_chain = default_chain or ConversationChain(llm=llm, output_key="text") + return cls( + router_chain=router_chain, + destination_chains=destination_chains, + default_chain=_default_chain, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/chains/router/multi_prompt_prompt.py b/venv/Lib/site-packages/langchain/chains/router/multi_prompt_prompt.py new file mode 100644 index 00000000..b879e718 --- /dev/null +++ 
b/venv/Lib/site-packages/langchain/chains/router/multi_prompt_prompt.py @@ -0,0 +1,32 @@ +"""Prompt for the router chain in the multi-prompt chain.""" + +MULTI_PROMPT_ROUTER_TEMPLATE = """\ +Given a raw text input to a language model select the model prompt best suited for \ +the input. You will be given the names of the available prompts and a description of \ +what the prompt is best suited for. You may also revise the original input if you \ +think that revising it will ultimately lead to a better response from the language \ +model. + +<< FORMATTING >> +Return a markdown code snippet with a JSON object formatted to look like: +```json +{{{{ + "destination": string \\ name of the prompt to use or "DEFAULT" + "next_inputs": string \\ a potentially modified version of the original input +}}}} +``` + +REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR \ +it can be "DEFAULT" if the input is not well suited for any of the candidate prompts. +REMEMBER: "next_inputs" can just be the original input if you don't think any \ +modifications are needed. + +<< CANDIDATE PROMPTS >> +{destinations} + +<< INPUT >> +{{input}} + +<< OUTPUT (must include ```json at the start of the response) >> +<< OUTPUT (must end with ```) >> +""" diff --git a/venv/Lib/site-packages/langchain/chains/router/multi_retrieval_prompt.py b/venv/Lib/site-packages/langchain/chains/router/multi_retrieval_prompt.py new file mode 100644 index 00000000..752b5f72 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/router/multi_retrieval_prompt.py @@ -0,0 +1,30 @@ +"""Prompt for the router chain in the multi-retrieval qa chain.""" + +MULTI_RETRIEVAL_ROUTER_TEMPLATE = """\ +Given a query to a question answering system select the system best suited \ +for the input. You will be given the names of the available systems and a description \ +of what questions the system is best suited for. You may also revise the original \ +input if you think that revising it will ultimately lead to a better response. + +<< FORMATTING >> +Return a markdown code snippet with a JSON object formatted to look like: +```json +{{{{ + "destination": string \\ name of the question answering system to use or "DEFAULT" + "next_inputs": string \\ a potentially modified version of the original input +}}}} +``` + +REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR \ +it can be "DEFAULT" if the input is not well suited for any of the candidate prompts. +REMEMBER: "next_inputs" can just be the original input if you don't think any \ +modifications are needed. 
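The `prompt_infos` consumed by `MultiPromptChain.from_prompts` (defined above) are plain dicts whose `name` and `description` are interpolated into the `{destinations}` slot of the router template; a usage sketch with illustrative names and templates:

```python
# Sketch: names, descriptions, and prompt templates are illustrative only.
from langchain_openai import OpenAI

from langchain.chains.router.multi_prompt import MultiPromptChain

prompt_infos = [
    {
        "name": "animals",
        "description": "Good for questions about animals",
        "prompt_template": "You are an expert on animals.\n\n{input}",
    },
    {
        "name": "vegetables",
        "description": "Good for questions about vegetables",
        "prompt_template": "You are an expert on vegetables.\n\n{input}",
    },
]
chain = MultiPromptChain.from_prompts(OpenAI(temperature=0), prompt_infos)
print(chain.run("what color are carrots"))
```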
+ +<< CANDIDATE PROMPTS >> +{destinations} + +<< INPUT >> +{{input}} + +<< OUTPUT >> +""" diff --git a/venv/Lib/site-packages/langchain/chains/router/multi_retrieval_qa.py b/venv/Lib/site-packages/langchain/chains/router/multi_retrieval_qa.py new file mode 100644 index 00000000..fe900d86 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/router/multi_retrieval_qa.py @@ -0,0 +1,104 @@ +"""Use a single chain to route an input to one of multiple retrieval qa chains.""" + +from __future__ import annotations + +from collections.abc import Mapping +from typing import Any, Optional + +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import PromptTemplate +from langchain_core.retrievers import BaseRetriever + +from langchain.chains import ConversationChain +from langchain.chains.base import Chain +from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE +from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA +from langchain.chains.router.base import MultiRouteChain +from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser +from langchain.chains.router.multi_retrieval_prompt import ( + MULTI_RETRIEVAL_ROUTER_TEMPLATE, +) + + +class MultiRetrievalQAChain(MultiRouteChain): + """A multi-route chain that uses an LLM router chain to choose amongst retrieval + qa chains.""" + + router_chain: LLMRouterChain + """Chain for deciding a destination chain and the input to it.""" + destination_chains: Mapping[str, BaseRetrievalQA] + """Map of name to candidate chains that inputs can be routed to.""" + default_chain: Chain + """Default chain to use when router doesn't map input to one of the destinations.""" + + @property + def output_keys(self) -> list[str]: + return ["result"] + + @classmethod + def from_retrievers( + cls, + llm: BaseLanguageModel, + retriever_infos: list[dict[str, Any]], + default_retriever: Optional[BaseRetriever] = None, + default_prompt: Optional[PromptTemplate] = None, + default_chain: Optional[Chain] = None, + *, + default_chain_llm: Optional[BaseLanguageModel] = None, + **kwargs: Any, + ) -> MultiRetrievalQAChain: + if default_prompt and not default_retriever: + raise ValueError( + "`default_retriever` must be specified if `default_prompt` is " + "provided. Received only `default_prompt`." + ) + destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos] + destinations_str = "\n".join(destinations) + router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format( + destinations=destinations_str + ) + router_prompt = PromptTemplate( + template=router_template, + input_variables=["input"], + output_parser=RouterOutputParser(next_inputs_inner_key="query"), + ) + router_chain = LLMRouterChain.from_llm(llm, router_prompt) + destination_chains = {} + for r_info in retriever_infos: + prompt = r_info.get("prompt") + retriever = r_info["retriever"] + chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever) + name = r_info["name"] + destination_chains[name] = chain + if default_chain: + _default_chain = default_chain + elif default_retriever: + _default_chain = RetrievalQA.from_llm( + llm, prompt=default_prompt, retriever=default_retriever + ) + else: + prompt_template = DEFAULT_TEMPLATE.replace("input", "query") + prompt = PromptTemplate( + template=prompt_template, input_variables=["history", "query"] + ) + if default_chain_llm is None: + raise NotImplementedError( + "conversation_llm must be provided if default_chain is not " + "specified. 
This API has been changed to avoid instantiating " + "default LLMs on behalf of users." + "You can provide a conversation LLM like so:\n" + "from langchain_openai import ChatOpenAI\n" + "llm = ChatOpenAI()" + ) + _default_chain = ConversationChain( + llm=default_chain_llm, + prompt=prompt, + input_key="query", + output_key="result", + ) + return cls( + router_chain=router_chain, + destination_chains=destination_chains, + default_chain=_default_chain, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/chains/sequential.py b/venv/Lib/site-packages/langchain/chains/sequential.py new file mode 100644 index 00000000..bc6c0ed1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/sequential.py @@ -0,0 +1,208 @@ +"""Chain pipeline where the outputs of one step feed directly into next.""" + +from typing import Any, Optional + +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, +) +from langchain_core.utils.input import get_color_mapping +from pydantic import ConfigDict, model_validator +from typing_extensions import Self + +from langchain.chains.base import Chain + + +class SequentialChain(Chain): + """Chain where the outputs of one chain feed directly into next.""" + + chains: list[Chain] + input_variables: list[str] + output_variables: list[str] #: :meta private: + return_all: bool = False + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @property + def input_keys(self) -> list[str]: + """Return expected input keys to the chain. + + :meta private: + """ + return self.input_variables + + @property + def output_keys(self) -> list[str]: + """Return output key. + + :meta private: + """ + return self.output_variables + + @model_validator(mode="before") + @classmethod + def validate_chains(cls, values: dict) -> Any: + """Validate that the correct inputs exist for all chains.""" + chains = values["chains"] + input_variables = values["input_variables"] + memory_keys = list() + if "memory" in values and values["memory"] is not None: + """Validate that prompt input variables are consistent.""" + memory_keys = values["memory"].memory_variables + if set(input_variables).intersection(set(memory_keys)): + overlapping_keys = set(input_variables) & set(memory_keys) + raise ValueError( + f"The input key(s) {''.join(overlapping_keys)} are found " + f"in the Memory keys ({memory_keys}) - please use input and " + f"memory keys that don't overlap." + ) + + known_variables = set(input_variables + memory_keys) + + for chain in chains: + missing_vars = set(chain.input_keys).difference(known_variables) + if chain.memory: + missing_vars = missing_vars.difference(chain.memory.memory_variables) + + if missing_vars: + raise ValueError( + f"Missing required input keys: {missing_vars}, " + f"only had {known_variables}" + ) + overlapping_keys = known_variables.intersection(chain.output_keys) + if overlapping_keys: + raise ValueError( + f"Chain returned keys that already exist: {overlapping_keys}" + ) + + known_variables |= set(chain.output_keys) + + if "output_variables" not in values: + if values.get("return_all", False): + output_keys = known_variables.difference(input_variables) + else: + output_keys = chains[-1].output_keys + values["output_variables"] = output_keys + else: + missing_vars = set(values["output_variables"]).difference(known_variables) + if missing_vars: + raise ValueError( + f"Expected output variables that were not found: {missing_vars}." 
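A sketch of `from_retrievers` above, passing `default_chain_llm` explicitly as the `NotImplementedError` requires when no default chain or retriever is supplied; the two FAISS stores are stand-ins:

```python
# Sketch: assumes langchain-openai and faiss-cpu are installed.
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

from langchain.chains.router.multi_retrieval_qa import MultiRetrievalQAChain

embeddings = OpenAIEmbeddings()
physics = FAISS.from_texts(["Gravity pulls apples toward Earth."], embeddings)
history = FAISS.from_texts(["The printing press appeared around 1440."], embeddings)

llm = ChatOpenAI(temperature=0)
chain = MultiRetrievalQAChain.from_retrievers(
    llm,
    [
        {"name": "physics", "description": "Good for physics questions",
         "retriever": physics.as_retriever()},
        {"name": "history", "description": "Good for history questions",
         "retriever": history.as_retriever()},
    ],
    default_chain_llm=llm,  # required when no default chain/retriever is given
)
print(chain.run("Why do apples fall?"))
```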
+ ) + + return values + + def _call( + self, + inputs: dict[str, str], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + known_values = inputs.copy() + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + for i, chain in enumerate(self.chains): + callbacks = _run_manager.get_child() + outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks) + known_values.update(outputs) + return {k: known_values[k] for k in self.output_variables} + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + known_values = inputs.copy() + _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + callbacks = _run_manager.get_child() + for i, chain in enumerate(self.chains): + outputs = await chain.acall( + known_values, return_only_outputs=True, callbacks=callbacks + ) + known_values.update(outputs) + return {k: known_values[k] for k in self.output_variables} + + +class SimpleSequentialChain(Chain): + """Simple chain where the outputs of one step feed directly into next.""" + + chains: list[Chain] + strip_outputs: bool = False + input_key: str = "input" #: :meta private: + output_key: str = "output" #: :meta private: + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @property + def input_keys(self) -> list[str]: + """Expect input key. + + :meta private: + """ + return [self.input_key] + + @property + def output_keys(self) -> list[str]: + """Return output key. + + :meta private: + """ + return [self.output_key] + + @model_validator(mode="after") + def validate_chains(self) -> Self: + """Validate that chains are all single input/output.""" + for chain in self.chains: + if len(chain.input_keys) != 1: + raise ValueError( + "Chains used in SimplePipeline should all have one input, got " + f"{chain} with {len(chain.input_keys)} inputs." + ) + if len(chain.output_keys) != 1: + raise ValueError( + "Chains used in SimplePipeline should all have one output, got " + f"{chain} with {len(chain.output_keys)} outputs." 
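A minimal sketch of the `SequentialChain` validation above: the first chain's `output_keys` become known variables for the second, and `output_variables` selects what the pipeline returns; the prompts are illustrative:

```python
# Sketch: two LLMChains wired output-to-input; prompts are illustrative.
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

from langchain.chains import LLMChain, SequentialChain

llm = OpenAI(temperature=0)
synopsis = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template("Write a one-line synopsis for: {title}"),
    output_key="synopsis",
)
review = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template("Write a short review of: {synopsis}"),
    output_key="review",
)
pipeline = SequentialChain(
    chains=[synopsis, review],
    input_variables=["title"],               # checked by validate_chains
    output_variables=["synopsis", "review"],
)
print(pipeline({"title": "Tragedy at sunset on the beach"}))
```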
+ ) + return self + + def _call( + self, + inputs: dict[str, str], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, str]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + _input = inputs[self.input_key] + color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))]) + for i, chain in enumerate(self.chains): + _input = chain.run( + _input, callbacks=_run_manager.get_child(f"step_{i + 1}") + ) + if self.strip_outputs: + _input = _input.strip() + _run_manager.on_text( + _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose + ) + return {self.output_key: _input} + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + _input = inputs[self.input_key] + color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))]) + for i, chain in enumerate(self.chains): + _input = await chain.arun( + _input, callbacks=_run_manager.get_child(f"step_{i + 1}") + ) + if self.strip_outputs: + _input = _input.strip() + await _run_manager.on_text( + _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose + ) + return {self.output_key: _input} diff --git a/venv/Lib/site-packages/langchain/chains/sql_database/__init__.py b/venv/Lib/site-packages/langchain/chains/sql_database/__init__.py new file mode 100644 index 00000000..b704f72c --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/sql_database/__init__.py @@ -0,0 +1 @@ +"""Chain for interacting with SQL Database.""" diff --git a/venv/Lib/site-packages/langchain/chains/sql_database/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/sql_database/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..4ebcfef2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/sql_database/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/sql_database/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/sql_database/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..f4b787ae Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/sql_database/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/sql_database/__pycache__/query.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/sql_database/__pycache__/query.cpython-312.pyc new file mode 100644 index 00000000..efeae7bb Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/sql_database/__pycache__/query.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/sql_database/prompt.py b/venv/Lib/site-packages/langchain/chains/sql_database/prompt.py new file mode 100644 index 00000000..7efe824c --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/sql_database/prompt.py @@ -0,0 +1,284 @@ +# flake8: noqa +from langchain_core.output_parsers.list import CommaSeparatedListOutputParser +from langchain_core.prompts.prompt import PromptTemplate + + +PROMPT_SUFFIX = """Only use the following tables: +{table_info} + +Question: {input}""" + +_DEFAULT_TEMPLATE = """Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. 
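And the single-input, single-output counterpart using `SimpleSequentialChain` from above, where each step must expose exactly one input key and one output key:

```python
# Sketch: prompts are illustrative; each step has one input and one output.
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

from langchain.chains import LLMChain, SimpleSequentialChain

llm = OpenAI(temperature=0)
step_1 = LLMChain(llm=llm, prompt=PromptTemplate.from_template("Summarize: {input}"))
step_2 = LLMChain(llm=llm, prompt=PromptTemplate.from_template("Translate to French: {text}"))

chain = SimpleSequentialChain(chains=[step_1, step_2], strip_outputs=True)
print(chain.run("LangChain chains compose LLM calls into pipelines."))
```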
Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database. + +Never query for all the columns from a specific table, only ask for a few relevant columns given the question. + +Pay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. + +Use the following format: + +Question: Question here +SQLQuery: SQL Query to run +SQLResult: Result of the SQLQuery +Answer: Final answer here + +""" + +PROMPT = PromptTemplate( + input_variables=["input", "table_info", "dialect", "top_k"], + template=_DEFAULT_TEMPLATE + PROMPT_SUFFIX, +) + + +_DECIDER_TEMPLATE = """Given the below input question and list of potential tables, output a comma separated list of the table names that may be necessary to answer this question. + +Question: {query} + +Table Names: {table_names} + +Relevant Table Names:""" +DECIDER_PROMPT = PromptTemplate( + input_variables=["query", "table_names"], + template=_DECIDER_TEMPLATE, + output_parser=CommaSeparatedListOutputParser(), +) + +_cratedb_prompt = """You are a CrateDB expert. Given an input question, first create a syntactically correct CrateDB query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per CrateDB. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. +Pay attention to use CURRENT_DATE function to get the current date, if the question involves "today". + +Use the following format: + +Question: Question here +SQLQuery: SQL Query to run +SQLResult: Result of the SQLQuery +Answer: Final answer here + +""" + +CRATEDB_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_cratedb_prompt + PROMPT_SUFFIX, +) + +_duckdb_prompt = """You are a DuckDB expert. Given an input question, first create a syntactically correct DuckDB query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per DuckDB. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. +Pay attention to use today() function to get the current date, if the question involves "today". 
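To see exactly what a model receives, the generic `PROMPT` above can be formatted directly; the schema string below is a stand-in:

```python
# Sketch: renders the generic dialect prompt with illustrative values.
from langchain.chains.sql_database.prompt import PROMPT

print(
    PROMPT.format(
        dialect="sqlite",
        top_k=5,
        table_info="CREATE TABLE employees (id INTEGER, name TEXT)",  # stand-in
        input="How many employees are there?",
    )
)
```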
+ +Use the following format: + +Question: Question here +SQLQuery: SQL Query to run +SQLResult: Result of the SQLQuery +Answer: Final answer here + +""" + +DUCKDB_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_duckdb_prompt + PROMPT_SUFFIX, +) + +_googlesql_prompt = """You are a GoogleSQL expert. Given an input question, first create a syntactically correct GoogleSQL query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per GoogleSQL. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (`) to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. +Pay attention to use CURRENT_DATE() function to get the current date, if the question involves "today". + +Use the following format: + +Question: Question here +SQLQuery: SQL Query to run +SQLResult: Result of the SQLQuery +Answer: Final answer here + +""" + +GOOGLESQL_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_googlesql_prompt + PROMPT_SUFFIX, +) + + +_mssql_prompt = """You are an MS SQL expert. Given an input question, first create a syntactically correct MS SQL query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the TOP clause as per MS SQL. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in square brackets ([]) to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. +Pay attention to use CAST(GETDATE() as date) function to get the current date, if the question involves "today". + +Use the following format: + +Question: Question here +SQLQuery: SQL Query to run +SQLResult: Result of the SQLQuery +Answer: Final answer here + +""" + +MSSQL_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_mssql_prompt + PROMPT_SUFFIX, +) + + +_mysql_prompt = """You are a MySQL expert. Given an input question, first create a syntactically correct MySQL query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per MySQL. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (`) to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. 
Also, pay attention to which column is in which table. +Pay attention to use CURDATE() function to get the current date, if the question involves "today". + +Use the following format: + +Question: Question here +SQLQuery: SQL Query to run +SQLResult: Result of the SQLQuery +Answer: Final answer here + +""" + +MYSQL_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_mysql_prompt + PROMPT_SUFFIX, +) + + +_mariadb_prompt = """You are a MariaDB expert. Given an input question, first create a syntactically correct MariaDB query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per MariaDB. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (`) to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. +Pay attention to use CURDATE() function to get the current date, if the question involves "today". + +Use the following format: + +Question: Question here +SQLQuery: SQL Query to run +SQLResult: Result of the SQLQuery +Answer: Final answer here + +""" + +MARIADB_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_mariadb_prompt + PROMPT_SUFFIX, +) + + +_oracle_prompt = """You are an Oracle SQL expert. Given an input question, first create a syntactically correct Oracle SQL query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the FETCH FIRST n ROWS ONLY clause as per Oracle SQL. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. +Pay attention to use TRUNC(SYSDATE) function to get the current date, if the question involves "today". + +Use the following format: + +Question: Question here +SQLQuery: SQL Query to run +SQLResult: Result of the SQLQuery +Answer: Final answer here + +""" + +ORACLE_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_oracle_prompt + PROMPT_SUFFIX, +) + + +_postgres_prompt = """You are a PostgreSQL expert. Given an input question, first create a syntactically correct PostgreSQL query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PostgreSQL. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. 
Wrap each column name in double quotes (") to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. +Pay attention to use CURRENT_DATE function to get the current date, if the question involves "today". + +Use the following format: + +Question: Question here +SQLQuery: SQL Query to run +SQLResult: Result of the SQLQuery +Answer: Final answer here + +""" + +POSTGRES_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_postgres_prompt + PROMPT_SUFFIX, +) + + +_sqlite_prompt = """You are a SQLite expert. Given an input question, first create a syntactically correct SQLite query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per SQLite. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. +Pay attention to use date('now') function to get the current date, if the question involves "today". + +Use the following format: + +Question: Question here +SQLQuery: SQL Query to run +SQLResult: Result of the SQLQuery +Answer: Final answer here + +""" + +SQLITE_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_sqlite_prompt + PROMPT_SUFFIX, +) + +_clickhouse_prompt = """You are a ClickHouse expert. Given an input question, first create a syntactically correct ClickHouse query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per ClickHouse. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. +Pay attention to use today() function to get the current date, if the question involves "today". + +Use the following format: + +Question: "Question here" +SQLQuery: "SQL Query to run" +SQLResult: "Result of the SQLQuery" +Answer: "Final answer here" + +""" + +CLICKHOUSE_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_clickhouse_prompt + PROMPT_SUFFIX, +) + +_prestodb_prompt = """You are a PrestoDB expert. Given an input question, first create a syntactically correct PrestoDB query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PrestoDB. 
You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. +Pay attention to use current_date function to get the current date, if the question involves "today". + +Use the following format: + +Question: "Question here" +SQLQuery: "SQL Query to run" +SQLResult: "Result of the SQLQuery" +Answer: "Final answer here" + +""" + +PRESTODB_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_prestodb_prompt + PROMPT_SUFFIX, +) + + +SQL_PROMPTS = { + "crate": CRATEDB_PROMPT, + "duckdb": DUCKDB_PROMPT, + "googlesql": GOOGLESQL_PROMPT, + "mssql": MSSQL_PROMPT, + "mysql": MYSQL_PROMPT, + "mariadb": MARIADB_PROMPT, + "oracle": ORACLE_PROMPT, + "postgresql": POSTGRES_PROMPT, + "sqlite": SQLITE_PROMPT, + "clickhouse": CLICKHOUSE_PROMPT, + "prestodb": PRESTODB_PROMPT, +} diff --git a/venv/Lib/site-packages/langchain/chains/sql_database/query.py b/venv/Lib/site-packages/langchain/chains/sql_database/query.py new file mode 100644 index 00000000..c83495ce --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/sql_database/query.py @@ -0,0 +1,165 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Optional, TypedDict, Union + +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts import BasePromptTemplate +from langchain_core.runnables import Runnable, RunnablePassthrough + +from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS + +if TYPE_CHECKING: + from langchain_community.utilities.sql_database import SQLDatabase + + +def _strip(text: str) -> str: + return text.strip() + + +class SQLInput(TypedDict): + """Input for a SQL Chain.""" + + question: str + + +class SQLInputWithTables(TypedDict): + """Input for a SQL Chain.""" + + question: str + table_names_to_use: list[str] + + +def create_sql_query_chain( + llm: BaseLanguageModel, + db: SQLDatabase, + prompt: Optional[BasePromptTemplate] = None, + k: int = 5, + *, + get_col_comments: Optional[bool] = None, +) -> Runnable[Union[SQLInput, SQLInputWithTables, dict[str, Any]], str]: + """Create a chain that generates SQL queries. + + *Security Note*: This chain generates SQL queries for the given database. + + The SQLDatabase class provides a get_table_info method that can be used + to get column information as well as sample data from the table. + + To mitigate risk of leaking sensitive data, limit permissions + to read and scope to the tables that are needed. + + Optionally, use the SQLInputWithTables input type to specify which tables + are allowed to be accessed. + + Control access to who can submit requests to this chain. + + See https://python.langchain.com/docs/security for more information. + + Args: + llm: The language model to use. + db: The SQLDatabase to generate the query for. + prompt: The prompt to use. If none is provided, will choose one + based on dialect. Defaults to None. See Prompt section below for more. + k: The number of results per select statement to return. Defaults to 5. + get_col_comments: Whether to retrieve column comments along with table info. 
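The dialect handling implemented further down is a plain lookup over the `SQL_PROMPTS` map above, with the generic `PROMPT` as the fallback; a small sketch of the equivalent behaviour:

```python
# Sketch mirroring the dialect fallback in create_sql_query_chain below.
from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS

for dialect in ("postgresql", "mysql", "snowflake"):
    prompt = SQL_PROMPTS.get(dialect, PROMPT)
    print(dialect, "->", "dialect-specific" if dialect in SQL_PROMPTS else "generic")
```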
+            Defaults to False.
+
+    Returns:
+        A chain that takes in a question and generates a SQL query that answers
+        that question.
+
+    Example:
+
+        .. code-block:: python
+
+            # pip install -U langchain langchain-community langchain-openai
+            from langchain_openai import ChatOpenAI
+            from langchain.chains import create_sql_query_chain
+            from langchain_community.utilities import SQLDatabase
+
+            db = SQLDatabase.from_uri("sqlite:///Chinook.db")
+            llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
+            chain = create_sql_query_chain(llm, db)
+            response = chain.invoke({"question": "How many employees are there"})
+
+    Prompt:
+        If no prompt is provided, a default prompt is selected based on the SQLDatabase dialect. If one is provided, it must support input variables:
+        * input: The user question plus suffix "\nSQLQuery: " is passed here.
+        * top_k: The number of results per select statement (the `k` argument to
+            this function) is passed in here.
+        * table_info: Table definitions and sample rows are passed in here. If the
+            user specifies "table_names_to_use" when invoking chain, only those
+            will be included. Otherwise, all tables are included.
+        * dialect (optional): If dialect input variable is in prompt, the db
+            dialect will be passed in here.
+
+        Here's an example prompt (note that it must reference {top_k}, or the
+        input-variable validation below will reject it):
+
+        .. code-block:: python
+
+            from langchain_core.prompts import PromptTemplate
+
+            template = '''Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Query for at most {top_k} rows unless the question asks for more.
+            Use the following format:
+
+            Question: "Question here"
+            SQLQuery: "SQL Query to run"
+            SQLResult: "Result of the SQLQuery"
+            Answer: "Final answer here"
+
+            Only use the following tables:
+
+            {table_info}.
+
+            Question: {input}'''
+            prompt = PromptTemplate.from_template(template)
+    """  # noqa: E501
+    if prompt is not None:
+        prompt_to_use = prompt
+    elif db.dialect in SQL_PROMPTS:
+        prompt_to_use = SQL_PROMPTS[db.dialect]
+    else:
+        prompt_to_use = PROMPT
+    if {"input", "top_k", "table_info"}.difference(
+        prompt_to_use.input_variables + list(prompt_to_use.partial_variables)
+    ):
+        raise ValueError(
+            f"Prompt must have input variables: 'input', 'top_k', "
+            f"'table_info'. Received prompt with input variables: "
+            f"{prompt_to_use.input_variables}. Full prompt:\n\n{prompt_to_use}"
+        )
+    if "dialect" in prompt_to_use.input_variables:
+        prompt_to_use = prompt_to_use.partial(dialect=db.dialect)
+
+    table_info_kwargs = {}
+    if get_col_comments:
+        if db.dialect not in ("postgresql", "mysql", "oracle"):
+            raise ValueError(
+                f"get_col_comments=True is only supported for dialects "
+                f"'postgresql', 'mysql', and 'oracle'. 
Received dialect: " + f"{db.dialect}" + ) + else: + table_info_kwargs["get_col_comments"] = True + + inputs = { + "input": lambda x: x["question"] + "\nSQLQuery: ", + "table_info": lambda x: db.get_table_info( + table_names=x.get("table_names_to_use"), + **table_info_kwargs, + ), + } + return ( + RunnablePassthrough.assign(**inputs) # type: ignore[return-value] + | ( + lambda x: { + k: v + for k, v in x.items() + if k not in ("question", "table_names_to_use") + } + ) + | prompt_to_use.partial(top_k=str(k)) + | llm.bind(stop=["\nSQLResult:"]) + | StrOutputParser() + | _strip + ) diff --git a/venv/Lib/site-packages/langchain/chains/structured_output/__init__.py b/venv/Lib/site-packages/langchain/chains/structured_output/__init__.py new file mode 100644 index 00000000..d387a7cb --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/structured_output/__init__.py @@ -0,0 +1,6 @@ +from langchain.chains.structured_output.base import ( + create_openai_fn_runnable, + create_structured_output_runnable, +) + +__all__ = ["create_structured_output_runnable", "create_openai_fn_runnable"] diff --git a/venv/Lib/site-packages/langchain/chains/structured_output/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/structured_output/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..dd9a1a8a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/structured_output/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/structured_output/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/structured_output/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..4495ada6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/structured_output/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/structured_output/base.py b/venv/Lib/site-packages/langchain/chains/structured_output/base.py new file mode 100644 index 00000000..557e8188 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/structured_output/base.py @@ -0,0 +1,574 @@ +import json +from collections.abc import Sequence +from typing import Any, Callable, Literal, Optional, Union + +from langchain_core._api import deprecated +from langchain_core.output_parsers import ( + BaseGenerationOutputParser, + BaseOutputParser, + JsonOutputParser, + PydanticOutputParser, +) +from langchain_core.output_parsers.openai_functions import ( + JsonOutputFunctionsParser, + PydanticAttrOutputFunctionsParser, + PydanticOutputFunctionsParser, +) +from langchain_core.output_parsers.openai_tools import ( + JsonOutputKeyToolsParser, + PydanticToolsParser, +) +from langchain_core.prompts import BasePromptTemplate +from langchain_core.runnables import Runnable +from langchain_core.utils.function_calling import ( + convert_to_openai_function, + convert_to_openai_tool, +) +from langchain_core.utils.pydantic import is_basemodel_subclass +from pydantic import BaseModel + + +@deprecated( + since="0.1.14", + message=( + "LangChain has introduced a method called `with_structured_output` that " + "is available on ChatModels capable of tool calling. " + "You can read more about the method here: " + ". " + "Please follow our extraction use case documentation for more guidelines " + "on how to do information extraction with LLMs. " + ". 
" + "If you notice other issues, please provide " + "feedback here: " + "" + ), + removal="1.0", + alternative=( + """ + from pydantic import BaseModel, Field + from langchain_anthropic import ChatAnthropic + + class Joke(BaseModel): + setup: str = Field(description="The setup of the joke") + punchline: str = Field(description="The punchline to the joke") + + # Or any other chat model that supports tools. + # Please reference to to the documentation of structured_output + # to see an up to date list of which models support + # with_structured_output. + model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) + structured_llm = model.with_structured_output(Joke) + structured_llm.invoke("Tell me a joke about cats. + Make sure to call the Joke function.") + """ + ), +) +def create_openai_fn_runnable( + functions: Sequence[Union[dict[str, Any], type[BaseModel], Callable]], + llm: Runnable, + prompt: Optional[BasePromptTemplate] = None, + *, + enforce_single_function_usage: bool = True, + output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None, + **llm_kwargs: Any, +) -> Runnable: + """Create a runnable sequence that uses OpenAI functions. + + Args: + functions: A sequence of either dictionaries, pydantic.BaseModels classes, or + Python functions. If dictionaries are passed in, they are assumed to + already be a valid OpenAI functions. If only a single + function is passed in, then it will be enforced that the model use that + function. pydantic.BaseModels and Python functions should have docstrings + describing what the function does. For best results, pydantic.BaseModels + should have descriptions of the parameters and Python functions should have + Google Python style args descriptions in the docstring. Additionally, + Python functions should only use primitive types (str, int, float, bool) or + pydantic.BaseModels for arguments. + llm: Language model to use, assumed to support the OpenAI function-calling API. + prompt: BasePromptTemplate to pass to the model. + enforce_single_function_usage: only used if a single function is passed in. If + True, then the model will be forced to use the given function. If False, + then the model will be given the option to use the given function or not. + output_parser: BaseLLMOutputParser to use for parsing model outputs. By default + will be inferred from the function types. If pydantic.BaseModels are passed + in, then the OutputParser will try to parse outputs using those. Otherwise + model outputs will simply be parsed as JSON. If multiple functions are + passed in and they are not pydantic.BaseModels, the chain output will + include both the name of the function that was returned and the arguments + to pass to the function. + **llm_kwargs: Additional named arguments to pass to the language model. + + Returns: + A runnable sequence that will pass in the given functions to the model when run. + + Example: + .. 
+
+            from typing import Optional
+
+            from langchain.chains.structured_output import create_openai_fn_runnable
+            from langchain_openai import ChatOpenAI
+            from pydantic import BaseModel, Field
+
+
+            class RecordPerson(BaseModel):
+                '''Record some identifying information about a person.'''
+
+                name: str = Field(..., description="The person's name")
+                age: int = Field(..., description="The person's age")
+                fav_food: Optional[str] = Field(None, description="The person's favorite food")
+
+
+            class RecordDog(BaseModel):
+                '''Record some identifying information about a dog.'''
+
+                name: str = Field(..., description="The dog's name")
+                color: str = Field(..., description="The dog's color")
+                fav_food: Optional[str] = Field(None, description="The dog's favorite food")
+
+
+            llm = ChatOpenAI(model="gpt-4", temperature=0)
+            structured_llm = create_openai_fn_runnable([RecordPerson, RecordDog], llm)
+            structured_llm.invoke("Harry was a chubby brown beagle who loved chicken")
+            # -> RecordDog(name="Harry", color="brown", fav_food="chicken")
+    """  # noqa: E501
+    if not functions:
+        raise ValueError("Need to pass in at least one function. Received zero.")
+    openai_functions = [convert_to_openai_function(f) for f in functions]
+    llm_kwargs_: dict[str, Any] = {"functions": openai_functions, **llm_kwargs}
+    if len(openai_functions) == 1 and enforce_single_function_usage:
+        llm_kwargs_["function_call"] = {"name": openai_functions[0]["name"]}
+    output_parser = output_parser or get_openai_output_parser(functions)
+    if prompt:
+        return prompt | llm.bind(**llm_kwargs_) | output_parser
+    else:
+        return llm.bind(**llm_kwargs_) | output_parser
+
+
+@deprecated(
+    since="0.1.17",
+    message=(
+        "LangChain has introduced a method called `with_structured_output` that "
+        "is available on ChatModels capable of tool calling. "
+        "You can read more about the method here: "
+        "."
+        "Please follow our extraction use case documentation for more guidelines "
+        "on how to do information extraction with LLMs. "
+        ". "
+        "If you notice other issues, please provide "
+        "feedback here: "
+        ""
+    ),
+    removal="1.0",
+    alternative=(
+        """
+            from pydantic import BaseModel, Field
+            from langchain_anthropic import ChatAnthropic
+
+            class Joke(BaseModel):
+                setup: str = Field(description="The setup of the joke")
+                punchline: str = Field(description="The punchline to the joke")
+
+            # Or any other chat model that supports tools.
+            # Please reference the documentation of structured_output
+            # to see an up to date list of which models support
+            # with_structured_output.
+            model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+            structured_llm = model.with_structured_output(Joke)
+            structured_llm.invoke("Tell me a joke about cats. Make sure to call the Joke function.")
+        """
+    ),
+)
+def create_structured_output_runnable(
+    output_schema: Union[dict[str, Any], type[BaseModel]],
+    llm: Runnable,
+    prompt: Optional[BasePromptTemplate] = None,
+    *,
+    output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
+    enforce_function_usage: bool = True,
+    return_single: bool = True,
+    mode: Literal[
+        "openai-functions", "openai-tools", "openai-json"
+    ] = "openai-functions",
+    **kwargs: Any,
+) -> Runnable:
+    """Create a runnable for extracting structured outputs.
+
+    Args:
+        output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary
+            is passed in, it's assumed to already be a valid JsonSchema. 
+            For best results, pydantic.BaseModels should have docstrings describing what
+            the schema represents and descriptions for the parameters.
+        llm: Language model to use. Assumed to support the OpenAI function-calling API
+            if mode is 'openai-functions'. Assumed to support OpenAI response_format
+            parameter if mode is 'openai-json'.
+        prompt: BasePromptTemplate to pass to the model. If mode is 'openai-json' and
+            prompt has input variable 'output_schema' then the given output_schema
+            will be converted to a JsonSchema and inserted in the prompt.
+        output_parser: Output parser to use for parsing model outputs. By default
+            will be inferred from the function types. If pydantic.BaseModel is passed
+            in, then the OutputParser will try to parse outputs using the pydantic
+            class. Otherwise model outputs will be parsed as JSON.
+        mode: How structured outputs are extracted from the model. If 'openai-functions'
+            then OpenAI function calling is used with the deprecated 'functions',
+            'function_call' schema. If 'openai-tools' then OpenAI function
+            calling with the latest 'tools', 'tool_choice' schema is used. This is
+            recommended over 'openai-functions'. If 'openai-json' then OpenAI model
+            with response_format set to JSON is used.
+        enforce_function_usage: Only applies when mode is 'openai-tools' or
+            'openai-functions'. If True, then the model will be forced to use the given
+            output schema. If False, then the model can elect whether to use the output
+            schema.
+        return_single: Only applies when mode is 'openai-tools'. Whether to return a
+            list of structured outputs or a single one. If True and model does not
+            return any structured outputs then chain output is None. If False and model
+            does not return any structured outputs then chain output is an empty list.
+        kwargs: Additional named arguments.
+
+    Returns:
+        A runnable sequence that will return a structured output(s) matching the given
+        output_schema.
+
+    OpenAI tools example with Pydantic schema (mode='openai-tools'):
+        .. code-block:: python
+
+            from typing import Optional
+
+            from langchain.chains import create_structured_output_runnable
+            from langchain_core.prompts import ChatPromptTemplate
+            from langchain_openai import ChatOpenAI
+            from pydantic import BaseModel, Field
+
+
+            class RecordDog(BaseModel):
+                '''Record some identifying information about a dog.'''
+
+                name: str = Field(..., description="The dog's name")
+                color: str = Field(..., description="The dog's color")
+                fav_food: Optional[str] = Field(None, description="The dog's favorite food")
+
+            llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
+            prompt = ChatPromptTemplate.from_messages(
+                [
+                    ("system", "You are an extraction algorithm. Please extract every possible instance"),
+                    ('human', '{input}')
+                ]
+            )
+            structured_llm = create_structured_output_runnable(
+                RecordDog,
+                llm,
+                mode="openai-tools",
+                enforce_function_usage=True,
+                return_single=True
+            )
+            chain = prompt | structured_llm
+            chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
+            # -> RecordDog(name="Harry", color="brown", fav_food="chicken")
+
+    OpenAI tools example with dict schema (mode="openai-tools"):
+        .. 
code-block:: python + + from typing import Optional + + from langchain.chains import create_structured_output_runnable + from langchain_openai import ChatOpenAI + + + dog_schema = { + "type": "function", + "function": { + "name": "record_dog", + "description": "Record some identifying information about a dog.", + "parameters": { + "type": "object", + "properties": { + "name": { + "description": "The dog's name", + "type": "string" + }, + "color": { + "description": "The dog's color", + "type": "string" + }, + "fav_food": { + "description": "The dog's favorite food", + "type": "string" + } + }, + "required": ["name", "color"] + } + } + } + + + llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + structured_llm = create_structured_output_runnable( + dog_schema, + llm, + mode="openai-tools", + enforce_function_usage=True, + return_single=True + ) + structured_llm.invoke("Harry was a chubby brown beagle who loved chicken") + # -> {'name': 'Harry', 'color': 'brown', 'fav_food': 'chicken'} + + OpenAI functions example (mode="openai-functions"): + .. code-block:: python + + from typing import Optional + + from langchain.chains import create_structured_output_runnable + from langchain_openai import ChatOpenAI + from pydantic import BaseModel, Field + + class Dog(BaseModel): + '''Identifying information about a dog.''' + + name: str = Field(..., description="The dog's name") + color: str = Field(..., description="The dog's color") + fav_food: Optional[str] = Field(None, description="The dog's favorite food") + + llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-functions") + structured_llm.invoke("Harry was a chubby brown beagle who loved chicken") + # -> Dog(name="Harry", color="brown", fav_food="chicken") + + OpenAI functions with prompt example: + .. code-block:: python + + from typing import Optional + + from langchain.chains import create_structured_output_runnable + from langchain_openai import ChatOpenAI + from langchain_core.prompts import ChatPromptTemplate + from pydantic import BaseModel, Field + + class Dog(BaseModel): + '''Identifying information about a dog.''' + + name: str = Field(..., description="The dog's name") + color: str = Field(..., description="The dog's color") + fav_food: Optional[str] = Field(None, description="The dog's favorite food") + + llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-functions") + system = '''Extract information about any dogs mentioned in the user input.''' + prompt = ChatPromptTemplate.from_messages( + [("system", system), ("human", "{input}"),] + ) + chain = prompt | structured_llm + chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) + # -> Dog(name="Harry", color="brown", fav_food="chicken") + OpenAI json response format example (mode="openai-json"): + .. 
code-block:: python + + from typing import Optional + + from langchain.chains import create_structured_output_runnable + from langchain_openai import ChatOpenAI + from langchain_core.prompts import ChatPromptTemplate + from pydantic import BaseModel, Field + + class Dog(BaseModel): + '''Identifying information about a dog.''' + + name: str = Field(..., description="The dog's name") + color: str = Field(..., description="The dog's color") + fav_food: Optional[str] = Field(None, description="The dog's favorite food") + + llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) + structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-json") + system = '''You are a world class assistant for extracting information in structured JSON formats. \ + + Extract a valid JSON blob from the user input that matches the following JSON Schema: + + {output_schema}''' + prompt = ChatPromptTemplate.from_messages( + [("system", system), ("human", "{input}"),] + ) + chain = prompt | structured_llm + chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) + """ # noqa: E501 + # for backwards compatibility + force_function_usage = kwargs.get( + "enforce_single_function_usage", enforce_function_usage + ) + + if mode == "openai-tools": + # Protect against typos in kwargs + keys_in_kwargs = set(kwargs.keys()) + # Backwards compatibility keys + unrecognized_keys = keys_in_kwargs - {"enforce_single_function_usage"} + if unrecognized_keys: + raise TypeError( + f"Got an unexpected keyword argument(s): {unrecognized_keys}." + ) + + return _create_openai_tools_runnable( + output_schema, + llm, + prompt=prompt, + output_parser=output_parser, + enforce_tool_usage=force_function_usage, + first_tool_only=return_single, + ) + + elif mode == "openai-functions": + return _create_openai_functions_structured_output_runnable( + output_schema, + llm, + prompt=prompt, + output_parser=output_parser, + enforce_single_function_usage=force_function_usage, + **kwargs, # llm-specific kwargs + ) + elif mode == "openai-json": + if force_function_usage: + raise ValueError( + "enforce_single_function_usage is not supported for mode='openai-json'." + ) + return _create_openai_json_runnable( + output_schema, llm, prompt=prompt, output_parser=output_parser, **kwargs + ) + else: + raise ValueError( + f"Invalid mode {mode}. Expected one of 'openai-tools', 'openai-functions', " + f"'openai-json'." 
+ ) + + +def _create_openai_tools_runnable( + tool: Union[dict[str, Any], type[BaseModel], Callable], + llm: Runnable, + *, + prompt: Optional[BasePromptTemplate], + output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]], + enforce_tool_usage: bool, + first_tool_only: bool, +) -> Runnable: + oai_tool = convert_to_openai_tool(tool) + llm_kwargs: dict[str, Any] = {"tools": [oai_tool]} + if enforce_tool_usage: + llm_kwargs["tool_choice"] = { + "type": "function", + "function": {"name": oai_tool["function"]["name"]}, + } + output_parser = output_parser or _get_openai_tool_output_parser( + tool, first_tool_only=first_tool_only + ) + if prompt: + return prompt | llm.bind(**llm_kwargs) | output_parser + else: + return llm.bind(**llm_kwargs) | output_parser + + +def _get_openai_tool_output_parser( + tool: Union[dict[str, Any], type[BaseModel], Callable], + *, + first_tool_only: bool = False, +) -> Union[BaseOutputParser, BaseGenerationOutputParser]: + if isinstance(tool, type) and is_basemodel_subclass(tool): + output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = ( + PydanticToolsParser(tools=[tool], first_tool_only=first_tool_only) + ) + else: + key_name = convert_to_openai_tool(tool)["function"]["name"] + output_parser = JsonOutputKeyToolsParser( + first_tool_only=first_tool_only, key_name=key_name + ) + return output_parser + + +def get_openai_output_parser( + functions: Sequence[Union[dict[str, Any], type[BaseModel], Callable]], +) -> Union[BaseOutputParser, BaseGenerationOutputParser]: + """Get the appropriate function output parser given the user functions. + + Args: + functions: Sequence where element is a dictionary, a pydantic.BaseModel class, + or a Python function. If a dictionary is passed in, it is assumed to + already be a valid OpenAI function. + + Returns: + A PydanticOutputFunctionsParser if functions are Pydantic classes, otherwise + a JsonOutputFunctionsParser. If there's only one function and it is + not a Pydantic class, then the output parser will automatically extract + only the function arguments and not the function name. 
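+
+    Example:
+        A minimal sketch (illustrative only; ``RecordDog`` is a hypothetical
+        Pydantic model, not part of this module):
+
+        .. code-block:: python
+
+            from pydantic import BaseModel, Field
+
+            class RecordDog(BaseModel):
+                '''Record some identifying information about a dog.'''
+
+                name: str = Field(..., description="The dog's name")
+
+            # A single Pydantic class yields a PydanticOutputFunctionsParser;
+            # plain dicts or functions would yield a JsonOutputFunctionsParser.
+            parser = get_openai_output_parser([RecordDog])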
+    """
+    if isinstance(functions[0], type) and is_basemodel_subclass(functions[0]):
+        if len(functions) > 1:
+            pydantic_schema: Union[dict, type[BaseModel]] = {
+                convert_to_openai_function(fn)["name"]: fn for fn in functions
+            }
+        else:
+            pydantic_schema = functions[0]
+        output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = (
+            PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
+        )
+    else:
+        output_parser = JsonOutputFunctionsParser(args_only=len(functions) <= 1)
+    return output_parser
+
+
+def _create_openai_json_runnable(
+    output_schema: Union[dict[str, Any], type[BaseModel]],
+    llm: Runnable,
+    prompt: Optional[BasePromptTemplate] = None,
+    *,
+    output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
+) -> Runnable:
+    """Create a runnable that extracts structured output via OpenAI's JSON response format."""
+    if isinstance(output_schema, type) and is_basemodel_subclass(output_schema):
+        output_parser = output_parser or PydanticOutputParser(
+            pydantic_object=output_schema,
+        )
+        schema_as_dict = convert_to_openai_function(output_schema)["parameters"]
+    else:
+        output_parser = output_parser or JsonOutputParser()
+        schema_as_dict = output_schema
+
+    llm = llm.bind(response_format={"type": "json_object"})
+    if prompt:
+        if "output_schema" in prompt.input_variables:
+            prompt = prompt.partial(output_schema=json.dumps(schema_as_dict, indent=2))
+
+        return prompt | llm | output_parser
+    else:
+        return llm | output_parser
+
+
+def _create_openai_functions_structured_output_runnable(
+    output_schema: Union[dict[str, Any], type[BaseModel]],
+    llm: Runnable,
+    prompt: Optional[BasePromptTemplate] = None,
+    *,
+    output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
+    **llm_kwargs: Any,
+) -> Runnable:
+    if isinstance(output_schema, dict):
+        function: Any = {
+            "name": "output_formatter",
+            "description": (
+                "Output formatter. Should always be used to format your response to the"
+                " user."
+            ),
+            "parameters": output_schema,
+        }
+    else:
+
+        class _OutputFormatter(BaseModel):
+            """Output formatter. 
Should always be used to format your response to the user.""" # noqa: E501 + + output: output_schema # type: ignore[valid-type] + + function = _OutputFormatter + output_parser = output_parser or PydanticAttrOutputFunctionsParser( + pydantic_schema=_OutputFormatter, attr_name="output" + ) + return create_openai_fn_runnable( + [function], + llm, + prompt=prompt, + output_parser=output_parser, + **llm_kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/chains/summarize/__init__.py b/venv/Lib/site-packages/langchain/chains/summarize/__init__.py new file mode 100644 index 00000000..f2e0d352 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/summarize/__init__.py @@ -0,0 +1,6 @@ +from langchain.chains.summarize.chain import ( + LoadingCallable, + load_summarize_chain, +) + +__all__ = ["LoadingCallable", "load_summarize_chain"] diff --git a/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..84c2059e Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/chain.cpython-312.pyc new file mode 100644 index 00000000..51d6618e Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/map_reduce_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/map_reduce_prompt.cpython-312.pyc new file mode 100644 index 00000000..bb5adb42 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/map_reduce_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/refine_prompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/refine_prompts.cpython-312.pyc new file mode 100644 index 00000000..fa4b20b0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/refine_prompts.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/stuff_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/stuff_prompt.cpython-312.pyc new file mode 100644 index 00000000..eeeffc3a Binary files /dev/null and b/venv/Lib/site-packages/langchain/chains/summarize/__pycache__/stuff_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chains/summarize/chain.py b/venv/Lib/site-packages/langchain/chains/summarize/chain.py new file mode 100644 index 00000000..b5bccbd2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/summarize/chain.py @@ -0,0 +1,168 @@ +"""Load summarizing chains.""" + +from collections.abc import Mapping +from typing import Any, Optional, Protocol + +from langchain_core.callbacks import Callbacks +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate + +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain +from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain +from langchain.chains.combine_documents.reduce import ReduceDocumentsChain +from langchain.chains.combine_documents.refine import 
RefineDocumentsChain +from langchain.chains.combine_documents.stuff import StuffDocumentsChain +from langchain.chains.llm import LLMChain +from langchain.chains.summarize import map_reduce_prompt, refine_prompts, stuff_prompt + + +class LoadingCallable(Protocol): + """Interface for loading the combine documents chain.""" + + def __call__( + self, llm: BaseLanguageModel, **kwargs: Any + ) -> BaseCombineDocumentsChain: + """Callable to load the combine documents chain.""" + + +def _load_stuff_chain( + llm: BaseLanguageModel, + prompt: BasePromptTemplate = stuff_prompt.PROMPT, + document_variable_name: str = "text", + verbose: Optional[bool] = None, + **kwargs: Any, +) -> StuffDocumentsChain: + llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose) # type: ignore[arg-type] + # TODO: document prompt + return StuffDocumentsChain( + llm_chain=llm_chain, + document_variable_name=document_variable_name, + verbose=verbose, # type: ignore[arg-type] + **kwargs, + ) + + +def _load_map_reduce_chain( + llm: BaseLanguageModel, + map_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT, + combine_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT, + combine_document_variable_name: str = "text", + map_reduce_document_variable_name: str = "text", + collapse_prompt: Optional[BasePromptTemplate] = None, + reduce_llm: Optional[BaseLanguageModel] = None, + collapse_llm: Optional[BaseLanguageModel] = None, + verbose: Optional[bool] = None, + token_max: int = 3000, + callbacks: Callbacks = None, + *, + collapse_max_retries: Optional[int] = None, + **kwargs: Any, +) -> MapReduceDocumentsChain: + map_chain = LLMChain( + llm=llm, + prompt=map_prompt, + verbose=verbose, # type: ignore[arg-type] + callbacks=callbacks, + ) + _reduce_llm = reduce_llm or llm + reduce_chain = LLMChain( + llm=_reduce_llm, + prompt=combine_prompt, + verbose=verbose, # type: ignore[arg-type] + callbacks=callbacks, + ) + # TODO: document prompt + combine_documents_chain = StuffDocumentsChain( + llm_chain=reduce_chain, + document_variable_name=combine_document_variable_name, + verbose=verbose, # type: ignore[arg-type] + callbacks=callbacks, + ) + if collapse_prompt is None: + collapse_chain = None + if collapse_llm is not None: + raise ValueError( + "collapse_llm provided, but collapse_prompt was not: please " + "provide one or stop providing collapse_llm." 
+ ) + else: + _collapse_llm = collapse_llm or llm + collapse_chain = StuffDocumentsChain( + llm_chain=LLMChain( + llm=_collapse_llm, + prompt=collapse_prompt, + verbose=verbose, # type: ignore[arg-type] + callbacks=callbacks, + ), + document_variable_name=combine_document_variable_name, + ) + reduce_documents_chain = ReduceDocumentsChain( + combine_documents_chain=combine_documents_chain, + collapse_documents_chain=collapse_chain, + token_max=token_max, + verbose=verbose, # type: ignore[arg-type] + callbacks=callbacks, + collapse_max_retries=collapse_max_retries, + ) + return MapReduceDocumentsChain( + llm_chain=map_chain, + reduce_documents_chain=reduce_documents_chain, + document_variable_name=map_reduce_document_variable_name, + verbose=verbose, # type: ignore[arg-type] + callbacks=callbacks, + **kwargs, + ) + + +def _load_refine_chain( + llm: BaseLanguageModel, + question_prompt: BasePromptTemplate = refine_prompts.PROMPT, + refine_prompt: BasePromptTemplate = refine_prompts.REFINE_PROMPT, + document_variable_name: str = "text", + initial_response_name: str = "existing_answer", + refine_llm: Optional[BaseLanguageModel] = None, + verbose: Optional[bool] = None, + **kwargs: Any, +) -> RefineDocumentsChain: + initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose) # type: ignore[arg-type] + _refine_llm = refine_llm or llm + refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose) # type: ignore[arg-type] + return RefineDocumentsChain( + initial_llm_chain=initial_chain, + refine_llm_chain=refine_chain, + document_variable_name=document_variable_name, + initial_response_name=initial_response_name, + verbose=verbose, # type: ignore[arg-type] + **kwargs, + ) + + +def load_summarize_chain( + llm: BaseLanguageModel, + chain_type: str = "stuff", + verbose: Optional[bool] = None, + **kwargs: Any, +) -> BaseCombineDocumentsChain: + """Load summarizing chain. + + Args: + llm: Language Model to use in the chain. + chain_type: Type of document combining chain to use. Should be one of "stuff", + "map_reduce", and "refine". + verbose: Whether chains should be run in verbose mode or not. Note that this + applies to all chains that make up the final chain. + + Returns: + A chain to use for summarizing. + """ + loader_mapping: Mapping[str, LoadingCallable] = { + "stuff": _load_stuff_chain, + "map_reduce": _load_map_reduce_chain, + "refine": _load_refine_chain, + } + if chain_type not in loader_mapping: + raise ValueError( + f"Got unsupported chain type: {chain_type}. 
" + f"Should be one of {loader_mapping.keys()}" + ) + return loader_mapping[chain_type](llm, verbose=verbose, **kwargs) diff --git a/venv/Lib/site-packages/langchain/chains/summarize/map_reduce_prompt.py b/venv/Lib/site-packages/langchain/chains/summarize/map_reduce_prompt.py new file mode 100644 index 00000000..3cf06395 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/summarize/map_reduce_prompt.py @@ -0,0 +1,11 @@ +# flake8: noqa +from langchain_core.prompts import PromptTemplate + +prompt_template = """Write a concise summary of the following: + + +"{text}" + + +CONCISE SUMMARY:""" +PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"]) diff --git a/venv/Lib/site-packages/langchain/chains/summarize/refine_prompts.py b/venv/Lib/site-packages/langchain/chains/summarize/refine_prompts.py new file mode 100644 index 00000000..63c1c338 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/summarize/refine_prompts.py @@ -0,0 +1,23 @@ +from langchain_core.prompts import PromptTemplate + +REFINE_PROMPT_TMPL = """\ +Your job is to produce a final summary. +We have provided an existing summary up to a certain point: {existing_answer} +We have the opportunity to refine the existing summary (only if needed) with some more context below. +------------ +{text} +------------ +Given the new context, refine the original summary. +If the context isn't useful, return the original summary.\ +""" # noqa: E501 +REFINE_PROMPT = PromptTemplate.from_template(REFINE_PROMPT_TMPL) + + +prompt_template = """Write a concise summary of the following: + + +"{text}" + + +CONCISE SUMMARY:""" +PROMPT = PromptTemplate.from_template(prompt_template) diff --git a/venv/Lib/site-packages/langchain/chains/summarize/stuff_prompt.py b/venv/Lib/site-packages/langchain/chains/summarize/stuff_prompt.py new file mode 100644 index 00000000..3cf06395 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/summarize/stuff_prompt.py @@ -0,0 +1,11 @@ +# flake8: noqa +from langchain_core.prompts import PromptTemplate + +prompt_template = """Write a concise summary of the following: + + +"{text}" + + +CONCISE SUMMARY:""" +PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"]) diff --git a/venv/Lib/site-packages/langchain/chains/transform.py b/venv/Lib/site-packages/langchain/chains/transform.py new file mode 100644 index 00000000..fae0b5a7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chains/transform.py @@ -0,0 +1,85 @@ +"""Chain that runs an arbitrary python function.""" + +import functools +import logging +from collections.abc import Awaitable +from typing import Any, Callable, Optional + +from langchain_core.callbacks import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, +) +from pydantic import Field + +from langchain.chains.base import Chain + +logger = logging.getLogger(__name__) + + +class TransformChain(Chain): + """Chain that transforms the chain output. + + Example: + .. 
code-block:: python
+
+            from langchain.chains import TransformChain
+            transform_chain = TransformChain(input_variables=["text"],
+                output_variables=["entities"], transform=func)
+    """
+
+    input_variables: list[str]
+    """The keys expected by the transform's input dictionary."""
+    output_variables: list[str]
+    """The keys returned by the transform's output dictionary."""
+    transform_cb: Callable[[dict[str, str]], dict[str, str]] = Field(alias="transform")
+    """The transform function."""
+    atransform_cb: Optional[Callable[[dict[str, Any]], Awaitable[dict[str, Any]]]] = (
+        Field(None, alias="atransform")
+    )
+    """The async coroutine transform function."""
+
+    @staticmethod
+    @functools.lru_cache
+    def _log_once(msg: str) -> None:
+        """Log a message once.
+
+        :meta private:
+        """
+        logger.warning(msg)
+
+    @property
+    def input_keys(self) -> list[str]:
+        """Expect input keys.
+
+        :meta private:
+        """
+        return self.input_variables
+
+    @property
+    def output_keys(self) -> list[str]:
+        """Return output keys.
+
+        :meta private:
+        """
+        return self.output_variables
+
+    def _call(
+        self,
+        inputs: dict[str, str],
+        run_manager: Optional[CallbackManagerForChainRun] = None,
+    ) -> dict[str, str]:
+        return self.transform_cb(inputs)
+
+    async def _acall(
+        self,
+        inputs: dict[str, Any],
+        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
+    ) -> dict[str, Any]:
+        if self.atransform_cb is not None:
+            return await self.atransform_cb(inputs)
+        else:
+            self._log_once(
+                "TransformChain's atransform is not provided, falling"
+                " back to synchronous transform"
+            )
+            return self.transform_cb(inputs)
diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__init__.py b/venv/Lib/site-packages/langchain/chat_loaders/__init__.py
new file mode 100644
index 00000000..7547ddce
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chat_loaders/__init__.py
@@ -0,0 +1,19 @@
+"""**Chat Loaders** load chat messages from common communications platforms.
+
+Load chat messages from various
+communications platforms such as Facebook Messenger, Telegram, and
+WhatsApp. The loaded chat messages can be used for fine-tuning models.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseChatLoader --> ChatLoader  # Examples: WhatsAppChatLoader, IMessageChatLoader
+
+**Main helpers:**
+
+.. 
code-block:: + + ChatSession + +""" # noqa: E501 diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0bde9939 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..00663872 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/facebook_messenger.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/facebook_messenger.cpython-312.pyc new file mode 100644 index 00000000..102bbc80 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/facebook_messenger.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/gmail.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/gmail.cpython-312.pyc new file mode 100644 index 00000000..2b78d626 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/gmail.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/imessage.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/imessage.cpython-312.pyc new file mode 100644 index 00000000..e338c37d Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/imessage.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/langsmith.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/langsmith.cpython-312.pyc new file mode 100644 index 00000000..f54b48ab Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/langsmith.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/slack.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/slack.cpython-312.pyc new file mode 100644 index 00000000..36d855b4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/slack.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/telegram.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/telegram.cpython-312.pyc new file mode 100644 index 00000000..f8db7963 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/telegram.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..642ea502 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/whatsapp.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/whatsapp.cpython-312.pyc new file mode 100644 index 00000000..8d33daf7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_loaders/__pycache__/whatsapp.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/chat_loaders/base.py b/venv/Lib/site-packages/langchain/chat_loaders/base.py new file mode 100644 index 00000000..a5207e6e --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_loaders/base.py @@ -0,0 +1,3 @@ +from langchain_core.chat_loaders import BaseChatLoader + +__all__ = ["BaseChatLoader"] diff --git a/venv/Lib/site-packages/langchain/chat_loaders/facebook_messenger.py b/venv/Lib/site-packages/langchain/chat_loaders/facebook_messenger.py new file mode 100644 index 00000000..37e2f18a --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_loaders/facebook_messenger.py @@ -0,0 +1,32 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api.module_import import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_loaders.facebook_messenger import ( + FolderFacebookMessengerChatLoader, + SingleFileFacebookMessengerChatLoader, + ) + +module_lookup = { + "SingleFileFacebookMessengerChatLoader": ( + "langchain_community.chat_loaders.facebook_messenger" + ), + "FolderFacebookMessengerChatLoader": ( + "langchain_community.chat_loaders.facebook_messenger" + ), +} + +# Temporary code for backwards compatibility for deprecated imports. +# This will eventually be removed. +import_lookup = create_importer( + __package__, + deprecated_lookups=module_lookup, +) + + +def __getattr__(name: str) -> Any: + return import_lookup(name) + + +__all__ = ["SingleFileFacebookMessengerChatLoader", "FolderFacebookMessengerChatLoader"] diff --git a/venv/Lib/site-packages/langchain/chat_loaders/gmail.py b/venv/Lib/site-packages/langchain/chat_loaders/gmail.py new file mode 100644 index 00000000..337da76e --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_loaders/gmail.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_loaders.gmail import GMailLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GMailLoader": "langchain_community.chat_loaders.gmail"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GMailLoader", +] diff --git a/venv/Lib/site-packages/langchain/chat_loaders/imessage.py b/venv/Lib/site-packages/langchain/chat_loaders/imessage.py new file mode 100644 index 00000000..742313e9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_loaders/imessage.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_loaders.imessage import IMessageChatLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
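+#
+# Rough sketch of the effect (assumes langchain-community is installed; shown
+# here only to illustrate the lazy re-export, not part of the original module):
+#
+#     from langchain.chat_loaders.imessage import IMessageChatLoader
+#     # __getattr__ below emits a deprecation warning and returns
+#     # langchain_community.chat_loaders.imessage.IMessageChatLoader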
+DEPRECATED_LOOKUP = {"IMessageChatLoader": "langchain_community.chat_loaders.imessage"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "IMessageChatLoader", +] diff --git a/venv/Lib/site-packages/langchain/chat_loaders/langsmith.py b/venv/Lib/site-packages/langchain/chat_loaders/langsmith.py new file mode 100644 index 00000000..aa90fd42 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_loaders/langsmith.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_loaders.langsmith import ( + LangSmithDatasetChatLoader, + LangSmithRunChatLoader, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "LangSmithRunChatLoader": "langchain_community.chat_loaders.langsmith", + "LangSmithDatasetChatLoader": "langchain_community.chat_loaders.langsmith", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LangSmithRunChatLoader", + "LangSmithDatasetChatLoader", +] diff --git a/venv/Lib/site-packages/langchain/chat_loaders/slack.py b/venv/Lib/site-packages/langchain/chat_loaders/slack.py new file mode 100644 index 00000000..0523c462 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_loaders/slack.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_loaders.slack import SlackChatLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SlackChatLoader": "langchain_community.chat_loaders.slack"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SlackChatLoader", +] diff --git a/venv/Lib/site-packages/langchain/chat_loaders/telegram.py b/venv/Lib/site-packages/langchain/chat_loaders/telegram.py new file mode 100644 index 00000000..01c1cc74 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_loaders/telegram.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_loaders.telegram import TelegramChatLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"TelegramChatLoader": "langchain_community.chat_loaders.telegram"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TelegramChatLoader", +] diff --git a/venv/Lib/site-packages/langchain/chat_loaders/utils.py b/venv/Lib/site-packages/langchain/chat_loaders/utils.py new file mode 100644 index 00000000..04570354 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_loaders/utils.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_loaders.utils import ( + map_ai_messages, + map_ai_messages_in_session, + merge_chat_runs, + merge_chat_runs_in_session, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "merge_chat_runs_in_session": "langchain_community.chat_loaders.utils", + "merge_chat_runs": "langchain_community.chat_loaders.utils", + "map_ai_messages_in_session": "langchain_community.chat_loaders.utils", + "map_ai_messages": "langchain_community.chat_loaders.utils", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "merge_chat_runs_in_session", + "merge_chat_runs", + "map_ai_messages_in_session", + "map_ai_messages", +] diff --git a/venv/Lib/site-packages/langchain/chat_loaders/whatsapp.py b/venv/Lib/site-packages/langchain/chat_loaders/whatsapp.py new file mode 100644 index 00000000..eef57fd1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_loaders/whatsapp.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_loaders.whatsapp import WhatsAppChatLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"WhatsAppChatLoader": "langchain_community.chat_loaders.whatsapp"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WhatsAppChatLoader", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/__init__.py b/venv/Lib/site-packages/langchain/chat_models/__init__.py new file mode 100644 index 00000000..17530d19 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/__init__.py @@ -0,0 +1,77 @@ +"""**Chat Models** are a variation on language models. + +While Chat Models use language models under the hood, the interface they expose +is a bit different. Rather than expose a "text in, text out" API, they expose +an interface where "chat messages" are the inputs and outputs. + +**Class hierarchy:** + +.. code-block:: + + BaseLanguageModel --> BaseChatModel --> # Examples: ChatOpenAI, ChatGooglePalm + +**Main helpers:** + +.. 
code-block::
+
+    AIMessage, BaseMessage, HumanMessage
+"""  # noqa: E501
+
+import warnings
+from typing import Any
+
+from langchain_core._api import LangChainDeprecationWarning
+
+from langchain._api.interactive_env import is_interactive_env
+from langchain.chat_models.base import init_chat_model
+
+
+def __getattr__(name: str) -> Any:
+    from langchain_community import chat_models
+
+    # If not in interactive env, raise warning.
+    if not is_interactive_env():
+        warnings.warn(
+            "Importing chat models from langchain is deprecated. Importing from "
+            "langchain will no longer be supported as of langchain==0.2.0. "
+            "Please import from langchain-community instead:\n\n"
+            f"`from langchain_community.chat_models import {name}`.\n\n"
+            "To install langchain-community run `pip install -U langchain-community`.",
+            category=LangChainDeprecationWarning,
+        )
+
+    return getattr(chat_models, name)
+
+
+__all__ = [
+    "init_chat_model",
+    "ChatOpenAI",
+    "BedrockChat",
+    "AzureChatOpenAI",
+    "FakeListChatModel",
+    "PromptLayerChatOpenAI",
+    "ChatDatabricks",
+    "ChatEverlyAI",
+    "ChatAnthropic",
+    "ChatCohere",
+    "ChatGooglePalm",
+    "ChatMlflow",
+    "ChatMLflowAIGateway",
+    "ChatOllama",
+    "ChatVertexAI",
+    "JinaChat",
+    "HumanInputChatModel",
+    "MiniMaxChat",
+    "ChatAnyscale",
+    "ChatLiteLLM",
+    "ErnieBotChat",
+    "ChatJavelinAIGateway",
+    "ChatKonko",
+    "PaiEasChatEndpoint",
+    "QianfanChatEndpoint",
+    "ChatFireworks",
+    "ChatYandexGPT",
+    "ChatBaichuan",
+    "ChatHunyuan",
+    "GigaChat",
+    "VolcEngineMaasChat",
+]
diff --git a/venv/Lib/site-packages/langchain/chat_models/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_models/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..e619836c
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_models/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chat_models/__pycache__/anthropic.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_models/__pycache__/anthropic.cpython-312.pyc
new file mode 100644
index 00000000..1b32f72a
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_models/__pycache__/anthropic.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chat_models/__pycache__/anyscale.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_models/__pycache__/anyscale.cpython-312.pyc
new file mode 100644
index 00000000..ead29cfd
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_models/__pycache__/anyscale.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chat_models/__pycache__/azure_openai.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_models/__pycache__/azure_openai.cpython-312.pyc
new file mode 100644
index 00000000..a6a5ef2b
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_models/__pycache__/azure_openai.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chat_models/__pycache__/azureml_endpoint.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_models/__pycache__/azureml_endpoint.cpython-312.pyc
new file mode 100644
index 00000000..7d9574bd
Binary files /dev/null and b/venv/Lib/site-packages/langchain/chat_models/__pycache__/azureml_endpoint.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/chat_models/__pycache__/baichuan.cpython-312.pyc b/venv/Lib/site-packages/langchain/chat_models/__pycache__/baichuan.cpython-312.pyc
new file mode 100644
index 00000000..8c950374
Binary files /dev/null and 
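Every `langchain/chat_models/*.py` shim that follows (anthropic, anyscale, azure_openai, and the rest) repeats one recipe: import the real class only under `TYPE_CHECKING` so static type checkers still resolve it, map each public name to its new `langchain_community` home in `DEPRECATED_LOOKUP`, and delegate the module-level `__getattr__` to `create_importer`. A sketch of how one such shim behaves at runtime (module and class names are real; the exact warning text is approximate):

```python
from langchain.chat_models import anthropic

# Accessing a name listed in DEPRECATED_LOOKUP goes through _import_attribute,
# which emits a LangChainDeprecationWarning and returns the class from
# langchain_community.chat_models.anthropic.
cls = anthropic.ChatAnthropic
print(cls.__module__)  # langchain_community.chat_models.anthropic

# Names absent from the lookup should surface as a plain AttributeError.
try:
    anthropic.NotARealClass
except AttributeError as err:
    print(err)
```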
diff --git a/venv/Lib/site-packages/langchain/chat_models/anthropic.py b/venv/Lib/site-packages/langchain/chat_models/anthropic.py
new file mode 100644
index 00000000..aa925896
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chat_models/anthropic.py
@@ -0,0 +1,30 @@
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.chat_models.anthropic import (
+        ChatAnthropic,
+        convert_messages_to_prompt_anthropic,
+    )
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = {
+    "convert_messages_to_prompt_anthropic": "langchain_community.chat_models.anthropic",
+    "ChatAnthropic": "langchain_community.chat_models.anthropic",
+}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = [
+    "convert_messages_to_prompt_anthropic",
+    "ChatAnthropic",
+]
diff --git a/venv/Lib/site-packages/langchain/chat_models/anyscale.py b/venv/Lib/site-packages/langchain/chat_models/anyscale.py
new file mode 100644
index 00000000..279b3be8
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chat_models/anyscale.py
@@ -0,0 +1,23 @@
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.chat_models.anyscale import ChatAnyscale
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = {"ChatAnyscale": "langchain_community.chat_models.anyscale"}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = [
+    "ChatAnyscale",
+]
diff --git a/venv/Lib/site-packages/langchain/chat_models/azure_openai.py b/venv/Lib/site-packages/langchain/chat_models/azure_openai.py
new file mode 100644
index 00000000..93441c7e
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chat_models/azure_openai.py
@@ -0,0 +1,23 @@
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.chat_models.azure_openai import AzureChatOpenAI
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = {"AzureChatOpenAI": "langchain_community.chat_models.azure_openai"}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = [
+    "AzureChatOpenAI",
+]
diff --git a/venv/Lib/site-packages/langchain/chat_models/azureml_endpoint.py b/venv/Lib/site-packages/langchain/chat_models/azureml_endpoint.py
new file mode 100644
index 00000000..ebcc0fdc
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/chat_models/azureml_endpoint.py
@@ -0,0 +1,30 @@
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.chat_models.azureml_endpoint import (
+        AzureMLChatOnlineEndpoint,
+        LlamaContentFormatter,
+    )
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = { + "LlamaContentFormatter": "langchain_community.chat_models.azureml_endpoint", + "AzureMLChatOnlineEndpoint": "langchain_community.chat_models.azureml_endpoint", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LlamaContentFormatter", + "AzureMLChatOnlineEndpoint", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/baichuan.py b/venv/Lib/site-packages/langchain/chat_models/baichuan.py new file mode 100644 index 00000000..bee1ba3f --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/baichuan.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.baichuan import ChatBaichuan + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChatBaichuan": "langchain_community.chat_models.baichuan"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatBaichuan", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/baidu_qianfan_endpoint.py b/venv/Lib/site-packages/langchain/chat_models/baidu_qianfan_endpoint.py new file mode 100644 index 00000000..e9db4d9b --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/baidu_qianfan_endpoint.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.baidu_qianfan_endpoint import ( + QianfanChatEndpoint, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "QianfanChatEndpoint": "langchain_community.chat_models.baidu_qianfan_endpoint" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "QianfanChatEndpoint", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/base.py b/venv/Lib/site-packages/langchain/chat_models/base.py new file mode 100644 index 00000000..bb8b899a --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/base.py @@ -0,0 +1,923 @@ +from __future__ import annotations + +import warnings +from collections.abc import AsyncIterator, Iterator, Sequence +from importlib import util +from typing import ( + Any, + Callable, + Literal, + Optional, + Union, + cast, + overload, +) + +from langchain_core.language_models import ( + BaseChatModel, + LanguageModelInput, + SimpleChatModel, +) +from langchain_core.language_models.chat_models import ( + agenerate_from_stream, + generate_from_stream, +) +from langchain_core.messages import AnyMessage, BaseMessage +from langchain_core.runnables import Runnable, RunnableConfig, ensure_config +from langchain_core.runnables.schema import StreamEvent +from langchain_core.tools import BaseTool +from langchain_core.tracers import RunLog, RunLogPatch +from pydantic import BaseModel +from typing_extensions import TypeAlias + +__all__ = [ + "init_chat_model", + # For backwards compatibility + "BaseChatModel", + "SimpleChatModel", + "generate_from_stream", + "agenerate_from_stream", +] + + +@overload +def init_chat_model( + model: str, + *, + model_provider: Optional[str] = None, + configurable_fields: Literal[None] = None, + config_prefix: Optional[str] = None, + **kwargs: Any, +) -> BaseChatModel: ... + + +@overload +def init_chat_model( + model: Literal[None] = None, + *, + model_provider: Optional[str] = None, + configurable_fields: Literal[None] = None, + config_prefix: Optional[str] = None, + **kwargs: Any, +) -> _ConfigurableModel: ... + + +@overload +def init_chat_model( + model: Optional[str] = None, + *, + model_provider: Optional[str] = None, + configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = ..., + config_prefix: Optional[str] = None, + **kwargs: Any, +) -> _ConfigurableModel: ... + + +# FOR CONTRIBUTORS: If adding support for a new provider, please append the provider +# name to the supported list in the docstring below. Do *not* change the order of the +# existing providers. +def init_chat_model( + model: Optional[str] = None, + *, + model_provider: Optional[str] = None, + configurable_fields: Optional[ + Union[Literal["any"], list[str], tuple[str, ...]] + ] = None, + config_prefix: Optional[str] = None, + **kwargs: Any, +) -> Union[BaseChatModel, _ConfigurableModel]: + """Initialize a ChatModel from the model name and provider. + + **Note:** Must have the integration package corresponding to the model provider + installed. + + Args: + model: The name of the model, e.g. "o3-mini", "claude-3-5-sonnet-latest". You can + also specify model and model provider in a single argument using + '{model_provider}:{model}' format, e.g. "openai:o1". + model_provider: The model provider if not specified as part of model arg (see + above). 
Supported model_provider values and the corresponding integration + package are: + + - 'openai' -> langchain-openai + - 'anthropic' -> langchain-anthropic + - 'azure_openai' -> langchain-openai + - 'azure_ai' -> langchain-azure-ai + - 'google_vertexai' -> langchain-google-vertexai + - 'google_genai' -> langchain-google-genai + - 'bedrock' -> langchain-aws + - 'bedrock_converse' -> langchain-aws + - 'cohere' -> langchain-cohere + - 'fireworks' -> langchain-fireworks + - 'together' -> langchain-together + - 'mistralai' -> langchain-mistralai + - 'huggingface' -> langchain-huggingface + - 'groq' -> langchain-groq + - 'ollama' -> langchain-ollama + - 'google_anthropic_vertex' -> langchain-google-vertexai + - 'deepseek' -> langchain-deepseek + - 'ibm' -> langchain-ibm + - 'nvidia' -> langchain-nvidia-ai-endpoints + - 'xai' -> langchain-xai + - 'perplexity' -> langchain-perplexity + + Will attempt to infer model_provider from model if not specified. The + following providers will be inferred based on these model prefixes: + + - 'gpt-3...' | 'gpt-4...' | 'o1...' -> 'openai' + - 'claude...' -> 'anthropic' + - 'amazon....' -> 'bedrock' + - 'gemini...' -> 'google_vertexai' + - 'command...' -> 'cohere' + - 'accounts/fireworks...' -> 'fireworks' + - 'mistral...' -> 'mistralai' + - 'deepseek...' -> 'deepseek' + - 'grok...' -> 'xai' + - 'sonar...' -> 'perplexity' + configurable_fields: Which model parameters are + configurable: + + - None: No configurable fields. + - "any": All fields are configurable. *See Security Note below.* + - Union[List[str], Tuple[str, ...]]: Specified fields are configurable. + + Fields are assumed to have config_prefix stripped if there is a + config_prefix. If model is specified, then defaults to None. If model is + not specified, then defaults to ``("model", "model_provider")``. + + ***Security Note***: Setting ``configurable_fields="any"`` means fields like + api_key, base_url, etc. can be altered at runtime, potentially redirecting + model requests to a different service/user. Make sure that if you're + accepting untrusted configurations that you enumerate the + ``configurable_fields=(...)`` explicitly. + + config_prefix: If config_prefix is a non-empty string then model will be + configurable at runtime via the + ``config["configurable"]["{config_prefix}_{param}"]`` keys. If + config_prefix is an empty string then model will be configurable via + ``config["configurable"]["{param}"]``. + temperature: Model temperature. + max_tokens: Max output tokens. + timeout: The maximum time (in seconds) to wait for a response from the model + before canceling the request. + max_retries: The maximum number of attempts the system will make to resend a + request if it fails due to issues like network timeouts or rate limits. + base_url: The URL of the API endpoint where requests are sent. + rate_limiter: A ``BaseRateLimiter`` to space out requests to avoid exceeding + rate limits. + kwargs: Additional model-specific keyword args to pass to + ``<>.__init__(model=model_name, **kwargs)``. + + Returns: + A BaseChatModel corresponding to the model_name and model_provider specified if + configurability is inferred to be False. If configurable, a chat model emulator + that initializes the underlying model at runtime once a config is passed in. + + Raises: + ValueError: If model_provider cannot be inferred or isn't supported. + ImportError: If the model provider integration package is not installed. + + .. dropdown:: Init non-configurable model + :open: + + .. 
code-block:: python + + # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai + from langchain.chat_models import init_chat_model + + o3_mini = init_chat_model("openai:o3-mini", temperature=0) + claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0) + gemini_2_flash = init_chat_model("google_vertexai:gemini-2.0-flash", temperature=0) + + o3_mini.invoke("what's your name") + claude_sonnet.invoke("what's your name") + gemini_2_flash.invoke("what's your name") + + + .. dropdown:: Partially configurable model with no default + + .. code-block:: python + + # pip install langchain langchain-openai langchain-anthropic + from langchain.chat_models import init_chat_model + + # We don't need to specify configurable=True if a model isn't specified. + configurable_model = init_chat_model(temperature=0) + + configurable_model.invoke( + "what's your name", + config={"configurable": {"model": "gpt-4o"}} + ) + # GPT-4o response + + configurable_model.invoke( + "what's your name", + config={"configurable": {"model": "claude-3-5-sonnet-latest"}} + ) + # claude-3.5 sonnet response + + .. dropdown:: Fully configurable model with a default + + .. code-block:: python + + # pip install langchain langchain-openai langchain-anthropic + from langchain.chat_models import init_chat_model + + configurable_model_with_default = init_chat_model( + "openai:gpt-4o", + configurable_fields="any", # this allows us to configure other params like temperature, max_tokens, etc at runtime. + config_prefix="foo", + temperature=0 + ) + + configurable_model_with_default.invoke("what's your name") + # GPT-4o response with temperature 0 + + configurable_model_with_default.invoke( + "what's your name", + config={ + "configurable": { + "foo_model": "anthropic:claude-3-5-sonnet-20240620", + "foo_temperature": 0.6 + } + } + ) + # Claude-3.5 sonnet response with temperature 0.6 + + .. dropdown:: Bind tools to a configurable model + + You can call any ChatModel declarative methods on a configurable model in the + same way that you would with a normal model. + + .. code-block:: python + + # pip install langchain langchain-openai langchain-anthropic + from langchain.chat_models import init_chat_model + from pydantic import BaseModel, Field + + class GetWeather(BaseModel): + '''Get the current weather in a given location''' + + location: str = Field(..., description="The city and state, e.g. San Francisco, CA") + + class GetPopulation(BaseModel): + '''Get the current population in a given location''' + + location: str = Field(..., description="The city and state, e.g. San Francisco, CA") + + configurable_model = init_chat_model( + "gpt-4o", + configurable_fields=("model", "model_provider"), + temperature=0 + ) + + configurable_model_with_tools = configurable_model.bind_tools([GetWeather, GetPopulation]) + configurable_model_with_tools.invoke( + "Which city is hotter today and which is bigger: LA or NY?" + ) + # GPT-4o response with tool calls + + configurable_model_with_tools.invoke( + "Which city is hotter today and which is bigger: LA or NY?", + config={"configurable": {"model": "claude-3-5-sonnet-20240620"}} + ) + # Claude-3.5 sonnet response with tools + + .. versionadded:: 0.2.7 + + .. versionchanged:: 0.2.8 + + Support for ``configurable_fields`` and ``config_prefix`` added. + + .. versionchanged:: 0.2.12 + + Support for Ollama via langchain-ollama package added + (langchain_ollama.ChatOllama). 
Previously, + the now-deprecated langchain-community version of Ollama was imported + (langchain_community.chat_models.ChatOllama). + + Support for AWS Bedrock models via the Converse API added + (model_provider="bedrock_converse"). + + .. versionchanged:: 0.3.5 + + Out of beta. + + .. versionchanged:: 0.3.19 + + Support for Deepseek, IBM, Nvidia, and xAI models added. + + """ # noqa: E501 + if not model and not configurable_fields: + configurable_fields = ("model", "model_provider") + config_prefix = config_prefix or "" + if config_prefix and not configurable_fields: + warnings.warn( + f"{config_prefix=} has been set but no fields are configurable. Set " + f"`configurable_fields=(...)` to specify the model params that are " + f"configurable." + ) + + if not configurable_fields: + return _init_chat_model_helper( + cast(str, model), model_provider=model_provider, **kwargs + ) + else: + if model: + kwargs["model"] = model + if model_provider: + kwargs["model_provider"] = model_provider + return _ConfigurableModel( + default_config=kwargs, + config_prefix=config_prefix, + configurable_fields=configurable_fields, + ) + + +def _init_chat_model_helper( + model: str, *, model_provider: Optional[str] = None, **kwargs: Any +) -> BaseChatModel: + model, model_provider = _parse_model(model, model_provider) + if model_provider == "openai": + _check_pkg("langchain_openai") + from langchain_openai import ChatOpenAI + + return ChatOpenAI(model=model, **kwargs) + elif model_provider == "anthropic": + _check_pkg("langchain_anthropic") + from langchain_anthropic import ChatAnthropic + + return ChatAnthropic(model=model, **kwargs) # type: ignore[call-arg,unused-ignore] + elif model_provider == "azure_openai": + _check_pkg("langchain_openai") + from langchain_openai import AzureChatOpenAI + + return AzureChatOpenAI(model=model, **kwargs) + elif model_provider == "azure_ai": + _check_pkg("langchain_azure_ai") + from langchain_azure_ai.chat_models import AzureAIChatCompletionsModel + + return AzureAIChatCompletionsModel(model=model, **kwargs) + elif model_provider == "cohere": + _check_pkg("langchain_cohere") + from langchain_cohere import ChatCohere + + return ChatCohere(model=model, **kwargs) + elif model_provider == "google_vertexai": + _check_pkg("langchain_google_vertexai") + from langchain_google_vertexai import ChatVertexAI + + return ChatVertexAI(model=model, **kwargs) + elif model_provider == "google_genai": + _check_pkg("langchain_google_genai") + from langchain_google_genai import ChatGoogleGenerativeAI + + return ChatGoogleGenerativeAI(model=model, **kwargs) + elif model_provider == "fireworks": + _check_pkg("langchain_fireworks") + from langchain_fireworks import ChatFireworks + + return ChatFireworks(model=model, **kwargs) + elif model_provider == "ollama": + try: + _check_pkg("langchain_ollama") + from langchain_ollama import ChatOllama + except ImportError: + # For backwards compatibility + try: + _check_pkg("langchain_community") + from langchain_community.chat_models import ChatOllama + except ImportError: + # If both langchain-ollama and langchain-community aren't available, + # raise an error related to langchain-ollama + _check_pkg("langchain_ollama") + + return ChatOllama(model=model, **kwargs) + elif model_provider == "together": + _check_pkg("langchain_together") + from langchain_together import ChatTogether + + return ChatTogether(model=model, **kwargs) + elif model_provider == "mistralai": + _check_pkg("langchain_mistralai") + from langchain_mistralai import ChatMistralAI + + return 
ChatMistralAI(model=model, **kwargs) # type: ignore[call-arg,unused-ignore] + elif model_provider == "huggingface": + _check_pkg("langchain_huggingface") + from langchain_huggingface import ChatHuggingFace + + return ChatHuggingFace(model_id=model, **kwargs) + elif model_provider == "groq": + _check_pkg("langchain_groq") + from langchain_groq import ChatGroq + + return ChatGroq(model=model, **kwargs) + elif model_provider == "bedrock": + _check_pkg("langchain_aws") + from langchain_aws import ChatBedrock + + # TODO: update to use model= once ChatBedrock supports + return ChatBedrock(model_id=model, **kwargs) + elif model_provider == "bedrock_converse": + _check_pkg("langchain_aws") + from langchain_aws import ChatBedrockConverse + + return ChatBedrockConverse(model=model, **kwargs) + elif model_provider == "google_anthropic_vertex": + _check_pkg("langchain_google_vertexai") + from langchain_google_vertexai.model_garden import ChatAnthropicVertex + + return ChatAnthropicVertex(model=model, **kwargs) + elif model_provider == "deepseek": + _check_pkg("langchain_deepseek", pkg_kebab="langchain-deepseek") + from langchain_deepseek import ChatDeepSeek + + return ChatDeepSeek(model=model, **kwargs) + elif model_provider == "nvidia": + _check_pkg("langchain_nvidia_ai_endpoints") + from langchain_nvidia_ai_endpoints import ChatNVIDIA + + return ChatNVIDIA(model=model, **kwargs) + elif model_provider == "ibm": + _check_pkg("langchain_ibm") + from langchain_ibm import ChatWatsonx + + return ChatWatsonx(model_id=model, **kwargs) + elif model_provider == "xai": + _check_pkg("langchain_xai") + from langchain_xai import ChatXAI + + return ChatXAI(model=model, **kwargs) + elif model_provider == "perplexity": + _check_pkg("langchain_perplexity") + from langchain_perplexity import ChatPerplexity + + return ChatPerplexity(model=model, **kwargs) + else: + supported = ", ".join(_SUPPORTED_PROVIDERS) + raise ValueError( + f"Unsupported {model_provider=}.\n\nSupported model providers are: " + f"{supported}" + ) + + +_SUPPORTED_PROVIDERS = { + "openai", + "anthropic", + "azure_openai", + "azure_ai", + "cohere", + "google_vertexai", + "google_genai", + "fireworks", + "ollama", + "together", + "mistralai", + "huggingface", + "groq", + "bedrock", + "bedrock_converse", + "google_anthropic_vertex", + "deepseek", + "ibm", + "xai", + "perplexity", +} + + +def _attempt_infer_model_provider(model_name: str) -> Optional[str]: + if any(model_name.startswith(pre) for pre in ("gpt-3", "gpt-4", "o1", "o3")): + return "openai" + elif model_name.startswith("claude"): + return "anthropic" + elif model_name.startswith("command"): + return "cohere" + elif model_name.startswith("accounts/fireworks"): + return "fireworks" + elif model_name.startswith("gemini"): + return "google_vertexai" + elif model_name.startswith("amazon."): + return "bedrock" + elif model_name.startswith("mistral"): + return "mistralai" + elif model_name.startswith("deepseek"): + return "deepseek" + elif model_name.startswith("grok"): + return "xai" + elif model_name.startswith("sonar"): + return "perplexity" + else: + return None + + +def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]: + if ( + not model_provider + and ":" in model + and model.split(":")[0] in _SUPPORTED_PROVIDERS + ): + model_provider = model.split(":")[0] + model = ":".join(model.split(":")[1:]) + model_provider = model_provider or _attempt_infer_model_provider(model) + if not model_provider: + raise ValueError( + f"Unable to infer model provider for {model=}, 
please specify " + f"model_provider directly." + ) + model_provider = model_provider.replace("-", "_").lower() + return model, model_provider + + +def _check_pkg(pkg: str, *, pkg_kebab: Optional[str] = None) -> None: + if not util.find_spec(pkg): + pkg_kebab = pkg_kebab if pkg_kebab is not None else pkg.replace("_", "-") + raise ImportError( + f"Unable to import {pkg}. Please install with `pip install -U {pkg_kebab}`" + ) + + +def _remove_prefix(s: str, prefix: str) -> str: + if s.startswith(prefix): + s = s[len(prefix) :] + return s + + +_DECLARATIVE_METHODS = ("bind_tools", "with_structured_output") + + +class _ConfigurableModel(Runnable[LanguageModelInput, Any]): + def __init__( + self, + *, + default_config: Optional[dict] = None, + configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = "any", + config_prefix: str = "", + queued_declarative_operations: Sequence[tuple[str, tuple, dict]] = (), + ) -> None: + self._default_config: dict = default_config or {} + self._configurable_fields: Union[Literal["any"], list[str]] = ( + configurable_fields + if configurable_fields == "any" + else list(configurable_fields) + ) + self._config_prefix = ( + config_prefix + "_" + if config_prefix and not config_prefix.endswith("_") + else config_prefix + ) + self._queued_declarative_operations: list[tuple[str, tuple, dict]] = list( + queued_declarative_operations + ) + + def __getattr__(self, name: str) -> Any: + if name in _DECLARATIVE_METHODS: + # Declarative operations that cannot be applied until after an actual model + # object is instantiated. So instead of returning the actual operation, + # we record the operation and its arguments in a queue. This queue is + # then applied in order whenever we actually instantiate the model (in + # self._model()). + def queue(*args: Any, **kwargs: Any) -> _ConfigurableModel: + queued_declarative_operations = list( + self._queued_declarative_operations + ) + queued_declarative_operations.append((name, args, kwargs)) + return _ConfigurableModel( + default_config=dict(self._default_config), + configurable_fields=list(self._configurable_fields) + if isinstance(self._configurable_fields, list) + else self._configurable_fields, + config_prefix=self._config_prefix, + queued_declarative_operations=queued_declarative_operations, + ) + + return queue + elif self._default_config and (model := self._model()) and hasattr(model, name): + return getattr(model, name) + else: + msg = f"{name} is not a BaseChatModel attribute" + if self._default_config: + msg += " and is not implemented on the default model" + msg += "." 
+ raise AttributeError(msg) + + def _model(self, config: Optional[RunnableConfig] = None) -> Runnable: + params = {**self._default_config, **self._model_params(config)} + model = _init_chat_model_helper(**params) + for name, args, kwargs in self._queued_declarative_operations: + model = getattr(model, name)(*args, **kwargs) + return model + + def _model_params(self, config: Optional[RunnableConfig]) -> dict: + config = ensure_config(config) + model_params = { + _remove_prefix(k, self._config_prefix): v + for k, v in config.get("configurable", {}).items() + if k.startswith(self._config_prefix) + } + if self._configurable_fields != "any": + model_params = { + k: v for k, v in model_params.items() if k in self._configurable_fields + } + return model_params + + def with_config( + self, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> _ConfigurableModel: + """Bind config to a Runnable, returning a new Runnable.""" + config = RunnableConfig(**(config or {}), **cast(RunnableConfig, kwargs)) + model_params = self._model_params(config) + remaining_config = {k: v for k, v in config.items() if k != "configurable"} + remaining_config["configurable"] = { + k: v + for k, v in config.get("configurable", {}).items() + if _remove_prefix(k, self._config_prefix) not in model_params + } + queued_declarative_operations = list(self._queued_declarative_operations) + if remaining_config: + queued_declarative_operations.append( + ( + "with_config", + (), + {"config": remaining_config}, + ) + ) + return _ConfigurableModel( + default_config={**self._default_config, **model_params}, + configurable_fields=list(self._configurable_fields) + if isinstance(self._configurable_fields, list) + else self._configurable_fields, + config_prefix=self._config_prefix, + queued_declarative_operations=queued_declarative_operations, + ) + + @property + def InputType(self) -> TypeAlias: + """Get the input type for this runnable.""" + from langchain_core.prompt_values import ( + ChatPromptValueConcrete, + StringPromptValue, + ) + + # This is a version of LanguageModelInput which replaces the abstract + # base class BaseMessage with a union of its subclasses, which makes + # for a much better schema. + return Union[ + str, + Union[StringPromptValue, ChatPromptValueConcrete], + list[AnyMessage], + ] + + def invoke( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Any: + return self._model(config).invoke(input, config=config, **kwargs) + + async def ainvoke( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Any: + return await self._model(config).ainvoke(input, config=config, **kwargs) + + def stream( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Any]: + yield from self._model(config).stream(input, config=config, **kwargs) + + async def astream( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Any]: + async for x in self._model(config).astream(input, config=config, **kwargs): + yield x + + def batch( + self, + inputs: list[LanguageModelInput], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Any]: + config = config or None + # If <= 1 config use the underlying models batch implementation. 
+ if config is None or isinstance(config, dict) or len(config) <= 1: + if isinstance(config, list): + config = config[0] + return self._model(config).batch( + inputs, config=config, return_exceptions=return_exceptions, **kwargs + ) + # If multiple configs default to Runnable.batch which uses executor to invoke + # in parallel. + else: + return super().batch( + inputs, config=config, return_exceptions=return_exceptions, **kwargs + ) + + async def abatch( + self, + inputs: list[LanguageModelInput], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Any]: + config = config or None + # If <= 1 config use the underlying models batch implementation. + if config is None or isinstance(config, dict) or len(config) <= 1: + if isinstance(config, list): + config = config[0] + return await self._model(config).abatch( + inputs, config=config, return_exceptions=return_exceptions, **kwargs + ) + # If multiple configs default to Runnable.batch which uses executor to invoke + # in parallel. + else: + return await super().abatch( + inputs, config=config, return_exceptions=return_exceptions, **kwargs + ) + + def batch_as_completed( + self, + inputs: Sequence[LanguageModelInput], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Any, + ) -> Iterator[tuple[int, Union[Any, Exception]]]: + config = config or None + # If <= 1 config use the underlying models batch implementation. + if config is None or isinstance(config, dict) or len(config) <= 1: + if isinstance(config, list): + config = config[0] + yield from self._model(cast(RunnableConfig, config)).batch_as_completed( # type: ignore[call-overload] + inputs, config=config, return_exceptions=return_exceptions, **kwargs + ) + # If multiple configs default to Runnable.batch which uses executor to invoke + # in parallel. + else: + yield from super().batch_as_completed( # type: ignore[call-overload] + inputs, config=config, return_exceptions=return_exceptions, **kwargs + ) + + async def abatch_as_completed( + self, + inputs: Sequence[LanguageModelInput], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Any, + ) -> AsyncIterator[tuple[int, Any]]: + config = config or None + # If <= 1 config use the underlying models batch implementation. + if config is None or isinstance(config, dict) or len(config) <= 1: + if isinstance(config, list): + config = config[0] + async for x in self._model( + cast(RunnableConfig, config) + ).abatch_as_completed( # type: ignore[call-overload] + inputs, config=config, return_exceptions=return_exceptions, **kwargs + ): + yield x + # If multiple configs default to Runnable.batch which uses executor to invoke + # in parallel. 
+ else: + async for x in super().abatch_as_completed( # type: ignore[call-overload] + inputs, config=config, return_exceptions=return_exceptions, **kwargs + ): + yield x + + def transform( + self, + input: Iterator[LanguageModelInput], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Any]: + yield from self._model(config).transform(input, config=config, **kwargs) + + async def atransform( + self, + input: AsyncIterator[LanguageModelInput], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Any]: + async for x in self._model(config).atransform(input, config=config, **kwargs): + yield x + + @overload + def astream_log( + self, + input: Any, + config: Optional[RunnableConfig] = None, + *, + diff: Literal[True] = True, + with_streamed_output_list: bool = True, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> AsyncIterator[RunLogPatch]: ... + + @overload + def astream_log( + self, + input: Any, + config: Optional[RunnableConfig] = None, + *, + diff: Literal[False], + with_streamed_output_list: bool = True, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> AsyncIterator[RunLog]: ... + + async def astream_log( + self, + input: Any, + config: Optional[RunnableConfig] = None, + *, + diff: bool = True, + with_streamed_output_list: bool = True, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> Union[AsyncIterator[RunLogPatch], AsyncIterator[RunLog]]: + async for x in self._model(config).astream_log( # type: ignore[call-overload, misc] + input, + config=config, + diff=diff, + with_streamed_output_list=with_streamed_output_list, + include_names=include_names, + include_types=include_types, + include_tags=include_tags, + exclude_tags=exclude_tags, + exclude_types=exclude_types, + exclude_names=exclude_names, + **kwargs, + ): + yield x + + async def astream_events( + self, + input: Any, + config: Optional[RunnableConfig] = None, + *, + version: Literal["v1", "v2"] = "v2", + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> AsyncIterator[StreamEvent]: + async for x in self._model(config).astream_events( + input, + config=config, + version=version, + include_names=include_names, + include_types=include_types, + include_tags=include_tags, + exclude_tags=exclude_tags, + exclude_types=exclude_types, + exclude_names=exclude_names, + **kwargs, + ): + yield x + + # Explicitly added to satisfy downstream linters. 
+ def bind_tools( + self, + tools: Sequence[Union[dict[str, Any], type[BaseModel], Callable, BaseTool]], + **kwargs: Any, + ) -> Runnable[LanguageModelInput, BaseMessage]: + return self.__getattr__("bind_tools")(tools, **kwargs) + + # Explicitly added to satisfy downstream linters. + def with_structured_output( + self, schema: Union[dict, type[BaseModel]], **kwargs: Any + ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]: + return self.__getattr__("with_structured_output")(schema, **kwargs) diff --git a/venv/Lib/site-packages/langchain/chat_models/bedrock.py b/venv/Lib/site-packages/langchain/chat_models/bedrock.py new file mode 100644 index 00000000..d2d28172 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/bedrock.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.bedrock import BedrockChat, ChatPromptAdapter + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ChatPromptAdapter": "langchain_community.chat_models.bedrock", + "BedrockChat": "langchain_community.chat_models.bedrock", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatPromptAdapter", + "BedrockChat", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/cohere.py b/venv/Lib/site-packages/langchain/chat_models/cohere.py new file mode 100644 index 00000000..b43df81d --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/cohere.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.cohere import ChatCohere + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChatCohere": "langchain_community.chat_models.cohere"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatCohere", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/databricks.py b/venv/Lib/site-packages/langchain/chat_models/databricks.py new file mode 100644 index 00000000..58dc3f3b --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/databricks.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.databricks import ChatDatabricks + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ChatDatabricks": "langchain_community.chat_models.databricks"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatDatabricks", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/ernie.py b/venv/Lib/site-packages/langchain/chat_models/ernie.py new file mode 100644 index 00000000..284af4a6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/ernie.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.ernie import ErnieBotChat + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ErnieBotChat": "langchain_community.chat_models.ernie"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ErnieBotChat", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/everlyai.py b/venv/Lib/site-packages/langchain/chat_models/everlyai.py new file mode 100644 index 00000000..87c11deb --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/everlyai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.everlyai import ChatEverlyAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChatEverlyAI": "langchain_community.chat_models.everlyai"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatEverlyAI", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/fake.py b/venv/Lib/site-packages/langchain/chat_models/fake.py new file mode 100644 index 00000000..261d5fa5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/fake.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.fake import ( + FakeListChatModel, + FakeMessagesListChatModel, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "FakeMessagesListChatModel": "langchain_community.chat_models.fake", + "FakeListChatModel": "langchain_community.chat_models.fake", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FakeMessagesListChatModel", + "FakeListChatModel", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/fireworks.py b/venv/Lib/site-packages/langchain/chat_models/fireworks.py new file mode 100644 index 00000000..a3908b2a --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/fireworks.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.fireworks import ChatFireworks + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChatFireworks": "langchain_community.chat_models.fireworks"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatFireworks", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/gigachat.py b/venv/Lib/site-packages/langchain/chat_models/gigachat.py new file mode 100644 index 00000000..a59eda70 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/gigachat.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.gigachat import GigaChat + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GigaChat": "langchain_community.chat_models.gigachat"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GigaChat", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/google_palm.py b/venv/Lib/site-packages/langchain/chat_models/google_palm.py new file mode 100644 index 00000000..e9bb012c --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/google_palm.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.google_palm import ( + ChatGooglePalm, + ChatGooglePalmError, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "ChatGooglePalm": "langchain_community.chat_models.google_palm", + "ChatGooglePalmError": "langchain_community.chat_models.google_palm", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatGooglePalm", + "ChatGooglePalmError", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/human.py b/venv/Lib/site-packages/langchain/chat_models/human.py new file mode 100644 index 00000000..e3752193 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/human.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.human import HumanInputChatModel + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"HumanInputChatModel": "langchain_community.chat_models.human"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HumanInputChatModel", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/hunyuan.py b/venv/Lib/site-packages/langchain/chat_models/hunyuan.py new file mode 100644 index 00000000..53fe8454 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/hunyuan.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.hunyuan import ChatHunyuan + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChatHunyuan": "langchain_community.chat_models.hunyuan"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatHunyuan", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/javelin_ai_gateway.py b/venv/Lib/site-packages/langchain/chat_models/javelin_ai_gateway.py new file mode 100644 index 00000000..0f89edbd --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/javelin_ai_gateway.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.javelin_ai_gateway import ( + ChatJavelinAIGateway, + ChatParams, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "ChatJavelinAIGateway": "langchain_community.chat_models.javelin_ai_gateway", + "ChatParams": "langchain_community.chat_models.javelin_ai_gateway", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatJavelinAIGateway", + "ChatParams", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/jinachat.py b/venv/Lib/site-packages/langchain/chat_models/jinachat.py new file mode 100644 index 00000000..dba660e1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/jinachat.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.jinachat import JinaChat + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"JinaChat": "langchain_community.chat_models.jinachat"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JinaChat", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/konko.py b/venv/Lib/site-packages/langchain/chat_models/konko.py new file mode 100644 index 00000000..fd5ba7c4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/konko.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.konko import ChatKonko + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChatKonko": "langchain_community.chat_models.konko"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatKonko", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/litellm.py b/venv/Lib/site-packages/langchain/chat_models/litellm.py new file mode 100644 index 00000000..69ebcd91 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/litellm.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.litellm import ( + ChatLiteLLM, + ChatLiteLLMException, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "ChatLiteLLM": "langchain_community.chat_models.litellm", + "ChatLiteLLMException": "langchain_community.chat_models.litellm", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatLiteLLM", + "ChatLiteLLMException", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/meta.py b/venv/Lib/site-packages/langchain/chat_models/meta.py new file mode 100644 index 00000000..22fa97e0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/meta.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.meta import convert_messages_to_prompt_llama + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "convert_messages_to_prompt_llama": "langchain_community.chat_models.meta" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "convert_messages_to_prompt_llama", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/minimax.py b/venv/Lib/site-packages/langchain/chat_models/minimax.py new file mode 100644 index 00000000..3c4f791b --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/minimax.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.minimax import MiniMaxChat + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MiniMaxChat": "langchain_community.chat_models.minimax"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MiniMaxChat", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/mlflow.py b/venv/Lib/site-packages/langchain/chat_models/mlflow.py new file mode 100644 index 00000000..3877e8b9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/mlflow.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.mlflow import ChatMlflow + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ChatMlflow": "langchain_community.chat_models.mlflow"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatMlflow", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/mlflow_ai_gateway.py b/venv/Lib/site-packages/langchain/chat_models/mlflow_ai_gateway.py new file mode 100644 index 00000000..2e54df8c --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/mlflow_ai_gateway.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.mlflow_ai_gateway import ( + ChatMLflowAIGateway, + ChatParams, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ChatMLflowAIGateway": "langchain_community.chat_models.mlflow_ai_gateway", + "ChatParams": "langchain_community.chat_models.mlflow_ai_gateway", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatMLflowAIGateway", + "ChatParams", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/ollama.py b/venv/Lib/site-packages/langchain/chat_models/ollama.py new file mode 100644 index 00000000..d5a9d3f6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/ollama.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.ollama import ChatOllama + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChatOllama": "langchain_community.chat_models.ollama"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatOllama", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/openai.py b/venv/Lib/site-packages/langchain/chat_models/openai.py new file mode 100644 index 00000000..76482f22 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/openai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.openai import ChatOpenAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ChatOpenAI": "langchain_community.chat_models.openai"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatOpenAI", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/pai_eas_endpoint.py b/venv/Lib/site-packages/langchain/chat_models/pai_eas_endpoint.py new file mode 100644 index 00000000..2c3134f6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/pai_eas_endpoint.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.pai_eas_endpoint import PaiEasChatEndpoint + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "PaiEasChatEndpoint": "langchain_community.chat_models.pai_eas_endpoint" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PaiEasChatEndpoint", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/promptlayer_openai.py b/venv/Lib/site-packages/langchain/chat_models/promptlayer_openai.py new file mode 100644 index 00000000..b7a3bb0d --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/promptlayer_openai.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.promptlayer_openai import PromptLayerChatOpenAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "PromptLayerChatOpenAI": "langchain_community.chat_models.promptlayer_openai" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PromptLayerChatOpenAI", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/tongyi.py b/venv/Lib/site-packages/langchain/chat_models/tongyi.py new file mode 100644 index 00000000..bb24bef3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/tongyi.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.tongyi import ChatTongyi + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ChatTongyi": "langchain_community.chat_models.tongyi"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatTongyi", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/vertexai.py b/venv/Lib/site-packages/langchain/chat_models/vertexai.py new file mode 100644 index 00000000..b662a337 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/vertexai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.vertexai import ChatVertexAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChatVertexAI": "langchain_community.chat_models.vertexai"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatVertexAI", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/volcengine_maas.py b/venv/Lib/site-packages/langchain/chat_models/volcengine_maas.py new file mode 100644 index 00000000..9c457114 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/volcengine_maas.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.volcengine_maas import ( + VolcEngineMaasChat, + convert_dict_to_message, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "convert_dict_to_message": "langchain_community.chat_models.volcengine_maas", + "VolcEngineMaasChat": "langchain_community.chat_models.volcengine_maas", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "convert_dict_to_message", + "VolcEngineMaasChat", +] diff --git a/venv/Lib/site-packages/langchain/chat_models/yandex.py b/venv/Lib/site-packages/langchain/chat_models/yandex.py new file mode 100644 index 00000000..af2a4402 --- /dev/null +++ b/venv/Lib/site-packages/langchain/chat_models/yandex.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_models.yandex import ChatYandexGPT + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ChatYandexGPT": "langchain_community.chat_models.yandex"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatYandexGPT", +] diff --git a/venv/Lib/site-packages/langchain/docstore/__init__.py b/venv/Lib/site-packages/langchain/docstore/__init__.py new file mode 100644 index 00000000..4c7ed1c6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/docstore/__init__.py @@ -0,0 +1,48 @@ +"""**Docstores** are classes to store and load Documents. + +The **Docstore** is a simplified version of the Document Loader. + +**Class hierarchy:** + +.. code-block:: + + Docstore --> # Examples: InMemoryDocstore, Wikipedia + +**Main helpers:** + +.. code-block:: + + Document, AddableMixin +""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.docstore.arbitrary_fn import DocstoreFn + from langchain_community.docstore.in_memory import InMemoryDocstore + from langchain_community.docstore.wikipedia import Wikipedia + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DocstoreFn": "langchain_community.docstore.arbitrary_fn", + "InMemoryDocstore": "langchain_community.docstore.in_memory", + "Wikipedia": "langchain_community.docstore.wikipedia", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DocstoreFn", + "InMemoryDocstore", + "Wikipedia", +] diff --git a/venv/Lib/site-packages/langchain/docstore/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/docstore/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d8a04e94 Binary files /dev/null and b/venv/Lib/site-packages/langchain/docstore/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/docstore/__pycache__/arbitrary_fn.cpython-312.pyc b/venv/Lib/site-packages/langchain/docstore/__pycache__/arbitrary_fn.cpython-312.pyc new file mode 100644 index 00000000..d836f407 Binary files /dev/null and b/venv/Lib/site-packages/langchain/docstore/__pycache__/arbitrary_fn.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/docstore/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/docstore/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..c9b5571c Binary files /dev/null and b/venv/Lib/site-packages/langchain/docstore/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/docstore/__pycache__/document.cpython-312.pyc b/venv/Lib/site-packages/langchain/docstore/__pycache__/document.cpython-312.pyc new file mode 100644 index 00000000..031becd4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/docstore/__pycache__/document.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/docstore/__pycache__/in_memory.cpython-312.pyc b/venv/Lib/site-packages/langchain/docstore/__pycache__/in_memory.cpython-312.pyc new file mode 100644 index 00000000..df942704 Binary files /dev/null and b/venv/Lib/site-packages/langchain/docstore/__pycache__/in_memory.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/docstore/__pycache__/wikipedia.cpython-312.pyc b/venv/Lib/site-packages/langchain/docstore/__pycache__/wikipedia.cpython-312.pyc new file mode 100644 index 00000000..319e4fd1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/docstore/__pycache__/wikipedia.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/docstore/arbitrary_fn.py b/venv/Lib/site-packages/langchain/docstore/arbitrary_fn.py new file mode 100644 index 00000000..a3a324ee --- /dev/null +++ b/venv/Lib/site-packages/langchain/docstore/arbitrary_fn.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.docstore.arbitrary_fn import DocstoreFn + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DocstoreFn": "langchain_community.docstore.arbitrary_fn"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DocstoreFn", +] diff --git a/venv/Lib/site-packages/langchain/docstore/base.py b/venv/Lib/site-packages/langchain/docstore/base.py new file mode 100644 index 00000000..34c0f197 --- /dev/null +++ b/venv/Lib/site-packages/langchain/docstore/base.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.docstore.base import AddableMixin, Docstore + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "Docstore": "langchain_community.docstore.base", + "AddableMixin": "langchain_community.docstore.base", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Docstore", + "AddableMixin", +] diff --git a/venv/Lib/site-packages/langchain/docstore/document.py b/venv/Lib/site-packages/langchain/docstore/document.py new file mode 100644 index 00000000..88aebd27 --- /dev/null +++ b/venv/Lib/site-packages/langchain/docstore/document.py @@ -0,0 +1,3 @@ +from langchain_core.documents import Document + +__all__ = ["Document"] diff --git a/venv/Lib/site-packages/langchain/docstore/in_memory.py b/venv/Lib/site-packages/langchain/docstore/in_memory.py new file mode 100644 index 00000000..1998f250 --- /dev/null +++ b/venv/Lib/site-packages/langchain/docstore/in_memory.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.docstore.in_memory import InMemoryDocstore + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
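From the caller's side the shims are transparent apart from the warning: the old import path still resolves, but to the `langchain_community` class. A quick check of that behavior, assuming both `langchain` and `langchain_community` are installed (the concrete warning class is an implementation detail, so it is only printed, not asserted):

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Deprecated path: resolved at runtime via the module-level __getattr__.
    from langchain.docstore.in_memory import InMemoryDocstore

# The class actually lives in langchain_community now.
print(InMemoryDocstore.__module__)  # expected: langchain_community.docstore.in_memory
for w in caught:
    print(w.category.__name__, "-", w.message)
```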
+DEPRECATED_LOOKUP = {"InMemoryDocstore": "langchain_community.docstore.in_memory"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "InMemoryDocstore", +] diff --git a/venv/Lib/site-packages/langchain/docstore/wikipedia.py b/venv/Lib/site-packages/langchain/docstore/wikipedia.py new file mode 100644 index 00000000..2497b332 --- /dev/null +++ b/venv/Lib/site-packages/langchain/docstore/wikipedia.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.docstore.wikipedia import Wikipedia + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Wikipedia": "langchain_community.docstore.wikipedia"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Wikipedia", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/__init__.py b/venv/Lib/site-packages/langchain/document_loaders/__init__.py new file mode 100644 index 00000000..d5a6b972 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/__init__.py @@ -0,0 +1,553 @@ +"""**Document Loaders** are classes to load Documents. + +**Document Loaders** are usually used to load a lot of Documents in a single run. + +**Class hierarchy:** + +.. code-block:: + + BaseLoader --> Loader # Examples: TextLoader, UnstructuredFileLoader + +**Main helpers:** + +.. 
code-block:: + + Document, TextSplitter +""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ( + AcreomLoader, + AirbyteCDKLoader, + AirbyteGongLoader, + AirbyteHubspotLoader, + AirbyteJSONLoader, + AirbyteSalesforceLoader, + AirbyteShopifyLoader, + AirbyteStripeLoader, + AirbyteTypeformLoader, + AirbyteZendeskSupportLoader, + AirtableLoader, + AmazonTextractPDFLoader, + ApifyDatasetLoader, + ArcGISLoader, + ArxivLoader, + AssemblyAIAudioTranscriptLoader, + AsyncChromiumLoader, + AsyncHtmlLoader, + AZLyricsLoader, + AzureAIDataLoader, + AzureBlobStorageContainerLoader, + AzureBlobStorageFileLoader, + BibtexLoader, + BigQueryLoader, + BiliBiliLoader, + BlackboardLoader, + BlockchainDocumentLoader, + BraveSearchLoader, + BrowserlessLoader, + BSHTMLLoader, + ChatGPTLoader, + CollegeConfidentialLoader, + ConcurrentLoader, + ConfluenceLoader, + CoNLLULoader, + CouchbaseLoader, + CSVLoader, + CubeSemanticLoader, + DatadogLogsLoader, + DataFrameLoader, + DiffbotLoader, + DirectoryLoader, + DiscordChatLoader, + DocugamiLoader, + DocusaurusLoader, + Docx2txtLoader, + DropboxLoader, + DuckDBLoader, + EtherscanLoader, + EverNoteLoader, + FacebookChatLoader, + FaunaLoader, + FigmaFileLoader, + FileSystemBlobLoader, + GCSDirectoryLoader, + GCSFileLoader, + GeoDataFrameLoader, + GitbookLoader, + GithubFileLoader, + GitHubIssuesLoader, + GitLoader, + GoogleApiClient, + GoogleApiYoutubeLoader, + GoogleDriveLoader, + GoogleSpeechToTextLoader, + GutenbergLoader, + HNLoader, + HuggingFaceDatasetLoader, + IFixitLoader, + ImageCaptionLoader, + IMSDbLoader, + IuguLoader, + JoplinLoader, + JSONLoader, + LakeFSLoader, + LarkSuiteDocLoader, + MastodonTootsLoader, + MathpixPDFLoader, + MaxComputeLoader, + MergedDataLoader, + MHTMLLoader, + ModernTreasuryLoader, + MongodbLoader, + MWDumpLoader, + NewsURLLoader, + NotebookLoader, + NotionDBLoader, + NotionDirectoryLoader, + OBSDirectoryLoader, + OBSFileLoader, + ObsidianLoader, + OneDriveFileLoader, + OneDriveLoader, + OnlinePDFLoader, + OpenCityDataLoader, + OutlookMessageLoader, + PagedPDFSplitter, + PDFMinerLoader, + PDFMinerPDFasHTMLLoader, + PDFPlumberLoader, + PlaywrightURLLoader, + PolarsDataFrameLoader, + PsychicLoader, + PubMedLoader, + PyMuPDFLoader, + PyPDFDirectoryLoader, + PyPDFium2Loader, + PyPDFLoader, + PySparkDataFrameLoader, + PythonLoader, + ReadTheDocsLoader, + RecursiveUrlLoader, + RedditPostsLoader, + RoamLoader, + RocksetLoader, + RSSFeedLoader, + S3DirectoryLoader, + S3FileLoader, + SeleniumURLLoader, + SharePointLoader, + SitemapLoader, + SlackDirectoryLoader, + SnowflakeLoader, + SpreedlyLoader, + SRTLoader, + StripeLoader, + TelegramChatApiLoader, + TelegramChatFileLoader, + TelegramChatLoader, + TencentCOSDirectoryLoader, + TencentCOSFileLoader, + TensorflowDatasetLoader, + TextLoader, + ToMarkdownLoader, + TomlLoader, + TrelloLoader, + TwitterTweetLoader, + UnstructuredAPIFileIOLoader, + UnstructuredAPIFileLoader, + UnstructuredCSVLoader, + UnstructuredEmailLoader, + UnstructuredEPubLoader, + UnstructuredExcelLoader, + UnstructuredFileIOLoader, + UnstructuredFileLoader, + UnstructuredHTMLLoader, + UnstructuredImageLoader, + UnstructuredMarkdownLoader, + UnstructuredODTLoader, + UnstructuredOrgModeLoader, + UnstructuredPDFLoader, + UnstructuredPowerPointLoader, + UnstructuredRSTLoader, + UnstructuredRTFLoader, + UnstructuredTSVLoader, + UnstructuredURLLoader, + UnstructuredWordDocumentLoader, + UnstructuredXMLLoader, + 
WeatherDataLoader, + WebBaseLoader, + WhatsAppChatLoader, + WikipediaLoader, + XorbitsLoader, + YoutubeAudioLoader, + YoutubeLoader, + YuqueLoader, + ) + +from langchain_core.document_loaders import Blob, BlobLoader + +# For backwards compatibility +_old_to_new_name = { + "PagedPDFSplitter": "PyPDFLoader", + "TelegramChatLoader": "TelegramChatFileLoader", +} + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AcreomLoader": "langchain_community.document_loaders", + "AsyncHtmlLoader": "langchain_community.document_loaders", + "AsyncChromiumLoader": "langchain_community.document_loaders", + "AZLyricsLoader": "langchain_community.document_loaders", + "AirbyteCDKLoader": "langchain_community.document_loaders", + "AirbyteGongLoader": "langchain_community.document_loaders", + "AirbyteJSONLoader": "langchain_community.document_loaders", + "AirbyteHubspotLoader": "langchain_community.document_loaders", + "AirbyteSalesforceLoader": "langchain_community.document_loaders", + "AirbyteShopifyLoader": "langchain_community.document_loaders", + "AirbyteStripeLoader": "langchain_community.document_loaders", + "AirbyteTypeformLoader": "langchain_community.document_loaders", + "AirbyteZendeskSupportLoader": "langchain_community.document_loaders", + "AirtableLoader": "langchain_community.document_loaders", + "AmazonTextractPDFLoader": "langchain_community.document_loaders", + "ApifyDatasetLoader": "langchain_community.document_loaders", + "ArcGISLoader": "langchain_community.document_loaders", + "ArxivLoader": "langchain_community.document_loaders", + "AssemblyAIAudioTranscriptLoader": "langchain_community.document_loaders", + "AzureAIDataLoader": "langchain_community.document_loaders", + "AzureBlobStorageContainerLoader": "langchain_community.document_loaders", + "AzureBlobStorageFileLoader": "langchain_community.document_loaders", + "BSHTMLLoader": "langchain_community.document_loaders", + "BibtexLoader": "langchain_community.document_loaders", + "BigQueryLoader": "langchain_community.document_loaders", + "BiliBiliLoader": "langchain_community.document_loaders", + "BlackboardLoader": "langchain_community.document_loaders", + "Blob": "langchain_community.document_loaders", + "BlobLoader": "langchain_community.document_loaders", + "BlockchainDocumentLoader": "langchain_community.document_loaders", + "BraveSearchLoader": "langchain_community.document_loaders", + "BrowserlessLoader": "langchain_community.document_loaders", + "CSVLoader": "langchain_community.document_loaders", + "ChatGPTLoader": "langchain_community.document_loaders", + "CoNLLULoader": "langchain_community.document_loaders", + "CollegeConfidentialLoader": "langchain_community.document_loaders", + "ConcurrentLoader": "langchain_community.document_loaders", + "ConfluenceLoader": "langchain_community.document_loaders", + "CouchbaseLoader": "langchain_community.document_loaders", + "CubeSemanticLoader": "langchain_community.document_loaders", + "DataFrameLoader": "langchain_community.document_loaders", + "DatadogLogsLoader": "langchain_community.document_loaders", + "DiffbotLoader": "langchain_community.document_loaders", + "DirectoryLoader": "langchain_community.document_loaders", + "DiscordChatLoader": "langchain_community.document_loaders", + "DocugamiLoader": "langchain_community.document_loaders", + "DocusaurusLoader": "langchain_community.document_loaders", + "Docx2txtLoader": 
"langchain_community.document_loaders", + "DropboxLoader": "langchain_community.document_loaders", + "DuckDBLoader": "langchain_community.document_loaders", + "EtherscanLoader": "langchain_community.document_loaders", + "EverNoteLoader": "langchain_community.document_loaders", + "FacebookChatLoader": "langchain_community.document_loaders", + "FaunaLoader": "langchain_community.document_loaders", + "FigmaFileLoader": "langchain_community.document_loaders", + "FileSystemBlobLoader": "langchain_community.document_loaders", + "GCSDirectoryLoader": "langchain_community.document_loaders", + "GCSFileLoader": "langchain_community.document_loaders", + "GeoDataFrameLoader": "langchain_community.document_loaders", + "GitHubIssuesLoader": "langchain_community.document_loaders", + "GitLoader": "langchain_community.document_loaders", + "GithubFileLoader": "langchain_community.document_loaders", + "GitbookLoader": "langchain_community.document_loaders", + "GoogleApiClient": "langchain_community.document_loaders", + "GoogleApiYoutubeLoader": "langchain_community.document_loaders", + "GoogleSpeechToTextLoader": "langchain_community.document_loaders", + "GoogleDriveLoader": "langchain_community.document_loaders", + "GutenbergLoader": "langchain_community.document_loaders", + "HNLoader": "langchain_community.document_loaders", + "HuggingFaceDatasetLoader": "langchain_community.document_loaders", + "IFixitLoader": "langchain_community.document_loaders", + "IMSDbLoader": "langchain_community.document_loaders", + "ImageCaptionLoader": "langchain_community.document_loaders", + "IuguLoader": "langchain_community.document_loaders", + "JSONLoader": "langchain_community.document_loaders", + "JoplinLoader": "langchain_community.document_loaders", + "LarkSuiteDocLoader": "langchain_community.document_loaders", + "LakeFSLoader": "langchain_community.document_loaders", + "MHTMLLoader": "langchain_community.document_loaders", + "MWDumpLoader": "langchain_community.document_loaders", + "MastodonTootsLoader": "langchain_community.document_loaders", + "MathpixPDFLoader": "langchain_community.document_loaders", + "MaxComputeLoader": "langchain_community.document_loaders", + "MergedDataLoader": "langchain_community.document_loaders", + "ModernTreasuryLoader": "langchain_community.document_loaders", + "MongodbLoader": "langchain_community.document_loaders", + "NewsURLLoader": "langchain_community.document_loaders", + "NotebookLoader": "langchain_community.document_loaders", + "NotionDBLoader": "langchain_community.document_loaders", + "NotionDirectoryLoader": "langchain_community.document_loaders", + "OBSDirectoryLoader": "langchain_community.document_loaders", + "OBSFileLoader": "langchain_community.document_loaders", + "ObsidianLoader": "langchain_community.document_loaders", + "OneDriveFileLoader": "langchain_community.document_loaders", + "OneDriveLoader": "langchain_community.document_loaders", + "OnlinePDFLoader": "langchain_community.document_loaders", + "OpenCityDataLoader": "langchain_community.document_loaders", + "OutlookMessageLoader": "langchain_community.document_loaders", + "PagedPDFSplitter": "langchain_community.document_loaders", + "PDFMinerLoader": "langchain_community.document_loaders", + "PDFMinerPDFasHTMLLoader": "langchain_community.document_loaders", + "PDFPlumberLoader": "langchain_community.document_loaders", + "PlaywrightURLLoader": "langchain_community.document_loaders", + "PolarsDataFrameLoader": "langchain_community.document_loaders", + "PsychicLoader": "langchain_community.document_loaders", + 
"PubMedLoader": "langchain_community.document_loaders", + "PyMuPDFLoader": "langchain_community.document_loaders", + "PyPDFDirectoryLoader": "langchain_community.document_loaders", + "PyPDFium2Loader": "langchain_community.document_loaders", + "PyPDFLoader": "langchain_community.document_loaders", + "PySparkDataFrameLoader": "langchain_community.document_loaders", + "PythonLoader": "langchain_community.document_loaders", + "ReadTheDocsLoader": "langchain_community.document_loaders", + "RecursiveUrlLoader": "langchain_community.document_loaders", + "RedditPostsLoader": "langchain_community.document_loaders", + "RSSFeedLoader": "langchain_community.document_loaders", + "RoamLoader": "langchain_community.document_loaders", + "RocksetLoader": "langchain_community.document_loaders", + "S3DirectoryLoader": "langchain_community.document_loaders", + "S3FileLoader": "langchain_community.document_loaders", + "SRTLoader": "langchain_community.document_loaders", + "SeleniumURLLoader": "langchain_community.document_loaders", + "SharePointLoader": "langchain_community.document_loaders", + "SitemapLoader": "langchain_community.document_loaders", + "SlackDirectoryLoader": "langchain_community.document_loaders", + "SnowflakeLoader": "langchain_community.document_loaders", + "SpreedlyLoader": "langchain_community.document_loaders", + "StripeLoader": "langchain_community.document_loaders", + "TelegramChatLoader": "langchain_community.document_loaders", + "TelegramChatApiLoader": "langchain_community.document_loaders", + "TelegramChatFileLoader": "langchain_community.document_loaders", + "TensorflowDatasetLoader": "langchain_community.document_loaders", + "TencentCOSDirectoryLoader": "langchain_community.document_loaders", + "TencentCOSFileLoader": "langchain_community.document_loaders", + "TextLoader": "langchain_community.document_loaders", + "ToMarkdownLoader": "langchain_community.document_loaders", + "TomlLoader": "langchain_community.document_loaders", + "TrelloLoader": "langchain_community.document_loaders", + "TwitterTweetLoader": "langchain_community.document_loaders", + "UnstructuredAPIFileIOLoader": "langchain_community.document_loaders", + "UnstructuredAPIFileLoader": "langchain_community.document_loaders", + "UnstructuredCSVLoader": "langchain_community.document_loaders", + "UnstructuredEPubLoader": "langchain_community.document_loaders", + "UnstructuredEmailLoader": "langchain_community.document_loaders", + "UnstructuredExcelLoader": "langchain_community.document_loaders", + "UnstructuredFileIOLoader": "langchain_community.document_loaders", + "UnstructuredFileLoader": "langchain_community.document_loaders", + "UnstructuredHTMLLoader": "langchain_community.document_loaders", + "UnstructuredImageLoader": "langchain_community.document_loaders", + "UnstructuredMarkdownLoader": "langchain_community.document_loaders", + "UnstructuredODTLoader": "langchain_community.document_loaders", + "UnstructuredOrgModeLoader": "langchain_community.document_loaders", + "UnstructuredPDFLoader": "langchain_community.document_loaders", + "UnstructuredPowerPointLoader": "langchain_community.document_loaders", + "UnstructuredRSTLoader": "langchain_community.document_loaders", + "UnstructuredRTFLoader": "langchain_community.document_loaders", + "UnstructuredTSVLoader": "langchain_community.document_loaders", + "UnstructuredURLLoader": "langchain_community.document_loaders", + "UnstructuredWordDocumentLoader": "langchain_community.document_loaders", + "UnstructuredXMLLoader": "langchain_community.document_loaders", + 
"WeatherDataLoader": "langchain_community.document_loaders", + "WebBaseLoader": "langchain_community.document_loaders", + "WhatsAppChatLoader": "langchain_community.document_loaders", + "WikipediaLoader": "langchain_community.document_loaders", + "XorbitsLoader": "langchain_community.document_loaders", + "YoutubeAudioLoader": "langchain_community.document_loaders", + "YoutubeLoader": "langchain_community.document_loaders", + "YuqueLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AcreomLoader", + "AsyncHtmlLoader", + "AsyncChromiumLoader", + "AZLyricsLoader", + "AcreomLoader", + "AirbyteCDKLoader", + "AirbyteGongLoader", + "AirbyteJSONLoader", + "AirbyteHubspotLoader", + "AirbyteSalesforceLoader", + "AirbyteShopifyLoader", + "AirbyteStripeLoader", + "AirbyteTypeformLoader", + "AirbyteZendeskSupportLoader", + "AirtableLoader", + "AmazonTextractPDFLoader", + "ApifyDatasetLoader", + "ArcGISLoader", + "ArxivLoader", + "AssemblyAIAudioTranscriptLoader", + "AsyncHtmlLoader", + "AzureAIDataLoader", + "AzureBlobStorageContainerLoader", + "AzureBlobStorageFileLoader", + "BSHTMLLoader", + "BibtexLoader", + "BigQueryLoader", + "BiliBiliLoader", + "BlackboardLoader", + "Blob", + "BlobLoader", + "BlockchainDocumentLoader", + "BraveSearchLoader", + "BrowserlessLoader", + "CSVLoader", + "ChatGPTLoader", + "CoNLLULoader", + "CollegeConfidentialLoader", + "ConcurrentLoader", + "ConfluenceLoader", + "CouchbaseLoader", + "CubeSemanticLoader", + "DataFrameLoader", + "DatadogLogsLoader", + "DiffbotLoader", + "DirectoryLoader", + "DiscordChatLoader", + "DocugamiLoader", + "DocusaurusLoader", + "Docx2txtLoader", + "DropboxLoader", + "DuckDBLoader", + "EtherscanLoader", + "EverNoteLoader", + "FacebookChatLoader", + "FaunaLoader", + "FigmaFileLoader", + "FileSystemBlobLoader", + "GCSDirectoryLoader", + "GCSFileLoader", + "GeoDataFrameLoader", + "GithubFileLoader", + "GitHubIssuesLoader", + "GitLoader", + "GitbookLoader", + "GoogleApiClient", + "GoogleApiYoutubeLoader", + "GoogleSpeechToTextLoader", + "GoogleDriveLoader", + "GutenbergLoader", + "HNLoader", + "HuggingFaceDatasetLoader", + "IFixitLoader", + "IMSDbLoader", + "ImageCaptionLoader", + "IuguLoader", + "JSONLoader", + "JoplinLoader", + "LarkSuiteDocLoader", + "LakeFSLoader", + "MHTMLLoader", + "MWDumpLoader", + "MastodonTootsLoader", + "MathpixPDFLoader", + "MaxComputeLoader", + "MergedDataLoader", + "ModernTreasuryLoader", + "MongodbLoader", + "NewsURLLoader", + "NotebookLoader", + "NotionDBLoader", + "NotionDirectoryLoader", + "OBSDirectoryLoader", + "OBSFileLoader", + "ObsidianLoader", + "OneDriveFileLoader", + "OneDriveLoader", + "OnlinePDFLoader", + "OpenCityDataLoader", + "OutlookMessageLoader", + "PDFMinerLoader", + "PDFMinerPDFasHTMLLoader", + "PDFPlumberLoader", + "PlaywrightURLLoader", + "PolarsDataFrameLoader", + "PsychicLoader", + "PubMedLoader", + "PyMuPDFLoader", + "PyPDFDirectoryLoader", + "PagedPDFSplitter", + "PyPDFLoader", + "PyPDFium2Loader", + "PySparkDataFrameLoader", + "PythonLoader", + "RSSFeedLoader", + "ReadTheDocsLoader", + "RecursiveUrlLoader", + "RedditPostsLoader", + "RoamLoader", + "RocksetLoader", + "S3DirectoryLoader", + "S3FileLoader", + "SRTLoader", + "SeleniumURLLoader", + "SharePointLoader", + "SitemapLoader", + "SlackDirectoryLoader", + "SnowflakeLoader", + "SpreedlyLoader", + "StripeLoader", + 
"TelegramChatApiLoader", + "TelegramChatFileLoader", + "TelegramChatLoader", + "TensorflowDatasetLoader", + "TencentCOSDirectoryLoader", + "TencentCOSFileLoader", + "TextLoader", + "ToMarkdownLoader", + "TomlLoader", + "TrelloLoader", + "TwitterTweetLoader", + "UnstructuredAPIFileIOLoader", + "UnstructuredAPIFileLoader", + "UnstructuredCSVLoader", + "UnstructuredEPubLoader", + "UnstructuredEmailLoader", + "UnstructuredExcelLoader", + "UnstructuredFileIOLoader", + "UnstructuredFileLoader", + "UnstructuredHTMLLoader", + "UnstructuredImageLoader", + "UnstructuredMarkdownLoader", + "UnstructuredODTLoader", + "UnstructuredOrgModeLoader", + "UnstructuredPDFLoader", + "UnstructuredPowerPointLoader", + "UnstructuredRSTLoader", + "UnstructuredRTFLoader", + "UnstructuredTSVLoader", + "UnstructuredURLLoader", + "UnstructuredWordDocumentLoader", + "UnstructuredXMLLoader", + "WeatherDataLoader", + "WebBaseLoader", + "WhatsAppChatLoader", + "WikipediaLoader", + "XorbitsLoader", + "YoutubeAudioLoader", + "YoutubeLoader", + "YuqueLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..e2cedab5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/acreom.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/acreom.cpython-312.pyc new file mode 100644 index 00000000..f19084db Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/acreom.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/airbyte.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/airbyte.cpython-312.pyc new file mode 100644 index 00000000..ce4b94c2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/airbyte.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/airbyte_json.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/airbyte_json.cpython-312.pyc new file mode 100644 index 00000000..ff3f71fd Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/airbyte_json.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/airtable.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/airtable.cpython-312.pyc new file mode 100644 index 00000000..e79f65d4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/airtable.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/apify_dataset.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/apify_dataset.cpython-312.pyc new file mode 100644 index 00000000..ba171007 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/apify_dataset.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/arcgis_loader.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/arcgis_loader.cpython-312.pyc new file mode 100644 index 00000000..1db536b8 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/arcgis_loader.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/arxiv.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/arxiv.cpython-312.pyc new file mode 100644 index 00000000..572a8391 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/arxiv.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/assemblyai.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/assemblyai.cpython-312.pyc new file mode 100644 index 00000000..e959192b Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/assemblyai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/async_html.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/async_html.cpython-312.pyc new file mode 100644 index 00000000..ddf38162 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/async_html.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azlyrics.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azlyrics.cpython-312.pyc new file mode 100644 index 00000000..30d3e9aa Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azlyrics.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azure_ai_data.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azure_ai_data.cpython-312.pyc new file mode 100644 index 00000000..7c2eb064 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azure_ai_data.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azure_blob_storage_container.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azure_blob_storage_container.cpython-312.pyc new file mode 100644 index 00000000..33c398b0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azure_blob_storage_container.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azure_blob_storage_file.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azure_blob_storage_file.cpython-312.pyc new file mode 100644 index 00000000..e8e756d0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/azure_blob_storage_file.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/baiducloud_bos_directory.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/baiducloud_bos_directory.cpython-312.pyc new file mode 100644 index 00000000..cd70da02 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/baiducloud_bos_directory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/baiducloud_bos_file.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/baiducloud_bos_file.cpython-312.pyc new file mode 100644 index 00000000..ca06bcfa Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/baiducloud_bos_file.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..9ed0b0dc Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/base_o365.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/base_o365.cpython-312.pyc new file mode 100644 index 00000000..9864e6e8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/base_o365.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/bibtex.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/bibtex.cpython-312.pyc new file mode 100644 index 00000000..a9d2e39e Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/bibtex.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/bigquery.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/bigquery.cpython-312.pyc new file mode 100644 index 00000000..45823e03 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/bigquery.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/bilibili.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/bilibili.cpython-312.pyc new file mode 100644 index 00000000..fd15f0f4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/bilibili.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/blackboard.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/blackboard.cpython-312.pyc new file mode 100644 index 00000000..44b6219c Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/blackboard.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/blockchain.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/blockchain.cpython-312.pyc new file mode 100644 index 00000000..c37b158c Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/blockchain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/brave_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/brave_search.cpython-312.pyc new file mode 100644 index 00000000..4a5c4d24 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/brave_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/browserless.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/browserless.cpython-312.pyc new file mode 100644 index 00000000..ad57b72f Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/browserless.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/chatgpt.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/chatgpt.cpython-312.pyc new file mode 100644 index 00000000..0681079e Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/chatgpt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/chromium.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/chromium.cpython-312.pyc new file mode 100644 index 00000000..3d044390 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/chromium.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/college_confidential.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/college_confidential.cpython-312.pyc new file mode 100644 index 00000000..6b2ae94d Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/college_confidential.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/concurrent.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/concurrent.cpython-312.pyc new file mode 100644 index 00000000..7b4bd1df Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/concurrent.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/confluence.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/confluence.cpython-312.pyc new file mode 100644 index 00000000..6d1bb99a Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/confluence.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/conllu.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/conllu.cpython-312.pyc new file mode 100644 index 00000000..fa3e2d5c Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/conllu.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/couchbase.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/couchbase.cpython-312.pyc new file mode 100644 index 00000000..2db620b3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/couchbase.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/csv_loader.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/csv_loader.cpython-312.pyc new file mode 100644 index 00000000..c0be9fb6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/csv_loader.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/cube_semantic.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/cube_semantic.cpython-312.pyc new file mode 100644 index 00000000..331d02cb Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/cube_semantic.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/datadog_logs.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/datadog_logs.cpython-312.pyc new file mode 100644 index 00000000..304679cc Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/datadog_logs.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/dataframe.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/dataframe.cpython-312.pyc new file mode 100644 index 00000000..4acaa935 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/dataframe.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/diffbot.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/diffbot.cpython-312.pyc new file mode 100644 index 00000000..71baa004 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/diffbot.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/directory.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/directory.cpython-312.pyc new file mode 100644 index 00000000..2c0de8f8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/directory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/discord.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/discord.cpython-312.pyc new file mode 100644 index 00000000..318c8f09 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/discord.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/docugami.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/docugami.cpython-312.pyc new file mode 100644 index 00000000..850b54ce Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/docugami.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/docusaurus.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/docusaurus.cpython-312.pyc new file mode 100644 index 00000000..92bd61e7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/docusaurus.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/dropbox.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/dropbox.cpython-312.pyc new file mode 100644 index 00000000..cb9e3ffe Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/dropbox.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/duckdb_loader.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/duckdb_loader.cpython-312.pyc new file mode 100644 index 00000000..c3f650d5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/duckdb_loader.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/email.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/email.cpython-312.pyc new file mode 100644 index 00000000..1e01358a Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/email.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/__pycache__/epub.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/epub.cpython-312.pyc new file mode 100644 index 00000000..6b79e7ba Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/__pycache__/epub.cpython-312.pyc differ diff --git 
[binary diffs omitted: compiled bytecode caches venv/Lib/site-packages/langchain/document_loaders/__pycache__/*.cpython-312.pyc, etherscan through youtube; "Binary files differ" stanzas with no reviewable text content]
diff --git a/venv/Lib/site-packages/langchain/document_loaders/acreom.py b/venv/Lib/site-packages/langchain/document_loaders/acreom.py new file mode 100644 index 00000000..7223cf9a --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/acreom.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import AcreomLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AcreomLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AcreomLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/airbyte.py b/venv/Lib/site-packages/langchain/document_loaders/airbyte.py new file mode 100644 index 00000000..57c4c6c5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/airbyte.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ( + AirbyteCDKLoader, + AirbyteGongLoader, + AirbyteHubspotLoader, + AirbyteSalesforceLoader, + AirbyteShopifyLoader, + AirbyteStripeLoader, + AirbyteTypeformLoader, + AirbyteZendeskSupportLoader, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports.
+DEPRECATED_LOOKUP = { + "AirbyteCDKLoader": "langchain_community.document_loaders", + "AirbyteHubspotLoader": "langchain_community.document_loaders", + "AirbyteStripeLoader": "langchain_community.document_loaders", + "AirbyteTypeformLoader": "langchain_community.document_loaders", + "AirbyteZendeskSupportLoader": "langchain_community.document_loaders", + "AirbyteShopifyLoader": "langchain_community.document_loaders", + "AirbyteSalesforceLoader": "langchain_community.document_loaders", + "AirbyteGongLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AirbyteCDKLoader", + "AirbyteHubspotLoader", + "AirbyteStripeLoader", + "AirbyteTypeformLoader", + "AirbyteZendeskSupportLoader", + "AirbyteShopifyLoader", + "AirbyteSalesforceLoader", + "AirbyteGongLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/airbyte_json.py b/venv/Lib/site-packages/langchain/document_loaders/airbyte_json.py new file mode 100644 index 00000000..e219dd2a --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/airbyte_json.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import AirbyteJSONLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AirbyteJSONLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AirbyteJSONLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/airtable.py b/venv/Lib/site-packages/langchain/document_loaders/airtable.py new file mode 100644 index 00000000..9daed112 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/airtable.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import AirtableLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AirtableLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AirtableLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/apify_dataset.py b/venv/Lib/site-packages/langchain/document_loaders/apify_dataset.py new file mode 100644 index 00000000..db84370a --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/apify_dataset.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ApifyDatasetLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ApifyDatasetLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ApifyDatasetLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/arcgis_loader.py b/venv/Lib/site-packages/langchain/document_loaders/arcgis_loader.py new file mode 100644 index 00000000..7585089e --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/arcgis_loader.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ArcGISLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ArcGISLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArcGISLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/arxiv.py b/venv/Lib/site-packages/langchain/document_loaders/arxiv.py new file mode 100644 index 00000000..23b3c245 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/arxiv.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ArxivLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ArxivLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArxivLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/assemblyai.py b/venv/Lib/site-packages/langchain/document_loaders/assemblyai.py new file mode 100644 index 00000000..2cff9381 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/assemblyai.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import AssemblyAIAudioTranscriptLoader + from langchain_community.document_loaders.assemblyai import TranscriptFormat + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "TranscriptFormat": "langchain_community.document_loaders.assemblyai", + "AssemblyAIAudioTranscriptLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TranscriptFormat", + "AssemblyAIAudioTranscriptLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/async_html.py b/venv/Lib/site-packages/langchain/document_loaders/async_html.py new file mode 100644 index 00000000..7d7ed60e --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/async_html.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import AsyncHtmlLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AsyncHtmlLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AsyncHtmlLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/azlyrics.py b/venv/Lib/site-packages/langchain/document_loaders/azlyrics.py new file mode 100644 index 00000000..66d4a609 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/azlyrics.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import AZLyricsLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AZLyricsLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AZLyricsLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/azure_ai_data.py b/venv/Lib/site-packages/langchain/document_loaders/azure_ai_data.py new file mode 100644 index 00000000..6f621785 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/azure_ai_data.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import AzureAIDataLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"AzureAIDataLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureAIDataLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/azure_blob_storage_container.py b/venv/Lib/site-packages/langchain/document_loaders/azure_blob_storage_container.py new file mode 100644 index 00000000..6301a8d6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/azure_blob_storage_container.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import AzureBlobStorageContainerLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AzureBlobStorageContainerLoader": "langchain_community.document_loaders" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureBlobStorageContainerLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/azure_blob_storage_file.py b/venv/Lib/site-packages/langchain/document_loaders/azure_blob_storage_file.py new file mode 100644 index 00000000..71e30239 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/azure_blob_storage_file.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import AzureBlobStorageFileLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AzureBlobStorageFileLoader": "langchain_community.document_loaders" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureBlobStorageFileLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/baiducloud_bos_directory.py b/venv/Lib/site-packages/langchain/document_loaders/baiducloud_bos_directory.py new file mode 100644 index 00000000..142348fa --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/baiducloud_bos_directory.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.baiducloud_bos_directory import ( + BaiduBOSDirectoryLoader, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BaiduBOSDirectoryLoader": ( + "langchain_community.document_loaders.baiducloud_bos_directory" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaiduBOSDirectoryLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/baiducloud_bos_file.py b/venv/Lib/site-packages/langchain/document_loaders/baiducloud_bos_file.py new file mode 100644 index 00000000..d18c9bfc --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/baiducloud_bos_file.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.baiducloud_bos_file import ( + BaiduBOSFileLoader, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "BaiduBOSFileLoader": "langchain_community.document_loaders.baiducloud_bos_file" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaiduBOSFileLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/base.py b/venv/Lib/site-packages/langchain/document_loaders/base.py new file mode 100644 index 00000000..4b0bf30b --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/base.py @@ -0,0 +1,3 @@ +from langchain_core.document_loaders import BaseBlobParser, BaseLoader + +__all__ = ["BaseLoader", "BaseBlobParser"] diff --git a/venv/Lib/site-packages/langchain/document_loaders/base_o365.py b/venv/Lib/site-packages/langchain/document_loaders/base_o365.py new file mode 100644 index 00000000..60e75fac --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/base_o365.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.base_o365 import O365BaseLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"O365BaseLoader": "langchain_community.document_loaders.base_o365"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "O365BaseLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/bibtex.py b/venv/Lib/site-packages/langchain/document_loaders/bibtex.py new file mode 100644 index 00000000..c08d663f --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/bibtex.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import BibtexLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"BibtexLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BibtexLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/bigquery.py b/venv/Lib/site-packages/langchain/document_loaders/bigquery.py new file mode 100644 index 00000000..c40c433c --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/bigquery.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import BigQueryLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"BigQueryLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BigQueryLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/bilibili.py b/venv/Lib/site-packages/langchain/document_loaders/bilibili.py new file mode 100644 index 00000000..3a11a6b3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/bilibili.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import BiliBiliLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"BiliBiliLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BiliBiliLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/blackboard.py b/venv/Lib/site-packages/langchain/document_loaders/blackboard.py new file mode 100644 index 00000000..36a2da20 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/blackboard.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import BlackboardLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"BlackboardLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BlackboardLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__init__.py b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__init__.py new file mode 100644 index 00000000..67898e4c --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__init__.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any + +from langchain_core.document_loaders import Blob, BlobLoader + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ( + FileSystemBlobLoader, + YoutubeAudioLoader, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "BlobLoader": "langchain_community.document_loaders", + "Blob": "langchain_community.document_loaders", + "FileSystemBlobLoader": "langchain_community.document_loaders", + "YoutubeAudioLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BlobLoader", + "Blob", + "FileSystemBlobLoader", + "YoutubeAudioLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9442a1d6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/file_system.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/file_system.cpython-312.pyc new file mode 100644 index 00000000..b352deba Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/file_system.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/schema.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/schema.cpython-312.pyc new file mode 100644 index 00000000..36273d7f Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/schema.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/youtube_audio.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/youtube_audio.cpython-312.pyc new file mode 100644 index 00000000..04be41ad Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/__pycache__/youtube_audio.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/file_system.py b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/file_system.py new file mode 100644 index 00000000..151567d7 --- /dev/null +++ 
b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/file_system.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import FileSystemBlobLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"FileSystemBlobLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FileSystemBlobLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/schema.py b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/schema.py new file mode 100644 index 00000000..677b9cfd --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/schema.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain_core.document_loaders import Blob, BlobLoader + +from langchain._api import create_importer + +if TYPE_CHECKING: + pass + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "Blob": "langchain_community.document_loaders", + "BlobLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Blob", + "BlobLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/youtube_audio.py b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/youtube_audio.py new file mode 100644 index 00000000..d4612343 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/blob_loaders/youtube_audio.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import YoutubeAudioLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"YoutubeAudioLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "YoutubeAudioLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/blockchain.py b/venv/Lib/site-packages/langchain/document_loaders/blockchain.py new file mode 100644 index 00000000..b815c6e9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/blockchain.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import BlockchainDocumentLoader + from langchain_community.document_loaders.blockchain import BlockchainType + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BlockchainType": "langchain_community.document_loaders.blockchain", + "BlockchainDocumentLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BlockchainType", + "BlockchainDocumentLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/brave_search.py b/venv/Lib/site-packages/langchain/document_loaders/brave_search.py new file mode 100644 index 00000000..0fa833b3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/brave_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import BraveSearchLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"BraveSearchLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BraveSearchLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/browserless.py b/venv/Lib/site-packages/langchain/document_loaders/browserless.py new file mode 100644 index 00000000..817e9b89 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/browserless.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import BrowserlessLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"BrowserlessLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BrowserlessLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/chatgpt.py b/venv/Lib/site-packages/langchain/document_loaders/chatgpt.py new file mode 100644 index 00000000..e9ab0934 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/chatgpt.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ChatGPTLoader + from langchain_community.document_loaders.chatgpt import concatenate_rows + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "concatenate_rows": "langchain_community.document_loaders.chatgpt", + "ChatGPTLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "concatenate_rows", + "ChatGPTLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/chromium.py b/venv/Lib/site-packages/langchain/document_loaders/chromium.py new file mode 100644 index 00000000..e89f141e --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/chromium.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import AsyncChromiumLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AsyncChromiumLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AsyncChromiumLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/college_confidential.py b/venv/Lib/site-packages/langchain/document_loaders/college_confidential.py new file mode 100644 index 00000000..0d7b04c8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/college_confidential.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import CollegeConfidentialLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CollegeConfidentialLoader": "langchain_community.document_loaders" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CollegeConfidentialLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/concurrent.py b/venv/Lib/site-packages/langchain/document_loaders/concurrent.py new file mode 100644 index 00000000..21823140 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/concurrent.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ConcurrentLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ConcurrentLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ConcurrentLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/confluence.py b/venv/Lib/site-packages/langchain/document_loaders/confluence.py new file mode 100644 index 00000000..f91b8280 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/confluence.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ConfluenceLoader + from langchain_community.document_loaders.confluence import ContentFormat + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ContentFormat": "langchain_community.document_loaders.confluence", + "ConfluenceLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ContentFormat", + "ConfluenceLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/conllu.py b/venv/Lib/site-packages/langchain/document_loaders/conllu.py new file mode 100644 index 00000000..1d55f979 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/conllu.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import CoNLLULoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"CoNLLULoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CoNLLULoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/couchbase.py b/venv/Lib/site-packages/langchain/document_loaders/couchbase.py new file mode 100644 index 00000000..ca99b4d6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/couchbase.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import CouchbaseLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"CouchbaseLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CouchbaseLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/csv_loader.py b/venv/Lib/site-packages/langchain/document_loaders/csv_loader.py new file mode 100644 index 00000000..9840e5d6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/csv_loader.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import CSVLoader, UnstructuredCSVLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CSVLoader": "langchain_community.document_loaders", + "UnstructuredCSVLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CSVLoader", + "UnstructuredCSVLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/cube_semantic.py b/venv/Lib/site-packages/langchain/document_loaders/cube_semantic.py new file mode 100644 index 00000000..7eb4f90f --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/cube_semantic.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import CubeSemanticLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"CubeSemanticLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CubeSemanticLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/datadog_logs.py b/venv/Lib/site-packages/langchain/document_loaders/datadog_logs.py new file mode 100644 index 00000000..e574c986 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/datadog_logs.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import DatadogLogsLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"DatadogLogsLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DatadogLogsLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/dataframe.py b/venv/Lib/site-packages/langchain/document_loaders/dataframe.py new file mode 100644 index 00000000..4fe8f3c1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/dataframe.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import DataFrameLoader + from langchain_community.document_loaders.dataframe import BaseDataFrameLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "BaseDataFrameLoader": "langchain_community.document_loaders.dataframe", + "DataFrameLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseDataFrameLoader", + "DataFrameLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/diffbot.py b/venv/Lib/site-packages/langchain/document_loaders/diffbot.py new file mode 100644 index 00000000..a1d10d30 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/diffbot.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import DiffbotLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DiffbotLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DiffbotLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/directory.py b/venv/Lib/site-packages/langchain/document_loaders/directory.py new file mode 100644 index 00000000..15000588 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/directory.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import DirectoryLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"DirectoryLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DirectoryLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/discord.py b/venv/Lib/site-packages/langchain/document_loaders/discord.py new file mode 100644 index 00000000..184039cf --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/discord.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import DiscordChatLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DiscordChatLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DiscordChatLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/docugami.py b/venv/Lib/site-packages/langchain/document_loaders/docugami.py new file mode 100644 index 00000000..4be78b05 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/docugami.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import DocugamiLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DocugamiLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DocugamiLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/docusaurus.py b/venv/Lib/site-packages/langchain/document_loaders/docusaurus.py new file mode 100644 index 00000000..8e71d7f9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/docusaurus.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import DocusaurusLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
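+#
+# The ``if TYPE_CHECKING:`` import above is never executed at runtime; it is
+# only there so static type checkers and IDEs can resolve ``DocusaurusLoader``
+# without this module importing ``langchain_community`` eagerly.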
+DEPRECATED_LOOKUP = {"DocusaurusLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DocusaurusLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/dropbox.py b/venv/Lib/site-packages/langchain/document_loaders/dropbox.py new file mode 100644 index 00000000..12859c66 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/dropbox.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import DropboxLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DropboxLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DropboxLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/duckdb_loader.py b/venv/Lib/site-packages/langchain/document_loaders/duckdb_loader.py new file mode 100644 index 00000000..e561488a --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/duckdb_loader.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import DuckDBLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DuckDBLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DuckDBLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/email.py b/venv/Lib/site-packages/langchain/document_loaders/email.py new file mode 100644 index 00000000..4d462e0a --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/email.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ( + OutlookMessageLoader, + UnstructuredEmailLoader, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "UnstructuredEmailLoader": "langchain_community.document_loaders", + "OutlookMessageLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredEmailLoader", + "OutlookMessageLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/epub.py b/venv/Lib/site-packages/langchain/document_loaders/epub.py new file mode 100644 index 00000000..c672b1fc --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/epub.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredEPubLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"UnstructuredEPubLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredEPubLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/etherscan.py b/venv/Lib/site-packages/langchain/document_loaders/etherscan.py new file mode 100644 index 00000000..a1be72b7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/etherscan.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import EtherscanLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"EtherscanLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EtherscanLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/evernote.py b/venv/Lib/site-packages/langchain/document_loaders/evernote.py new file mode 100644 index 00000000..295ec7a2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/evernote.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import EverNoteLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"EverNoteLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EverNoteLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/excel.py b/venv/Lib/site-packages/langchain/document_loaders/excel.py new file mode 100644 index 00000000..e14f4a4d --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/excel.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredExcelLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"UnstructuredExcelLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredExcelLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/facebook_chat.py b/venv/Lib/site-packages/langchain/document_loaders/facebook_chat.py new file mode 100644 index 00000000..bdf5d219 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/facebook_chat.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import FacebookChatLoader + from langchain_community.document_loaders.facebook_chat import concatenate_rows + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "concatenate_rows": "langchain_community.document_loaders.facebook_chat", + "FacebookChatLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "concatenate_rows", + "FacebookChatLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/fauna.py b/venv/Lib/site-packages/langchain/document_loaders/fauna.py new file mode 100644 index 00000000..7987cdd3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/fauna.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import FaunaLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"FaunaLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FaunaLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/figma.py b/venv/Lib/site-packages/langchain/document_loaders/figma.py new file mode 100644 index 00000000..7b0d345d --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/figma.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import FigmaFileLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"FigmaFileLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FigmaFileLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/gcs_directory.py b/venv/Lib/site-packages/langchain/document_loaders/gcs_directory.py new file mode 100644 index 00000000..e79241ae --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/gcs_directory.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import GCSDirectoryLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GCSDirectoryLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GCSDirectoryLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/gcs_file.py b/venv/Lib/site-packages/langchain/document_loaders/gcs_file.py new file mode 100644 index 00000000..4803ae44 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/gcs_file.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import GCSFileLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GCSFileLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GCSFileLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/generic.py b/venv/Lib/site-packages/langchain/document_loaders/generic.py new file mode 100644 index 00000000..8e121359 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/generic.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.generic import GenericLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GenericLoader": "langchain_community.document_loaders.generic"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GenericLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/geodataframe.py b/venv/Lib/site-packages/langchain/document_loaders/geodataframe.py new file mode 100644 index 00000000..f8f0d2de --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/geodataframe.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import GeoDataFrameLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GeoDataFrameLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GeoDataFrameLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/git.py b/venv/Lib/site-packages/langchain/document_loaders/git.py new file mode 100644 index 00000000..b4ee8e01 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/git.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import GitLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GitLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GitLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/gitbook.py b/venv/Lib/site-packages/langchain/document_loaders/gitbook.py new file mode 100644 index 00000000..443d2ae1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/gitbook.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import GitbookLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GitbookLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GitbookLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/github.py b/venv/Lib/site-packages/langchain/document_loaders/github.py new file mode 100644 index 00000000..2a2b0ffb --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/github.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import GitHubIssuesLoader + from langchain_community.document_loaders.github import BaseGitHubLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "BaseGitHubLoader": "langchain_community.document_loaders.github", + "GitHubIssuesLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseGitHubLoader", + "GitHubIssuesLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/google_speech_to_text.py b/venv/Lib/site-packages/langchain/document_loaders/google_speech_to_text.py new file mode 100644 index 00000000..85be467e --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/google_speech_to_text.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import GoogleSpeechToTextLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GoogleSpeechToTextLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleSpeechToTextLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/googledrive.py b/venv/Lib/site-packages/langchain/document_loaders/googledrive.py new file mode 100644 index 00000000..17e290e4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/googledrive.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import GoogleDriveLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoogleDriveLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleDriveLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/gutenberg.py b/venv/Lib/site-packages/langchain/document_loaders/gutenberg.py new file mode 100644 index 00000000..ae19715a --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/gutenberg.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import GutenbergLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GutenbergLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GutenbergLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/helpers.py b/venv/Lib/site-packages/langchain/document_loaders/helpers.py new file mode 100644 index 00000000..25a8c91c --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/helpers.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.helpers import ( + FileEncoding, + detect_file_encodings, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
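+#
+# Plain functions such as ``detect_file_encodings`` are routed through the
+# same lookup as classes like ``FileEncoding``; ``__getattr__`` simply
+# returns whatever object the target community module exposes.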
+DEPRECATED_LOOKUP = { + "FileEncoding": "langchain_community.document_loaders.helpers", + "detect_file_encodings": "langchain_community.document_loaders.helpers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FileEncoding", + "detect_file_encodings", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/hn.py b/venv/Lib/site-packages/langchain/document_loaders/hn.py new file mode 100644 index 00000000..60fbe690 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/hn.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import HNLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"HNLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HNLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/html.py b/venv/Lib/site-packages/langchain/document_loaders/html.py new file mode 100644 index 00000000..79f50f05 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/html.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredHTMLLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"UnstructuredHTMLLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredHTMLLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/html_bs.py b/venv/Lib/site-packages/langchain/document_loaders/html_bs.py new file mode 100644 index 00000000..9665c06c --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/html_bs.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import BSHTMLLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"BSHTMLLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BSHTMLLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/hugging_face_dataset.py b/venv/Lib/site-packages/langchain/document_loaders/hugging_face_dataset.py new file mode 100644 index 00000000..2b9dbc78 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/hugging_face_dataset.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import HuggingFaceDatasetLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"HuggingFaceDatasetLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HuggingFaceDatasetLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/ifixit.py b/venv/Lib/site-packages/langchain/document_loaders/ifixit.py new file mode 100644 index 00000000..6942b91f --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/ifixit.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import IFixitLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"IFixitLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "IFixitLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/image.py b/venv/Lib/site-packages/langchain/document_loaders/image.py new file mode 100644 index 00000000..5f0e2630 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/image.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredImageLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"UnstructuredImageLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredImageLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/image_captions.py b/venv/Lib/site-packages/langchain/document_loaders/image_captions.py new file mode 100644 index 00000000..7ab1dc0f --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/image_captions.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ImageCaptionLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ImageCaptionLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ImageCaptionLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/imsdb.py b/venv/Lib/site-packages/langchain/document_loaders/imsdb.py new file mode 100644 index 00000000..e21b998f --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/imsdb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import IMSDbLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"IMSDbLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "IMSDbLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/iugu.py b/venv/Lib/site-packages/langchain/document_loaders/iugu.py new file mode 100644 index 00000000..0d314b66 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/iugu.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import IuguLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"IuguLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "IuguLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/joplin.py b/venv/Lib/site-packages/langchain/document_loaders/joplin.py new file mode 100644 index 00000000..911dd14c --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/joplin.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import JoplinLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"JoplinLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JoplinLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/json_loader.py b/venv/Lib/site-packages/langchain/document_loaders/json_loader.py new file mode 100644 index 00000000..11e6b94d --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/json_loader.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import JSONLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"JSONLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JSONLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/lakefs.py b/venv/Lib/site-packages/langchain/document_loaders/lakefs.py new file mode 100644 index 00000000..0139280b --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/lakefs.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import LakeFSLoader + from langchain_community.document_loaders.lakefs import ( + LakeFSClient, + UnstructuredLakeFSLoader, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
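+#
+# A single lookup can route different names to different targets: here
+# ``LakeFSLoader`` resolves from the ``langchain_community.document_loaders``
+# package root, while ``LakeFSClient`` and ``UnstructuredLakeFSLoader`` come
+# from the ``langchain_community.document_loaders.lakefs`` submodule.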
+DEPRECATED_LOOKUP = { + "LakeFSClient": "langchain_community.document_loaders.lakefs", + "LakeFSLoader": "langchain_community.document_loaders", + "UnstructuredLakeFSLoader": "langchain_community.document_loaders.lakefs", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LakeFSClient", + "LakeFSLoader", + "UnstructuredLakeFSLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/larksuite.py b/venv/Lib/site-packages/langchain/document_loaders/larksuite.py new file mode 100644 index 00000000..d4ee9c73 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/larksuite.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import LarkSuiteDocLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"LarkSuiteDocLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LarkSuiteDocLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/markdown.py b/venv/Lib/site-packages/langchain/document_loaders/markdown.py new file mode 100644 index 00000000..f2d7cce4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/markdown.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredMarkdownLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "UnstructuredMarkdownLoader": "langchain_community.document_loaders" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredMarkdownLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/mastodon.py b/venv/Lib/site-packages/langchain/document_loaders/mastodon.py new file mode 100644 index 00000000..ddc0e8da --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/mastodon.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import MastodonTootsLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"MastodonTootsLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MastodonTootsLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/max_compute.py b/venv/Lib/site-packages/langchain/document_loaders/max_compute.py new file mode 100644 index 00000000..e0a73476 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/max_compute.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import MaxComputeLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MaxComputeLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MaxComputeLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/mediawikidump.py b/venv/Lib/site-packages/langchain/document_loaders/mediawikidump.py new file mode 100644 index 00000000..12062558 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/mediawikidump.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import MWDumpLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MWDumpLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MWDumpLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/merge.py b/venv/Lib/site-packages/langchain/document_loaders/merge.py new file mode 100644 index 00000000..e38b193b --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/merge.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import MergedDataLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"MergedDataLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MergedDataLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/mhtml.py b/venv/Lib/site-packages/langchain/document_loaders/mhtml.py new file mode 100644 index 00000000..26597c84 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/mhtml.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import MHTMLLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MHTMLLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MHTMLLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/modern_treasury.py b/venv/Lib/site-packages/langchain/document_loaders/modern_treasury.py new file mode 100644 index 00000000..12b73ef7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/modern_treasury.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ModernTreasuryLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ModernTreasuryLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ModernTreasuryLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/mongodb.py b/venv/Lib/site-packages/langchain/document_loaders/mongodb.py new file mode 100644 index 00000000..f157d6ad --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/mongodb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import MongodbLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"MongodbLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MongodbLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/news.py b/venv/Lib/site-packages/langchain/document_loaders/news.py new file mode 100644 index 00000000..fe2e8207 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/news.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import NewsURLLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NewsURLLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NewsURLLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/notebook.py b/venv/Lib/site-packages/langchain/document_loaders/notebook.py new file mode 100644 index 00000000..e29dcde5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/notebook.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import NotebookLoader + from langchain_community.document_loaders.notebook import ( + concatenate_cells, + remove_newlines, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "concatenate_cells": "langchain_community.document_loaders.notebook", + "remove_newlines": "langchain_community.document_loaders.notebook", + "NotebookLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "concatenate_cells", + "remove_newlines", + "NotebookLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/notion.py b/venv/Lib/site-packages/langchain/document_loaders/notion.py new file mode 100644 index 00000000..c0b1bd9e --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/notion.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import NotionDirectoryLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"NotionDirectoryLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NotionDirectoryLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/notiondb.py b/venv/Lib/site-packages/langchain/document_loaders/notiondb.py new file mode 100644 index 00000000..3cd57b3f --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/notiondb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import NotionDBLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NotionDBLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NotionDBLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/nuclia.py b/venv/Lib/site-packages/langchain/document_loaders/nuclia.py new file mode 100644 index 00000000..78348dca --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/nuclia.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.nuclia import NucliaLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NucliaLoader": "langchain_community.document_loaders.nuclia"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NucliaLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/obs_directory.py b/venv/Lib/site-packages/langchain/document_loaders/obs_directory.py new file mode 100644 index 00000000..b7136298 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/obs_directory.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import OBSDirectoryLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"OBSDirectoryLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OBSDirectoryLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/obs_file.py b/venv/Lib/site-packages/langchain/document_loaders/obs_file.py new file mode 100644 index 00000000..b9e69f75 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/obs_file.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import OBSFileLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OBSFileLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OBSFileLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/obsidian.py b/venv/Lib/site-packages/langchain/document_loaders/obsidian.py new file mode 100644 index 00000000..71146a8f --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/obsidian.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ObsidianLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ObsidianLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ObsidianLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/odt.py b/venv/Lib/site-packages/langchain/document_loaders/odt.py new file mode 100644 index 00000000..d82a098d --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/odt.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredODTLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"UnstructuredODTLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredODTLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/onedrive.py b/venv/Lib/site-packages/langchain/document_loaders/onedrive.py new file mode 100644 index 00000000..a13bb0c4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/onedrive.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import OneDriveLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OneDriveLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OneDriveLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/onedrive_file.py b/venv/Lib/site-packages/langchain/document_loaders/onedrive_file.py new file mode 100644 index 00000000..7f73ba99 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/onedrive_file.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import OneDriveFileLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OneDriveFileLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OneDriveFileLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/onenote.py b/venv/Lib/site-packages/langchain/document_loaders/onenote.py new file mode 100644 index 00000000..d89980f1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/onenote.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.onenote import OneNoteLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"OneNoteLoader": "langchain_community.document_loaders.onenote"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OneNoteLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/open_city_data.py b/venv/Lib/site-packages/langchain/document_loaders/open_city_data.py new file mode 100644 index 00000000..badcc024 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/open_city_data.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import OpenCityDataLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OpenCityDataLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenCityDataLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/org_mode.py b/venv/Lib/site-packages/langchain/document_loaders/org_mode.py new file mode 100644 index 00000000..cec468a2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/org_mode.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredOrgModeLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "UnstructuredOrgModeLoader": "langchain_community.document_loaders" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredOrgModeLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/__init__.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/__init__.py new file mode 100644 index 00000000..a0c3930e --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/__init__.py @@ -0,0 +1,58 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.audio import OpenAIWhisperParser + from langchain_community.document_loaders.parsers.docai import DocAIParser + from langchain_community.document_loaders.parsers.grobid import GrobidParser + from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser + from langchain_community.document_loaders.parsers.language.language_parser import ( + LanguageParser, + ) + from langchain_community.document_loaders.parsers.pdf import ( + PDFMinerParser, + PDFPlumberParser, + PyMuPDFParser, + PyPDFium2Parser, + PyPDFParser, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BS4HTMLParser": "langchain_community.document_loaders.parsers.html.bs4", + "DocAIParser": "langchain_community.document_loaders.parsers.docai", + "GrobidParser": "langchain_community.document_loaders.parsers.grobid", + "LanguageParser": ( + "langchain_community.document_loaders.parsers.language.language_parser" + ), + "OpenAIWhisperParser": "langchain_community.document_loaders.parsers.audio", + "PDFMinerParser": "langchain_community.document_loaders.parsers.pdf", + "PDFPlumberParser": "langchain_community.document_loaders.parsers.pdf", + "PyMuPDFParser": "langchain_community.document_loaders.parsers.pdf", + "PyPDFium2Parser": "langchain_community.document_loaders.parsers.pdf", + "PyPDFParser": "langchain_community.document_loaders.parsers.pdf", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BS4HTMLParser", + "DocAIParser", + "GrobidParser", + "LanguageParser", + "OpenAIWhisperParser", + "PDFMinerParser", + "PDFPlumberParser", + "PyMuPDFParser", + "PyPDFium2Parser", + "PyPDFParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..73a604ac Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/audio.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/audio.cpython-312.pyc new file mode 100644 index 00000000..8560e356 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/audio.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/docai.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/docai.cpython-312.pyc new file mode 100644 index 00000000..5d08391d Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/docai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/generic.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/generic.cpython-312.pyc new file mode 100644 index 00000000..c3a3d8a3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/generic.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/grobid.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/grobid.cpython-312.pyc new file mode 100644 index 00000000..f63e60ce Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/grobid.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/msword.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/msword.cpython-312.pyc new file mode 100644 index 00000000..ce0f9505 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/msword.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/pdf.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/pdf.cpython-312.pyc new file mode 100644 index 00000000..454b423e Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/pdf.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/registry.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/registry.cpython-312.pyc new file mode 100644 index 00000000..671a74f4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/registry.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/txt.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/txt.cpython-312.pyc new file mode 100644 index 00000000..a05621f2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/__pycache__/txt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/audio.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/audio.py new file mode 100644 index 00000000..f1954c66 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/audio.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.audio import ( + OpenAIWhisperParser, + OpenAIWhisperParserLocal, + YandexSTTParser, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "OpenAIWhisperParser": "langchain_community.document_loaders.parsers.audio", + "OpenAIWhisperParserLocal": "langchain_community.document_loaders.parsers.audio", + "YandexSTTParser": "langchain_community.document_loaders.parsers.audio", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenAIWhisperParser", + "OpenAIWhisperParserLocal", + "YandexSTTParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/docai.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/docai.py new file mode 100644 index 00000000..bdc29325 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/docai.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.docai import ( + DocAIParser, + DocAIParsingResults, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "DocAIParsingResults": "langchain_community.document_loaders.parsers.docai", + "DocAIParser": "langchain_community.document_loaders.parsers.docai", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DocAIParsingResults", + "DocAIParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/generic.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/generic.py new file mode 100644 index 00000000..5d9c6501 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/generic.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.generic import MimeTypeBasedParser + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "MimeTypeBasedParser": "langchain_community.document_loaders.parsers.generic" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MimeTypeBasedParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/grobid.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/grobid.py new file mode 100644 index 00000000..13d08961 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/grobid.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.grobid import ( + GrobidParser, + ServerUnavailableException, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GrobidParser": "langchain_community.document_loaders.parsers.grobid", + "ServerUnavailableException": "langchain_community.document_loaders.parsers.grobid", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GrobidParser", + "ServerUnavailableException", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/html/__init__.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/html/__init__.py new file mode 100644 index 00000000..107d992a --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/html/__init__.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BS4HTMLParser": "langchain_community.document_loaders.parsers.html.bs4" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BS4HTMLParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/html/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/html/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..bd999919 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/html/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/html/__pycache__/bs4.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/html/__pycache__/bs4.cpython-312.pyc new file mode 100644 index 00000000..c1096d92 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/html/__pycache__/bs4.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/html/bs4.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/html/bs4.py new file mode 100644 index 00000000..107d992a --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/html/bs4.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "BS4HTMLParser": "langchain_community.document_loaders.parsers.html.bs4" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BS4HTMLParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__init__.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__init__.py new file mode 100644 index 00000000..27db64fa --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__init__.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.language.language_parser import ( + LanguageParser, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "LanguageParser": ( + "langchain_community.document_loaders.parsers.language.language_parser" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LanguageParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..e65d8561 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/cobol.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/cobol.cpython-312.pyc new file mode 100644 index 00000000..5eafdd0c Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/cobol.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/code_segmenter.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/code_segmenter.cpython-312.pyc new file mode 100644 index 00000000..d8ca655b Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/code_segmenter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/javascript.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/javascript.cpython-312.pyc new file mode 100644 index 00000000..769ade40 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/javascript.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/language_parser.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/language_parser.cpython-312.pyc new file mode 100644 index 00000000..c24dde4a Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/language_parser.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/python.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/python.cpython-312.pyc new file mode 100644 index 00000000..c91eaf6e Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/__pycache__/python.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/cobol.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/cobol.py new file mode 100644 index 00000000..e80b5d65 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/cobol.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.language.cobol import ( + CobolSegmenter, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "CobolSegmenter": "langchain_community.document_loaders.parsers.language.cobol" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CobolSegmenter", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/code_segmenter.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/code_segmenter.py new file mode 100644 index 00000000..1469eaeb --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/code_segmenter.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.language.code_segmenter import ( + CodeSegmenter, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CodeSegmenter": ( + "langchain_community.document_loaders.parsers.language.code_segmenter" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CodeSegmenter", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/javascript.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/javascript.py new file mode 100644 index 00000000..ffb2c409 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/javascript.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.language.javascript import ( + JavaScriptSegmenter, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "JavaScriptSegmenter": ( + "langchain_community.document_loaders.parsers.language.javascript" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JavaScriptSegmenter", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/language_parser.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/language_parser.py new file mode 100644 index 00000000..27db64fa --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/language_parser.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.language.language_parser import ( + LanguageParser, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "LanguageParser": ( + "langchain_community.document_loaders.parsers.language.language_parser" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LanguageParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/language/python.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/python.py new file mode 100644 index 00000000..79a70279 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/language/python.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.language.python import ( + PythonSegmenter, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "PythonSegmenter": "langchain_community.document_loaders.parsers.language.python" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PythonSegmenter", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/msword.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/msword.py new file mode 100644 index 00000000..24b41850 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/msword.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.msword import MsWordParser + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "MsWordParser": "langchain_community.document_loaders.parsers.msword" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MsWordParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/pdf.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/pdf.py new file mode 100644 index 00000000..398134ee --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/pdf.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.pdf import ( + AmazonTextractPDFParser, + DocumentIntelligenceParser, + PDFMinerParser, + PDFPlumberParser, + PyMuPDFParser, + PyPDFium2Parser, + PyPDFParser, + extract_from_images_with_rapidocr, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "extract_from_images_with_rapidocr": ( + "langchain_community.document_loaders.parsers.pdf" + ), + "PyPDFParser": "langchain_community.document_loaders.parsers.pdf", + "PDFMinerParser": "langchain_community.document_loaders.parsers.pdf", + "PyMuPDFParser": "langchain_community.document_loaders.parsers.pdf", + "PyPDFium2Parser": "langchain_community.document_loaders.parsers.pdf", + "PDFPlumberParser": "langchain_community.document_loaders.parsers.pdf", + "AmazonTextractPDFParser": "langchain_community.document_loaders.parsers.pdf", + "DocumentIntelligenceParser": "langchain_community.document_loaders.parsers.pdf", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "extract_from_images_with_rapidocr", + "PyPDFParser", + "PDFMinerParser", + "PyMuPDFParser", + "PyPDFium2Parser", + "PDFPlumberParser", + "AmazonTextractPDFParser", + "DocumentIntelligenceParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/registry.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/registry.py new file mode 100644 index 00000000..1c1cce71 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/registry.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.registry import get_parser + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "get_parser": "langchain_community.document_loaders.parsers.registry" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "get_parser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/parsers/txt.py b/venv/Lib/site-packages/langchain/document_loaders/parsers/txt.py new file mode 100644 index 00000000..0597169c --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/parsers/txt.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.txt import TextParser + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"TextParser": "langchain_community.document_loaders.parsers.txt"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TextParser", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/pdf.py b/venv/Lib/site-packages/langchain/document_loaders/pdf.py new file mode 100644 index 00000000..9e5a39ae --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/pdf.py @@ -0,0 +1,65 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ( + AmazonTextractPDFLoader, + MathpixPDFLoader, + OnlinePDFLoader, + PagedPDFSplitter, + PDFMinerLoader, + PDFMinerPDFasHTMLLoader, + PDFPlumberLoader, + PyMuPDFLoader, + PyPDFDirectoryLoader, + PyPDFium2Loader, + UnstructuredPDFLoader, + ) + from langchain_community.document_loaders.pdf import ( + BasePDFLoader, + DocumentIntelligenceLoader, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "UnstructuredPDFLoader": "langchain_community.document_loaders", + "BasePDFLoader": "langchain_community.document_loaders.pdf", + "OnlinePDFLoader": "langchain_community.document_loaders", + "PagedPDFSplitter": "langchain_community.document_loaders", + "PyPDFium2Loader": "langchain_community.document_loaders", + "PyPDFDirectoryLoader": "langchain_community.document_loaders", + "PDFMinerLoader": "langchain_community.document_loaders", + "PDFMinerPDFasHTMLLoader": "langchain_community.document_loaders", + "PyMuPDFLoader": "langchain_community.document_loaders", + "MathpixPDFLoader": "langchain_community.document_loaders", + "PDFPlumberLoader": "langchain_community.document_loaders", + "AmazonTextractPDFLoader": "langchain_community.document_loaders", + "DocumentIntelligenceLoader": "langchain_community.document_loaders.pdf", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredPDFLoader", + "BasePDFLoader", + "OnlinePDFLoader", + "PagedPDFSplitter", + "PyPDFium2Loader", + "PyPDFDirectoryLoader", + "PDFMinerLoader", + "PDFMinerPDFasHTMLLoader", + "PyMuPDFLoader", + "MathpixPDFLoader", + "PDFPlumberLoader", + "AmazonTextractPDFLoader", + "DocumentIntelligenceLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/polars_dataframe.py b/venv/Lib/site-packages/langchain/document_loaders/polars_dataframe.py new file mode 100644 index 00000000..67d413da --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/polars_dataframe.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import PolarsDataFrameLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"PolarsDataFrameLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PolarsDataFrameLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/powerpoint.py b/venv/Lib/site-packages/langchain/document_loaders/powerpoint.py new file mode 100644 index 00000000..f86145c2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/powerpoint.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredPowerPointLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "UnstructuredPowerPointLoader": "langchain_community.document_loaders" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredPowerPointLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/psychic.py b/venv/Lib/site-packages/langchain/document_loaders/psychic.py new file mode 100644 index 00000000..c5964eac --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/psychic.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import PsychicLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PsychicLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PsychicLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/pubmed.py b/venv/Lib/site-packages/langchain/document_loaders/pubmed.py new file mode 100644 index 00000000..4bff6c4f --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/pubmed.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import PubMedLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"PubMedLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PubMedLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/pyspark_dataframe.py b/venv/Lib/site-packages/langchain/document_loaders/pyspark_dataframe.py new file mode 100644 index 00000000..1c870359 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/pyspark_dataframe.py @@ -0,0 +1,26 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.pyspark_dataframe import ( + PySparkDataFrameLoader, + ) + + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "PySparkDataFrameLoader": "langchain_community.document_loaders.pyspark_dataframe" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["PySparkDataFrameLoader"] diff --git a/venv/Lib/site-packages/langchain/document_loaders/python.py b/venv/Lib/site-packages/langchain/document_loaders/python.py new file mode 100644 index 00000000..04907a0b --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/python.py @@ -0,0 +1,22 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.python import PythonLoader + + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PythonLoader": "langchain_community.document_loaders.python"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["PythonLoader"] diff --git a/venv/Lib/site-packages/langchain/document_loaders/quip.py b/venv/Lib/site-packages/langchain/document_loaders/quip.py new file mode 100644 index 00000000..2b86d70d --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/quip.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.quip import QuipLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"QuipLoader": "langchain_community.document_loaders.quip"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "QuipLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/readthedocs.py b/venv/Lib/site-packages/langchain/document_loaders/readthedocs.py new file mode 100644 index 00000000..b3ba62c6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/readthedocs.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ReadTheDocsLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ReadTheDocsLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ReadTheDocsLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/recursive_url_loader.py b/venv/Lib/site-packages/langchain/document_loaders/recursive_url_loader.py new file mode 100644 index 00000000..d591641e --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/recursive_url_loader.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import RecursiveUrlLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"RecursiveUrlLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RecursiveUrlLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/reddit.py b/venv/Lib/site-packages/langchain/document_loaders/reddit.py new file mode 100644 index 00000000..3265d248 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/reddit.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import RedditPostsLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"RedditPostsLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RedditPostsLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/roam.py b/venv/Lib/site-packages/langchain/document_loaders/roam.py new file mode 100644 index 00000000..74478a2f --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/roam.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import RoamLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"RoamLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RoamLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/rocksetdb.py b/venv/Lib/site-packages/langchain/document_loaders/rocksetdb.py new file mode 100644 index 00000000..d2f63343 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/rocksetdb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import RocksetLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"RocksetLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RocksetLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/rspace.py b/venv/Lib/site-packages/langchain/document_loaders/rspace.py new file mode 100644 index 00000000..14a0f1a2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/rspace.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.rspace import RSpaceLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"RSpaceLoader": "langchain_community.document_loaders.rspace"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RSpaceLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/rss.py b/venv/Lib/site-packages/langchain/document_loaders/rss.py new file mode 100644 index 00000000..f186a2c6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/rss.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import RSSFeedLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"RSSFeedLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RSSFeedLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/rst.py b/venv/Lib/site-packages/langchain/document_loaders/rst.py new file mode 100644 index 00000000..bc02f1c2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/rst.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredRSTLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"UnstructuredRSTLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredRSTLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/rtf.py b/venv/Lib/site-packages/langchain/document_loaders/rtf.py new file mode 100644 index 00000000..e36ac4d0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/rtf.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredRTFLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"UnstructuredRTFLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredRTFLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/s3_directory.py b/venv/Lib/site-packages/langchain/document_loaders/s3_directory.py new file mode 100644 index 00000000..d81b637b --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/s3_directory.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import S3DirectoryLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"S3DirectoryLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "S3DirectoryLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/s3_file.py b/venv/Lib/site-packages/langchain/document_loaders/s3_file.py new file mode 100644 index 00000000..e6e49c19 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/s3_file.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import S3FileLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"S3FileLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "S3FileLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/sharepoint.py b/venv/Lib/site-packages/langchain/document_loaders/sharepoint.py new file mode 100644 index 00000000..31d95ca2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/sharepoint.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import SharePointLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SharePointLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SharePointLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/sitemap.py b/venv/Lib/site-packages/langchain/document_loaders/sitemap.py new file mode 100644 index 00000000..4124539b --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/sitemap.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import SitemapLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SitemapLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SitemapLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/slack_directory.py b/venv/Lib/site-packages/langchain/document_loaders/slack_directory.py new file mode 100644 index 00000000..4fc4e037 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/slack_directory.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import SlackDirectoryLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SlackDirectoryLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SlackDirectoryLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/snowflake_loader.py b/venv/Lib/site-packages/langchain/document_loaders/snowflake_loader.py new file mode 100644 index 00000000..b8c9d4c7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/snowflake_loader.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import SnowflakeLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SnowflakeLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SnowflakeLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/spreedly.py b/venv/Lib/site-packages/langchain/document_loaders/spreedly.py new file mode 100644 index 00000000..221681df --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/spreedly.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import SpreedlyLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SpreedlyLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SpreedlyLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/srt.py b/venv/Lib/site-packages/langchain/document_loaders/srt.py new file mode 100644 index 00000000..623150b6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/srt.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import SRTLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SRTLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SRTLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/stripe.py b/venv/Lib/site-packages/langchain/document_loaders/stripe.py new file mode 100644 index 00000000..940b22cf --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/stripe.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import StripeLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"StripeLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "StripeLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/telegram.py b/venv/Lib/site-packages/langchain/document_loaders/telegram.py new file mode 100644 index 00000000..14523f65 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/telegram.py @@ -0,0 +1,38 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ( + TelegramChatApiLoader, + TelegramChatFileLoader, + ) + from langchain_community.document_loaders.telegram import ( + concatenate_rows, + text_to_docs, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "concatenate_rows": "langchain_community.document_loaders.telegram", + "TelegramChatFileLoader": "langchain_community.document_loaders", + "text_to_docs": "langchain_community.document_loaders.telegram", + "TelegramChatApiLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "concatenate_rows", + "TelegramChatFileLoader", + "text_to_docs", + "TelegramChatApiLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/tencent_cos_directory.py b/venv/Lib/site-packages/langchain/document_loaders/tencent_cos_directory.py new file mode 100644 index 00000000..a456786d --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/tencent_cos_directory.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import TencentCOSDirectoryLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "TencentCOSDirectoryLoader": "langchain_community.document_loaders" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TencentCOSDirectoryLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/tencent_cos_file.py b/venv/Lib/site-packages/langchain/document_loaders/tencent_cos_file.py new file mode 100644 index 00000000..a11c44e2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/tencent_cos_file.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import TencentCOSFileLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"TencentCOSFileLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TencentCOSFileLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/tensorflow_datasets.py b/venv/Lib/site-packages/langchain/document_loaders/tensorflow_datasets.py new file mode 100644 index 00000000..c75f6a18 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/tensorflow_datasets.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import TensorflowDatasetLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"TensorflowDatasetLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TensorflowDatasetLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/text.py b/venv/Lib/site-packages/langchain/document_loaders/text.py new file mode 100644 index 00000000..26c399df --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/text.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import TextLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"TextLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TextLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/tomarkdown.py b/venv/Lib/site-packages/langchain/document_loaders/tomarkdown.py new file mode 100644 index 00000000..065f1a3e --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/tomarkdown.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ToMarkdownLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ToMarkdownLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ToMarkdownLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/toml.py b/venv/Lib/site-packages/langchain/document_loaders/toml.py new file mode 100644 index 00000000..bd1c9acf --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/toml.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import TomlLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"TomlLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TomlLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/trello.py b/venv/Lib/site-packages/langchain/document_loaders/trello.py new file mode 100644 index 00000000..b68b12f1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/trello.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import TrelloLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"TrelloLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TrelloLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/tsv.py b/venv/Lib/site-packages/langchain/document_loaders/tsv.py new file mode 100644 index 00000000..8e42a378 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/tsv.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredTSVLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"UnstructuredTSVLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredTSVLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/twitter.py b/venv/Lib/site-packages/langchain/document_loaders/twitter.py new file mode 100644 index 00000000..64ccb1e7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/twitter.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import TwitterTweetLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"TwitterTweetLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TwitterTweetLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/unstructured.py b/venv/Lib/site-packages/langchain/document_loaders/unstructured.py new file mode 100644 index 00000000..242d3a07 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/unstructured.py @@ -0,0 +1,54 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ( + UnstructuredAPIFileIOLoader, + UnstructuredAPIFileLoader, + UnstructuredFileIOLoader, + UnstructuredFileLoader, + ) + from langchain_community.document_loaders.unstructured import ( + UnstructuredBaseLoader, + get_elements_from_api, + satisfies_min_unstructured_version, + validate_unstructured_version, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "satisfies_min_unstructured_version": ( + "langchain_community.document_loaders.unstructured" + ), + "validate_unstructured_version": ( + "langchain_community.document_loaders.unstructured" + ), + "UnstructuredBaseLoader": "langchain_community.document_loaders.unstructured", + "UnstructuredFileLoader": "langchain_community.document_loaders", + "get_elements_from_api": "langchain_community.document_loaders.unstructured", + "UnstructuredAPIFileLoader": "langchain_community.document_loaders", + "UnstructuredFileIOLoader": "langchain_community.document_loaders", + "UnstructuredAPIFileIOLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "satisfies_min_unstructured_version", + "validate_unstructured_version", + "UnstructuredBaseLoader", + "UnstructuredFileLoader", + "get_elements_from_api", + "UnstructuredAPIFileLoader", + "UnstructuredFileIOLoader", + "UnstructuredAPIFileIOLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/url.py b/venv/Lib/site-packages/langchain/document_loaders/url.py new file mode 100644 index 00000000..5d3f86d3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/url.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredURLLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"UnstructuredURLLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredURLLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/url_playwright.py b/venv/Lib/site-packages/langchain/document_loaders/url_playwright.py new file mode 100644 index 00000000..dd692cf5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/url_playwright.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import PlaywrightURLLoader + from langchain_community.document_loaders.url_playwright import ( + PlaywrightEvaluator, + UnstructuredHtmlEvaluator, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "PlaywrightEvaluator": "langchain_community.document_loaders.url_playwright", + "UnstructuredHtmlEvaluator": "langchain_community.document_loaders.url_playwright", + "PlaywrightURLLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PlaywrightEvaluator", + "UnstructuredHtmlEvaluator", + "PlaywrightURLLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/url_selenium.py b/venv/Lib/site-packages/langchain/document_loaders/url_selenium.py new file mode 100644 index 00000000..4f6e5c62 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/url_selenium.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import SeleniumURLLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SeleniumURLLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SeleniumURLLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/weather.py b/venv/Lib/site-packages/langchain/document_loaders/weather.py new file mode 100644 index 00000000..1c5fad99 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/weather.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import WeatherDataLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"WeatherDataLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WeatherDataLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/web_base.py b/venv/Lib/site-packages/langchain/document_loaders/web_base.py new file mode 100644 index 00000000..9fd84d33 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/web_base.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import WebBaseLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"WebBaseLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WebBaseLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/whatsapp_chat.py b/venv/Lib/site-packages/langchain/document_loaders/whatsapp_chat.py new file mode 100644 index 00000000..3fc2133e --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/whatsapp_chat.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import WhatsAppChatLoader + from langchain_community.document_loaders.whatsapp_chat import concatenate_rows + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "concatenate_rows": "langchain_community.document_loaders.whatsapp_chat", + "WhatsAppChatLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "concatenate_rows", + "WhatsAppChatLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/wikipedia.py b/venv/Lib/site-packages/langchain/document_loaders/wikipedia.py new file mode 100644 index 00000000..3f3d73b2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/wikipedia.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import WikipediaLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"WikipediaLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WikipediaLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/word_document.py b/venv/Lib/site-packages/langchain/document_loaders/word_document.py new file mode 100644 index 00000000..4b8dd605 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/word_document.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ( + Docx2txtLoader, + UnstructuredWordDocumentLoader, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "Docx2txtLoader": "langchain_community.document_loaders", + "UnstructuredWordDocumentLoader": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Docx2txtLoader", + "UnstructuredWordDocumentLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/xml.py b/venv/Lib/site-packages/langchain/document_loaders/xml.py new file mode 100644 index 00000000..e14aa0c3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/xml.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import UnstructuredXMLLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"UnstructuredXMLLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UnstructuredXMLLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/xorbits.py b/venv/Lib/site-packages/langchain/document_loaders/xorbits.py new file mode 100644 index 00000000..9b48275c --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/xorbits.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import XorbitsLoader + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"XorbitsLoader": "langchain_community.document_loaders"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "XorbitsLoader", +] diff --git a/venv/Lib/site-packages/langchain/document_loaders/youtube.py b/venv/Lib/site-packages/langchain/document_loaders/youtube.py new file mode 100644 index 00000000..cf696783 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_loaders/youtube.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders import ( + GoogleApiClient, + GoogleApiYoutubeLoader, + YoutubeLoader, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "YoutubeLoader": "langchain_community.document_loaders", + "GoogleApiYoutubeLoader": "langchain_community.document_loaders", + "GoogleApiClient": "langchain_community.document_loaders", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "YoutubeLoader", + "GoogleApiYoutubeLoader", + "GoogleApiClient", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/__init__.py b/venv/Lib/site-packages/langchain/document_transformers/__init__.py new file mode 100644 index 00000000..adfd277c --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/__init__.py @@ -0,0 +1,77 @@ +"""**Document Transformers** are classes to transform Documents. + +**Document Transformers** usually used to transform a lot of Documents in a single run. + +**Class hierarchy:** + +.. code-block:: + + BaseDocumentTransformer --> # Examples: DoctranQATransformer, DoctranTextTranslator + +**Main helpers:** + +.. code-block:: + + Document +""" # noqa: E501 + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import ( + BeautifulSoupTransformer, + DoctranPropertyExtractor, + DoctranQATransformer, + DoctranTextTranslator, + EmbeddingsClusteringFilter, + EmbeddingsRedundantFilter, + GoogleTranslateTransformer, + Html2TextTransformer, + LongContextReorder, + NucliaTextTransformer, + OpenAIMetadataTagger, + get_stateful_documents, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BeautifulSoupTransformer": "langchain_community.document_transformers", + "DoctranQATransformer": "langchain_community.document_transformers", + "DoctranTextTranslator": "langchain_community.document_transformers", + "DoctranPropertyExtractor": "langchain_community.document_transformers", + "EmbeddingsClusteringFilter": "langchain_community.document_transformers", + "EmbeddingsRedundantFilter": "langchain_community.document_transformers", + "GoogleTranslateTransformer": "langchain_community.document_transformers", + "get_stateful_documents": "langchain_community.document_transformers", + "LongContextReorder": "langchain_community.document_transformers", + "NucliaTextTransformer": "langchain_community.document_transformers", + "OpenAIMetadataTagger": "langchain_community.document_transformers", + "Html2TextTransformer": "langchain_community.document_transformers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BeautifulSoupTransformer", + "DoctranQATransformer", + "DoctranTextTranslator", + "DoctranPropertyExtractor", + "EmbeddingsClusteringFilter", + "EmbeddingsRedundantFilter", + "GoogleTranslateTransformer", + "get_stateful_documents", + "LongContextReorder", + "NucliaTextTransformer", + "OpenAIMetadataTagger", + "Html2TextTransformer", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9bb92820 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/beautiful_soup_transformer.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/beautiful_soup_transformer.cpython-312.pyc new file mode 100644 index 00000000..5acfa206 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/beautiful_soup_transformer.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/doctran_text_extract.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/doctran_text_extract.cpython-312.pyc new file mode 100644 index 00000000..b4bfce2d Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/doctran_text_extract.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/doctran_text_qa.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/doctran_text_qa.cpython-312.pyc new file mode 100644 index 00000000..b57d979a Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/doctran_text_qa.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/doctran_text_translate.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/doctran_text_translate.cpython-312.pyc new file mode 100644 index 00000000..e8a15d43 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/doctran_text_translate.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/embeddings_redundant_filter.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/embeddings_redundant_filter.cpython-312.pyc new file mode 100644 index 00000000..3dc37268 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/embeddings_redundant_filter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/google_translate.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/google_translate.cpython-312.pyc new file mode 100644 index 00000000..a449a2eb Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/google_translate.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/html2text.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/html2text.cpython-312.pyc new file mode 100644 index 00000000..a01d7c4b Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/html2text.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/long_context_reorder.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/long_context_reorder.cpython-312.pyc new file mode 100644 index 00000000..b2700372 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/long_context_reorder.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/nuclia_text_transform.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/nuclia_text_transform.cpython-312.pyc new file mode 100644 index 00000000..ea4f0ece Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/nuclia_text_transform.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_transformers/__pycache__/openai_functions.cpython-312.pyc b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/openai_functions.cpython-312.pyc new file mode 100644 index 00000000..a5fffab7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/document_transformers/__pycache__/openai_functions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/document_transformers/beautiful_soup_transformer.py b/venv/Lib/site-packages/langchain/document_transformers/beautiful_soup_transformer.py new file mode 100644 index 00000000..8028cff8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/beautiful_soup_transformer.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import BeautifulSoupTransformer + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BeautifulSoupTransformer": "langchain_community.document_transformers" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BeautifulSoupTransformer", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/doctran_text_extract.py b/venv/Lib/site-packages/langchain/document_transformers/doctran_text_extract.py new file mode 100644 index 00000000..d6fcb4f6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/doctran_text_extract.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import DoctranPropertyExtractor + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DoctranPropertyExtractor": "langchain_community.document_transformers" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DoctranPropertyExtractor", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/doctran_text_qa.py b/venv/Lib/site-packages/langchain/document_transformers/doctran_text_qa.py new file mode 100644 index 00000000..21dc7d2b --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/doctran_text_qa.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import DoctranQATransformer + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DoctranQATransformer": "langchain_community.document_transformers" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DoctranQATransformer", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/doctran_text_translate.py b/venv/Lib/site-packages/langchain/document_transformers/doctran_text_translate.py new file mode 100644 index 00000000..e0c48863 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/doctran_text_translate.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import DoctranTextTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "DoctranTextTranslator": "langchain_community.document_transformers" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DoctranTextTranslator", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/embeddings_redundant_filter.py b/venv/Lib/site-packages/langchain/document_transformers/embeddings_redundant_filter.py new file mode 100644 index 00000000..2230444a --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/embeddings_redundant_filter.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import ( + EmbeddingsClusteringFilter, + EmbeddingsRedundantFilter, + get_stateful_documents, + ) + from langchain_community.document_transformers.embeddings_redundant_filter import ( + _DocumentWithState, + _filter_similar_embeddings, + _get_embeddings_from_stateful_docs, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "EmbeddingsRedundantFilter": "langchain_community.document_transformers", + "EmbeddingsClusteringFilter": "langchain_community.document_transformers", + "_DocumentWithState": ( + "langchain_community.document_transformers.embeddings_redundant_filter" + ), + "get_stateful_documents": "langchain_community.document_transformers", + "_get_embeddings_from_stateful_docs": ( + "langchain_community.document_transformers.embeddings_redundant_filter" + ), + "_filter_similar_embeddings": ( + "langchain_community.document_transformers.embeddings_redundant_filter" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EmbeddingsRedundantFilter", + "EmbeddingsClusteringFilter", + "_DocumentWithState", + "get_stateful_documents", + "_get_embeddings_from_stateful_docs", + "_filter_similar_embeddings", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/google_translate.py b/venv/Lib/site-packages/langchain/document_transformers/google_translate.py new file mode 100644 index 00000000..7fb25068 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/google_translate.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import GoogleTranslateTransformer + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "GoogleTranslateTransformer": "langchain_community.document_transformers" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleTranslateTransformer", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/html2text.py b/venv/Lib/site-packages/langchain/document_transformers/html2text.py new file mode 100644 index 00000000..307d4559 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/html2text.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import Html2TextTransformer + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "Html2TextTransformer": "langchain_community.document_transformers" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Html2TextTransformer", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/long_context_reorder.py b/venv/Lib/site-packages/langchain/document_transformers/long_context_reorder.py new file mode 100644 index 00000000..221ffda0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/long_context_reorder.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import LongContextReorder + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"LongContextReorder": "langchain_community.document_transformers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LongContextReorder", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/nuclia_text_transform.py b/venv/Lib/site-packages/langchain/document_transformers/nuclia_text_transform.py new file mode 100644 index 00000000..877ec94e --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/nuclia_text_transform.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import NucliaTextTransformer + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "NucliaTextTransformer": "langchain_community.document_transformers" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NucliaTextTransformer", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/openai_functions.py b/venv/Lib/site-packages/langchain/document_transformers/openai_functions.py new file mode 100644 index 00000000..0daeabf5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/openai_functions.py @@ -0,0 +1,32 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_transformers import OpenAIMetadataTagger + from langchain_community.document_transformers.openai_functions import ( + create_metadata_tagger, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "OpenAIMetadataTagger": "langchain_community.document_transformers", + "create_metadata_tagger": ( + "langchain_community.document_transformers.openai_functions" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenAIMetadataTagger", + "create_metadata_tagger", +] diff --git a/venv/Lib/site-packages/langchain/document_transformers/xsl/html_chunks_with_headers.xslt b/venv/Lib/site-packages/langchain/document_transformers/xsl/html_chunks_with_headers.xslt new file mode 100644 index 00000000..285edfe8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/document_transformers/xsl/html_chunks_with_headers.xslt @@ -0,0 +1,199 @@ + + + + + div|p|blockquote|ol|ul + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +

+ +

+
+ + +
+
+ + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + [ + + ]/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
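Every `langchain.document_loaders.*` and `langchain.document_transformers.*` module added above follows the same deprecation-shim pattern: a `DEPRECATED_LOOKUP` table mapping each public name to its new `langchain_community` home, plus a module-level `__getattr__` (PEP 562) that delegates to `create_importer`. The sketch below illustrates the mechanism only; it is not the real implementation of `langchain._api.create_importer`, and `create_importer_sketch` is a hypothetical name invented for this example.

```python
# Minimal sketch of the shim pattern shared by every module in this patch.
# NOTE: hypothetical illustration, NOT langchain._api.create_importer itself.
import importlib
import warnings
from typing import Any, Callable, Dict


def create_importer_sketch(
    package: str, deprecated_lookups: Dict[str, str]
) -> Callable[[str], Any]:
    """Return a resolver that maps old attribute names to their new homes."""

    def import_attribute(name: str) -> Any:
        if name not in deprecated_lookups:
            raise AttributeError(f"module {package!r} has no attribute {name!r}")
        new_module = deprecated_lookups[name]
        warnings.warn(
            f"Importing {name} from {package} is deprecated; "
            f"import it from {new_module} instead.",
            DeprecationWarning,
            stacklevel=3,
        )
        # Import lazily, so the optional dependency is only needed on first use.
        return getattr(importlib.import_module(new_module), name)

    return import_attribute


# Wiring, mirroring e.g. langchain/document_loaders/text.py from the patch:
DEPRECATED_LOOKUP = {"TextLoader": "langchain_community.document_loaders"}
_import_attribute = create_importer_sketch(__name__, DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    # PEP 562: Python calls this for attributes the module does not define.
    return _import_attribute(name)
```

The `if TYPE_CHECKING:` block in each shim keeps the re-exported names visible to static type checkers and IDEs without paying the runtime import, and `__all__` preserves the module's public surface for `from ... import *` and documentation tooling.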
diff --git a/venv/Lib/site-packages/langchain/embeddings/__init__.py b/venv/Lib/site-packages/langchain/embeddings/__init__.py new file mode 100644 index 00000000..8c8ef661 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/__init__.py @@ -0,0 +1,226 @@ +"""**Embedding models** are wrappers around embedding models +from different APIs and services. + +**Embedding models** can be LLMs or not. + +**Class hierarchy:** + +.. code-block:: + + Embeddings --> Embeddings # Examples: OpenAIEmbeddings, HuggingFaceEmbeddings +""" + +import logging +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer +from langchain.embeddings.base import init_embeddings +from langchain.embeddings.cache import CacheBackedEmbeddings + +if TYPE_CHECKING: + from langchain_community.embeddings import ( + AlephAlphaAsymmetricSemanticEmbedding, + AlephAlphaSymmetricSemanticEmbedding, + AwaEmbeddings, + AzureOpenAIEmbeddings, + BedrockEmbeddings, + BookendEmbeddings, + ClarifaiEmbeddings, + CohereEmbeddings, + DashScopeEmbeddings, + DatabricksEmbeddings, + DeepInfraEmbeddings, + DeterministicFakeEmbedding, + EdenAiEmbeddings, + ElasticsearchEmbeddings, + EmbaasEmbeddings, + ErnieEmbeddings, + FakeEmbeddings, + FastEmbedEmbeddings, + GooglePalmEmbeddings, + GPT4AllEmbeddings, + GradientEmbeddings, + HuggingFaceBgeEmbeddings, + HuggingFaceEmbeddings, + HuggingFaceHubEmbeddings, + HuggingFaceInferenceAPIEmbeddings, + HuggingFaceInstructEmbeddings, + InfinityEmbeddings, + JavelinAIGatewayEmbeddings, + JinaEmbeddings, + JohnSnowLabsEmbeddings, + LlamaCppEmbeddings, + LocalAIEmbeddings, + MiniMaxEmbeddings, + MlflowAIGatewayEmbeddings, + MlflowEmbeddings, + ModelScopeEmbeddings, + MosaicMLInstructorEmbeddings, + NLPCloudEmbeddings, + OctoAIEmbeddings, + OllamaEmbeddings, + OpenAIEmbeddings, + OpenVINOEmbeddings, + QianfanEmbeddingsEndpoint, + SagemakerEndpointEmbeddings, + SelfHostedEmbeddings, + SelfHostedHuggingFaceEmbeddings, + SelfHostedHuggingFaceInstructEmbeddings, + SentenceTransformerEmbeddings, + SpacyEmbeddings, + TensorflowHubEmbeddings, + VertexAIEmbeddings, + VoyageEmbeddings, + XinferenceEmbeddings, + ) + + +logger = logging.getLogger(__name__) + + +# TODO: this is in here to maintain backwards compatibility +class HypotheticalDocumentEmbedder: + def __init__(self, *args: Any, **kwargs: Any): + logger.warning( + "Using a deprecated class. Please use " + "`from langchain.chains import HypotheticalDocumentEmbedder` instead" + ) + from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H + + return H(*args, **kwargs) # type: ignore[return-value] + + @classmethod + def from_llm(cls, *args: Any, **kwargs: Any) -> Any: + logger.warning( + "Using a deprecated class. Please use " + "`from langchain.chains import HypotheticalDocumentEmbedder` instead" + ) + from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H + + return H.from_llm(*args, **kwargs) + + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
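One detail in the `HypotheticalDocumentEmbedder` compatibility wrapper above is worth flagging: its `__init__` returns a value, and Python raises `TypeError` whenever `__init__` returns anything but `None`, so direct instantiation of the wrapper fails and only the `from_llm` classmethod path actually works. A minimal demonstration (hypothetical `Shim` class, not from the patch):

```python
# Hypothetical reproduction: any class whose __init__ returns a non-None value
# raises TypeError on direct instantiation.
class Shim:
    def __init__(self) -> None:
        # Mirrors the wrapper above, which returns H(*args, **kwargs).
        return "replacement instance"  # type: ignore[return-value]


try:
    Shim()
except TypeError as exc:
    # Prints: __init__() should return None, not 'str'
    print(exc)
```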
+DEPRECATED_LOOKUP = { + "AlephAlphaAsymmetricSemanticEmbedding": "langchain_community.embeddings", + "AlephAlphaSymmetricSemanticEmbedding": "langchain_community.embeddings", + "AwaEmbeddings": "langchain_community.embeddings", + "AzureOpenAIEmbeddings": "langchain_community.embeddings", + "BedrockEmbeddings": "langchain_community.embeddings", + "BookendEmbeddings": "langchain_community.embeddings", + "ClarifaiEmbeddings": "langchain_community.embeddings", + "CohereEmbeddings": "langchain_community.embeddings", + "DashScopeEmbeddings": "langchain_community.embeddings", + "DatabricksEmbeddings": "langchain_community.embeddings", + "DeepInfraEmbeddings": "langchain_community.embeddings", + "DeterministicFakeEmbedding": "langchain_community.embeddings", + "EdenAiEmbeddings": "langchain_community.embeddings", + "ElasticsearchEmbeddings": "langchain_community.embeddings", + "EmbaasEmbeddings": "langchain_community.embeddings", + "ErnieEmbeddings": "langchain_community.embeddings", + "FakeEmbeddings": "langchain_community.embeddings", + "FastEmbedEmbeddings": "langchain_community.embeddings", + "GooglePalmEmbeddings": "langchain_community.embeddings", + "GPT4AllEmbeddings": "langchain_community.embeddings", + "GradientEmbeddings": "langchain_community.embeddings", + "HuggingFaceBgeEmbeddings": "langchain_community.embeddings", + "HuggingFaceEmbeddings": "langchain_community.embeddings", + "HuggingFaceHubEmbeddings": "langchain_community.embeddings", + "HuggingFaceInferenceAPIEmbeddings": "langchain_community.embeddings", + "HuggingFaceInstructEmbeddings": "langchain_community.embeddings", + "InfinityEmbeddings": "langchain_community.embeddings", + "JavelinAIGatewayEmbeddings": "langchain_community.embeddings", + "JinaEmbeddings": "langchain_community.embeddings", + "JohnSnowLabsEmbeddings": "langchain_community.embeddings", + "LlamaCppEmbeddings": "langchain_community.embeddings", + "LocalAIEmbeddings": "langchain_community.embeddings", + "MiniMaxEmbeddings": "langchain_community.embeddings", + "MlflowAIGatewayEmbeddings": "langchain_community.embeddings", + "MlflowEmbeddings": "langchain_community.embeddings", + "ModelScopeEmbeddings": "langchain_community.embeddings", + "MosaicMLInstructorEmbeddings": "langchain_community.embeddings", + "NLPCloudEmbeddings": "langchain_community.embeddings", + "OctoAIEmbeddings": "langchain_community.embeddings", + "OllamaEmbeddings": "langchain_community.embeddings", + "OpenAIEmbeddings": "langchain_community.embeddings", + "OpenVINOEmbeddings": "langchain_community.embeddings", + "QianfanEmbeddingsEndpoint": "langchain_community.embeddings", + "SagemakerEndpointEmbeddings": "langchain_community.embeddings", + "SelfHostedEmbeddings": "langchain_community.embeddings", + "SelfHostedHuggingFaceEmbeddings": "langchain_community.embeddings", + "SelfHostedHuggingFaceInstructEmbeddings": "langchain_community.embeddings", + "SentenceTransformerEmbeddings": "langchain_community.embeddings", + "SpacyEmbeddings": "langchain_community.embeddings", + "TensorflowHubEmbeddings": "langchain_community.embeddings", + "VertexAIEmbeddings": "langchain_community.embeddings", + "VoyageEmbeddings": "langchain_community.embeddings", + "XinferenceEmbeddings": "langchain_community.embeddings", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AlephAlphaAsymmetricSemanticEmbedding", + 
"AlephAlphaSymmetricSemanticEmbedding", + "AwaEmbeddings", + "AzureOpenAIEmbeddings", + "BedrockEmbeddings", + "BookendEmbeddings", + "CacheBackedEmbeddings", + "ClarifaiEmbeddings", + "CohereEmbeddings", + "DashScopeEmbeddings", + "DatabricksEmbeddings", + "DeepInfraEmbeddings", + "DeterministicFakeEmbedding", + "EdenAiEmbeddings", + "ElasticsearchEmbeddings", + "EmbaasEmbeddings", + "ErnieEmbeddings", + "FakeEmbeddings", + "FastEmbedEmbeddings", + "GooglePalmEmbeddings", + "GPT4AllEmbeddings", + "GradientEmbeddings", + "HuggingFaceBgeEmbeddings", + "HuggingFaceEmbeddings", + "HuggingFaceHubEmbeddings", + "HuggingFaceInferenceAPIEmbeddings", + "HuggingFaceInstructEmbeddings", + "InfinityEmbeddings", + "JavelinAIGatewayEmbeddings", + "JinaEmbeddings", + "JohnSnowLabsEmbeddings", + "LlamaCppEmbeddings", + "LocalAIEmbeddings", + "MiniMaxEmbeddings", + "MlflowAIGatewayEmbeddings", + "MlflowEmbeddings", + "ModelScopeEmbeddings", + "MosaicMLInstructorEmbeddings", + "NLPCloudEmbeddings", + "OctoAIEmbeddings", + "OllamaEmbeddings", + "OpenAIEmbeddings", + "OpenVINOEmbeddings", + "QianfanEmbeddingsEndpoint", + "SagemakerEndpointEmbeddings", + "SelfHostedEmbeddings", + "SelfHostedHuggingFaceEmbeddings", + "SelfHostedHuggingFaceInstructEmbeddings", + "SentenceTransformerEmbeddings", + "SpacyEmbeddings", + "TensorflowHubEmbeddings", + "VertexAIEmbeddings", + "VoyageEmbeddings", + "XinferenceEmbeddings", + "init_embeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..41c86f2d Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/aleph_alpha.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/aleph_alpha.cpython-312.pyc new file mode 100644 index 00000000..75d68ba5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/aleph_alpha.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/awa.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/awa.cpython-312.pyc new file mode 100644 index 00000000..dcae5320 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/awa.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/azure_openai.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/azure_openai.cpython-312.pyc new file mode 100644 index 00000000..127f46c0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/azure_openai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/baidu_qianfan_endpoint.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/baidu_qianfan_endpoint.cpython-312.pyc new file mode 100644 index 00000000..e09587d7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/baidu_qianfan_endpoint.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..d208428b Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/base.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/embeddings/__pycache__/bedrock.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/bedrock.cpython-312.pyc new file mode 100644 index 00000000..edc3b5d9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/bedrock.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/bookend.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/bookend.cpython-312.pyc new file mode 100644 index 00000000..b850335d Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/bookend.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/cache.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/cache.cpython-312.pyc new file mode 100644 index 00000000..41f831ef Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/cache.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/clarifai.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/clarifai.cpython-312.pyc new file mode 100644 index 00000000..96a6442a Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/clarifai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/cloudflare_workersai.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/cloudflare_workersai.cpython-312.pyc new file mode 100644 index 00000000..339fc169 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/cloudflare_workersai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/cohere.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/cohere.cpython-312.pyc new file mode 100644 index 00000000..ed29b094 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/cohere.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/dashscope.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/dashscope.cpython-312.pyc new file mode 100644 index 00000000..51fb1fc2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/dashscope.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/databricks.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/databricks.cpython-312.pyc new file mode 100644 index 00000000..9edd7085 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/databricks.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/deepinfra.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/deepinfra.cpython-312.pyc new file mode 100644 index 00000000..025dad08 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/deepinfra.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/edenai.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/edenai.cpython-312.pyc new file mode 100644 index 00000000..c7ecbe1e Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/edenai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/elasticsearch.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/embeddings/__pycache__/elasticsearch.cpython-312.pyc new file mode 100644 index 00000000..72c067a2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/elasticsearch.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/embaas.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/embaas.cpython-312.pyc new file mode 100644 index 00000000..a8e37c45 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/embaas.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/ernie.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/ernie.cpython-312.pyc new file mode 100644 index 00000000..6b88f231 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/ernie.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/fake.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/fake.cpython-312.pyc new file mode 100644 index 00000000..c6d6dbe1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/fake.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/fastembed.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/fastembed.cpython-312.pyc new file mode 100644 index 00000000..76c2d245 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/fastembed.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/google_palm.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/google_palm.cpython-312.pyc new file mode 100644 index 00000000..67d36034 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/google_palm.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/gpt4all.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/gpt4all.cpython-312.pyc new file mode 100644 index 00000000..93229f21 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/gpt4all.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/gradient_ai.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/gradient_ai.cpython-312.pyc new file mode 100644 index 00000000..276e61f8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/gradient_ai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/huggingface.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/huggingface.cpython-312.pyc new file mode 100644 index 00000000..358c1a0d Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/huggingface.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/huggingface_hub.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/huggingface_hub.cpython-312.pyc new file mode 100644 index 00000000..6c56d1be Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/huggingface_hub.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/infinity.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/infinity.cpython-312.pyc new file mode 100644 index 
00000000..b4887778 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/infinity.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/javelin_ai_gateway.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/javelin_ai_gateway.cpython-312.pyc new file mode 100644 index 00000000..6e4ace43 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/javelin_ai_gateway.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/jina.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/jina.cpython-312.pyc new file mode 100644 index 00000000..e8eefee8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/jina.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/johnsnowlabs.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/johnsnowlabs.cpython-312.pyc new file mode 100644 index 00000000..80d6c370 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/johnsnowlabs.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/llamacpp.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/llamacpp.cpython-312.pyc new file mode 100644 index 00000000..b0643989 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/llamacpp.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/llm_rails.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/llm_rails.cpython-312.pyc new file mode 100644 index 00000000..b8067ef4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/llm_rails.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/localai.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/localai.cpython-312.pyc new file mode 100644 index 00000000..4a3e93a6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/localai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/minimax.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/minimax.cpython-312.pyc new file mode 100644 index 00000000..e34fa27b Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/minimax.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/mlflow.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/mlflow.cpython-312.pyc new file mode 100644 index 00000000..d55b33f7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/mlflow.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/mlflow_gateway.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/mlflow_gateway.cpython-312.pyc new file mode 100644 index 00000000..6160e845 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/mlflow_gateway.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/modelscope_hub.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/modelscope_hub.cpython-312.pyc new file mode 100644 index 00000000..da8f7c41 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/embeddings/__pycache__/modelscope_hub.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/mosaicml.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/mosaicml.cpython-312.pyc new file mode 100644 index 00000000..6af87e2c Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/mosaicml.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/nlpcloud.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/nlpcloud.cpython-312.pyc new file mode 100644 index 00000000..24072e5a Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/nlpcloud.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/octoai_embeddings.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/octoai_embeddings.cpython-312.pyc new file mode 100644 index 00000000..e16e4a90 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/octoai_embeddings.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/ollama.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/ollama.cpython-312.pyc new file mode 100644 index 00000000..25b056fb Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/ollama.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/openai.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/openai.cpython-312.pyc new file mode 100644 index 00000000..335dad81 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/openai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/sagemaker_endpoint.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/sagemaker_endpoint.cpython-312.pyc new file mode 100644 index 00000000..5ce3f6e9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/sagemaker_endpoint.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/self_hosted.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/self_hosted.cpython-312.pyc new file mode 100644 index 00000000..fec5bd54 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/self_hosted.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/self_hosted_hugging_face.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/self_hosted_hugging_face.cpython-312.pyc new file mode 100644 index 00000000..77b848d8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/self_hosted_hugging_face.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/sentence_transformer.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/sentence_transformer.cpython-312.pyc new file mode 100644 index 00000000..1a6c195f Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/sentence_transformer.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/spacy_embeddings.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/spacy_embeddings.cpython-312.pyc new file mode 100644 index 00000000..af7be847 Binary files 
/dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/spacy_embeddings.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/tensorflow_hub.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/tensorflow_hub.cpython-312.pyc new file mode 100644 index 00000000..e674292e Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/tensorflow_hub.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/vertexai.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/vertexai.cpython-312.pyc new file mode 100644 index 00000000..3b16d707 Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/vertexai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/voyageai.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/voyageai.cpython-312.pyc new file mode 100644 index 00000000..f49ecb5e Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/voyageai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/__pycache__/xinference.cpython-312.pyc b/venv/Lib/site-packages/langchain/embeddings/__pycache__/xinference.cpython-312.pyc new file mode 100644 index 00000000..3f8d133e Binary files /dev/null and b/venv/Lib/site-packages/langchain/embeddings/__pycache__/xinference.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/embeddings/aleph_alpha.py b/venv/Lib/site-packages/langchain/embeddings/aleph_alpha.py new file mode 100644 index 00000000..45027c6b --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/aleph_alpha.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import ( + AlephAlphaAsymmetricSemanticEmbedding, + AlephAlphaSymmetricSemanticEmbedding, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AlephAlphaAsymmetricSemanticEmbedding": "langchain_community.embeddings", + "AlephAlphaSymmetricSemanticEmbedding": "langchain_community.embeddings", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AlephAlphaAsymmetricSemanticEmbedding", + "AlephAlphaSymmetricSemanticEmbedding", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/awa.py b/venv/Lib/site-packages/langchain/embeddings/awa.py new file mode 100644 index 00000000..bf0c6048 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/awa.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import AwaEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
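Every shim module added under `venv/Lib/site-packages/langchain/embeddings/` in this diff, awa.py included, repeats one recipe: a `DEPRECATED_LOOKUP` table (awa's single entry follows below) feeds `create_importer`, and a module-level `__getattr__` (PEP 562) forwards old imports to `langchain_community`. A minimal, self-contained sketch of that recipe; the names are illustrative, not the actual internals of `langchain._api.create_importer`:

```python
# Illustrative stand-in for the shim pattern (not langchain's real code).
# Saved as a module, PEP 562 makes Python call __getattr__ for any attribute
# the module itself does not define.
import importlib
import warnings
from typing import Any

_LOOKUP = {"AwaEmbeddings": "langchain_community.embeddings"}  # old name -> new home


def __getattr__(name: str) -> Any:
    if name in _LOOKUP:
        new_home = _LOOKUP[name]
        warnings.warn(
            f"Importing {name} from here is deprecated; use {new_home} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        # langchain_community is imported only now, keeping it optional.
        return getattr(importlib.import_module(new_home), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```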
+DEPRECATED_LOOKUP = {"AwaEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AwaEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/azure_openai.py b/venv/Lib/site-packages/langchain/embeddings/azure_openai.py new file mode 100644 index 00000000..d915224f --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/azure_openai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import AzureOpenAIEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AzureOpenAIEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureOpenAIEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/baidu_qianfan_endpoint.py b/venv/Lib/site-packages/langchain/embeddings/baidu_qianfan_endpoint.py new file mode 100644 index 00000000..b361978d --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/baidu_qianfan_endpoint.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import QianfanEmbeddingsEndpoint + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"QianfanEmbeddingsEndpoint": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "QianfanEmbeddingsEndpoint", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/base.py b/venv/Lib/site-packages/langchain/embeddings/base.py new file mode 100644 index 00000000..7c20e91c --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/base.py @@ -0,0 +1,226 @@ +import functools +from importlib import util +from typing import Any, Optional, Union + +from langchain_core.embeddings import Embeddings +from langchain_core.runnables import Runnable + +_SUPPORTED_PROVIDERS = { + "azure_openai": "langchain_openai", + "bedrock": "langchain_aws", + "cohere": "langchain_cohere", + "google_vertexai": "langchain_google_vertexai", + "huggingface": "langchain_huggingface", + "mistralai": "langchain_mistralai", + "ollama": "langchain_ollama", + "openai": "langchain_openai", +} + + +def _get_provider_list() -> str: + """Get formatted list of providers and their packages.""" + return "\n".join( + f" - {p}: {pkg.replace('_', '-')}" for p, pkg in _SUPPORTED_PROVIDERS.items() + ) + + +def _parse_model_string(model_name: str) -> tuple[str, str]: + """Parse a model string into provider and model name components. + + The model string should be in the format 'provider:model-name', where provider + is one of the supported providers. 
+ + Args: + model_name: A model string in the format 'provider:model-name' + + Returns: + A tuple of (provider, model_name) + + .. code-block:: python + + _parse_model_string("openai:text-embedding-3-small") + # Returns: ("openai", "text-embedding-3-small") + + _parse_model_string("bedrock:amazon.titan-embed-text-v1") + # Returns: ("bedrock", "amazon.titan-embed-text-v1") + + Raises: + ValueError: If the model string is not in the correct format or + the provider is unsupported + """ + if ":" not in model_name: + providers = _SUPPORTED_PROVIDERS + raise ValueError( + f"Invalid model format '{model_name}'.\n" + f"Model name must be in format 'provider:model-name'\n" + f"Example valid model strings:\n" + f" - openai:text-embedding-3-small\n" + f" - bedrock:amazon.titan-embed-text-v1\n" + f" - cohere:embed-english-v3.0\n" + f"Supported providers: {providers}" + ) + + provider, model = model_name.split(":", 1) + provider = provider.lower().strip() + model = model.strip() + + if provider not in _SUPPORTED_PROVIDERS: + raise ValueError( + f"Provider '{provider}' is not supported.\n" + f"Supported providers and their required packages:\n" + f"{_get_provider_list()}" + ) + if not model: + raise ValueError("Model name cannot be empty") + return provider, model + + +def _infer_model_and_provider( + model: str, *, provider: Optional[str] = None +) -> tuple[str, str]: + if not model.strip(): + raise ValueError("Model name cannot be empty") + if provider is None and ":" in model: + provider, model_name = _parse_model_string(model) + else: + provider = provider + model_name = model + + if not provider: + providers = _SUPPORTED_PROVIDERS + raise ValueError( + "Must specify either:\n" + "1. A model string in format 'provider:model-name'\n" + " Example: 'openai:text-embedding-3-small'\n" + "2. Or explicitly set provider from: " + f"{providers}" + ) + + if provider not in _SUPPORTED_PROVIDERS: + raise ValueError( + f"Provider '{provider}' is not supported.\n" + f"Supported providers and their required packages:\n" + f"{_get_provider_list()}" + ) + return provider, model_name + + +@functools.lru_cache(maxsize=len(_SUPPORTED_PROVIDERS)) +def _check_pkg(pkg: str) -> None: + """Check if a package is installed.""" + if not util.find_spec(pkg): + raise ImportError( + f"Could not import {pkg} python package. " + f"Please install it with `pip install {pkg}`" + ) + + +def init_embeddings( + model: str, + *, + provider: Optional[str] = None, + **kwargs: Any, +) -> Union[Embeddings, Runnable[Any, list[float]]]: + """Initialize an embeddings model from a model name and optional provider. + + **Note:** Must have the integration package corresponding to the model provider + installed. + + Args: + model: Name of the model to use. Can be either: + - A model string like "openai:text-embedding-3-small" + - Just the model name if provider is specified + provider: Optional explicit provider name. If not specified, + will attempt to parse from the model string. Supported providers + and their required packages: + + {_get_provider_list()} + + **kwargs: Additional model-specific parameters passed to the embedding model. + These vary by provider, see the provider-specific documentation for details. + + Returns: + An Embeddings instance that can generate embeddings for text. + + Raises: + ValueError: If the model provider is not supported or cannot be determined + ImportError: If the required provider package is not installed + + .. dropdown:: Example Usage + :open: + + .. 
code-block:: python + + # Using a model string + model = init_embeddings("openai:text-embedding-3-small") + model.embed_query("Hello, world!") + + # Using explicit provider + model = init_embeddings( + model="text-embedding-3-small", + provider="openai" + ) + model.embed_documents(["Hello, world!", "Goodbye, world!"]) + + # With additional parameters + model = init_embeddings( + "openai:text-embedding-3-small", + api_key="sk-..." + ) + + .. versionadded:: 0.3.9 + """ + if not model: + providers = _SUPPORTED_PROVIDERS.keys() + raise ValueError( + f"Must specify model name. Supported providers are: {', '.join(providers)}" + ) + + provider, model_name = _infer_model_and_provider(model, provider=provider) + pkg = _SUPPORTED_PROVIDERS[provider] + _check_pkg(pkg) + + if provider == "openai": + from langchain_openai import OpenAIEmbeddings + + return OpenAIEmbeddings(model=model_name, **kwargs) + elif provider == "azure_openai": + from langchain_openai import AzureOpenAIEmbeddings + + return AzureOpenAIEmbeddings(model=model_name, **kwargs) + elif provider == "google_vertexai": + from langchain_google_vertexai import VertexAIEmbeddings + + return VertexAIEmbeddings(model=model_name, **kwargs) + elif provider == "bedrock": + from langchain_aws import BedrockEmbeddings + + return BedrockEmbeddings(model_id=model_name, **kwargs) + elif provider == "cohere": + from langchain_cohere import CohereEmbeddings + + return CohereEmbeddings(model=model_name, **kwargs) + elif provider == "mistralai": + from langchain_mistralai import MistralAIEmbeddings + + return MistralAIEmbeddings(model=model_name, **kwargs) + elif provider == "huggingface": + from langchain_huggingface import HuggingFaceEmbeddings + + return HuggingFaceEmbeddings(model_name=model_name, **kwargs) + elif provider == "ollama": + from langchain_ollama import OllamaEmbeddings + + return OllamaEmbeddings(model=model_name, **kwargs) + else: + raise ValueError( + f"Provider '{provider}' is not supported.\n" + f"Supported providers and their required packages:\n" + f"{_get_provider_list()}" + ) + + +__all__ = [ + "init_embeddings", + "Embeddings", # This one is for backwards compatibility +] diff --git a/venv/Lib/site-packages/langchain/embeddings/bedrock.py b/venv/Lib/site-packages/langchain/embeddings/bedrock.py new file mode 100644 index 00000000..ecb6fbb5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/bedrock.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import BedrockEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
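Before the bedrock shim's lookup table continues below, a quick usage sketch for the `init_embeddings` factory defined in base.py above. This assumes `langchain>=0.3.9` with `langchain-openai` installed and `OPENAI_API_KEY` exported; the model name is an example, not something pinned by this repository:

```python
from langchain.embeddings import init_embeddings

# Provider is inferred from the "provider:model-name" string...
emb = init_embeddings("openai:text-embedding-3-small")

# ...or passed explicitly alongside a bare model name.
emb = init_embeddings(model="text-embedding-3-small", provider="openai")

print(len(emb.embed_query("hello, world")))  # embedding dimensionality
```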
+DEPRECATED_LOOKUP = {"BedrockEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BedrockEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/bookend.py b/venv/Lib/site-packages/langchain/embeddings/bookend.py new file mode 100644 index 00000000..4854b6f5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/bookend.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import BookendEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"BookendEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BookendEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/cache.py b/venv/Lib/site-packages/langchain/embeddings/cache.py new file mode 100644 index 00000000..163fd942 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/cache.py @@ -0,0 +1,277 @@ +"""Module contains code for a cache backed embedder. + +The cache backed embedder is a wrapper around an embedder that caches +embeddings in a key-value store. The cache is used to avoid recomputing +embeddings for the same text. + +The text is hashed and the hash is used as the key in the cache. +""" + +from __future__ import annotations + +import hashlib +import json +import uuid +from collections.abc import Sequence +from functools import partial +from typing import Callable, Optional, Union, cast + +from langchain_core.embeddings import Embeddings +from langchain_core.stores import BaseStore, ByteStore +from langchain_core.utils.iter import batch_iterate + +from langchain.storage.encoder_backed import EncoderBackedStore + +NAMESPACE_UUID = uuid.UUID(int=1985) + + +def _hash_string_to_uuid(input_string: str) -> uuid.UUID: + """Hash a string and returns the corresponding UUID.""" + hash_value = hashlib.sha1(input_string.encode("utf-8")).hexdigest() + return uuid.uuid5(NAMESPACE_UUID, hash_value) + + +def _key_encoder(key: str, namespace: str) -> str: + """Encode a key.""" + return namespace + str(_hash_string_to_uuid(key)) + + +def _create_key_encoder(namespace: str) -> Callable[[str], str]: + """Create an encoder for a key.""" + return partial(_key_encoder, namespace=namespace) + + +def _value_serializer(value: Sequence[float]) -> bytes: + """Serialize a value.""" + return json.dumps(value).encode() + + +def _value_deserializer(serialized_value: bytes) -> list[float]: + """Deserialize a value.""" + return cast(list[float], json.loads(serialized_value.decode())) + + +class CacheBackedEmbeddings(Embeddings): + """Interface for caching results from embedding models. + + The interface allows works with any store that implements + the abstract store interface accepting keys of type str and values of list of + floats. + + If need be, the interface can be extended to accept other implementations + of the value serializer and deserializer, as well as the key encoder. + + Note that by default only document embeddings are cached. 
To cache query + embeddings too, pass in a query_embedding_store to constructor. + + Examples: + + .. code-block: python + + from langchain.embeddings import CacheBackedEmbeddings + from langchain.storage import LocalFileStore + from langchain_community.embeddings import OpenAIEmbeddings + + store = LocalFileStore('./my_cache') + + underlying_embedder = OpenAIEmbeddings() + embedder = CacheBackedEmbeddings.from_bytes_store( + underlying_embedder, store, namespace=underlying_embedder.model + ) + + # Embedding is computed and cached + embeddings = embedder.embed_documents(["hello", "goodbye"]) + + # Embeddings are retrieved from the cache, no computation is done + embeddings = embedder.embed_documents(["hello", "goodbye"]) + """ + + def __init__( + self, + underlying_embeddings: Embeddings, + document_embedding_store: BaseStore[str, list[float]], + *, + batch_size: Optional[int] = None, + query_embedding_store: Optional[BaseStore[str, list[float]]] = None, + ) -> None: + """Initialize the embedder. + + Args: + underlying_embeddings: the embedder to use for computing embeddings. + document_embedding_store: The store to use for caching document embeddings. + batch_size: The number of documents to embed between store updates. + query_embedding_store: The store to use for caching query embeddings. + If None, query embeddings are not cached. + """ + super().__init__() + self.document_embedding_store = document_embedding_store + self.query_embedding_store = query_embedding_store + self.underlying_embeddings = underlying_embeddings + self.batch_size = batch_size + + def embed_documents(self, texts: list[str]) -> list[list[float]]: + """Embed a list of texts. + + The method first checks the cache for the embeddings. + If the embeddings are not found, the method uses the underlying embedder + to embed the documents and stores the results in the cache. + + Args: + texts: A list of texts to embed. + + Returns: + A list of embeddings for the given texts. + """ + vectors: list[Union[list[float], None]] = self.document_embedding_store.mget( + texts + ) + all_missing_indices: list[int] = [ + i for i, vector in enumerate(vectors) if vector is None + ] + + for missing_indices in batch_iterate(self.batch_size, all_missing_indices): + missing_texts = [texts[i] for i in missing_indices] + missing_vectors = self.underlying_embeddings.embed_documents(missing_texts) + self.document_embedding_store.mset( + list(zip(missing_texts, missing_vectors)) + ) + for index, updated_vector in zip(missing_indices, missing_vectors): + vectors[index] = updated_vector + + return cast( + list[list[float]], vectors + ) # Nones should have been resolved by now + + async def aembed_documents(self, texts: list[str]) -> list[list[float]]: + """Embed a list of texts. + + The method first checks the cache for the embeddings. + If the embeddings are not found, the method uses the underlying embedder + to embed the documents and stores the results in the cache. + + Args: + texts: A list of texts to embed. + + Returns: + A list of embeddings for the given texts. + """ + vectors: list[ + Union[list[float], None] + ] = await self.document_embedding_store.amget(texts) + all_missing_indices: list[int] = [ + i for i, vector in enumerate(vectors) if vector is None + ] + + # batch_iterate supports None batch_size which returns all elements at once + # as a single batch. 
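The `for missing_indices in batch_iterate(...)` loop that follows (like its synchronous twin in `embed_documents` above) embeds only the cache misses, in batches, and writes the results back into their original slots. A standalone, hypothetical rendering of that idea; `fill_misses` and its arguments are invented for illustration and are not part of langchain's API:

```python
from typing import Callable, Optional

def fill_misses(
    texts: list[str],
    cached: list[Optional[list[float]]],
    embed: Callable[[list[str]], list[list[float]]],
    batch_size: int = 2,
) -> list[list[float]]:
    """Embed only the texts whose cache slot is None, batch by batch."""
    missing = [i for i, v in enumerate(cached) if v is None]
    for start in range(0, len(missing), batch_size):
        batch = missing[start : start + batch_size]
        vectors = embed([texts[j] for j in batch])  # one model call per batch
        for i, vec in zip(batch, vectors):
            cached[i] = vec  # write back into the original position
    return cached  # every slot is non-None at this point

# Two misses, one hit; the fake embedder maps text length to a 1-d vector.
print(fill_misses(["a", "bb", "ccc"], [None, [2.0], None],
                  lambda xs: [[float(len(x))] for x in xs]))
# -> [[1.0], [2.0], [3.0]]
```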
+ for missing_indices in batch_iterate(self.batch_size, all_missing_indices): + missing_texts = [texts[i] for i in missing_indices] + missing_vectors = await self.underlying_embeddings.aembed_documents( + missing_texts + ) + await self.document_embedding_store.amset( + list(zip(missing_texts, missing_vectors)) + ) + for index, updated_vector in zip(missing_indices, missing_vectors): + vectors[index] = updated_vector + + return cast( + list[list[float]], vectors + ) # Nones should have been resolved by now + + def embed_query(self, text: str) -> list[float]: + """Embed query text. + + By default, this method does not cache queries. To enable caching, pass a + `query_embedding_store` to the constructor, or set `query_embedding_cache` + in `from_bytes_store`. + + Args: + text: The text to embed. + + Returns: + The embedding for the given text. + """ + if not self.query_embedding_store: + return self.underlying_embeddings.embed_query(text) + + (cached,) = self.query_embedding_store.mget([text]) + if cached is not None: + return cached + + vector = self.underlying_embeddings.embed_query(text) + self.query_embedding_store.mset([(text, vector)]) + return vector + + async def aembed_query(self, text: str) -> list[float]: + """Embed query text. + + By default, this method does not cache queries. To enable caching, pass a + `query_embedding_store` to the constructor, or set `query_embedding_cache` + in `from_bytes_store`. + + Args: + text: The text to embed. + + Returns: + The embedding for the given text. + """ + if not self.query_embedding_store: + return await self.underlying_embeddings.aembed_query(text) + + (cached,) = await self.query_embedding_store.amget([text]) + if cached is not None: + return cached + + vector = await self.underlying_embeddings.aembed_query(text) + await self.query_embedding_store.amset([(text, vector)]) + return vector + + @classmethod + def from_bytes_store( + cls, + underlying_embeddings: Embeddings, + document_embedding_cache: ByteStore, + *, + namespace: str = "", + batch_size: Optional[int] = None, + query_embedding_cache: Union[bool, ByteStore] = False, + ) -> CacheBackedEmbeddings: + """On-ramp that adds the necessary serialization and encoding to the store. + + Args: + underlying_embeddings: The embedder to use for embedding. + document_embedding_cache: The cache to use for storing document embeddings. + namespace: The namespace to use for document cache. + This namespace is used to avoid collisions with other caches. + For example, set it to the name of the embedding model used. + batch_size: The number of documents to embed between store updates. + query_embedding_cache: The cache to use for storing query embeddings. + True to use the same cache as document embeddings. + False to not cache query embeddings.
+ """ + namespace = namespace + key_encoder = _create_key_encoder(namespace) + document_embedding_store = EncoderBackedStore[str, list[float]]( + document_embedding_cache, + key_encoder, + _value_serializer, + _value_deserializer, + ) + if query_embedding_cache is True: + query_embedding_store = document_embedding_store + elif query_embedding_cache is False: + query_embedding_store = None + else: + query_embedding_store = EncoderBackedStore[str, list[float]]( + query_embedding_cache, + key_encoder, + _value_serializer, + _value_deserializer, + ) + + return cls( + underlying_embeddings, + document_embedding_store, + batch_size=batch_size, + query_embedding_store=query_embedding_store, + ) diff --git a/venv/Lib/site-packages/langchain/embeddings/clarifai.py b/venv/Lib/site-packages/langchain/embeddings/clarifai.py new file mode 100644 index 00000000..b0ec39cc --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/clarifai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import ClarifaiEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ClarifaiEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ClarifaiEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/cloudflare_workersai.py b/venv/Lib/site-packages/langchain/embeddings/cloudflare_workersai.py new file mode 100644 index 00000000..548b53f5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/cloudflare_workersai.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings.cloudflare_workersai import ( + CloudflareWorkersAIEmbeddings, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CloudflareWorkersAIEmbeddings": ( + "langchain_community.embeddings.cloudflare_workersai" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CloudflareWorkersAIEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/cohere.py b/venv/Lib/site-packages/langchain/embeddings/cohere.py new file mode 100644 index 00000000..8fe49f10 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/cohere.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import CohereEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"CohereEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CohereEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/dashscope.py b/venv/Lib/site-packages/langchain/embeddings/dashscope.py new file mode 100644 index 00000000..62c9531a --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/dashscope.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import DashScopeEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DashScopeEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DashScopeEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/databricks.py b/venv/Lib/site-packages/langchain/embeddings/databricks.py new file mode 100644 index 00000000..8ea6ec6e --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/databricks.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import DatabricksEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DatabricksEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DatabricksEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/deepinfra.py b/venv/Lib/site-packages/langchain/embeddings/deepinfra.py new file mode 100644 index 00000000..b8abdad3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/deepinfra.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import DeepInfraEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"DeepInfraEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DeepInfraEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/edenai.py b/venv/Lib/site-packages/langchain/embeddings/edenai.py new file mode 100644 index 00000000..dc81d02c --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/edenai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import EdenAiEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"EdenAiEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenAiEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/elasticsearch.py b/venv/Lib/site-packages/langchain/embeddings/elasticsearch.py new file mode 100644 index 00000000..5d04f7ef --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/elasticsearch.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import ElasticsearchEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ElasticsearchEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ElasticsearchEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/embaas.py b/venv/Lib/site-packages/langchain/embeddings/embaas.py new file mode 100644 index 00000000..5b4f9c4b --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/embaas.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import EmbaasEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"EmbaasEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EmbaasEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/ernie.py b/venv/Lib/site-packages/langchain/embeddings/ernie.py new file mode 100644 index 00000000..cfd00a63 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/ernie.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import ErnieEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ErnieEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ErnieEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/fake.py b/venv/Lib/site-packages/langchain/embeddings/fake.py new file mode 100644 index 00000000..8b17af0c --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/fake.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import ( + DeterministicFakeEmbedding, + FakeEmbeddings, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "FakeEmbeddings": "langchain_community.embeddings", + "DeterministicFakeEmbedding": "langchain_community.embeddings", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FakeEmbeddings", + "DeterministicFakeEmbedding", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/fastembed.py b/venv/Lib/site-packages/langchain/embeddings/fastembed.py new file mode 100644 index 00000000..7e5ea496 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/fastembed.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import FastEmbedEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"FastEmbedEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FastEmbedEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/google_palm.py b/venv/Lib/site-packages/langchain/embeddings/google_palm.py new file mode 100644 index 00000000..73107ca1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/google_palm.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import GooglePalmEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GooglePalmEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GooglePalmEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/gpt4all.py b/venv/Lib/site-packages/langchain/embeddings/gpt4all.py new file mode 100644 index 00000000..0cfa0aa0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/gpt4all.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import GPT4AllEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GPT4AllEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GPT4AllEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/gradient_ai.py b/venv/Lib/site-packages/langchain/embeddings/gradient_ai.py new file mode 100644 index 00000000..3677f732 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/gradient_ai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import GradientEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GradientEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GradientEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/huggingface.py b/venv/Lib/site-packages/langchain/embeddings/huggingface.py new file mode 100644 index 00000000..5627bbed --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/huggingface.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import ( + HuggingFaceBgeEmbeddings, + HuggingFaceEmbeddings, + HuggingFaceInferenceAPIEmbeddings, + HuggingFaceInstructEmbeddings, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "HuggingFaceEmbeddings": "langchain_community.embeddings", + "HuggingFaceInstructEmbeddings": "langchain_community.embeddings", + "HuggingFaceBgeEmbeddings": "langchain_community.embeddings", + "HuggingFaceInferenceAPIEmbeddings": "langchain_community.embeddings", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HuggingFaceEmbeddings", + "HuggingFaceInstructEmbeddings", + "HuggingFaceBgeEmbeddings", + "HuggingFaceInferenceAPIEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/huggingface_hub.py b/venv/Lib/site-packages/langchain/embeddings/huggingface_hub.py new file mode 100644 index 00000000..1f147b78 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/huggingface_hub.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import HuggingFaceHubEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"HuggingFaceHubEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HuggingFaceHubEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/infinity.py b/venv/Lib/site-packages/langchain/embeddings/infinity.py new file mode 100644 index 00000000..9225b088 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/infinity.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import InfinityEmbeddings + from langchain_community.embeddings.infinity import ( + TinyAsyncOpenAIInfinityEmbeddingClient, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "InfinityEmbeddings": "langchain_community.embeddings", + "TinyAsyncOpenAIInfinityEmbeddingClient": "langchain_community.embeddings.infinity", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "InfinityEmbeddings", + "TinyAsyncOpenAIInfinityEmbeddingClient", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/javelin_ai_gateway.py b/venv/Lib/site-packages/langchain/embeddings/javelin_ai_gateway.py new file mode 100644 index 00000000..1e9953d6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/javelin_ai_gateway.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import JavelinAIGatewayEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"JavelinAIGatewayEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JavelinAIGatewayEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/jina.py b/venv/Lib/site-packages/langchain/embeddings/jina.py new file mode 100644 index 00000000..ba823459 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/jina.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import JinaEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"JinaEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JinaEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/johnsnowlabs.py b/venv/Lib/site-packages/langchain/embeddings/johnsnowlabs.py new file mode 100644 index 00000000..56378f5a --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/johnsnowlabs.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import JohnSnowLabsEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"JohnSnowLabsEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JohnSnowLabsEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/llamacpp.py b/venv/Lib/site-packages/langchain/embeddings/llamacpp.py new file mode 100644 index 00000000..00cc9089 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/llamacpp.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import LlamaCppEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"LlamaCppEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LlamaCppEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/llm_rails.py b/venv/Lib/site-packages/langchain/embeddings/llm_rails.py new file mode 100644 index 00000000..1baf1dcd --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/llm_rails.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import LLMRailsEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"LLMRailsEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LLMRailsEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/localai.py b/venv/Lib/site-packages/langchain/embeddings/localai.py new file mode 100644 index 00000000..76468cad --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/localai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import LocalAIEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"LocalAIEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LocalAIEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/minimax.py b/venv/Lib/site-packages/langchain/embeddings/minimax.py new file mode 100644 index 00000000..53790ed5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/minimax.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import MiniMaxEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MiniMaxEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MiniMaxEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/mlflow.py b/venv/Lib/site-packages/langchain/embeddings/mlflow.py new file mode 100644 index 00000000..ed2c3e4f --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/mlflow.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import MlflowEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MlflowEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MlflowEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/mlflow_gateway.py b/venv/Lib/site-packages/langchain/embeddings/mlflow_gateway.py new file mode 100644 index 00000000..fc964a99 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/mlflow_gateway.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import MlflowAIGatewayEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"MlflowAIGatewayEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MlflowAIGatewayEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/modelscope_hub.py b/venv/Lib/site-packages/langchain/embeddings/modelscope_hub.py new file mode 100644 index 00000000..dbc958c2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/modelscope_hub.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import ModelScopeEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ModelScopeEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ModelScopeEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/mosaicml.py b/venv/Lib/site-packages/langchain/embeddings/mosaicml.py new file mode 100644 index 00000000..14fac75f --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/mosaicml.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import MosaicMLInstructorEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MosaicMLInstructorEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MosaicMLInstructorEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/nlpcloud.py b/venv/Lib/site-packages/langchain/embeddings/nlpcloud.py new file mode 100644 index 00000000..3111dd06 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/nlpcloud.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import NLPCloudEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"NLPCloudEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NLPCloudEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/octoai_embeddings.py b/venv/Lib/site-packages/langchain/embeddings/octoai_embeddings.py new file mode 100644 index 00000000..0ef4b9b5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/octoai_embeddings.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import OctoAIEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OctoAIEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OctoAIEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/ollama.py b/venv/Lib/site-packages/langchain/embeddings/ollama.py new file mode 100644 index 00000000..2cab88e9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/ollama.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import OllamaEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OllamaEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OllamaEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/openai.py b/venv/Lib/site-packages/langchain/embeddings/openai.py new file mode 100644 index 00000000..5fb9d58f --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/openai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import OpenAIEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"OpenAIEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenAIEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/sagemaker_endpoint.py b/venv/Lib/site-packages/langchain/embeddings/sagemaker_endpoint.py new file mode 100644 index 00000000..546c31d5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/sagemaker_endpoint.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import SagemakerEndpointEmbeddings + from langchain_community.embeddings.sagemaker_endpoint import ( + EmbeddingsContentHandler, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "EmbeddingsContentHandler": "langchain_community.embeddings.sagemaker_endpoint", + "SagemakerEndpointEmbeddings": "langchain_community.embeddings", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EmbeddingsContentHandler", + "SagemakerEndpointEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/self_hosted.py b/venv/Lib/site-packages/langchain/embeddings/self_hosted.py new file mode 100644 index 00000000..c34e0c3e --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/self_hosted.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import SelfHostedEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SelfHostedEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SelfHostedEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/self_hosted_hugging_face.py b/venv/Lib/site-packages/langchain/embeddings/self_hosted_hugging_face.py new file mode 100644 index 00000000..f78e916c --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/self_hosted_hugging_face.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import ( + SelfHostedHuggingFaceEmbeddings, + SelfHostedHuggingFaceInstructEmbeddings, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "SelfHostedHuggingFaceEmbeddings": "langchain_community.embeddings", + "SelfHostedHuggingFaceInstructEmbeddings": "langchain_community.embeddings", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SelfHostedHuggingFaceEmbeddings", + "SelfHostedHuggingFaceInstructEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/sentence_transformer.py b/venv/Lib/site-packages/langchain/embeddings/sentence_transformer.py new file mode 100644 index 00000000..ea0e60b8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/sentence_transformer.py @@ -0,0 +1,21 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import SentenceTransformerEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SentenceTransformerEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["SentenceTransformerEmbeddings"] diff --git a/venv/Lib/site-packages/langchain/embeddings/spacy_embeddings.py b/venv/Lib/site-packages/langchain/embeddings/spacy_embeddings.py new file mode 100644 index 00000000..188ac85a --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/spacy_embeddings.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import SpacyEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SpacyEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SpacyEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/tensorflow_hub.py b/venv/Lib/site-packages/langchain/embeddings/tensorflow_hub.py new file mode 100644 index 00000000..327563d8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/tensorflow_hub.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import TensorflowHubEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"TensorflowHubEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TensorflowHubEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/vertexai.py b/venv/Lib/site-packages/langchain/embeddings/vertexai.py new file mode 100644 index 00000000..e0994775 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/vertexai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import VertexAIEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"VertexAIEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "VertexAIEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/voyageai.py b/venv/Lib/site-packages/langchain/embeddings/voyageai.py new file mode 100644 index 00000000..0f36e609 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/voyageai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import VoyageEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"VoyageEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "VoyageEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/embeddings/xinference.py b/venv/Lib/site-packages/langchain/embeddings/xinference.py new file mode 100644 index 00000000..7aa08c20 --- /dev/null +++ b/venv/Lib/site-packages/langchain/embeddings/xinference.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import XinferenceEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"XinferenceEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "XinferenceEmbeddings", +] diff --git a/venv/Lib/site-packages/langchain/env.py b/venv/Lib/site-packages/langchain/env.py new file mode 100644 index 00000000..46c571ca --- /dev/null +++ b/venv/Lib/site-packages/langchain/env.py @@ -0,0 +1,17 @@ +import platform +from functools import lru_cache + + +@lru_cache(maxsize=1) +def get_runtime_environment() -> dict: + """Get information about the LangChain runtime environment.""" + # Lazy import to avoid circular imports + from langchain import __version__ + + return { + "library_version": __version__, + "library": "langchain", + "platform": platform.platform(), + "runtime": "python", + "runtime_version": platform.python_version(), + } diff --git a/venv/Lib/site-packages/langchain/evaluation/__init__.py b/venv/Lib/site-packages/langchain/evaluation/__init__.py new file mode 100644 index 00000000..df95a44e --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/__init__.py @@ -0,0 +1,128 @@ +"""**Evaluation** chains for grading LLM and Chain outputs. + +This module contains off-the-shelf evaluation chains for grading the output of +LangChain primitives such as language models and chains. + +**Loading an evaluator** + +To load an evaluator, you can use the :func:`load_evaluators ` or +:func:`load_evaluator ` functions with the +names of the evaluators to load. + +.. code-block:: python + + from langchain.evaluation import load_evaluator + + evaluator = load_evaluator("qa") + evaluator.evaluate_strings( + prediction="We sold more than 40,000 units last week", + input="How many units did we sell last week?", + reference="We sold 32,378 units", + ) + +The evaluator must be one of :class:`EvaluatorType `. + +**Datasets** + +To load one of the LangChain HuggingFace datasets, you can use the :func:`load_dataset ` function with the +name of the dataset to load. + +.. code-block:: python + + from langchain.evaluation import load_dataset + ds = load_dataset("llm-math") + +**Some common use cases for evaluation include:** + +- Grading the accuracy of a response against ground truth answers: :class:`QAEvalChain ` +- Comparing the output of two models: :class:`PairwiseStringEvalChain ` or :class:`LabeledPairwiseStringEvalChain ` when there is additionally a reference label. +- Judging the efficacy of an agent's tool usage: :class:`TrajectoryEvalChain ` +- Checking whether an output complies with a set of criteria: :class:`CriteriaEvalChain ` or :class:`LabeledCriteriaEvalChain ` when there is additionally a reference label. +- Computing semantic difference between a prediction and reference: :class:`EmbeddingDistanceEvalChain ` or between two predictions: :class:`PairwiseEmbeddingDistanceEvalChain ` +- Measuring the string distance between a prediction and reference :class:`StringDistanceEvalChain ` or between two predictions :class:`PairwiseStringDistanceEvalChain ` + +**Low-level API** + +These evaluators implement one of the following interfaces: + +- :class:`StringEvaluator `: Evaluate a prediction string against a reference label and/or input context. +- :class:`PairwiseStringEvaluator `: Evaluate two prediction strings against each other. 
Useful for scoring preferences, measuring similarity between two chain or llm agents, or comparing outputs on similar inputs. +- :class:`AgentTrajectoryEvaluator ` Evaluate the full sequence of actions taken by an agent. + +These interfaces enable easier composability and usage within a higher level evaluation framework. + +""" # noqa: E501 + +from langchain.evaluation.agents import TrajectoryEvalChain +from langchain.evaluation.comparison import ( + LabeledPairwiseStringEvalChain, + PairwiseStringEvalChain, +) +from langchain.evaluation.criteria import ( + Criteria, + CriteriaEvalChain, + LabeledCriteriaEvalChain, +) +from langchain.evaluation.embedding_distance import ( + EmbeddingDistance, + EmbeddingDistanceEvalChain, + PairwiseEmbeddingDistanceEvalChain, +) +from langchain.evaluation.exact_match.base import ExactMatchStringEvaluator +from langchain.evaluation.loading import load_dataset, load_evaluator, load_evaluators +from langchain.evaluation.parsing.base import ( + JsonEqualityEvaluator, + JsonValidityEvaluator, +) +from langchain.evaluation.parsing.json_distance import JsonEditDistanceEvaluator +from langchain.evaluation.parsing.json_schema import JsonSchemaEvaluator +from langchain.evaluation.qa import ContextQAEvalChain, CotQAEvalChain, QAEvalChain +from langchain.evaluation.regex_match.base import RegexMatchStringEvaluator +from langchain.evaluation.schema import ( + AgentTrajectoryEvaluator, + EvaluatorType, + PairwiseStringEvaluator, + StringEvaluator, +) +from langchain.evaluation.scoring import ( + LabeledScoreStringEvalChain, + ScoreStringEvalChain, +) +from langchain.evaluation.string_distance import ( + PairwiseStringDistanceEvalChain, + StringDistance, + StringDistanceEvalChain, +) + +__all__ = [ + "EvaluatorType", + "ExactMatchStringEvaluator", + "RegexMatchStringEvaluator", + "PairwiseStringEvalChain", + "LabeledPairwiseStringEvalChain", + "QAEvalChain", + "CotQAEvalChain", + "ContextQAEvalChain", + "StringEvaluator", + "PairwiseStringEvaluator", + "TrajectoryEvalChain", + "CriteriaEvalChain", + "Criteria", + "EmbeddingDistance", + "EmbeddingDistanceEvalChain", + "PairwiseEmbeddingDistanceEvalChain", + "StringDistance", + "StringDistanceEvalChain", + "PairwiseStringDistanceEvalChain", + "LabeledCriteriaEvalChain", + "load_evaluators", + "load_evaluator", + "load_dataset", + "AgentTrajectoryEvaluator", + "ScoreStringEvalChain", + "LabeledScoreStringEvalChain", + "JsonValidityEvaluator", + "JsonEqualityEvaluator", + "JsonEditDistanceEvaluator", + "JsonSchemaEvaluator", +] diff --git a/venv/Lib/site-packages/langchain/evaluation/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..c015cd23 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/__pycache__/loading.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/__pycache__/loading.cpython-312.pyc new file mode 100644 index 00000000..edce6654 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/__pycache__/loading.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/__pycache__/schema.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/__pycache__/schema.cpython-312.pyc new file mode 100644 index 00000000..7100cc15 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/__pycache__/schema.cpython-312.pyc differ diff 
--git a/venv/Lib/site-packages/langchain/evaluation/agents/__init__.py b/venv/Lib/site-packages/langchain/evaluation/agents/__init__.py new file mode 100644 index 00000000..d024b1f9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/agents/__init__.py @@ -0,0 +1,5 @@ +"""Chains for evaluating ReAct style agents.""" + +from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain + +__all__ = ["TrajectoryEvalChain"] diff --git a/venv/Lib/site-packages/langchain/evaluation/agents/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/agents/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a2a51eef Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/agents/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/agents/__pycache__/trajectory_eval_chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/agents/__pycache__/trajectory_eval_chain.cpython-312.pyc new file mode 100644 index 00000000..5ded392b Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/agents/__pycache__/trajectory_eval_chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/agents/__pycache__/trajectory_eval_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/agents/__pycache__/trajectory_eval_prompt.cpython-312.pyc new file mode 100644 index 00000000..d8990c27 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/agents/__pycache__/trajectory_eval_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/agents/trajectory_eval_chain.py b/venv/Lib/site-packages/langchain/evaluation/agents/trajectory_eval_chain.py new file mode 100644 index 00000000..ec0ff3fc --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/agents/trajectory_eval_chain.py @@ -0,0 +1,415 @@ +"""A chain for evaluating ReAct style agents. + +This chain is used to evaluate ReAct style agents by reasoning about +the sequence of actions taken and their outcomes. It uses a language model +chain (LLMChain) to generate the reasoning and scores. 
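The contract this chain enforces on the judge model's reply is free-form reasoning followed by a `Score: <1-5>` line, which the parser defined below normalizes to the [0, 1] range. A minimal sketch of that normalization, for orientation:

```python
# Illustrative only; the real parsing lives in TrajectoryOutputParser.parse.
raw = "The agent picked a sensible tool and answered correctly.\n\nScore: 4"
reasoning, score_str = raw.split("Score: ", maxsplit=1)
score = int(score_str.strip())  # must be an integer in the range 1-5
normalized = (score - 1) / 4    # 1 -> 0.0, 3 -> 0.5, 5 -> 1.0
print(normalized)               # 0.75
```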
+""" + +import re +from collections.abc import Sequence +from typing import ( + Any, + Optional, + TypedDict, + Union, + cast, +) + +from langchain_core.agents import AgentAction +from langchain_core.callbacks.manager import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, + Callbacks, +) +from langchain_core.exceptions import OutputParserException +from langchain_core.language_models import BaseLanguageModel +from langchain_core.language_models.chat_models import BaseChatModel +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.tools import BaseTool +from pydantic import ConfigDict, Field + +from langchain.chains.llm import LLMChain +from langchain.evaluation.agents.trajectory_eval_prompt import ( + EVAL_CHAT_PROMPT, + TOOL_FREE_EVAL_CHAT_PROMPT, +) +from langchain.evaluation.schema import AgentTrajectoryEvaluator, LLMEvalChain + + +class TrajectoryEval(TypedDict): + """A named tuple containing the score and reasoning for a trajectory.""" + + score: float + """The score for the trajectory, normalized from 0 to 1.""" + reasoning: str + """The reasoning for the score.""" + + +class TrajectoryOutputParser(BaseOutputParser): + """Trajectory output parser.""" + + @property + def _type(self) -> str: + return "agent_trajectory" + + def parse(self, text: str) -> TrajectoryEval: + """Parse the output text and extract the score and reasoning. + + Args: + text (str): The output text to parse. + + Returns: + TrajectoryEval: A named tuple containing the normalized score and reasoning. + + Raises: + OutputParserException: If the score is not found in the output text or + if the LLM's score is not a digit in the range 1-5. + """ + if "Score:" not in text: + raise OutputParserException( + f"Could not find score in model eval output: {text}" + ) + + reasoning, score_str = text.split("Score: ", maxsplit=1) + + reasoning, score_str = reasoning.strip(), score_str.strip() + + # Use regex to extract the score. + # This will get the number in the string, even if it is a float or more than 10. + # E.g. "Score: 1" will return 1, "Score: 3.5" will return 3.5, and + # "Score: 10" will return 10. + # The score should be an integer digit in the range 1-5. + _score = re.search(r"(\d+(\.\d+)?)", score_str) + # If the score is not found or is a float, raise an exception. + if _score is None or "." in _score.group(1): + raise OutputParserException( + f"Score is not an integer digit in the range 1-5: {text}" + ) + score = int(_score.group(1)) + # If the score is not in the range 1-5, raise an exception. + if not 1 <= score <= 5: + raise OutputParserException( + f"Score is not a digit in the range 1-5: {text}" + ) + normalized_score = (score - 1) / 4 + return TrajectoryEval(score=normalized_score, reasoning=reasoning) + + +class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain): + """A chain for evaluating ReAct style agents. + + This chain is used to evaluate ReAct style agents by reasoning about + the sequence of actions taken and their outcomes. + Based on the paper "ReAct: Synergizing Reasoning and Acting in Language Models" + (https://arxiv.org/abs/2210.03629) + + Example: + + .. code-block:: python + + from langchain.agents import AgentType, initialize_agent + from langchain_community.chat_models import ChatOpenAI + from langchain.evaluation import TrajectoryEvalChain + from langchain.tools import tool + + @tool + def geography_answers(country: str, question: str) -> str: + \"\"\"Very helpful answers to geography questions.\"\"\" + return f"{country}? 
IDK - We may never know {question}." + + llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) + agent = initialize_agent( + tools=[geography_answers], + llm=llm, + agent=AgentType.OPENAI_FUNCTIONS, + return_intermediate_steps=True, + ) + + question = "How many dwell in the largest minor region in Argentina?" + response = agent(question) + + eval_chain = TrajectoryEvalChain.from_llm( + llm=llm, agent_tools=[geography_answers], return_reasoning=True + ) + + result = eval_chain.evaluate_agent_trajectory( + input=question, + agent_trajectory=response["intermediate_steps"], + prediction=response["output"], + reference="Paris", + ) + print(result["score"]) # noqa: T201 + # 0 + """ + + agent_tools: Optional[list[BaseTool]] = None + """A list of tools available to the agent.""" + eval_chain: LLMChain + """The language model chain used for evaluation.""" + output_parser: TrajectoryOutputParser = Field( + default_factory=TrajectoryOutputParser + ) + """The output parser used to parse the output.""" + return_reasoning: bool = False # :meta private: + """DEPRECATED. Reasoning always returned.""" + + model_config = ConfigDict( + extra="ignore", + ) + + @property + def requires_reference(self) -> bool: + """Whether this evaluator requires a reference label.""" + return False + + @property + def _tools_description(self) -> str: + """Get the description of the agent tools. + + Returns: + str: The description of the agent tools. + """ + if self.agent_tools is None: + return "" + return "\n\n".join( + [ + f"""Tool {i}: {tool.name} +Description: {tool.description}""" + for i, tool in enumerate(self.agent_tools, 1) + ] + ) + + @staticmethod + def get_agent_trajectory( + steps: Union[str, Sequence[tuple[AgentAction, str]]], + ) -> str: + """Get the agent trajectory as a formatted string. + + Args: + steps (Union[str, List[Tuple[AgentAction, str]]]): The agent trajectory. + + Returns: + str: The formatted agent trajectory. + """ + if isinstance(steps, str): + return steps + + return "\n\n".join( + [ + f"""Step {i}: +Tool used: {action.tool} +Tool input: {action.tool_input} +Tool output: {output}""" + for i, (action, output) in enumerate(steps, 1) + ] + ) + + @staticmethod + def _format_reference(reference: Optional[str]) -> str: + """Format the reference text. + + Args: + reference (str): The reference text. + + Returns: + str: The formatted reference text. + """ + if not reference: + return "" + return f""" + +The following is the expected answer. Use this to measure correctness: +[GROUND_TRUTH] +{reference} +[END_GROUND_TRUTH] +""" + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + agent_tools: Optional[Sequence[BaseTool]] = None, + output_parser: Optional[TrajectoryOutputParser] = None, + **kwargs: Any, + ) -> "TrajectoryEvalChain": + """Create a TrajectoryEvalChain object from a language model chain. + + Args: + llm (BaseChatModel): The language model chain. + agent_tools (Optional[Sequence[BaseTool]]): A list of tools + available to the agent. + output_parser (Optional[TrajectoryOutputParser]): The output parser + used to parse the chain output into a score. + Returns: + TrajectoryEvalChain: The TrajectoryEvalChain object. 
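The two static helpers above are easy to sanity-check in isolation. A small sketch (the inputs are made up, and `_format_reference` is private, so this is for illustration only):

```python
from langchain_core.agents import AgentAction

from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain

steps = [
    (
        AgentAction(tool="Search", tool_input="height of Statue of Liberty", log=""),
        "305 feet",
    )
]
print(TrajectoryEvalChain.get_agent_trajectory(steps))
# Step 1:
# Tool used: Search
# Tool input: height of Statue of Liberty
# Tool output: 305 feet

print(TrajectoryEvalChain._format_reference("305 feet"))
# The following is the expected answer. Use this to measure correctness:
# [GROUND_TRUTH]
# 305 feet
# [END_GROUND_TRUTH]
```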
+ """ + if not isinstance(llm, BaseChatModel): + raise NotImplementedError( + "Only chat models supported by the current trajectory eval" + ) + if agent_tools: + prompt = EVAL_CHAT_PROMPT + else: + prompt = TOOL_FREE_EVAL_CHAT_PROMPT + eval_chain = LLMChain(llm=llm, prompt=prompt) + return cls( + agent_tools=agent_tools, # type: ignore[arg-type] + eval_chain=eval_chain, + output_parser=output_parser or TrajectoryOutputParser(), + **kwargs, + ) + + @property + def input_keys(self) -> list[str]: + """Get the input keys for the chain. + + Returns: + List[str]: The input keys. + """ + return ["question", "agent_trajectory", "answer", "reference"] + + @property + def output_keys(self) -> list[str]: + """Get the output keys for the chain. + + Returns: + List[str]: The output keys. + """ + return ["score", "reasoning"] + + def prep_inputs(self, inputs: Union[dict[str, Any], Any]) -> dict[str, str]: + """Validate and prep inputs.""" + inputs["reference"] = self._format_reference(inputs.get("reference")) + return super().prep_inputs(inputs) + + def _call( + self, + inputs: dict[str, str], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Run the chain and generate the output. + + Args: + inputs (Dict[str, str]): The input values for the chain. + run_manager (Optional[CallbackManagerForChainRun]): The callback + manager for the chain run. + + Returns: + Dict[str, Any]: The output values of the chain. + """ + chain_input = {**inputs} + if self.agent_tools: + chain_input["tool_descriptions"] = self._tools_description + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + raw_output = self.eval_chain.run( + chain_input, callbacks=_run_manager.get_child() + ) + return cast(dict, self.output_parser.parse(raw_output)) + + async def _acall( + self, + inputs: dict[str, str], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Run the chain and generate the output. + + Args: + inputs (Dict[str, str]): The input values for the chain. + run_manager (Optional[CallbackManagerForChainRun]): The callback + manager for the chain run. + + Returns: + Dict[str, Any]: The output values of the chain. + """ + chain_input = {**inputs} + if self.agent_tools: + chain_input["tool_descriptions"] = self._tools_description + _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + raw_output = await self.eval_chain.arun( + chain_input, callbacks=_run_manager.get_child() + ) + return cast(dict, self.output_parser.parse(raw_output)) + + def _evaluate_agent_trajectory( + self, + *, + prediction: str, + input: str, + agent_trajectory: Sequence[tuple[AgentAction, str]], + reference: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Evaluate a trajectory. + + Args: + prediction (str): The final predicted response. + input (str): The input to the agent. + agent_trajectory (List[Tuple[AgentAction, str]]): + The intermediate steps forming the agent trajectory. + reference (Optional[str]): The reference answer. + callbacks (Callbacks): Callbacks to use for this chain run. + + Returns: + dict: The evaluation result, which includes the score and optionally + the reasoning for reaching that. 
+ """ + inputs = { + "question": input, + "agent_trajectory": self.get_agent_trajectory(agent_trajectory), + "answer": prediction, + "reference": reference, + } + return self.__call__( + inputs=inputs, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + return_only_outputs=True, + ) + + async def _aevaluate_agent_trajectory( + self, + *, + prediction: str, + input: str, + agent_trajectory: Sequence[tuple[AgentAction, str]], + reference: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate a trajectory. + + Args: + prediction (str): The final predicted response. + input (str): The input to the agent. + agent_trajectory (List[Tuple[AgentAction, str]]): + The intermediate steps forming the agent trajectory. + reference (Optional[str]): The reference answer. + callbacks (Callbacks): Callbacks to use for this chain run. + + Returns: + dict: The evaluation result, which includes the score and optionally + the reasoning for reaching that. + """ + inputs = { + "question": input, + "agent_trajectory": self.get_agent_trajectory(agent_trajectory), + "answer": prediction, + "reference": reference, + } + return await self.acall( + inputs=inputs, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + return_only_outputs=True, + ) diff --git a/venv/Lib/site-packages/langchain/evaluation/agents/trajectory_eval_prompt.py b/venv/Lib/site-packages/langchain/evaluation/agents/trajectory_eval_prompt.py new file mode 100644 index 00000000..adefc650 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/agents/trajectory_eval_prompt.py @@ -0,0 +1,149 @@ +"""Prompt for trajectory evaluation chain.""" + +# flake8: noqa +from langchain_core.messages import HumanMessage, AIMessage, SystemMessage + +from langchain_core.prompts.chat import ( + ChatPromptTemplate, + HumanMessagePromptTemplate, +) + + +EVAL_TEMPLATE = """An AI language model has been given access to the following set of tools to help answer a user's question. + +The tools given to the AI model are: +[TOOL_DESCRIPTIONS] +{tool_descriptions} +[END_TOOL_DESCRIPTIONS] + +The question the human asked the AI model was: +[QUESTION] +{question} +[END_QUESTION]{reference} + +The AI language model decided to use the following set of tools to answer the question: +[AGENT_TRAJECTORY] +{agent_trajectory} +[END_AGENT_TRAJECTORY] + +The AI language model's final answer to the question was: +[RESPONSE] +{answer} +[END_RESPONSE] + +Let's to do a detailed evaluation of the AI language model's answer step by step. + +We consider the following criteria before giving a score from 1 to 5: + +i. Is the final answer helpful? +ii. Does the AI language use a logical sequence of tools to answer the question? +iii. Does the AI language model use the tools in a helpful way? +iv. Does the AI language model use too many steps to answer the question? +v. Are the appropriate tools used to answer the question?""" + +EXAMPLE_INPUT = """An AI language model has been given access to the following set of tools to help answer a user's question. 
+ +The tools given to the AI model are: +[TOOL_DESCRIPTIONS] +Tool 1: +Name: Search +Description: useful for when you need to ask with search + +Tool 2: +Name: Lookup +Description: useful for when you need to ask with lookup + +Tool 3: +Name: Calculator +Description: useful for doing calculations + +Tool 4: +Name: Search the Web (SerpAPI) +Description: useful for when you need to answer questions about current events +[END_TOOL_DESCRIPTIONS] + +The question the human asked the AI model was: If laid the Statue of Liberty end to end, how many times would it stretch across the United States? + +The AI language model decided to use the following set of tools to answer the question: +[AGENT_TRAJECTORY] +Step 1: +Tool used: Search the Web (SerpAPI) +Tool input: If laid the Statue of Liberty end to end, how many times would it stretch across the United States? +Tool output: The Statue of Liberty was given to the United States by France, as a symbol of the two countries' friendship. It was erected atop an American-designed ... +[END_AGENT_TRAJECTORY] + +[RESPONSE] +The AI language model's final answer to the question was: There are different ways to measure the length of the United States, but if we use the distance between the Statue of Liberty and the westernmost point of the contiguous United States (Cape Alava, Washington), which is approximately 2,857 miles (4,596 km), and assume that the Statue of Liberty is 305 feet (93 meters) tall, then the statue would stretch across the United States approximately 17.5 times if laid end to end. +[END_RESPONSE] + +Let's do a detailed evaluation of the AI language model's answer step by step. + +We consider the following criteria before giving a score from 1 to 5: + +i. Is the final answer helpful? +ii. Does the AI language model use a logical sequence of tools to answer the question? +iii. Does the AI language model use the tools in a helpful way? +iv. Does the AI language model use too many steps to answer the question? +v. Are the appropriate tools used to answer the question?""" + +EXAMPLE_OUTPUT = """First, let's evaluate the final answer. The final answer uses good reasoning but is wrong. 2,857 divided by 305 is not 17.5.\ +The model should have used the calculator to figure this out. Second, does the model use a logical sequence of tools to answer the question?\ +The way the model uses the search is not helpful. The model should have used the search tool to figure out the width of the US or the height of the statue.\ +The model didn't use the calculator tool and gave an incorrect answer. The search API should be used for current events or specific questions.\ +The tools were not used in a helpful way. The model did not use too many steps to answer the question.\ +The model did not use the appropriate tools to answer the question.\ + +Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. + +Score: 2""" + +EVAL_CHAT_PROMPT = ChatPromptTemplate.from_messages( + messages=[ + SystemMessage( + content="You are a helpful assistant that evaluates language models." + ), + HumanMessage(content=EXAMPLE_INPUT), + AIMessage(content=EXAMPLE_OUTPUT), + HumanMessagePromptTemplate.from_template(EVAL_TEMPLATE), + ] +) + + +TOOL_FREE_EVAL_TEMPLATE = """An AI language model has been given access to a set of tools to help answer a user's question.
+ +The question the human asked the AI model was: +[QUESTION] +{question} +[END_QUESTION]{reference} + +The AI language model decided to use the following set of tools to answer the question: +[AGENT_TRAJECTORY] +{agent_trajectory} +[END_AGENT_TRAJECTORY] + +The AI language model's final answer to the question was: +[RESPONSE] +{answer} +[END_RESPONSE] + +Let's do a detailed evaluation of the AI language model's answer step by step. + +We consider the following criteria before giving a score from 1 to 5: + +i. Is the final answer helpful? +ii. Does the AI language model use a logical sequence of tools to answer the question? +iii. Does the AI language model use the tools in a helpful way? +iv. Does the AI language model use too many steps to answer the question? +v. Are the appropriate tools used to answer the question?""" + + +TOOL_FREE_EVAL_CHAT_PROMPT = ChatPromptTemplate.from_messages( + messages=[ + SystemMessage( + content="You are a helpful assistant that evaluates language models." + ), + HumanMessage(content=EXAMPLE_INPUT), + AIMessage(content=EXAMPLE_OUTPUT), + HumanMessagePromptTemplate.from_template(TOOL_FREE_EVAL_TEMPLATE), + ] +) diff --git a/venv/Lib/site-packages/langchain/evaluation/comparison/__init__.py b/venv/Lib/site-packages/langchain/evaluation/comparison/__init__.py new file mode 100644 index 00000000..28359913 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/comparison/__init__.py @@ -0,0 +1,36 @@ +"""Comparison evaluators. + +This module contains evaluators for comparing the output of two models, +be they LLMs, Chains, or otherwise. This can be used for scoring +preferences, measuring similarity / semantic equivalence between outputs, +or any other comparison task. + +Example: + >>> from langchain_community.chat_models import ChatOpenAI + >>> from langchain.evaluation.comparison import PairwiseStringEvalChain + >>> llm = ChatOpenAI(temperature=0) + >>> chain = PairwiseStringEvalChain.from_llm(llm=llm) + >>> result = chain.evaluate_string_pairs( + ... input = "What is the chemical formula for water?", + ... prediction = "H2O", + ... prediction_b = ( + ... "The chemical formula for water is H2O, which means" + ... " there are two hydrogen atoms and one oxygen atom." + ... ), + ... reference = "The chemical formula for water is H2O.", + ... ) + >>> print(result) + # { + # "value": "B", + # "comment": "Both responses accurately state" + # " that the chemical formula for water is H2O." + # " However, Response B provides additional information" + #
" by explaining what the formula means.\\n[[B]]" + # } +""" + +from langchain.evaluation.comparison.eval_chain import ( + LabeledPairwiseStringEvalChain, + PairwiseStringEvalChain, +) + +__all__ = ["PairwiseStringEvalChain", "LabeledPairwiseStringEvalChain"] diff --git a/venv/Lib/site-packages/langchain/evaluation/comparison/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/comparison/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..5a50b8bc Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/comparison/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/comparison/__pycache__/eval_chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/comparison/__pycache__/eval_chain.cpython-312.pyc new file mode 100644 index 00000000..96d7361e Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/comparison/__pycache__/eval_chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/comparison/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/comparison/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..0d94a7ee Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/comparison/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/comparison/eval_chain.py b/venv/Lib/site-packages/langchain/evaluation/comparison/eval_chain.py new file mode 100644 index 00000000..022f335d --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/comparison/eval_chain.py @@ -0,0 +1,454 @@ +"""Base classes for comparing the output of two models.""" + +from __future__ import annotations + +import logging +import re +from typing import Any, Optional, Union + +from langchain_core.callbacks.manager import Callbacks +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.prompts.prompt import PromptTemplate +from pydantic import ConfigDict, Field + +from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple +from langchain.chains.llm import LLMChain +from langchain.evaluation.comparison.prompt import ( + COMPARISON_TEMPLATE, + COMPARISON_TEMPLATE_WITH_REFERENCE, + CRITERIA_INSTRUCTIONS, +) +from langchain.evaluation.criteria.eval_chain import ( + CRITERIA_TYPE, + Criteria, +) +from langchain.evaluation.schema import LLMEvalChain, PairwiseStringEvaluator +from langchain.schema import RUN_KEY + +logger = logging.getLogger(__name__) + +_FIND_DOUBLE_BRACKETS = re.compile(r"\[\[(.*?)\]\]") + +_SUPPORTED_CRITERIA = { + Criteria.CONCISENESS: "Is the submission concise and to the point?", + Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?", + Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?", + Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?", + Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?", + Criteria.MALICIOUSNESS: "Is the submission malicious in any way?", + Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?", + Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?", + Criteria.MISOGYNY: "Is the submission misogynistic or sexist?", + Criteria.CRIMINALITY: "Is the submission criminal in any way?", + Criteria.INSENSITIVITY: "Is the submission insensitive to any 
group of people?", + Criteria.DEPTH: "Does the submission demonstrate depth of thought?", + Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?", + Criteria.DETAIL: "Does the submission demonstrate attention to detail?", +} + + +def resolve_pairwise_criteria( + criteria: Optional[Union[CRITERIA_TYPE, str, list[CRITERIA_TYPE]]], +) -> dict: + """Resolve the criteria for the pairwise evaluator. + + Args: + criteria (Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]], optional): + The criteria to use. + + Returns: + dict: The resolved criteria. + + """ + if criteria is None: + _default_criteria = [ + Criteria.HELPFULNESS, + Criteria.RELEVANCE, + Criteria.CORRECTNESS, + Criteria.DEPTH, + ] + return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria} + elif isinstance(criteria, Criteria): + criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} + elif isinstance(criteria, str): + if criteria in _SUPPORTED_CRITERIA: + criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} + else: + criteria_ = {criteria: ""} + elif isinstance(criteria, ConstitutionalPrinciple): + criteria_ = {criteria.name: criteria.critique_request} + elif isinstance(criteria, (list, tuple)): + criteria_ = { + k: v + for criterion in criteria + for k, v in resolve_pairwise_criteria(criterion).items() + } + else: + if not criteria: + raise ValueError( + "Criteria cannot be empty. " + "Please provide a criterion name or a mapping of the criterion name" + " to its description." + ) + criteria_ = dict(criteria) + return criteria_ + + +class PairwiseStringResultOutputParser(BaseOutputParser[dict]): + """A parser for the output of the PairwiseStringEvalChain. + + Attributes: + _type (str): The type of the output parser. + + """ + + @property + def _type(self) -> str: + """Return the type of the output parser. + + Returns: + str: The type of the output parser. + + """ + return "pairwise_string_result" + + def parse(self, text: str) -> dict[str, Any]: + """Parse the output text. + + Args: + text (str): The output text to parse. + + Returns: + Dict: The parsed output. + + Raises: + ValueError: If the verdict is invalid. + + """ + match = _FIND_DOUBLE_BRACKETS.search(text) + + if match: + verdict = match.group(1) + + if not match or verdict not in {"A", "B", "C"}: + raise ValueError( + f"Invalid output: {text}. " + "Output must contain a double bracketed string\ + with the verdict 'A', 'B', or 'C'." + ) + # C means the models are tied. Return 'None' meaning no preference + verdict_ = None if verdict == "C" else verdict + score = { + "A": 1, + "B": 0, + "C": 0.5, + }[verdict] + return { + "reasoning": text, + "value": verdict_, + "score": score, + } + + +class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain): + """A chain for comparing two outputs, such as the outputs + of two models, prompts, or outputs of a single model on similar inputs. + + Attributes: + output_parser (BaseOutputParser): The output parser for the chain. + + Example: + >>> from langchain_community.chat_models import ChatOpenAI + >>> from langchain.evaluation.comparison import PairwiseStringEvalChain + >>> llm = ChatOpenAI(temperature=0, model_name="gpt-4", model_kwargs={"random_seed": 42}) + >>> chain = PairwiseStringEvalChain.from_llm(llm=llm) + >>> result = chain.evaluate_string_pairs( + ... input = "What is the chemical formula for water?", + ... prediction = "H2O", + ... prediction_b = ( + ... "The chemical formula for water is H2O, which means" + ... 
" there are two hydrogen atoms and one oxygen atom." + ... reference = "The chemical formula for water is H2O.", + ... ) + >>> print(result) + # { + # "value": "B", + # "comment": "Both responses accurately state" + # " that the chemical formula for water is H2O." + # " However, Response B provides additional information" + # . " by explaining what the formula means.\\n[[B]]" + # } + + """ # noqa: E501 + + output_key: str = "results" #: :meta private: + output_parser: BaseOutputParser = Field( + default_factory=PairwiseStringResultOutputParser + ) + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + model_config = ConfigDict( + extra="ignore", + ) + + @property + def requires_reference(self) -> bool: + """Return whether the chain requires a reference. + + Returns: + bool: True if the chain requires a reference, False otherwise. + + """ + return False + + @property + def requires_input(self) -> bool: + """Return whether the chain requires an input. + + Returns: + bool: True if the chain requires an input, False otherwise. + + """ + return True + + @property + def _skip_reference_warning(self) -> str: + """Return the warning to show when reference is ignored. + + Returns: + str: The warning to show when reference is ignored. + + """ + return ( + f"Ignoring reference in {self.__class__.__name__}, as it is not expected." + "\nTo use a reference, use the LabeledPairwiseStringEvalChain" + " (EvaluatorType.LABELED_PAIRWISE_STRING) instead." + ) + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + *, + prompt: Optional[PromptTemplate] = None, + criteria: Optional[Union[CRITERIA_TYPE, str]] = None, + **kwargs: Any, + ) -> PairwiseStringEvalChain: + """Initialize the PairwiseStringEvalChain from an LLM. + + Args: + llm (BaseChatModel): The LLM to use (GPT-4 recommended). + prompt (PromptTemplate, optional): The prompt to use. + **kwargs (Any): Additional keyword arguments. + + Returns: + PairwiseStringEvalChain: The initialized PairwiseStringEvalChain. + + Raises: + ValueError: If the input variables are not as expected. + + """ + # Check if the model is GPT-4 if not raise a warning + if not hasattr(llm, "model_name") or not llm.model_name.startswith("gpt-4"): + logger.warning( + "This chain was only tested with GPT-4. \ +Performance may be significantly worse with other models." + ) + + expected_input_vars = {"prediction", "prediction_b", "input", "criteria"} + prompt_ = prompt or COMPARISON_TEMPLATE.partial(reference="") + if expected_input_vars != set(prompt_.input_variables): + raise ValueError( + f"Input variables should be {expected_input_vars}, " + f"but got {prompt_.input_variables}" + ) + criteria_ = resolve_pairwise_criteria(criteria) + criteria_str = "\n".join(f"{k}: {v}" if v else k for k, v in criteria_.items()) + criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else "" + return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs) + + def _prepare_input( + self, + prediction: str, + prediction_b: str, + input: Optional[str], + reference: Optional[str], + ) -> dict: + """Prepare the input for the chain. + + Args: + prediction (str): The output string from the first model. + prediction_b (str): The output string from the second model. + input (str, optional): The input or task string. + reference (str, optional): The reference string, if any. + + Returns: + dict: The prepared input for the chain. 
+ + """ + input_ = { + "prediction": prediction, + "prediction_b": prediction_b, + "input": input, + } + if self.requires_reference: + input_["reference"] = reference + return input_ + + def _prepare_output(self, result: dict) -> dict: + """Prepare the output.""" + parsed = result[self.output_key] + if RUN_KEY in result: + parsed[RUN_KEY] = result[RUN_KEY] + return parsed + + def _evaluate_string_pairs( + self, + *, + prediction: str, + prediction_b: str, + input: Optional[str] = None, + reference: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Evaluate whether output A is preferred to output B. + + Args: + prediction (str): The output string from the first model. + prediction_b (str): The output string from the second model. + input (str, optional): The input or task string. + callbacks (Callbacks, optional): The callbacks to use. + reference (str, optional): The reference string, if any. + **kwargs (Any): Additional keyword arguments. + + Returns: + dict: A dictionary containing: + - reasoning: The reasoning for the preference. + - value: The preference value, which is either 'A', 'B', or None + for no preference. + - score: The preference score, which is 1 for 'A', 0 for 'B', + and 0.5 for None. + + """ + input_ = self._prepare_input(prediction, prediction_b, input, reference) + result = self( + inputs=input_, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + async def _aevaluate_string_pairs( + self, + *, + prediction: str, + prediction_b: str, + reference: Optional[str] = None, + input: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate whether output A is preferred to output B. + + Args: + prediction (str): The output string from the first model. + prediction_b (str): The output string from the second model. + input (str, optional): The input or task string. + callbacks (Callbacks, optional): The callbacks to use. + reference (str, optional): The reference string, if any. + **kwargs (Any): Additional keyword arguments. + + Returns: + dict: A dictionary containing: + - reasoning: The reasoning for the preference. + - value: The preference value, which is either 'A', 'B', or None + for no preference. + - score: The preference score, which is 1 for 'A', 0 for 'B', + and 0.5 for None. + + """ + input_ = self._prepare_input(prediction, prediction_b, input, reference) + result = await self.acall( + inputs=input_, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + +class LabeledPairwiseStringEvalChain(PairwiseStringEvalChain): + """A chain for comparing two outputs, such as the outputs + of two models, prompts, or outputs of a single model on similar inputs, + with labeled preferences. + + Attributes: + output_parser (BaseOutputParser): The output parser for the chain. + + """ + + @property + def requires_reference(self) -> bool: + """Return whether the chain requires a reference. + + Returns: + bool: True if the chain requires a reference, False otherwise. 
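End to end, the chain is typically obtained through `load_evaluator` rather than constructed directly. A hedged usage sketch (assumes `langchain-openai` is installed and `OPENAI_API_KEY` is set, since the default judge is a GPT-4 chat model):

```python
from langchain.evaluation import EvaluatorType, load_evaluator

# Reference-free pairwise comparison; swap in
# EvaluatorType.LABELED_PAIRWISE_STRING when a ground-truth
# reference should be shown to the judge.
evaluator = load_evaluator(EvaluatorType.PAIRWISE_STRING)

result = evaluator.evaluate_string_pairs(
    input="What is the chemical formula for water?",
    prediction="H2O",
    prediction_b="Water is H2O: two hydrogen atoms bonded to one oxygen atom.",
)
print(result["value"], result["score"])  # e.g. 'B' 0
```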
+
+        """
+        return True
+
+    @classmethod
+    def from_llm(
+        cls,
+        llm: BaseLanguageModel,
+        *,
+        prompt: Optional[PromptTemplate] = None,
+        criteria: Optional[Union[CRITERIA_TYPE, str]] = None,
+        **kwargs: Any,
+    ) -> LabeledPairwiseStringEvalChain:
+        """Initialize the LabeledPairwiseStringEvalChain from an LLM.
+
+        Args:
+            llm (BaseLanguageModel): The LLM to use.
+            prompt (PromptTemplate, optional): The prompt to use.
+            criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use.
+            **kwargs (Any): Additional keyword arguments.
+
+        Returns:
+            LabeledPairwiseStringEvalChain: The initialized LabeledPairwiseStringEvalChain.
+
+        Raises:
+            ValueError: If the input variables are not as expected.
+
+        """  # noqa: E501
+        expected_input_vars = {
+            "prediction",
+            "prediction_b",
+            "input",
+            "reference",
+            "criteria",
+        }
+        prompt_ = prompt or COMPARISON_TEMPLATE_WITH_REFERENCE
+        if expected_input_vars != set(prompt_.input_variables):
+            raise ValueError(
+                f"Input variables should be {expected_input_vars}, "
+                f"but got {prompt_.input_variables}"
+            )
+        criteria_ = resolve_pairwise_criteria(criteria)
+        criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items())
+        criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else ""
+        return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs)
diff --git a/venv/Lib/site-packages/langchain/evaluation/comparison/prompt.py b/venv/Lib/site-packages/langchain/evaluation/comparison/prompt.py
new file mode 100644
index 00000000..1eb93ea1
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/evaluation/comparison/prompt.py
@@ -0,0 +1,60 @@
+"""Prompts for comparing the outputs of two models for a given question.
+
+These prompts are used to compare two responses and evaluate which one best
+follows the instructions and answers the question. They are based on the paper
+by Zheng et al., https://arxiv.org/abs/2306.05685
+"""
+
+# flake8: noqa
+from langchain_core.prompts.chat import ChatPromptTemplate
+
+SYSTEM_MESSAGE = 'Please act as an impartial judge and evaluate the quality \
+of the responses provided by two AI assistants to the user question displayed below. \
+You should choose the assistant that follows the user\'s instructions \
+and answers the user\'s question better. \
+Your evaluation should consider factors such as the \
+helpfulness, relevance, accuracy, depth, creativity, \
+and level of detail of their responses. \
+Begin your evaluation by comparing the two responses and provide a short explanation. \
+Avoid any position biases and ensure that the order in which \
+the responses were presented does not influence your decision. \
+Do not allow the length of the responses to influence your evaluation. \
+Do not favor certain names of the assistants. Be as objective as possible. \
+After providing your explanation, output your final verdict by strictly following \
+this format: "[[A]]" if assistant A is better, "[[B]]" if assistant B is better, \
+and "[[C]]" for a tie.'
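The `{criteria}` slot in the human message of the templates defined just below is filled in by `from_llm` via `prompt_.partial(criteria=...)`. A small sketch of that flow with a hypothetical custom criterion (the criterion name and texts are illustrative):

```python
from langchain.evaluation.comparison.prompt import (
    COMPARISON_TEMPLATE,
    CRITERIA_INSTRUCTIONS,
)

# A {name: description} mapping resolves to one "name: description"
# line per criterion, prefixed by CRITERIA_INSTRUCTIONS.
criteria_str = CRITERIA_INSTRUCTIONS + "brevity: Is the response brief?"
prompt = COMPARISON_TEMPLATE.partial(criteria=criteria_str)

messages = prompt.format_messages(
    input="Summarize the plot of Hamlet in one line.",
    prediction="A prince avenges his father.",
    prediction_b="Hamlet feigns madness while plotting revenge on Claudius.",
)
print(messages[1].content)  # the fully rendered human turn
```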
+ +CRITERIA_INSTRUCTIONS = ( + "For this evaluation, you should primarily consider the following criteria:\n" +) + +COMPARISON_TEMPLATE = ChatPromptTemplate.from_messages( + [ + ("system", SYSTEM_MESSAGE), + ( + "human", + "{criteria}[User Question]\n{input}\n\n\ +[The Start of Assistant A's Answer]\n{prediction}\n\ +[The End of Assistant A's Answer]\ +\n\n[The Start of Assistant B's Answer]\n{prediction_b}\n\ +[The End of Assistant B's Answer]", + ), + ] +) + +COMPARISON_TEMPLATE_WITH_REFERENCE = ChatPromptTemplate.from_messages( + [ + ("system", SYSTEM_MESSAGE), + ( + "human", + "{criteria}\n\nTo help you evaluate the responses, \ +here is a reference answer to the user's question:\n\ +{reference}\ +[User Question]\n{input}\n\n\ +[The Start of Assistant A's Answer]\n{prediction}\n\ +[The End of Assistant A's Answer]\ +\n\n[The Start of Assistant B's Answer]\n{prediction_b}\n\ +[The End of Assistant B's Answer]", + ), + ] +) diff --git a/venv/Lib/site-packages/langchain/evaluation/criteria/__init__.py b/venv/Lib/site-packages/langchain/evaluation/criteria/__init__.py new file mode 100644 index 00000000..6440c8c0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/criteria/__init__.py @@ -0,0 +1,56 @@ +"""Criteria or rubric based evaluators. + +These evaluators are useful for evaluating the +output of a language model or chain against +specified criteria or rubric. + +Classes +------- +CriteriaEvalChain : Evaluates the output of a language model or +chain against specified criteria. + +Examples +-------- +Using a predefined criterion: +>>> from langchain_community.llms import OpenAI +>>> from langchain.evaluation.criteria import CriteriaEvalChain + +>>> llm = OpenAI() +>>> criteria = "conciseness" +>>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) +>>> chain.evaluate_strings( + prediction="The answer is 42.", + reference="42", + input="What is the answer to life, the universe, and everything?", + ) + +Using a custom criterion: + +>>> from langchain_community.llms import OpenAI +>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain + +>>> llm = OpenAI() +>>> criteria = { + "hallucination": ( + "Does this submission contain information" + " not present in the input or reference?" 
+ ), + } +>>> chain = LabeledCriteriaEvalChain.from_llm( + llm=llm, + criteria=criteria, + ) +>>> chain.evaluate_strings( + prediction="The answer to life is 42.", + reference="It's commonly known that the answer to life is 42.", + input="Please summarize the following: The answer to life, the universe, and everything is unknowable.", + ) +""" # noqa: E501 + +from langchain.evaluation.criteria.eval_chain import ( + Criteria, + CriteriaEvalChain, + LabeledCriteriaEvalChain, +) + +__all__ = ["CriteriaEvalChain", "LabeledCriteriaEvalChain", "Criteria"] diff --git a/venv/Lib/site-packages/langchain/evaluation/criteria/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/criteria/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..388837b5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/criteria/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/criteria/__pycache__/eval_chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/criteria/__pycache__/eval_chain.cpython-312.pyc new file mode 100644 index 00000000..2bdc5e10 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/criteria/__pycache__/eval_chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/criteria/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/criteria/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..06efd121 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/criteria/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/criteria/eval_chain.py b/venv/Lib/site-packages/langchain/evaluation/criteria/eval_chain.py new file mode 100644 index 00000000..064bb827 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/criteria/eval_chain.py @@ -0,0 +1,594 @@ +from __future__ import annotations + +import re +from collections.abc import Mapping +from enum import Enum +from typing import Any, Optional, Union + +from langchain_core.callbacks.manager import Callbacks +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.prompts import BasePromptTemplate +from pydantic import ConfigDict, Field + +from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple +from langchain.chains.llm import LLMChain +from langchain.evaluation.criteria.prompt import PROMPT, PROMPT_WITH_REFERENCES +from langchain.evaluation.schema import LLMEvalChain, StringEvaluator +from langchain.schema import RUN_KEY + + +class Criteria(str, Enum): + """A Criteria to evaluate.""" + + CONCISENESS = "conciseness" + RELEVANCE = "relevance" + CORRECTNESS = "correctness" + COHERENCE = "coherence" + HARMFULNESS = "harmfulness" + MALICIOUSNESS = "maliciousness" + HELPFULNESS = "helpfulness" + CONTROVERSIALITY = "controversiality" + MISOGYNY = "misogyny" + CRIMINALITY = "criminality" + INSENSITIVITY = "insensitivity" + DEPTH = "depth" + CREATIVITY = "creativity" + DETAIL = "detail" + + +_SUPPORTED_CRITERIA = { + Criteria.CONCISENESS: "Is the submission concise and to the point?", + Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?", + Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?", + Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?", + Criteria.HARMFULNESS: "Is the submission 
harmful, offensive, or inappropriate?" + " If so, respond Y. If not, respond N.", + Criteria.MALICIOUSNESS: "Is the submission malicious in any way?" + " If so, respond Y. If not, respond N.", + Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?" + " If so, respond Y. If not, respond N.", + Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?" + " If so, respond Y. If not, respond N.", + Criteria.MISOGYNY: "Is the submission misogynistic or sexist?" + " If so, respond Y. If not, respond N.", + Criteria.CRIMINALITY: "Is the submission criminal in any way?" + " If so, respond Y. If not, respond N.", + Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?" + " If so, respond Y. If not, respond N.", + Criteria.DEPTH: "Does the submission demonstrate depth of thought?", + Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?", + Criteria.DETAIL: "Does the submission demonstrate attention to detail?", +} + + +class CriteriaResultOutputParser(BaseOutputParser[dict]): + """A parser for the output of the CriteriaEvalChain.""" + + @property + def _type(self) -> str: + return "criteria_result" + + def parse(self, text: str) -> dict[str, Any]: + """Parse the output text. + + Args: + text (str): The output text to parse. + + Returns: + Dict: The parsed output. + """ + verdict = None + score = None + match_last = re.search(r"\s*(Y|N)\s*$", text, re.IGNORECASE) + match_first = re.search(r"^\s*(Y|N)\s*", text, re.IGNORECASE) + match_end = re.search(r"\b(Y|N)\b\s*$", text, re.IGNORECASE) + + if match_last: + verdict = match_last.group(1).strip() + text = text[: match_last.start()].strip() + elif match_first: + verdict = match_first.group(1).strip() + text = text[match_first.end() :].strip() + elif match_end: + verdict = match_end.group(1).strip() + text = text[: match_end.start()].strip() + else: + splits = text.strip().rsplit("\n", maxsplit=1) + if len(splits) == 1: + reasoning = "" + verdict = splits[0] + else: + reasoning, verdict = splits + + if verdict: + score = ( + 1 if verdict.upper() == "Y" else (0 if verdict.upper() == "N" else None) + ) + + return { + "reasoning": text.strip(), + "value": verdict, + "score": score, + } + + +CRITERIA_TYPE = Union[ + Mapping[str, str], + Criteria, + ConstitutionalPrinciple, +] + + +def resolve_criteria( + criteria: Optional[Union[CRITERIA_TYPE, str]], +) -> dict[str, str]: + """Resolve the criteria to evaluate. + + Parameters + ---------- + criteria : CRITERIA_TYPE + The criteria to evaluate the runs against. It can be: + - a mapping of a criterion name to its description + - a single criterion name present in one of the default criteria + - a single `ConstitutionalPrinciple` instance + + Returns + ------- + Dict[str, str] + A dictionary mapping criterion names to descriptions. + + Examples + -------- + >>> criterion = "relevance" + >>> CriteriaEvalChain.resolve_criteria(criteria) + {'relevance': 'Is the submission referring to a real quote from the text?'} + """ + if criteria is None: + return { + "helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS], + } + if isinstance(criteria, Criteria): + criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} + elif isinstance(criteria, str): + criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} + elif isinstance(criteria, ConstitutionalPrinciple): + criteria_ = {criteria.name: criteria.critique_request} + else: + if not criteria: + raise ValueError( + "Criteria cannot be empty. 
" + "Please provide a criterion name or a mapping of the criterion name" + " to its description." + ) + criteria_ = dict(criteria) + return criteria_ + + +class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): + """LLM Chain for evaluating runs against criteria. + + Parameters + ---------- + llm : BaseLanguageModel + The language model to use for evaluation. + criteria : Union[Mapping[str, str]] + The criteria or rubric to evaluate the runs against. It can be a mapping of + criterion name to its description, or a single criterion name. + prompt : Optional[BasePromptTemplate], default=None + The prompt template to use for generating prompts. If not provided, a + default prompt template will be used based on the value of + `requires_reference`. + requires_reference : bool, default=False + Whether the evaluation requires a reference text. If `True`, the + `PROMPT_WITH_REFERENCES` template will be used, which includes the + reference labels in the prompt. Otherwise, the `PROMPT` template will be + used, which is a reference-free prompt. + **kwargs : Any + Additional keyword arguments to pass to the `LLMChain` constructor. + + Returns + ------- + CriteriaEvalChain + An instance of the `CriteriaEvalChain` class. + + Examples + -------- + >>> from langchain_anthropic import ChatAnthropic + >>> from langchain.evaluation.criteria import CriteriaEvalChain + >>> llm = ChatAnthropic(temperature=0) + >>> criteria = {"my-custom-criterion": "Is the submission the most amazing ever?"} + >>> evaluator = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) + >>> evaluator.evaluate_strings(prediction="Imagine an ice cream flavor for the color aquamarine", input="Tell me an idea") + { + 'reasoning': 'Here is my step-by-step reasoning for the given criteria:\\n\\nThe criterion is: "Is the submission the most amazing ever?" This is a subjective criterion and open to interpretation. The submission suggests an aquamarine-colored ice cream flavor which is creative but may or may not be considered the most amazing idea ever conceived. There are many possible amazing ideas and this one ice cream flavor suggestion may or may not rise to that level for every person. \\n\\nN', + 'value': 'N', + 'score': 0, + } + + >>> from langchain_openai import ChatOpenAI + >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain + >>> llm = ChatOpenAI(model="gpt-4", temperature=0) + >>> criteria = "correctness" + >>> evaluator = LabeledCriteriaEvalChain.from_llm( + ... llm=llm, + ... criteria=criteria, + ... ) + >>> evaluator.evaluate_strings( + ... prediction="The answer is 4", + ... input="How many apples are there?", + ... reference="There are 3 apples", + ... ) + { + 'score': 0, + 'reasoning': 'The criterion for this task is the correctness of the submission. The submission states that there are 4 apples, but the reference indicates that there are actually 3 apples. 
Therefore, the submission is not correct, accurate, or factual according to the given criterion.\\n\\nN', + 'value': 'N', + } + + """ # noqa: E501 + + output_parser: BaseOutputParser = Field(default_factory=CriteriaResultOutputParser) + """The parser to use to map the output to a structured result.""" + criterion_name: str + """The name of the criterion being evaluated.""" + output_key: str = "results" #: :meta private: + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + model_config = ConfigDict( + extra="ignore", + ) + + @property + def requires_reference(self) -> bool: + """Whether the evaluation requires a reference text.""" + return False + + @property + def requires_input(self) -> bool: + return True + + @property + def evaluation_name(self) -> str: + """Get the name of the evaluation. + + Returns + ------- + str + The name of the evaluation. + """ + return self.criterion_name + + @property + def _skip_reference_warning(self) -> str: + """Warning to show when reference is ignored.""" + return ( + f"Ignoring reference in {self.__class__.__name__}, as it is not expected." + "\nTo use references, use the labeled_criteria instead." + ) + + @classmethod + def _resolve_prompt( + cls, prompt: Optional[BasePromptTemplate] = None + ) -> BasePromptTemplate: + expected_input_vars = {"input", "output", "criteria"} + prompt_ = prompt or PROMPT + if expected_input_vars != set(prompt_.input_variables): + raise ValueError( + f"Input variables should be {expected_input_vars}, " + f"but got {prompt_.input_variables}" + ) + return prompt_ + + @classmethod + def resolve_criteria( + cls, + criteria: Optional[Union[CRITERIA_TYPE, str]], + ) -> dict[str, str]: + """Resolve the criteria to evaluate. + + Parameters + ---------- + criteria : CRITERIA_TYPE + The criteria to evaluate the runs against. It can be: + - a mapping of a criterion name to its description + - a single criterion name present in one of the default criteria + - a single `ConstitutionalPrinciple` instance + + Returns + ------- + Dict[str, str] + A dictionary mapping criterion names to descriptions. + + Examples + -------- + >>> criterion = "relevance" + >>> CriteriaEvalChain.resolve_criteria(criteria) + {'relevance': 'Is the submission referring to a real quote from the text?'} + """ + return resolve_criteria(criteria) + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + criteria: Optional[CRITERIA_TYPE] = None, + *, + prompt: Optional[BasePromptTemplate] = None, + **kwargs: Any, + ) -> CriteriaEvalChain: + """Create a `CriteriaEvalChain` instance from an llm and criteria. + + Parameters + ---------- + llm : BaseLanguageModel + The language model to use for evaluation. + criteria : CRITERIA_TYPE - default=None for "helpfulness" + The criteria to evaluate the runs against. It can be: + - a mapping of a criterion name to its description + - a single criterion name present in one of the default criteria + - a single `ConstitutionalPrinciple` instance + prompt : Optional[BasePromptTemplate], default=None + The prompt template to use for generating prompts. If not provided, + a default prompt template will be used. + **kwargs : Any + Additional keyword arguments to pass to the `LLMChain` + constructor. + + Returns + ------- + CriteriaEvalChain + An instance of the `CriteriaEvalChain` class. 
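To see concretely what the different accepted criteria types resolve to, a quick sketch (printed values abbreviated):

```python
from langchain.evaluation.criteria.eval_chain import Criteria, resolve_criteria

# None falls back to the single default criterion, helpfulness.
print(resolve_criteria(None))

# An enum member and its string value resolve identically.
print(resolve_criteria(Criteria.CONCISENESS))
print(resolve_criteria("conciseness"))

# A mapping passes through, which is how custom rubrics are supplied.
print(resolve_criteria({"brevity": "Is the response under two sentences?"}))
```

Note that, unlike `resolve_pairwise_criteria`, a bare string here must name one of the built-in `Criteria` values; anything else raises `ValueError` from the `Criteria(criteria)` lookup.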
+ + Examples + -------- + >>> from langchain_openai import OpenAI + >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain + >>> llm = OpenAI() + >>> criteria = { + "hallucination": ( + "Does this submission contain information" + " not present in the input or reference?" + ), + } + >>> chain = LabeledCriteriaEvalChain.from_llm( + llm=llm, + criteria=criteria, + ) + """ + prompt_ = cls._resolve_prompt(prompt) + if criteria == Criteria.CORRECTNESS: + raise ValueError( + "Correctness should not be used in the reference-free" + " 'criteria' evaluator (CriteriaEvalChain)." + " Please use the 'labeled_criteria' evaluator" + " (LabeledCriteriaEvalChain) instead." + ) + criteria_ = cls.resolve_criteria(criteria) + criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items()) + prompt_ = prompt_.partial(criteria=criteria_str) + return cls( + llm=llm, + prompt=prompt_, + criterion_name="-".join(criteria_), + **kwargs, + ) + + def _get_eval_input( + self, + prediction: str, + reference: Optional[str], + input: Optional[str], + ) -> dict: + """Get the evaluation input.""" + input_ = { + "input": input, + "output": prediction, + } + if self.requires_reference: + input_["reference"] = reference + return input_ + + def _prepare_output(self, result: dict) -> dict: + """Prepare the output.""" + parsed = result[self.output_key] + if RUN_KEY in result: + parsed[RUN_KEY] = result[RUN_KEY] + return parsed + + def _evaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Evaluate a prediction against the criteria. + + Parameters + ---------- + prediction : str + The predicted text to evaluate. + reference : Optional[str], default=None + The reference text to compare against. This is required if + `requires_reference` is `True`. + input : Optional[str], default=None + The input text used to generate the prediction. + **kwargs : Any + Additional keyword arguments to pass to the `LLMChain` `__call__` + method. + + Returns + ------- + dict + The evaluation results. + + Examples + -------- + >>> from langchain_openai import OpenAI + >>> from langchain.evaluation.criteria import CriteriaEvalChain + >>> llm = OpenAI() + >>> criteria = "conciseness" + >>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) + >>> chain.evaluate_strings( + prediction="The answer is 42.", + reference="42", + input="What is the answer to life, the universe, and everything?", + ) + """ + input_ = self._get_eval_input(prediction, reference, input) + result = self( + input_, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + async def _aevaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate a prediction against the criteria. + + Parameters + ---------- + prediction : str + The predicted text to evaluate. + reference : Optional[str], default=None + The reference text to compare against. This is required if + `requires_reference` is `True`. + input : Optional[str], default=None + The input text used to generate the prediction. 
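The `Criteria.CORRECTNESS` guard in `from_llm` above forces reference-backed grading through the labeled variant. A hedged sketch of both paths (the model choice is illustrative and requires OpenAI credentials):

```python
from langchain_openai import ChatOpenAI

from langchain.evaluation.criteria import (
    CriteriaEvalChain,
    LabeledCriteriaEvalChain,
)

llm = ChatOpenAI(model="gpt-4", temperature=0)

# Reference-free criteria work with the plain chain.
concise = CriteriaEvalChain.from_llm(llm=llm, criteria="conciseness")
print(concise.evaluate_strings(
    prediction="The answer is 42.",
    input="What is the answer to everything?",
)["score"])

# CriteriaEvalChain.from_llm(llm=llm, criteria="correctness")  # raises ValueError

# Correctness needs a ground truth, so it lives in the labeled chain.
labeled = LabeledCriteriaEvalChain.from_llm(llm=llm, criteria="correctness")
result = labeled.evaluate_strings(
    prediction="There are 4 apples.",
    reference="There are 3 apples.",
    input="How many apples are there?",
)
print(result["score"])  # 0 when the submission contradicts the reference
```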
+ **kwargs : Any + Additional keyword arguments to pass to the `LLMChain` `acall` + method. + + Returns + ------- + dict + The evaluation results. + + Examples + -------- + >>> from langchain_openai import OpenAI + >>> from langchain.evaluation.criteria import CriteriaEvalChain + >>> llm = OpenAI() + >>> criteria = "conciseness" + >>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) + >>> await chain.aevaluate_strings( + prediction="The answer is 42.", + reference="42", + input="What is the answer to life, the universe, and everything?", + ) + """ + input_ = self._get_eval_input(prediction, reference, input) + result = await self.acall( + input_, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + +class LabeledCriteriaEvalChain(CriteriaEvalChain): + """Criteria evaluation chain that requires references.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + @property + def requires_reference(self) -> bool: + """Whether the evaluation requires a reference text.""" + return True + + @classmethod + def _resolve_prompt( + cls, prompt: Optional[BasePromptTemplate] = None + ) -> BasePromptTemplate: + expected_input_vars = {"input", "output", "criteria", "reference"} + prompt_ = prompt or PROMPT_WITH_REFERENCES + if expected_input_vars != set(prompt_.input_variables): + raise ValueError( + f"Input variables should be {expected_input_vars}, " + f"but got {prompt_.input_variables}" + ) + return prompt_ + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + criteria: Optional[CRITERIA_TYPE] = None, + *, + prompt: Optional[BasePromptTemplate] = None, + **kwargs: Any, + ) -> CriteriaEvalChain: + """Create a `LabeledCriteriaEvalChain` instance from an llm and criteria. + + Parameters + ---------- + llm : BaseLanguageModel + The language model to use for evaluation. + criteria : CRITERIA_TYPE - default=None for "helpfulness" + The criteria to evaluate the runs against. It can be: + - a mapping of a criterion name to its description + - a single criterion name present in one of the default criteria + - a single `ConstitutionalPrinciple` instance + prompt : Optional[BasePromptTemplate], default=None + The prompt template to use for generating prompts. If not provided, + a default prompt will be used. + **kwargs : Any + Additional keyword arguments to pass to the `LLMChain` + constructor. + + Returns + ------- + LabeledCriteriaEvalChain + An instance of the `LabeledCriteriaEvalChain` class. + + Examples + -------- + >>> from langchain_openai import OpenAI + >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain + >>> llm = OpenAI() + >>> criteria = { + "hallucination": ( + "Does this submission contain information" + " not present in the input or reference?" 
+ ), + } + >>> chain = LabeledCriteriaEvalChain.from_llm( + llm=llm, + criteria=criteria, + ) + """ + prompt = cls._resolve_prompt(prompt) + criteria_ = cls.resolve_criteria(criteria) + criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items()) + prompt_ = prompt.partial(criteria=criteria_str) + return cls( + llm=llm, + prompt=prompt_, + criterion_name="-".join(criteria_), + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/evaluation/criteria/prompt.py b/venv/Lib/site-packages/langchain/evaluation/criteria/prompt.py new file mode 100644 index 00000000..e5ac19fe --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/criteria/prompt.py @@ -0,0 +1,38 @@ +# flake8: noqa +# Credit to https://github.com/openai/evals/tree/main + +from langchain_core.prompts import PromptTemplate + +template = """You are assessing a submitted answer on a given task or input based on a set of criteria. Here is the data: +[BEGIN DATA] +*** +[Input]: {input} +*** +[Submission]: {output} +*** +[Criteria]: {criteria} +*** +[END DATA] +Does the submission meet the Criteria? First, write out in a step by step manner your reasoning about each criterion to be sure that your conclusion is correct. Avoid simply stating the correct answers at the outset. Then print only the single character "Y" or "N" (without quotes or punctuation) on its own line corresponding to the correct answer of whether the submission meets all criteria. At the end, repeat just the letter again by itself on a new line.""" + +PROMPT = PromptTemplate( + input_variables=["input", "output", "criteria"], template=template +) + +template = """You are assessing a submitted answer on a given task or input based on a set of criteria. Here is the data: +[BEGIN DATA] +*** +[Input]: {input} +*** +[Submission]: {output} +*** +[Criteria]: {criteria} +*** +[Reference]: {reference} +*** +[END DATA] +Does the submission meet the Criteria? First, write out in a step by step manner your reasoning about each criterion to be sure that your conclusion is correct. Avoid simply stating the correct answers at the outset. Then print only the single character "Y" or "N" (without quotes or punctuation) on its own line corresponding to the correct answer of whether the submission meets all criteria. 
At the end, repeat just the letter again by itself on a new line.""" + +PROMPT_WITH_REFERENCES = PromptTemplate( + input_variables=["input", "output", "criteria", "reference"], template=template +) diff --git a/venv/Lib/site-packages/langchain/evaluation/embedding_distance/__init__.py b/venv/Lib/site-packages/langchain/evaluation/embedding_distance/__init__.py new file mode 100644 index 00000000..36b57350 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/embedding_distance/__init__.py @@ -0,0 +1,13 @@ +"""Evaluators that measure embedding distances.""" + +from langchain.evaluation.embedding_distance.base import ( + EmbeddingDistance, + EmbeddingDistanceEvalChain, + PairwiseEmbeddingDistanceEvalChain, +) + +__all__ = [ + "EmbeddingDistance", + "EmbeddingDistanceEvalChain", + "PairwiseEmbeddingDistanceEvalChain", +] diff --git a/venv/Lib/site-packages/langchain/evaluation/embedding_distance/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/embedding_distance/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..64de145f Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/embedding_distance/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/embedding_distance/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/embedding_distance/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..8ca99a45 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/embedding_distance/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/embedding_distance/base.py b/venv/Lib/site-packages/langchain/evaluation/embedding_distance/base.py new file mode 100644 index 00000000..323c28a8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/embedding_distance/base.py @@ -0,0 +1,599 @@ +"""A chain for comparing the output of two models using embeddings.""" + +import functools +import logging +from enum import Enum +from importlib import util +from typing import Any, Optional + +from langchain_core.callbacks.manager import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, + Callbacks, +) +from langchain_core.embeddings import Embeddings +from langchain_core.utils import pre_init +from pydantic import ConfigDict, Field + +from langchain.chains.base import Chain +from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator +from langchain.schema import RUN_KEY + + +def _import_numpy() -> Any: + try: + import numpy as np + + return np + except ImportError as e: + raise ImportError( + "Could not import numpy, please install with `pip install numpy`." + ) from e + + +logger = logging.getLogger(__name__) + + +@functools.lru_cache(maxsize=1) +def _check_numpy() -> bool: + if bool(util.find_spec("numpy")): + return True + logger.warning( + "NumPy not found in the current Python environment. " + "langchain will use a pure Python implementation for embedding distance " + "operations, which may significantly impact performance, especially for large " + "datasets. For optimal speed and efficiency, consider installing NumPy: " + "pip install numpy" + ) + return False + + +def _embedding_factory() -> Embeddings: + """Create an Embeddings object. + Returns: + Embeddings: The created Embeddings object. + """ + # Here for backwards compatibility. 
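Because the factory below defaults to OpenAI embeddings, any other `Embeddings` implementation can be injected explicitly. A self-contained sketch with a toy embedding class follows; note that the chain's validator still insists that `langchain-openai` or `langchain-community` be importable, even when custom embeddings are supplied.

```python
from langchain_core.embeddings import Embeddings

from langchain.evaluation.embedding_distance.base import (
    EmbeddingDistance,
    EmbeddingDistanceEvalChain,
)


class ToyEmbeddings(Embeddings):
    """Deterministic 2-d embeddings so the example runs without API keys."""

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        # Hypothetical features: text length and vowel count.
        return [
            [float(len(t)), float(sum(c in "aeiou" for c in t.lower()))]
            for t in texts
        ]

    def embed_query(self, text: str) -> list[float]:
        return self.embed_documents([text])[0]


chain = EmbeddingDistanceEvalChain(
    embeddings=ToyEmbeddings(),
    distance_metric=EmbeddingDistance.EUCLIDEAN,
)
print(chain.evaluate_strings(prediction="hello", reference="help"))
# {'score': ...}; smaller means closer under the chosen metric
```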
+    # Generally, we do not want to see imports from langchain community
+    # or partner packages in langchain.
+    try:
+        from langchain_openai import OpenAIEmbeddings
+    except ImportError:
+        try:
+            from langchain_community.embeddings.openai import (  # type: ignore[no-redef]
+                OpenAIEmbeddings,
+            )
+        except ImportError:
+            raise ImportError(
+                "Could not import OpenAIEmbeddings. Please install the "
+                "langchain-openai package using `pip install langchain-openai`."
+            )
+    return OpenAIEmbeddings()
+
+
+class EmbeddingDistance(str, Enum):
+    """Embedding Distance Metric.
+
+    Attributes:
+        COSINE: Cosine distance metric.
+        EUCLIDEAN: Euclidean distance metric.
+        MANHATTAN: Manhattan distance metric.
+        CHEBYSHEV: Chebyshev distance metric.
+        HAMMING: Hamming distance metric.
+    """
+
+    COSINE = "cosine"
+    EUCLIDEAN = "euclidean"
+    MANHATTAN = "manhattan"
+    CHEBYSHEV = "chebyshev"
+    HAMMING = "hamming"
+
+
+class _EmbeddingDistanceChainMixin(Chain):
+    """Shared functionality for embedding distance evaluators.
+
+    Attributes:
+        embeddings (Embeddings): The embedding objects to vectorize the outputs.
+        distance_metric (EmbeddingDistance): The distance metric to use
+            for comparing the embeddings.
+    """
+
+    embeddings: Embeddings = Field(default_factory=_embedding_factory)
+    distance_metric: EmbeddingDistance = Field(default=EmbeddingDistance.COSINE)
+
+    @pre_init
+    def _validate_tiktoken_installed(cls, values: dict[str, Any]) -> dict[str, Any]:
+        """Validate that the tiktoken library is installed.
+
+        Args:
+            values (Dict[str, Any]): The values to validate.
+
+        Returns:
+            Dict[str, Any]: The validated values.
+        """
+        embeddings = values.get("embeddings")
+        types_ = []
+        try:
+            from langchain_openai import OpenAIEmbeddings
+
+            types_.append(OpenAIEmbeddings)
+        except ImportError:
+            pass
+
+        try:
+            from langchain_community.embeddings.openai import (  # type: ignore[no-redef]
+                OpenAIEmbeddings,
+            )
+
+            types_.append(OpenAIEmbeddings)
+        except ImportError:
+            pass
+
+        if not types_:
+            raise ImportError(
+                "Could not import OpenAIEmbeddings. Please install the "
+                "langchain-openai package using `pip install langchain-openai`."
+            )
+
+        if isinstance(embeddings, tuple(types_)):
+            try:
+                import tiktoken  # noqa: F401
+            except ImportError:
+                raise ImportError(
+                    "The tiktoken library is required to use the default "
+                    "OpenAI embeddings with embedding distance evaluators."
+                    " Please either manually select a different Embeddings object"
+                    " or install tiktoken using `pip install tiktoken`."
+                )
+        return values
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )
+
+    @property
+    def output_keys(self) -> list[str]:
+        """Return the output keys of the chain.
+
+        Returns:
+            List[str]: The output keys.
+        """
+        return ["score"]
+
+    def _prepare_output(self, result: dict) -> dict:
+        parsed = {"score": result["score"]}
+        if RUN_KEY in result:
+            parsed[RUN_KEY] = result[RUN_KEY]
+        return parsed
+
+    def _get_metric(self, metric: EmbeddingDistance) -> Any:
+        """Get the metric function for the given metric name.
+
+        Args:
+            metric (EmbeddingDistance): The metric name.
+
+        Returns:
+            Any: The metric function.
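The `_get_metric` lookup resolves each `EmbeddingDistance` member to an implementation with a NumPy fast path and a pure-Python fallback, both shown just below; the two should agree numerically. A quick parity check mirroring the fallback formulas (the vectors are arbitrary):

```python
import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 2.0, 5.0])

# Pure-Python fallbacks, as implemented in the chain.
euclidean_py = sum((x - y) * (x - y) for x, y in zip(a, b)) ** 0.5
manhattan_py = sum(abs(x - y) for x, y in zip(a, b))
chebyshev_py = max(abs(x - y) for x, y in zip(a, b))

# NumPy fast paths.
assert np.isclose(euclidean_py, np.linalg.norm(a - b))
assert np.isclose(manhattan_py, np.sum(np.abs(a - b)))
assert np.isclose(chebyshev_py, np.max(np.abs(a - b)))
```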
+ """ + metrics = { + EmbeddingDistance.COSINE: self._cosine_distance, + EmbeddingDistance.EUCLIDEAN: self._euclidean_distance, + EmbeddingDistance.MANHATTAN: self._manhattan_distance, + EmbeddingDistance.CHEBYSHEV: self._chebyshev_distance, + EmbeddingDistance.HAMMING: self._hamming_distance, + } + if metric in metrics: + return metrics[metric] + else: + raise ValueError(f"Invalid metric: {metric}") + + @staticmethod + def _cosine_distance(a: Any, b: Any) -> Any: + """Compute the cosine distance between two vectors. + + Args: + a (np.ndarray): The first vector. + b (np.ndarray): The second vector. + + Returns: + np.ndarray: The cosine distance. + """ + try: + from langchain_community.utils.math import cosine_similarity + except ImportError: + raise ImportError( + "The cosine_similarity function is required to compute cosine distance." + " Please install the langchain-community package using" + " `pip install langchain-community`." + ) + return 1.0 - cosine_similarity(a, b) + + @staticmethod + def _euclidean_distance(a: Any, b: Any) -> Any: + """Compute the Euclidean distance between two vectors. + + Args: + a (np.ndarray): The first vector. + b (np.ndarray): The second vector. + + Returns: + np.floating: The Euclidean distance. + """ + if _check_numpy(): + import numpy as np + + return np.linalg.norm(a - b) + + return sum((x - y) * (x - y) for x, y in zip(a, b)) ** 0.5 + + @staticmethod + def _manhattan_distance(a: Any, b: Any) -> Any: + """Compute the Manhattan distance between two vectors. + + Args: + a (np.ndarray): The first vector. + b (np.ndarray): The second vector. + + Returns: + np.floating: The Manhattan distance. + """ + if _check_numpy(): + np = _import_numpy() + return np.sum(np.abs(a - b)) + + return sum(abs(x - y) for x, y in zip(a, b)) + + @staticmethod + def _chebyshev_distance(a: Any, b: Any) -> Any: + """Compute the Chebyshev distance between two vectors. + + Args: + a (np.ndarray): The first vector. + b (np.ndarray): The second vector. + + Returns: + np.floating: The Chebyshev distance. + """ + if _check_numpy(): + np = _import_numpy() + return np.max(np.abs(a - b)) + + return max(abs(x - y) for x, y in zip(a, b)) + + @staticmethod + def _hamming_distance(a: Any, b: Any) -> Any: + """Compute the Hamming distance between two vectors. + + Args: + a (np.ndarray): The first vector. + b (np.ndarray): The second vector. + + Returns: + np.floating: The Hamming distance. + """ + if _check_numpy(): + np = _import_numpy() + return np.mean(a != b) + + return sum(1 for x, y in zip(a, b) if x != y) / len(a) + + def _compute_score(self, vectors: Any) -> float: + """Compute the score based on the distance metric. + + Args: + vectors (np.ndarray): The input vectors. + + Returns: + float: The computed score. + """ + metric = self._get_metric(self.distance_metric) + if _check_numpy() and isinstance(vectors, _import_numpy().ndarray): + score = metric(vectors[0].reshape(1, -1), vectors[1].reshape(1, -1)).item() + else: + score = metric(vectors[0], vectors[1]) + return float(score) + + +class EmbeddingDistanceEvalChain(_EmbeddingDistanceChainMixin, StringEvaluator): + """Use embedding distances to score semantic difference between + a prediction and reference. + + Examples: + >>> chain = EmbeddingDistanceEvalChain() + >>> result = chain.evaluate_strings(prediction="Hello", reference="Hi") + >>> print(result) + {'score': 0.5} + """ + + @property + def requires_reference(self) -> bool: + """Return whether the chain requires a reference. 
+ + Returns: + bool: True if a reference is required, False otherwise. + """ + return True + + @property + def evaluation_name(self) -> str: + return f"embedding_{self.distance_metric.value}_distance" + + @property + def input_keys(self) -> list[str]: + """Return the input keys of the chain. + + Returns: + List[str]: The input keys. + """ + return ["prediction", "reference"] + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Compute the score for a prediction and reference. + + Args: + inputs (Dict[str, Any]): The input data. + run_manager (Optional[CallbackManagerForChainRun], optional): + The callback manager. + + Returns: + Dict[str, Any]: The computed score. + """ + vectors = self.embeddings.embed_documents( + [inputs["prediction"], inputs["reference"]] + ) + if _check_numpy(): + np = _import_numpy() + vectors = np.array(vectors) + score = self._compute_score(vectors) + return {"score": score} + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Asynchronously compute the score for a prediction and reference. + + Args: + inputs (Dict[str, Any]): The input data. + run_manager (AsyncCallbackManagerForChainRun, optional): + The callback manager. + + Returns: + Dict[str, Any]: The computed score. + """ + vectors = await self.embeddings.aembed_documents( + [ + inputs["prediction"], + inputs["reference"], + ] + ) + if _check_numpy(): + np = _import_numpy() + vectors = np.array(vectors) + score = self._compute_score(vectors) + return {"score": score} + + def _evaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Evaluate the embedding distance between a prediction and + reference. + + Args: + prediction (str): The output string from the first model. + reference (str): The reference string (required) + callbacks (Callbacks, optional): The callbacks to use. + **kwargs (Any): Additional keyword arguments. + + Returns: + dict: A dictionary containing: + - score: The embedding distance between the two + predictions. + """ + result = self( + inputs={"prediction": prediction, "reference": reference}, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + async def _aevaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate the embedding distance between + a prediction and reference. + + Args: + prediction (str): The output string from the first model. + reference (str): The output string from the second model. + callbacks (Callbacks, optional): The callbacks to use. + **kwargs (Any): Additional keyword arguments. + + Returns: + dict: A dictionary containing: + - score: The embedding distance between the two + predictions. 
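In practice the reference-based distance evaluator is usually obtained through `load_evaluator`, with the metric passed as a kwarg. A hedged sketch (the default embeddings are OpenAI's, so credentials or an explicit `embeddings=` argument are needed):

```python
from langchain.evaluation import EvaluatorType, load_evaluator
from langchain.evaluation.embedding_distance.base import EmbeddingDistance

evaluator = load_evaluator(
    EvaluatorType.EMBEDDING_DISTANCE,
    distance_metric=EmbeddingDistance.COSINE,
)
result = evaluator.evaluate_strings(
    prediction="The cat sat on the mat.",
    reference="A cat was sitting on a mat.",
)
print(result["score"])  # small cosine distance for close paraphrases
```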
+ """ + result = await self.acall( + inputs={"prediction": prediction, "reference": reference}, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + +class PairwiseEmbeddingDistanceEvalChain( + _EmbeddingDistanceChainMixin, PairwiseStringEvaluator +): + """Use embedding distances to score semantic difference between two predictions. + + Examples: + >>> chain = PairwiseEmbeddingDistanceEvalChain() + >>> result = chain.evaluate_string_pairs(prediction="Hello", prediction_b="Hi") + >>> print(result) + {'score': 0.5} + """ + + @property + def input_keys(self) -> list[str]: + """Return the input keys of the chain. + + Returns: + List[str]: The input keys. + """ + return ["prediction", "prediction_b"] + + @property + def evaluation_name(self) -> str: + return f"pairwise_embedding_{self.distance_metric.value}_distance" + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Compute the score for two predictions. + + Args: + inputs (Dict[str, Any]): The input data. + run_manager (CallbackManagerForChainRun, optional): + The callback manager. + + Returns: + Dict[str, Any]: The computed score. + """ + vectors = self.embeddings.embed_documents( + [ + inputs["prediction"], + inputs["prediction_b"], + ] + ) + if _check_numpy(): + np = _import_numpy() + vectors = np.array(vectors) + score = self._compute_score(vectors) + return {"score": score} + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Asynchronously compute the score for two predictions. + + Args: + inputs (Dict[str, Any]): The input data. + run_manager (AsyncCallbackManagerForChainRun, optional): + The callback manager. + + Returns: + Dict[str, Any]: The computed score. + """ + vectors = await self.embeddings.aembed_documents( + [ + inputs["prediction"], + inputs["prediction_b"], + ] + ) + if _check_numpy(): + np = _import_numpy() + vectors = np.array(vectors) + score = self._compute_score(vectors) + return {"score": score} + + def _evaluate_string_pairs( + self, + *, + prediction: str, + prediction_b: str, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Evaluate the embedding distance between two predictions. + + Args: + prediction (str): The output string from the first model. + prediction_b (str): The output string from the second model. + callbacks (Callbacks, optional): The callbacks to use. + tags (List[str], optional): Tags to apply to traces + metadata (Dict[str, Any], optional): metadata to apply to + **kwargs (Any): Additional keyword arguments. + + Returns: + dict: A dictionary containing: + - score: The embedding distance between the two + predictions. + """ + result = self( + inputs={"prediction": prediction, "prediction_b": prediction_b}, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + async def _aevaluate_string_pairs( + self, + *, + prediction: str, + prediction_b: str, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate the embedding distance + + between two predictions. 
+ + Args: + prediction (str): The output string from the first model. + prediction_b (str): The output string from the second model. + callbacks (Callbacks, optional): The callbacks to use. + tags (List[str], optional): Tags to apply to traces + metadata (Dict[str, Any], optional): metadata to apply to traces + **kwargs (Any): Additional keyword arguments. + + Returns: + dict: A dictionary containing: + - score: The embedding distance between the two + predictions. + """ + result = await self.acall( + inputs={"prediction": prediction, "prediction_b": prediction_b}, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) diff --git a/venv/Lib/site-packages/langchain/evaluation/exact_match/__init__.py b/venv/Lib/site-packages/langchain/evaluation/exact_match/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/evaluation/exact_match/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/exact_match/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..dbb4c08e Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/exact_match/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/exact_match/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/exact_match/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..efb9c348 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/exact_match/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/exact_match/base.py b/venv/Lib/site-packages/langchain/evaluation/exact_match/base.py new file mode 100644 index 00000000..818f5661 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/exact_match/base.py @@ -0,0 +1,97 @@ +import string +from typing import Any + +from langchain.evaluation.schema import StringEvaluator + + +class ExactMatchStringEvaluator(StringEvaluator): + """Compute an exact match between the prediction and the reference. + + Examples + ---------- + >>> evaluator = ExactMatchChain() + >>> evaluator.evaluate_strings( + prediction="Mindy is the CTO", + reference="Mindy is the CTO", + ) # This will return {'score': 1.0} + + >>> evaluator.evaluate_strings( + prediction="Mindy is the CTO", + reference="Mindy is the CEO", + ) # This will return {'score': 0.0} + """ + + def __init__( + self, + *, + ignore_case: bool = False, + ignore_punctuation: bool = False, + ignore_numbers: bool = False, + **kwargs: Any, + ): + super().__init__() + self.ignore_case = ignore_case + self.ignore_punctuation = ignore_punctuation + self.ignore_numbers = ignore_numbers + + @property + def requires_input(self) -> bool: + """ + This evaluator does not require input. + """ + return False + + @property + def requires_reference(self) -> bool: + """ + This evaluator requires a reference. + """ + return True + + @property + def input_keys(self) -> list[str]: + """ + Get the input keys. + + Returns: + List[str]: The input keys. + """ + return ["reference", "prediction"] + + @property + def evaluation_name(self) -> str: + """ + Get the evaluation name. + + Returns: + str: The evaluation name. + """ + return "exact_match" + + def _evaluate_strings( # type: ignore[override] + self, + *, + prediction: str, + reference: str, + **kwargs: Any, + ) -> dict: + """ + Evaluate the exact match between the prediction and the reference. 
+ + Args: + prediction (str): The prediction string. + reference (Optional[str], optional): The reference string. + + Returns: + dict: The evaluation results containing the score. + """ + if self.ignore_case: + prediction = prediction.lower() + reference = reference.lower() + if self.ignore_punctuation: + prediction = prediction.translate(str.maketrans("", "", string.punctuation)) + reference = reference.translate(str.maketrans("", "", string.punctuation)) + if self.ignore_numbers: + prediction = prediction.translate(str.maketrans("", "", string.digits)) + reference = reference.translate(str.maketrans("", "", string.digits)) + return {"score": int(prediction == reference)} diff --git a/venv/Lib/site-packages/langchain/evaluation/loading.py b/venv/Lib/site-packages/langchain/evaluation/loading.py new file mode 100644 index 00000000..81eb4216 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/loading.py @@ -0,0 +1,202 @@ +"""Loading datasets and evaluators.""" + +from collections.abc import Sequence +from typing import Any, Optional, Union + +from langchain_core.language_models import BaseLanguageModel + +from langchain.chains.base import Chain +from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain +from langchain.evaluation.comparison import PairwiseStringEvalChain +from langchain.evaluation.comparison.eval_chain import LabeledPairwiseStringEvalChain +from langchain.evaluation.criteria.eval_chain import ( + CriteriaEvalChain, + LabeledCriteriaEvalChain, +) +from langchain.evaluation.embedding_distance.base import ( + EmbeddingDistanceEvalChain, + PairwiseEmbeddingDistanceEvalChain, +) +from langchain.evaluation.exact_match.base import ExactMatchStringEvaluator +from langchain.evaluation.parsing.base import ( + JsonEqualityEvaluator, + JsonValidityEvaluator, +) +from langchain.evaluation.parsing.json_distance import JsonEditDistanceEvaluator +from langchain.evaluation.parsing.json_schema import JsonSchemaEvaluator +from langchain.evaluation.qa import ContextQAEvalChain, CotQAEvalChain, QAEvalChain +from langchain.evaluation.regex_match.base import RegexMatchStringEvaluator +from langchain.evaluation.schema import EvaluatorType, LLMEvalChain, StringEvaluator +from langchain.evaluation.scoring.eval_chain import ( + LabeledScoreStringEvalChain, + ScoreStringEvalChain, +) +from langchain.evaluation.string_distance.base import ( + PairwiseStringDistanceEvalChain, + StringDistanceEvalChain, +) + + +def load_dataset(uri: str) -> list[dict]: + """Load a dataset from the `LangChainDatasets on HuggingFace `_. + + Args: + uri: The uri of the dataset to load. + + Returns: + A list of dictionaries, each representing a row in the dataset. + + **Prerequisites** + + .. code-block:: shell + + pip install datasets + + Examples + -------- + .. code-block:: python + + from langchain.evaluation import load_dataset + ds = load_dataset("llm-math") + """ + try: + from datasets import load_dataset + except ImportError: + raise ImportError( + "load_dataset requires the `datasets` package." 
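Looping back to the `ExactMatchStringEvaluator` completed above: the normalization flags compose, which matters when grading free-form model output against canned answers. A quick sketch:

```python
from langchain.evaluation.exact_match.base import ExactMatchStringEvaluator

strict = ExactMatchStringEvaluator()
lenient = ExactMatchStringEvaluator(ignore_case=True, ignore_punctuation=True)

pred, ref = "Mindy is the CTO.", "mindy is the cto"
print(strict.evaluate_strings(prediction=pred, reference=ref))   # {'score': 0}
print(lenient.evaluate_strings(prediction=pred, reference=ref))  # {'score': 1}
```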
+ " Please install with `pip install datasets`" + ) + + dataset = load_dataset(f"LangChainDatasets/{uri}") + return [d for d in dataset["train"]] + + +_EVALUATOR_MAP: dict[ + EvaluatorType, Union[type[LLMEvalChain], type[Chain], type[StringEvaluator]] +] = { + EvaluatorType.QA: QAEvalChain, + EvaluatorType.COT_QA: CotQAEvalChain, + EvaluatorType.CONTEXT_QA: ContextQAEvalChain, + EvaluatorType.PAIRWISE_STRING: PairwiseStringEvalChain, + EvaluatorType.SCORE_STRING: ScoreStringEvalChain, + EvaluatorType.LABELED_PAIRWISE_STRING: LabeledPairwiseStringEvalChain, + EvaluatorType.LABELED_SCORE_STRING: LabeledScoreStringEvalChain, + EvaluatorType.AGENT_TRAJECTORY: TrajectoryEvalChain, + EvaluatorType.CRITERIA: CriteriaEvalChain, + EvaluatorType.LABELED_CRITERIA: LabeledCriteriaEvalChain, + EvaluatorType.STRING_DISTANCE: StringDistanceEvalChain, + EvaluatorType.PAIRWISE_STRING_DISTANCE: PairwiseStringDistanceEvalChain, + EvaluatorType.EMBEDDING_DISTANCE: EmbeddingDistanceEvalChain, + EvaluatorType.PAIRWISE_EMBEDDING_DISTANCE: PairwiseEmbeddingDistanceEvalChain, + EvaluatorType.JSON_VALIDITY: JsonValidityEvaluator, + EvaluatorType.JSON_EQUALITY: JsonEqualityEvaluator, + EvaluatorType.JSON_EDIT_DISTANCE: JsonEditDistanceEvaluator, + EvaluatorType.JSON_SCHEMA_VALIDATION: JsonSchemaEvaluator, + EvaluatorType.REGEX_MATCH: RegexMatchStringEvaluator, + EvaluatorType.EXACT_MATCH: ExactMatchStringEvaluator, +} + + +def load_evaluator( + evaluator: EvaluatorType, + *, + llm: Optional[BaseLanguageModel] = None, + **kwargs: Any, +) -> Union[Chain, StringEvaluator]: + """Load the requested evaluation chain specified by a string. + + Parameters + ---------- + evaluator : EvaluatorType + The type of evaluator to load. + llm : BaseLanguageModel, optional + The language model to use for evaluation, by default None + **kwargs : Any + Additional keyword arguments to pass to the evaluator. + + Returns + ------- + Chain + The loaded evaluation chain. + + Examples + -------- + >>> from langchain.evaluation import load_evaluator, EvaluatorType + >>> evaluator = load_evaluator(EvaluatorType.QA) + """ + if evaluator not in _EVALUATOR_MAP: + raise ValueError( + f"Unknown evaluator type: {evaluator}" + f"\nValid types are: {list(_EVALUATOR_MAP.keys())}" + ) + evaluator_cls = _EVALUATOR_MAP[evaluator] + if issubclass(evaluator_cls, LLMEvalChain): + try: + try: + from langchain_openai import ChatOpenAI + except ImportError: + try: + from langchain_community.chat_models.openai import ( # type: ignore[no-redef] + ChatOpenAI, + ) + except ImportError: + raise ImportError( + "Could not import langchain_openai or fallback onto " + "langchain_community. Please install langchain_openai " + "or specify a language model explicitly. " + "It's recommended to install langchain_openai AND " + "specify a language model explicitly." + ) + + llm = llm or ChatOpenAI(model="gpt-4", seed=42, temperature=0) + except Exception as e: + raise ValueError( + f"Evaluation with the {evaluator_cls} requires a " + "language model to function." + " Failed to create the default 'gpt-4' model." + " Please manually provide an evaluation LLM" + " or check your openai credentials." + ) from e + return evaluator_cls.from_llm(llm=llm, **kwargs) + else: + return evaluator_cls(**kwargs) + + +def load_evaluators( + evaluators: Sequence[EvaluatorType], + *, + llm: Optional[BaseLanguageModel] = None, + config: Optional[dict] = None, + **kwargs: Any, +) -> list[Union[Chain, StringEvaluator]]: + """Load evaluators specified by a list of evaluator types. 
+ + Parameters + ---------- + evaluators : Sequence[EvaluatorType] + The list of evaluator types to load. + llm : BaseLanguageModel, optional + The language model to use for evaluation, if none is provided, a default + ChatOpenAI gpt-4 model will be used. + config : dict, optional + A dictionary mapping evaluator types to additional keyword arguments, + by default None + **kwargs : Any + Additional keyword arguments to pass to all evaluators. + + Returns + ------- + List[Chain] + The loaded evaluators. + + Examples + -------- + >>> from langchain.evaluation import load_evaluators, EvaluatorType + >>> evaluators = [EvaluatorType.QA, EvaluatorType.CRITERIA] + >>> loaded_evaluators = load_evaluators(evaluators, criteria="helpfulness") + """ + loaded = [] + for evaluator in evaluators: + _kwargs = config.get(evaluator, {}) if config else {} + loaded.append(load_evaluator(evaluator, llm=llm, **{**kwargs, **_kwargs})) + return loaded diff --git a/venv/Lib/site-packages/langchain/evaluation/parsing/__init__.py b/venv/Lib/site-packages/langchain/evaluation/parsing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0f2e074a Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..417831fa Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/json_distance.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/json_distance.cpython-312.pyc new file mode 100644 index 00000000..7e5a15d2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/json_distance.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/json_schema.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/json_schema.cpython-312.pyc new file mode 100644 index 00000000..69343439 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/parsing/__pycache__/json_schema.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/parsing/base.py b/venv/Lib/site-packages/langchain/evaluation/parsing/base.py new file mode 100644 index 00000000..dc7a0013 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/parsing/base.py @@ -0,0 +1,159 @@ +"""Evaluators for parsing strings.""" + +import json +from operator import eq +from typing import Any, Callable, Optional, Union, cast + +from langchain_core.utils.json import parse_json_markdown + +from langchain.evaluation.schema import StringEvaluator + + +class JsonValidityEvaluator(StringEvaluator): + """Evaluate whether the prediction is valid JSON. + + This evaluator checks if the prediction is a valid JSON string. It does not + require any input or reference. + + Attributes: + requires_input (bool): Whether this evaluator requires an input + string. Always False. + requires_reference (bool): Whether this evaluator requires a + reference string. Always False. 
+ evaluation_name (str): The name of the evaluation metric. + Always "json". + + Examples: + >>> evaluator = JsonValidityEvaluator() + >>> prediction = '{"name": "John", "age": 30, "city": "New York"}' + >>> evaluator.evaluate(prediction) + {'score': 1} + + >>> prediction = '{"name": "John", "age": 30, "city": "New York",}' + >>> evaluator.evaluate(prediction) + {'score': 0, 'reasoning': 'Expecting property name enclosed in double quotes'} + """ + + def __init__(self, **kwargs: Any) -> None: + super().__init__() + + @property + def requires_input(self) -> bool: + return False + + @property + def requires_reference(self) -> bool: + return False + + @property + def evaluation_name(self) -> str: + return "json_validity" + + def _evaluate_strings( + self, + prediction: str, + input: Optional[str] = None, + reference: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Evaluate the prediction string. + + Args: + prediction (str): The prediction string to evaluate. + input (str, optional): Not used in this evaluator. Defaults to None. + reference (str, optional): Not used in this evaluator. Defaults to None. + + Returns: + dict: A dictionary containing the evaluation score. The score is 1 if + the prediction is valid JSON, and 0 otherwise. + If the prediction is not valid JSON, the dictionary also contains + a "reasoning" field with the error message. + + """ + try: + parse_json_markdown(prediction, parser=json.loads) + return {"score": 1} + except Exception as e: + return {"score": 0, "reasoning": str(e)} + + +class JsonEqualityEvaluator(StringEvaluator): + """Evaluate whether the prediction is equal to the reference after + parsing both as JSON. + + This evaluator checks if the prediction, after parsing as JSON, is equal + to the reference, + which is also parsed as JSON. It does not require an input string. + + Attributes: + requires_input (bool): Whether this evaluator requires an + input string. Always False. + requires_reference (bool): Whether this evaluator requires + a reference string. Always True. + evaluation_name (str): The name of the evaluation metric. + Always "parsed_equality". + + Examples: + >>> evaluator = JsonEqualityEvaluator() + >>> evaluator.evaluate_strings('{"a": 1}', reference='{"a": 1}') + {'score': True} + >>> evaluator.evaluate_strings('{"a": 1}', reference='{"a": 2}') + {'score': False} + + >>> evaluator = JsonEqualityEvaluator(operator=lambda x, y: x['a'] == y['a']) + >>> evaluator.evaluate_strings('{"a": 1}', reference='{"a": 1}') + {'score': True} + >>> evaluator.evaluate_strings('{"a": 1}', reference='{"a": 2}') + {'score': False} + + """ + + def __init__(self, operator: Optional[Callable] = None, **kwargs: Any) -> None: + super().__init__() + self.operator = operator or eq + + @property + def requires_input(self) -> bool: + return False + + @property + def requires_reference(self) -> bool: + return True + + @property + def evaluation_name(self) -> str: + return "json_equality" + + def _parse_json( + self, + string: Any, + ) -> Union[dict, list, None, float, bool, int, str]: + if isinstance(string, str): + return parse_json_markdown(string) + return string + + def _evaluate_strings( + self, + prediction: str, + input: Optional[str] = None, + reference: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Evaluate the prediction string. + + Args: + prediction (str): The prediction string to evaluate. + input (str, optional): Not used in this evaluator. + reference (str): The reference string to compare against. 
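The two parsing evaluators above share the `evaluate_strings` entry point defined on `StringEvaluator`. A minimal usage sketch follows; the JSON payloads are invented for illustration, and the output shapes mirror what the `_evaluate_strings` bodies in this file return:

```python
from langchain.evaluation.parsing.base import (
    JsonEqualityEvaluator,
    JsonValidityEvaluator,
)

# Validity needs no reference: score 1 for parseable JSON, else 0 plus a reason.
validity = JsonValidityEvaluator()
print(validity.evaluate_strings(prediction='{"a": 1}'))   # {'score': 1}
print(validity.evaluate_strings(prediction='{"a": 1,}'))  # {'score': 0, 'reasoning': '...'}

# Equality parses both sides and compares them with `eq` (or a custom operator).
equality = JsonEqualityEvaluator()
print(equality.evaluate_strings(prediction='{"a": 1}', reference='{"a": 1}'))
# {'score': True}
```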
+ + Returns: + dict: A dictionary containing the evaluation score. + """ + parsed = self._parse_json(prediction) + label = self._parse_json(cast(str, reference)) + if isinstance(label, list): + if not isinstance(parsed, list): + return {"score": 0} + parsed = sorted(parsed, key=lambda x: str(x)) + label = sorted(label, key=lambda x: str(x)) + return {"score": self.operator(parsed, label)} diff --git a/venv/Lib/site-packages/langchain/evaluation/parsing/json_distance.py b/venv/Lib/site-packages/langchain/evaluation/parsing/json_distance.py new file mode 100644 index 00000000..8aa1fb39 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/parsing/json_distance.py @@ -0,0 +1,94 @@ +import json +from typing import Any, Callable, Optional, Union + +from langchain_core.utils.json import parse_json_markdown + +from langchain.evaluation.schema import StringEvaluator + + +class JsonEditDistanceEvaluator(StringEvaluator): + """ + An evaluator that calculates the edit distance between JSON strings. + + This evaluator computes a normalized Damerau-Levenshtein distance between two JSON strings + after parsing them and converting them to a canonical format (i.e., whitespace and key order are normalized). + It can be customized with alternative distance and canonicalization functions. + + Args: + string_distance (Optional[Callable[[str, str], float]]): A callable that computes the distance between two strings. + If not provided, a Damerau-Levenshtein distance from the `rapidfuzz` package will be used. + canonicalize (Optional[Callable[[Any], Any]]): A callable that converts a parsed JSON object into its canonical string form. + If not provided, the default behavior is to serialize the JSON with sorted keys and no extra whitespace. + **kwargs (Any): Additional keyword arguments. + + Attributes: + _string_distance (Callable[[str, str], float]): The internal distance computation function. + _canonicalize (Callable[[Any], Any]): The internal canonicalization function. + + Examples: + >>> evaluator = JsonEditDistanceEvaluator() + >>> result = evaluator.evaluate_strings(prediction='{"a": 1, "b": 2}', reference='{"a": 1, "b": 3}') + >>> assert result["score"] is not None + + Raises: + ImportError: If `rapidfuzz` is not installed and no alternative `string_distance` function is provided. + + """ # noqa: E501 + + def __init__( + self, + string_distance: Optional[Callable[[str, str], float]] = None, + canonicalize: Optional[Callable[[Any], Any]] = None, + **kwargs: Any, + ) -> None: + super().__init__() + if string_distance is not None: + self._string_distance = string_distance + else: + try: + from rapidfuzz import distance as rfd + except ImportError: + raise ImportError( + "The default string_distance operator for the " + " JsonEditDistanceEvaluator requires installation of " + "the rapidfuzz package. " + "Please install it with `pip install rapidfuzz`." 
+ ) + self._string_distance = rfd.DamerauLevenshtein.normalized_distance + if canonicalize is not None: + self._canonicalize = canonicalize + else: + self._canonicalize = lambda x: json.dumps( + x, + separators=(",", ":"), + sort_keys=True, # eliminate whitespace + ) + + @property + def requires_input(self) -> bool: + return False + + @property + def requires_reference(self) -> bool: + return True + + @property + def evaluation_name(self) -> str: + return "json_edit_distance" + + def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]: + if isinstance(node, str): + return parse_json_markdown(node) + return node + + def _evaluate_strings( + self, + prediction: str, + input: Optional[str] = None, + reference: Optional[str] = None, + **kwargs: Any, + ) -> dict: + parsed = self._canonicalize(self._parse_json(prediction)) + label = self._canonicalize(self._parse_json(reference)) + distance = self._string_distance(parsed, label) + return {"score": distance} diff --git a/venv/Lib/site-packages/langchain/evaluation/parsing/json_schema.py b/venv/Lib/site-packages/langchain/evaluation/parsing/json_schema.py new file mode 100644 index 00000000..ff3d3f80 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/parsing/json_schema.py @@ -0,0 +1,96 @@ +from typing import Any, Union + +from langchain_core.utils.json import parse_json_markdown + +from langchain.evaluation.schema import StringEvaluator + + +class JsonSchemaEvaluator(StringEvaluator): + """An evaluator that validates a JSON prediction against a JSON schema reference. + + This evaluator checks if a given JSON prediction conforms to the provided JSON schema. + If the prediction is valid, the score is True (no errors). Otherwise, the score is False (error occurred). + + Attributes: + requires_input (bool): Whether the evaluator requires input. + requires_reference (bool): Whether the evaluator requires reference. + evaluation_name (str): The name of the evaluation. + + Examples: + evaluator = JsonSchemaEvaluator() + result = evaluator.evaluate_strings( + prediction='{"name": "John", "age": 30}', + reference={ + "type": "object", + "properties": { + "name": {"type": "string"}, + "age": {"type": "integer"} + } + } + ) + assert result["score"] is not None + + """ # noqa: E501 + + def __init__(self, **kwargs: Any) -> None: + """Initializes the JsonSchemaEvaluator. + + Args: + kwargs: Additional keyword arguments. + + Raises: + ImportError: If the jsonschema package is not installed. + """ + super().__init__() + try: + import jsonschema # noqa: F401 + except ImportError: + raise ImportError( + "The JsonSchemaEvaluator requires the jsonschema package." + " Please install it with `pip install jsonschema`." 
+ ) + + @property + def requires_input(self) -> bool: + """Returns whether the evaluator requires input.""" + return False + + @property + def requires_reference(self) -> bool: + """Returns whether the evaluator requires reference.""" + return True + + @property + def evaluation_name(self) -> str: + """Returns the name of the evaluation.""" + return "json_schema_validation" + + def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]: + if isinstance(node, str): + return parse_json_markdown(node) + elif hasattr(node, "schema") and callable(getattr(node, "schema")): + # Pydantic model + return getattr(node, "schema")() + return node + + def _validate(self, prediction: Any, schema: Any) -> dict: + from jsonschema import ValidationError, validate + + try: + validate(instance=prediction, schema=schema) + return { + "score": True, + } + except ValidationError as e: + return {"score": False, "reasoning": repr(e)} + + def _evaluate_strings( + self, + prediction: Union[str, Any], + input: Union[str, Any] = None, + reference: Union[str, Any] = None, + **kwargs: Any, + ) -> dict: + parsed_prediction = self._parse_json(prediction) + schema = self._parse_json(reference) + return self._validate(parsed_prediction, schema) diff --git a/venv/Lib/site-packages/langchain/evaluation/qa/__init__.py b/venv/Lib/site-packages/langchain/evaluation/qa/__init__.py new file mode 100644 index 00000000..4677fa1f --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/qa/__init__.py @@ -0,0 +1,10 @@ +"""Chains and utils related to evaluating question answering functionality.""" + +from langchain.evaluation.qa.eval_chain import ( + ContextQAEvalChain, + CotQAEvalChain, + QAEvalChain, +) +from langchain.evaluation.qa.generate_chain import QAGenerateChain + +__all__ = ["QAEvalChain", "QAGenerateChain", "ContextQAEvalChain", "CotQAEvalChain"] diff --git a/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..5afae068 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/eval_chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/eval_chain.cpython-312.pyc new file mode 100644 index 00000000..fb8913ba Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/eval_chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/eval_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/eval_prompt.cpython-312.pyc new file mode 100644 index 00000000..2335a317 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/eval_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/generate_chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/generate_chain.cpython-312.pyc new file mode 100644 index 00000000..bb6d9c87 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/generate_chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/generate_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/generate_prompt.cpython-312.pyc new file mode 100644 index 00000000..bec8da3a Binary 
files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/qa/__pycache__/generate_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/qa/eval_chain.py b/venv/Lib/site-packages/langchain/evaluation/qa/eval_chain.py new file mode 100644 index 00000000..8a789ec9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/qa/eval_chain.py @@ -0,0 +1,357 @@ +"""LLM Chains for evaluating question answering.""" + +from __future__ import annotations + +import re +import string +from collections.abc import Sequence +from typing import Any, Optional + +from langchain_core.callbacks.manager import Callbacks +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import PromptTemplate +from pydantic import ConfigDict + +from langchain.chains.llm import LLMChain +from langchain.evaluation.qa.eval_prompt import CONTEXT_PROMPT, COT_PROMPT, PROMPT +from langchain.evaluation.schema import LLMEvalChain, StringEvaluator +from langchain.schema import RUN_KEY + + +def _get_score(text: str) -> Optional[tuple[str, int]]: + match = re.search(r"grade:\s*(correct|incorrect)", text.strip(), re.IGNORECASE) + if match: + if match.group(1).upper() == "CORRECT": + return "CORRECT", 1 + elif match.group(1).upper() == "INCORRECT": + return "INCORRECT", 0 + try: + first_word = ( + text.strip().split()[0].translate(str.maketrans("", "", string.punctuation)) + ) + if first_word.upper() == "CORRECT": + return "CORRECT", 1 + elif first_word.upper() == "INCORRECT": + return "INCORRECT", 0 + last_word = ( + text.strip() + .split()[-1] + .translate(str.maketrans("", "", string.punctuation)) + ) + if last_word.upper() == "CORRECT": + return "CORRECT", 1 + elif last_word.upper() == "INCORRECT": + return "INCORRECT", 0 + except IndexError: + pass + return None + + +def _parse_string_eval_output(text: str) -> dict: + """Parse the output text. + + Args: + text (str): The output text to parse. + + Returns: + Any: The parsed output. + """ + reasoning = text.strip() + parsed_scores = _get_score(reasoning) + if parsed_scores is None: + value, score = None, None + else: + value, score = parsed_scores + return { + "reasoning": reasoning, + "value": value, + "score": score, + } + + +class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain): + """LLM Chain for evaluating question answering.""" + + output_key: str = "results" #: :meta private: + + model_config = ConfigDict( + extra="ignore", + ) + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + @property + def evaluation_name(self) -> str: + return "correctness" + + @property + def requires_reference(self) -> bool: + return True + + @property + def requires_input(self) -> bool: + return True + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + prompt: Optional[PromptTemplate] = None, + **kwargs: Any, + ) -> QAEvalChain: + """Load QA Eval Chain from LLM. + + Args: + llm (BaseLanguageModel): the base language model to use. + + prompt (PromptTemplate): A prompt template containing the input_variables: + 'input', 'answer' and 'result' that will be used as the prompt + for evaluation. + Defaults to PROMPT. + + **kwargs: additional keyword arguments. + + Returns: + QAEvalChain: the loaded QA eval chain. 
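Before the chain classes continue, it may help to see what the verdict parsing above accepts. This sketch calls the module-private `_parse_string_eval_output` helper directly, purely for illustration; in normal use it runs behind `evaluate_strings`:

```python
from langchain.evaluation.qa.eval_chain import _parse_string_eval_output

# Regex path: "GRADE: CORRECT/INCORRECT" anywhere in the text.
print(_parse_string_eval_output("The student is right.\nGRADE: CORRECT"))
# {'reasoning': '...', 'value': 'CORRECT', 'score': 1}

# Fallback path: a bare verdict as the first (or last) word.
print(_parse_string_eval_output("INCORRECT."))
# {'reasoning': 'INCORRECT.', 'value': 'INCORRECT', 'score': 0}
```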
+ """ + prompt = prompt or PROMPT + expected_input_vars = {"query", "answer", "result"} + if expected_input_vars != set(prompt.input_variables): + raise ValueError( + f"Input variables should be {expected_input_vars}, " + f"but got {prompt.input_variables}" + ) + return cls(llm=llm, prompt=prompt, **kwargs) + + def evaluate( + self, + examples: Sequence[dict], + predictions: Sequence[dict], + question_key: str = "query", + answer_key: str = "answer", + prediction_key: str = "result", + *, + callbacks: Callbacks = None, + ) -> list[dict]: + """Evaluate question answering examples and predictions.""" + inputs = [ + { + "query": example[question_key], + "answer": example[answer_key], + "result": predictions[i][prediction_key], + } + for i, example in enumerate(examples) + ] + + return self.apply(inputs, callbacks=callbacks) + + def _prepare_output(self, result: dict) -> dict: + parsed_result = _parse_string_eval_output(result[self.output_key]) + if RUN_KEY in result: + parsed_result[RUN_KEY] = result[RUN_KEY] + return parsed_result + + def _evaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + callbacks: Callbacks = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Evaluate Chain or LLM output, based on optional input and label. + + Args: + prediction (str): the LLM or chain prediction to evaluate. + reference (Optional[str], optional): the reference label + to evaluate against. + input (Optional[str], optional): the input to consider during evaluation + callbacks (Callbacks, optional): the callbacks to use for tracing. + include_run_info (bool, optional): whether to include run info in the + returned results. + **kwargs: additional keyword arguments, including callbacks, tags, etc. + Returns: + dict: The evaluation results containing the score or value. 
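An end-to-end sketch of this method: the canned-response `FakeListLLM` from `langchain_core` stands in for a real grader model so the example runs offline, and the question/answer strings are invented:

```python
from langchain_core.language_models import FakeListLLM

from langchain.evaluation.qa.eval_chain import QAEvalChain

# Stub LLM that always emits a CORRECT verdict.
llm = FakeListLLM(responses=["GRADE: CORRECT"])
chain = QAEvalChain.from_llm(llm=llm)

result = chain.evaluate_strings(
    input="What is 2 + 2?",   # fills the {query} slot
    prediction="4",           # fills the {result} slot
    reference="4",            # fills the {answer} slot
)
print(result)  # {'reasoning': 'GRADE: CORRECT', 'value': 'CORRECT', 'score': 1}
```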
+ """ + result = self( + { + "query": input, + "answer": reference, + "result": prediction, + }, + callbacks=callbacks, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + async def _aevaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + callbacks: Callbacks = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + result = await self.acall( + inputs={"query": input, "answer": reference, "result": prediction}, + callbacks=callbacks, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + +class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain): + """LLM Chain for evaluating QA w/o GT based on context""" + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + @property + def requires_reference(self) -> bool: + """Whether the chain requires a reference string.""" + return True + + @property + def requires_input(self) -> bool: + """Whether the chain requires an input string.""" + return True + + model_config = ConfigDict( + extra="ignore", + ) + + @classmethod + def _validate_input_vars(cls, prompt: PromptTemplate) -> None: + expected_input_vars = {"query", "context", "result"} + if expected_input_vars != set(prompt.input_variables): + raise ValueError( + f"Input variables should be {expected_input_vars}, " + f"but got {prompt.input_variables}" + ) + + @property + def evaluation_name(self) -> str: + return "Contextual Accuracy" + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + prompt: Optional[PromptTemplate] = None, + **kwargs: Any, + ) -> ContextQAEvalChain: + """Load QA Eval Chain from LLM. + + Args: + llm (BaseLanguageModel): the base language model to use. + + prompt (PromptTemplate): A prompt template containing the input_variables: + 'query', 'context' and 'result' that will be used as the prompt + for evaluation. + Defaults to PROMPT. + + **kwargs: additional keyword arguments. + + Returns: + ContextQAEvalChain: the loaded QA eval chain. 
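Besides `evaluate_strings`, this class exposes the batch `evaluate` API shown below. A sketch with invented example rows and canned grades, again using a stub LLM so it runs offline:

```python
from langchain_core.language_models import FakeListLLM

from langchain.evaluation.qa.eval_chain import ContextQAEvalChain

llm = FakeListLLM(responses=["GRADE: CORRECT", "GRADE: INCORRECT"])
chain = ContextQAEvalChain.from_llm(llm=llm)

examples = [
    {"query": "Who wrote the report?", "context": "The report was written by Ada."},
    {"query": "When was it filed?", "context": "It was filed in March."},
]
predictions = [{"result": "Ada wrote it."}, {"result": "It was filed in January."}]

# Returns one raw chain output per (example, prediction) pair.
graded = chain.evaluate(examples, predictions)
```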
+ """ + prompt = prompt or CONTEXT_PROMPT + cls._validate_input_vars(prompt) + return cls(llm=llm, prompt=prompt, **kwargs) + + def evaluate( + self, + examples: list[dict], + predictions: list[dict], + question_key: str = "query", + context_key: str = "context", + prediction_key: str = "result", + *, + callbacks: Callbacks = None, + ) -> list[dict]: + """Evaluate question answering examples and predictions.""" + inputs = [ + { + "query": example[question_key], + "context": example[context_key], + "result": predictions[i][prediction_key], + } + for i, example in enumerate(examples) + ] + + return self.apply(inputs, callbacks=callbacks) + + def _prepare_output(self, result: dict) -> dict: + parsed_result = _parse_string_eval_output(result[self.output_key]) + if RUN_KEY in result: + parsed_result[RUN_KEY] = result[RUN_KEY] + return parsed_result + + def _evaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + callbacks: Callbacks = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + result = self( + { + "query": input, + "context": reference, + "result": prediction, + }, + callbacks=callbacks, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + async def _aevaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + callbacks: Callbacks = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + result = await self.acall( + inputs={"query": input, "context": reference, "result": prediction}, + callbacks=callbacks, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + +class CotQAEvalChain(ContextQAEvalChain): + """LLM Chain for evaluating QA using chain of thought reasoning.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + @property + def evaluation_name(self) -> str: + return "COT Contextual Accuracy" + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + prompt: Optional[PromptTemplate] = None, + **kwargs: Any, + ) -> CotQAEvalChain: + """Load QA Eval Chain from LLM.""" + prompt = prompt or COT_PROMPT + cls._validate_input_vars(prompt) + return cls(llm=llm, prompt=prompt, **kwargs) diff --git a/venv/Lib/site-packages/langchain/evaluation/qa/eval_prompt.py b/venv/Lib/site-packages/langchain/evaluation/qa/eval_prompt.py new file mode 100644 index 00000000..d29a7858 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/qa/eval_prompt.py @@ -0,0 +1,79 @@ +# flake8: noqa +from langchain_core.prompts import PromptTemplate + +template = """You are a teacher grading a quiz. +You are given a question, the student's answer, and the true answer, and are asked to score the student answer as either CORRECT or INCORRECT. + +Example Format: +QUESTION: question here +STUDENT ANSWER: student's answer here +TRUE ANSWER: true answer here +GRADE: CORRECT or INCORRECT here + +Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! + +QUESTION: {query} +STUDENT ANSWER: {result} +TRUE ANSWER: {answer} +GRADE:""" +PROMPT = PromptTemplate( + input_variables=["query", "result", "answer"], template=template +) + +context_template = """You are a teacher grading a quiz. 
+You are given a question, the context the question is about, and the student's answer. You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context. + +Example Format: +QUESTION: question here +CONTEXT: context the question is about here +STUDENT ANSWER: student's answer here +GRADE: CORRECT or INCORRECT here + +Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! + +QUESTION: {query} +CONTEXT: {context} +STUDENT ANSWER: {result} +GRADE:""" +CONTEXT_PROMPT = PromptTemplate( + input_variables=["query", "context", "result"], template=context_template +) + + +cot_template = """You are a teacher grading a quiz. +You are given a question, the context the question is about, and the student's answer. You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context. +Write out in a step by step manner your reasoning to be sure that your conclusion is correct. Avoid simply stating the correct answer at the outset. + +Example Format: +QUESTION: question here +CONTEXT: context the question is about here +STUDENT ANSWER: student's answer here +EXPLANATION: step by step reasoning here +GRADE: CORRECT or INCORRECT here + +Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! + +QUESTION: {query} +CONTEXT: {context} +STUDENT ANSWER: {result} +EXPLANATION:""" +COT_PROMPT = PromptTemplate( + input_variables=["query", "context", "result"], template=cot_template +) + + +template = """You are comparing a submitted answer to an expert answer on a given SQL coding question. Here is the data: +[BEGIN DATA] +*** +[Question]: {query} +*** +[Expert]: {answer} +*** +[Submission]: {result} +*** +[END DATA] +Compare the content and correctness of the submitted SQL with the expert answer. Ignore any differences in whitespace, style, or output column names. The submitted answer may either be correct or incorrect. Determine which case applies. First, explain in detail the similarities or differences between the expert answer and the submission, ignoring superficial aspects such as whitespace, style or output column names. Do not state the final answer in your initial explanation. Then, respond with either "CORRECT" or "INCORRECT" (without quotes or punctuation) on its own line. This should correspond to whether the submitted SQL and the expert answer are semantically the same or different, respectively. 
Then, repeat your final answer on a new line.""" + +SQL_PROMPT = PromptTemplate( + input_variables=["query", "answer", "result"], template=template +) diff --git a/venv/Lib/site-packages/langchain/evaluation/qa/generate_chain.py b/venv/Lib/site-packages/langchain/evaluation/qa/generate_chain.py new file mode 100644 index 00000000..94cf36d4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/qa/generate_chain.py @@ -0,0 +1,33 @@ +"""LLM Chain for generating examples for question answering.""" + +from __future__ import annotations + +from typing import Any + +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import BaseLLMOutputParser +from pydantic import Field + +from langchain.chains.llm import LLMChain +from langchain.evaluation.qa.generate_prompt import PROMPT +from langchain.output_parsers.regex import RegexParser + +_QA_OUTPUT_PARSER = RegexParser( + regex=r"QUESTION: (.*?)\n+ANSWER: (.*)", output_keys=["query", "answer"] +) + + +class QAGenerateChain(LLMChain): + """LLM Chain for generating examples for question answering.""" + + output_parser: BaseLLMOutputParser = Field(default=_QA_OUTPUT_PARSER) + output_key: str = "qa_pairs" + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + @classmethod + def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain: + """Load QA Generate Chain from LLM.""" + return cls(llm=llm, prompt=PROMPT, **kwargs) diff --git a/venv/Lib/site-packages/langchain/evaluation/qa/generate_prompt.py b/venv/Lib/site-packages/langchain/evaluation/qa/generate_prompt.py new file mode 100644 index 00000000..50dc318b --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/qa/generate_prompt.py @@ -0,0 +1,23 @@ +# flake8: noqa +from langchain.output_parsers.regex import RegexParser +from langchain_core.prompts import PromptTemplate + +template = """You are a teacher coming up with questions to ask on a quiz. +Given the following document, please generate a question and answer based on that document. + +Example Format: + +... + +QUESTION: question here +ANSWER: answer here + +These questions should be detailed and be based explicitly on information in the document. Begin! 
+ + +{doc} +""" +PROMPT = PromptTemplate( + input_variables=["doc"], + template=template, +) diff --git a/venv/Lib/site-packages/langchain/evaluation/regex_match/__init__.py b/venv/Lib/site-packages/langchain/evaluation/regex_match/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/evaluation/regex_match/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/regex_match/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..ce764e7c Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/regex_match/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/regex_match/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/regex_match/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..55a359f8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/regex_match/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/regex_match/base.py b/venv/Lib/site-packages/langchain/evaluation/regex_match/base.py new file mode 100644 index 00000000..71b9c9e5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/regex_match/base.py @@ -0,0 +1,86 @@ +import re +from typing import Any + +from langchain.evaluation.schema import StringEvaluator + + +class RegexMatchStringEvaluator(StringEvaluator): + """Compute a regex match between the prediction and the reference. + + Examples + ---------- + >>> evaluator = RegexMatchStringEvaluator(flags=re.IGNORECASE) + >>> evaluator.evaluate_strings( + prediction="Mindy is the CTO", + reference="^mindy.*cto$", + ) # This will return {'score': 1.0} due to the IGNORECASE flag + + >>> evaluator = RegexMatchStringEvaluator() + >>> evaluator.evaluate_strings( + prediction="Mindy is the CTO", + reference="^Mike.*CEO$", + ) # This will return {'score': 0.0} + + >>> evaluator.evaluate_strings( + prediction="Mindy is the CTO", + reference="^Mike.*CEO$|^Mindy.*CTO$", + ) # This will return {'score': 1.0} as the prediction matches the second pattern in the union + """ # noqa: E501 + + def __init__(self, *, flags: int = 0, **kwargs: Any): # Default is no flags + super().__init__() + self.flags = flags + + @property + def requires_input(self) -> bool: + """ + This evaluator does not require input. + """ + return False + + @property + def requires_reference(self) -> bool: + """ + This evaluator requires a reference. + """ + return True + + @property + def input_keys(self) -> list[str]: + """ + Get the input keys. + + Returns: + List[str]: The input keys. + """ + return ["reference", "prediction"] + + @property + def evaluation_name(self) -> str: + """ + Get the evaluation name. + + Returns: + str: The evaluation name. + """ + return "regex_match" + + def _evaluate_strings( # type: ignore[override] + self, + *, + prediction: str, + reference: str, + **kwargs: Any, + ) -> dict: + """ + Evaluate the regex match between the prediction and the reference. + + Args: + prediction (str): The prediction string. + reference (Optional[str], optional): The reference regex pattern. + + Returns: + dict: The evaluation results containing the score. 
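A short usage sketch for the evaluator above; the ticket string and pattern are invented. Note that the implementation uses `re.match`, which anchors at the start of the prediction, so patterns effectively match prefixes:

```python
import re

from langchain.evaluation.regex_match.base import RegexMatchStringEvaluator

evaluator = RegexMatchStringEvaluator(flags=re.IGNORECASE)
print(evaluator.evaluate_strings(
    prediction="Ticket ABC-123 closed",
    reference=r"ticket [a-z]+-\d+",
))  # {'score': 1}
```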
+ """ + match = re.match(reference, prediction, flags=self.flags) + return {"score": int(bool(match))} diff --git a/venv/Lib/site-packages/langchain/evaluation/schema.py b/venv/Lib/site-packages/langchain/evaluation/schema.py new file mode 100644 index 00000000..a03bbd78 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/schema.py @@ -0,0 +1,483 @@ +"""Interfaces to be implemented by general evaluators.""" + +from __future__ import annotations + +import logging +from abc import ABC, abstractmethod +from collections.abc import Sequence +from enum import Enum +from typing import Any, Optional, Union +from warnings import warn + +from langchain_core.agents import AgentAction +from langchain_core.language_models import BaseLanguageModel +from langchain_core.runnables.config import run_in_executor + +from langchain.chains.base import Chain + +logger = logging.getLogger(__name__) + + +class EvaluatorType(str, Enum): + """The types of the evaluators.""" + + QA = "qa" + """Question answering evaluator, which grades answers to questions + directly using an LLM.""" + COT_QA = "cot_qa" + """Chain of thought question answering evaluator, which grades + answers to questions using + chain of thought 'reasoning'.""" + CONTEXT_QA = "context_qa" + """Question answering evaluator that incorporates 'context' in the response.""" + PAIRWISE_STRING = "pairwise_string" + """The pairwise string evaluator, which predicts the preferred prediction from + between two models.""" + SCORE_STRING = "score_string" + """The scored string evaluator, which gives a score between 1 and 10 + to a prediction.""" + LABELED_PAIRWISE_STRING = "labeled_pairwise_string" + """The labeled pairwise string evaluator, which predicts the preferred prediction + from between two models based on a ground truth reference label.""" + LABELED_SCORE_STRING = "labeled_score_string" + """The labeled scored string evaluator, which gives a score between 1 and 10 + to a prediction based on a ground truth reference label.""" + AGENT_TRAJECTORY = "trajectory" + """The agent trajectory evaluator, which grades the agent's intermediate steps.""" + CRITERIA = "criteria" + """The criteria evaluator, which evaluates a model based on a + custom set of criteria without any reference labels.""" + LABELED_CRITERIA = "labeled_criteria" + """The labeled criteria evaluator, which evaluates a model based on a + custom set of criteria, with a reference label.""" + STRING_DISTANCE = "string_distance" + """Compare predictions to a reference answer using string edit distances.""" + EXACT_MATCH = "exact_match" + """Compare predictions to a reference answer using exact matching.""" + REGEX_MATCH = "regex_match" + """Compare predictions to a reference answer using regular expressions.""" + PAIRWISE_STRING_DISTANCE = "pairwise_string_distance" + """Compare predictions based on string edit distances.""" + EMBEDDING_DISTANCE = "embedding_distance" + """Compare a prediction to a reference label using embedding distance.""" + PAIRWISE_EMBEDDING_DISTANCE = "pairwise_embedding_distance" + """Compare two predictions using embedding distance.""" + JSON_VALIDITY = "json_validity" + """Check if a prediction is valid JSON.""" + JSON_EQUALITY = "json_equality" + """Check if a prediction is equal to a reference JSON.""" + JSON_EDIT_DISTANCE = "json_edit_distance" + """Compute the edit distance between two JSON strings after canonicalization.""" + JSON_SCHEMA_VALIDATION = "json_schema_validation" + """Check if a prediction is valid JSON according to a JSON schema.""" + + 
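These enum values are the keys of the `_EVALUATOR_MAP` in `loading.py` above. A sketch wiring the two together; the inputs are illustrative, and only non-LLM evaluator types are used so no credentials are needed:

```python
from langchain.evaluation.loading import load_evaluator
from langchain.evaluation.schema import EvaluatorType

# String-matching evaluators build without an LLM; LLM-backed types
# (QA, CRITERIA, ...) would instead try to construct a default ChatOpenAI.
exact = load_evaluator(EvaluatorType.EXACT_MATCH, ignore_case=True)
print(exact.evaluate_strings(prediction="Paris", reference="paris"))  # {'score': 1}

regex = load_evaluator(EvaluatorType.REGEX_MATCH)
print(regex.evaluate_strings(prediction="v2.3.1", reference=r"v\d+\.\d+\.\d+"))
# {'score': 1}
```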
+class LLMEvalChain(Chain): + """A base class for evaluators that use an LLM.""" + + @classmethod + @abstractmethod + def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> LLMEvalChain: + """Create a new evaluator from an LLM.""" + + +class _EvalArgsMixin: + """Mixin for checking evaluation arguments.""" + + @property + def requires_reference(self) -> bool: + """Whether this evaluator requires a reference label.""" + return False + + @property + def requires_input(self) -> bool: + """Whether this evaluator requires an input string.""" + return False + + @property + def _skip_input_warning(self) -> str: + """Warning to show when input is ignored.""" + return f"Ignoring input in {self.__class__.__name__}, as it is not expected." + + @property + def _skip_reference_warning(self) -> str: + """Warning to show when reference is ignored.""" + return ( + f"Ignoring reference in {self.__class__.__name__}, as it is not expected." + ) + + def _check_evaluation_args( + self, + reference: Optional[str] = None, + input: Optional[str] = None, + ) -> None: + """Check if the evaluation arguments are valid. + + Args: + reference (Optional[str], optional): The reference label. + input (Optional[str], optional): The input string. + Raises: + ValueError: If the evaluator requires an input string but none is provided, + or if the evaluator requires a reference label but none is provided. + """ + if self.requires_input and input is None: + raise ValueError(f"{self.__class__.__name__} requires an input string.") + elif input is not None and not self.requires_input: + warn(self._skip_input_warning) + if self.requires_reference and reference is None: + raise ValueError(f"{self.__class__.__name__} requires a reference string.") + elif reference is not None and not self.requires_reference: + warn(self._skip_reference_warning) + + +class StringEvaluator(_EvalArgsMixin, ABC): + """Grade, tag, or otherwise evaluate predictions relative to their inputs + and/or reference labels.""" + + @property + def evaluation_name(self) -> str: + """The name of the evaluation.""" + return self.__class__.__name__ + + @property + def requires_reference(self) -> bool: + """Whether this evaluator requires a reference label.""" + return False + + @abstractmethod + def _evaluate_strings( + self, + *, + prediction: Union[str, Any], + reference: Optional[Union[str, Any]] = None, + input: Optional[Union[str, Any]] = None, + **kwargs: Any, + ) -> dict: + """Evaluate Chain or LLM output, based on optional input and label. + + Args: + prediction (str): The LLM or chain prediction to evaluate. + reference (Optional[str], optional): The reference label to evaluate against. + input (Optional[str], optional): The input to consider during evaluation. + kwargs: Additional keyword arguments, including callbacks, tags, etc. + Returns: + dict: The evaluation results containing the score or value. + It is recommended that the dictionary contain the following keys: + - score: the score of the evaluation, if applicable. + - value: the string value of the evaluation, if applicable. + - reasoning: the reasoning for the evaluation, if applicable. + """ # noqa: E501 + + async def _aevaluate_strings( + self, + *, + prediction: Union[str, Any], + reference: Optional[Union[str, Any]] = None, + input: Optional[Union[str, Any]] = None, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate Chain or LLM output, based on optional input and label. + + Args: + prediction (str): The LLM or chain prediction to evaluate. 
+ reference (Optional[str], optional): The reference label to evaluate against. + input (Optional[str], optional): The input to consider during evaluation. + kwargs: Additional keyword arguments, including callbacks, tags, etc. + Returns: + dict: The evaluation results containing the score or value. + It is recommended that the dictionary contain the following keys: + - score: the score of the evaluation, if applicable. + - value: the string value of the evaluation, if applicable. + - reasoning: the reasoning for the evaluation, if applicable. + """ # noqa: E501 + return await run_in_executor( + None, + self._evaluate_strings, + prediction=prediction, + reference=reference, + input=input, + **kwargs, + ) + + def evaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Evaluate Chain or LLM output, based on optional input and label. + + Args: + prediction (str): The LLM or chain prediction to evaluate. + reference (Optional[str], optional): The reference label to evaluate against. + input (Optional[str], optional): The input to consider during evaluation. + kwargs: Additional keyword arguments, including callbacks, tags, etc. + Returns: + dict: The evaluation results containing the score or value. + """ # noqa: E501 + self._check_evaluation_args(reference=reference, input=input) + return self._evaluate_strings( + prediction=prediction, reference=reference, input=input, **kwargs + ) + + async def aevaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate Chain or LLM output, based on optional input and label. + + Args: + prediction (str): The LLM or chain prediction to evaluate. + reference (Optional[str], optional): The reference label to evaluate against. + input (Optional[str], optional): The input to consider during evaluation. + kwargs: Additional keyword arguments, including callbacks, tags, etc. + Returns: + dict: The evaluation results containing the score or value. + """ # noqa: E501 + self._check_evaluation_args(reference=reference, input=input) + return await self._aevaluate_strings( + prediction=prediction, reference=reference, input=input, **kwargs + ) + + +class PairwiseStringEvaluator(_EvalArgsMixin, ABC): + """Compare the output of two models (or two outputs of the same model).""" + + @abstractmethod + def _evaluate_string_pairs( + self, + *, + prediction: str, + prediction_b: str, + reference: Optional[str] = None, + input: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Evaluate the output string pairs. + + Args: + prediction (str): The output string from the first model. + prediction_b (str): The output string from the second model. + reference (Optional[str], optional): The expected output / reference string. + input (Optional[str], optional): The input string. + kwargs: Additional keyword arguments, such as callbacks and optional reference strings. + Returns: + dict: A dictionary containing the preference, scores, and/or other information. + """ # noqa: E501 + + async def _aevaluate_string_pairs( + self, + *, + prediction: str, + prediction_b: str, + reference: Optional[str] = None, + input: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate the output string pairs. + + Args: + prediction (str): The output string from the first model. + prediction_b (str): The output string from the second model. 
+ reference (Optional[str], optional): The expected output / reference string. + input (Optional[str], optional): The input string. + kwargs: Additional keyword arguments, such as callbacks and optional reference strings. + Returns: + dict: A dictionary containing the preference, scores, and/or other information. + """ # noqa: E501 + return await run_in_executor( + None, + self._evaluate_string_pairs, + prediction=prediction, + prediction_b=prediction_b, + reference=reference, + input=input, + **kwargs, + ) + + def evaluate_string_pairs( + self, + *, + prediction: str, + prediction_b: str, + reference: Optional[str] = None, + input: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Evaluate the output string pairs. + + Args: + prediction (str): The output string from the first model. + prediction_b (str): The output string from the second model. + reference (Optional[str], optional): The expected output / reference string. + input (Optional[str], optional): The input string. + kwargs: Additional keyword arguments, such as callbacks and optional reference strings. + Returns: + dict: A dictionary containing the preference, scores, and/or other information. + """ # noqa: E501 + self._check_evaluation_args(reference=reference, input=input) + return self._evaluate_string_pairs( + prediction=prediction, + prediction_b=prediction_b, + reference=reference, + input=input, + **kwargs, + ) + + async def aevaluate_string_pairs( + self, + *, + prediction: str, + prediction_b: str, + reference: Optional[str] = None, + input: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate the output string pairs. + + Args: + prediction (str): The output string from the first model. + prediction_b (str): The output string from the second model. + reference (Optional[str], optional): The expected output / reference string. + input (Optional[str], optional): The input string. + kwargs: Additional keyword arguments, such as callbacks and optional reference strings. + Returns: + dict: A dictionary containing the preference, scores, and/or other information. + """ # noqa: E501 + self._check_evaluation_args(reference=reference, input=input) + return await self._aevaluate_string_pairs( + prediction=prediction, + prediction_b=prediction_b, + reference=reference, + input=input, + **kwargs, + ) + + +class AgentTrajectoryEvaluator(_EvalArgsMixin, ABC): + """Interface for evaluating agent trajectories.""" + + @property + def requires_input(self) -> bool: + """Whether this evaluator requires an input string.""" + return True + + @abstractmethod + def _evaluate_agent_trajectory( + self, + *, + prediction: str, + agent_trajectory: Sequence[tuple[AgentAction, str]], + input: str, + reference: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Evaluate a trajectory. + + Args: + prediction (str): The final predicted response. + agent_trajectory (List[Tuple[AgentAction, str]]): + The intermediate steps forming the agent trajectory. + input (str): The input to the agent. + reference (Optional[str]): The reference answer. + + Returns: + dict: The evaluation result. + """ + + async def _aevaluate_agent_trajectory( + self, + *, + prediction: str, + agent_trajectory: Sequence[tuple[AgentAction, str]], + input: str, + reference: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate a trajectory. + + Args: + prediction (str): The final predicted response. + agent_trajectory (List[Tuple[AgentAction, str]]): + The intermediate steps forming the agent trajectory. 
+ input (str): The input to the agent. + reference (Optional[str]): The reference answer. + + Returns: + dict: The evaluation result. + """ + return await run_in_executor( + None, + self._evaluate_agent_trajectory, + prediction=prediction, + agent_trajectory=agent_trajectory, + reference=reference, + input=input, + **kwargs, + ) + + def evaluate_agent_trajectory( + self, + *, + prediction: str, + agent_trajectory: Sequence[tuple[AgentAction, str]], + input: str, + reference: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Evaluate a trajectory. + + Args: + prediction (str): The final predicted response. + agent_trajectory (List[Tuple[AgentAction, str]]): + The intermediate steps forming the agent trajectory. + input (str): The input to the agent. + reference (Optional[str]): The reference answer. + + Returns: + dict: The evaluation result. + """ + self._check_evaluation_args(reference=reference, input=input) + return self._evaluate_agent_trajectory( + prediction=prediction, + input=input, + agent_trajectory=agent_trajectory, + reference=reference, + **kwargs, + ) + + async def aevaluate_agent_trajectory( + self, + *, + prediction: str, + agent_trajectory: Sequence[tuple[AgentAction, str]], + input: str, + reference: Optional[str] = None, + **kwargs: Any, + ) -> dict: + """Asynchronously evaluate a trajectory. + + Args: + prediction (str): The final predicted response. + agent_trajectory (List[Tuple[AgentAction, str]]): + The intermediate steps forming the agent trajectory. + input (str): The input to the agent. + reference (Optional[str]): The reference answer. + + Returns: + dict: The evaluation result. + """ + self._check_evaluation_args(reference=reference, input=input) + return await self._aevaluate_agent_trajectory( + prediction=prediction, + input=input, + agent_trajectory=agent_trajectory, + reference=reference, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/evaluation/scoring/__init__.py b/venv/Lib/site-packages/langchain/evaluation/scoring/__init__.py new file mode 100644 index 00000000..29b414bc --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/scoring/__init__.py @@ -0,0 +1,31 @@ +"""Scoring evaluators. + +This module contains evaluators for scoring on a 1-10 the output of models, +be they LLMs, Chains, or otherwise. This can be based on a variety of +criteria and or a reference answer. + +Example: + >>> from langchain_community.chat_models import ChatOpenAI + >>> from langchain.evaluation.scoring import ScoreStringEvalChain + >>> llm = ChatOpenAI(temperature=0, model_name="gpt-4") + >>> chain = ScoreStringEvalChain.from_llm(llm=llm) + >>> result = chain.evaluate_strings( + ... input = "What is the chemical formula for water?", + ... prediction = "H2O", + ... reference = "The chemical formula for water is H2O.", + ... ) + >>> print(result) + # { + # "score": 8, + # "comment": "The response accurately states " + # "that the chemical formula for water is H2O." + # "However, it does not provide an explanation of what the formula means." 
+ # } +""" + +from langchain.evaluation.scoring.eval_chain import ( + LabeledScoreStringEvalChain, + ScoreStringEvalChain, +) + +__all__ = ["ScoreStringEvalChain", "LabeledScoreStringEvalChain"] diff --git a/venv/Lib/site-packages/langchain/evaluation/scoring/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/scoring/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..f30b335d Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/scoring/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/scoring/__pycache__/eval_chain.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/scoring/__pycache__/eval_chain.cpython-312.pyc new file mode 100644 index 00000000..2dadb071 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/scoring/__pycache__/eval_chain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/scoring/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/scoring/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..66166d8d Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/scoring/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/scoring/eval_chain.py b/venv/Lib/site-packages/langchain/evaluation/scoring/eval_chain.py new file mode 100644 index 00000000..a9d9e5c5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/scoring/eval_chain.py @@ -0,0 +1,468 @@ +"""Base classes for scoring the output of a model on a scale of 1-10.""" + +from __future__ import annotations + +import logging +import re +from typing import Any, Optional, Union + +from langchain_core.callbacks.manager import Callbacks +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.prompts.prompt import PromptTemplate +from pydantic import ConfigDict, Field + +from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple +from langchain.chains.llm import LLMChain +from langchain.evaluation.criteria.eval_chain import ( + CRITERIA_TYPE, + Criteria, +) +from langchain.evaluation.schema import LLMEvalChain, StringEvaluator +from langchain.evaluation.scoring.prompt import ( + CRITERIA_INSTRUCTIONS, + DEFAULT_CRITERIA, + SCORING_TEMPLATE, + SCORING_TEMPLATE_WITH_REFERENCE, +) +from langchain.schema import RUN_KEY + +logger = logging.getLogger(__name__) + +_FIND_DOUBLE_BRACKETS = re.compile(r"\[\[(.*?)\]\]") + +_SUPPORTED_CRITERIA = { + Criteria.CONCISENESS: "Is the submission concise and to the point?", + Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?", + Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?", + Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?", + Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?", + Criteria.MALICIOUSNESS: "Is the submission malicious in any way?", + Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?", + Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?", + Criteria.MISOGYNY: "Is the submission misogynistic or sexist?", + Criteria.CRIMINALITY: "Is the submission criminal in any way?", + Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?", + Criteria.DEPTH: "Does the submission demonstrate depth of 
thought?", + Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?", + Criteria.DETAIL: "Does the submission demonstrate attention to detail?", +} + + +def resolve_criteria( + criteria: Optional[Union[CRITERIA_TYPE, str, list[CRITERIA_TYPE]]], +) -> dict: + """Resolve the criteria for the pairwise evaluator. + + Args: + criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. + + Returns: + dict: The resolved criteria. + + """ + if criteria is None: + _default_criteria = [ + Criteria.HELPFULNESS, + Criteria.RELEVANCE, + Criteria.CORRECTNESS, + Criteria.DEPTH, + ] + return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria} + elif isinstance(criteria, Criteria): + criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} + elif isinstance(criteria, str): + if criteria in _SUPPORTED_CRITERIA: + criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} + else: + criteria_ = {criteria: ""} + elif isinstance(criteria, ConstitutionalPrinciple): + criteria_ = {criteria.name: criteria.critique_request} + elif isinstance(criteria, (list, tuple)): + criteria_ = { + k: v + for criterion in criteria + for k, v in resolve_criteria(criterion).items() + } + else: + if not criteria: + raise ValueError( + "Criteria cannot be empty. " + "Please provide a criterion name or a mapping of the criterion name" + " to its description." + ) + criteria_ = dict(criteria) + return criteria_ + + +class ScoreStringResultOutputParser(BaseOutputParser[dict]): + """A parser for the output of the ScoreStringEvalChain. + + Attributes: + _type (str): The type of the output parser. + + """ + + @property + def _type(self) -> str: + """Return the type of the output parser. + + Returns: + str: The type of the output parser. + + """ + return "pairwise_string_result" + + def parse(self, text: str) -> dict[str, Any]: + """Parse the output text. + + Args: + text (str): The output text to parse. + + Returns: + Dict: The parsed output. + + Raises: + ValueError: If the verdict is invalid. + + """ + match = _FIND_DOUBLE_BRACKETS.search(text) + + if match: + verdict = match.group(1) + + if not match or verdict not in list("123456789") + ["10"]: + raise ValueError( + f"Invalid output: {text}. " + "Output must contain a double bracketed string\ + with the verdict between 1 and 10." + ) + + return { + "reasoning": text, + "score": int(verdict), + } + + +class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain): + """A chain for scoring on a scale of 1-10 the output of a model. + + Attributes: + output_parser (BaseOutputParser): The output parser for the chain. + + Example: + >>> from langchain_community.chat_models import ChatOpenAI + >>> from langchain.evaluation.scoring import ScoreStringEvalChain + >>> llm = ChatOpenAI(temperature=0, model_name="gpt-4") + >>> chain = ScoreStringEvalChain.from_llm(llm=llm) + >>> result = chain.evaluate_strings( + ... input = "What is the chemical formula for water?", + ... prediction = "H2O", + ... reference = "The chemical formula for water is H2O.", + ... ) + >>> print(result) + # { + # "score": 8, + # "comment": "The response accurately states " + # "that the chemical formula for water is H2O." + # "However, it does not provide an explanation of what the formula means." 
+    #     }
+
+    """
+
+    output_key: str = "results"  #: :meta private:
+    output_parser: BaseOutputParser = Field(
+        default_factory=ScoreStringResultOutputParser
+    )
+    normalize_by: Optional[float] = None
+    """The value to normalize the score by, if specified."""
+    criterion_name: str
+    """The name of the criterion being evaluated."""
+
+    model_config = ConfigDict(
+        extra="ignore",
+    )
+
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
+    @property
+    def requires_reference(self) -> bool:
+        """Return whether the chain requires a reference.
+
+        Returns:
+            bool: True if the chain requires a reference, False otherwise.
+
+        """
+        return False
+
+    @property
+    def requires_input(self) -> bool:
+        """Return whether the chain requires an input.
+
+        Returns:
+            bool: True if the chain requires an input, False otherwise.
+
+        """
+        return True
+
+    @property
+    def evaluation_name(self) -> str:
+        """Get the name of the evaluation.
+
+        Returns
+        -------
+        str
+            The name of the evaluation.
+        """
+        return f"score_string:{self.criterion_name}"
+
+    @property
+    def _skip_reference_warning(self) -> str:
+        """Return the warning to show when reference is ignored.
+
+        Returns:
+            str: The warning to show when reference is ignored.
+
+        """
+        return (
+            f"Ignoring reference in {self.__class__.__name__}, as it is not expected."
+            "\nTo use a reference, use the LabeledScoreStringEvalChain"
+            " (EvaluatorType.LABELED_SCORE_STRING) instead."
+        )
+
+    @classmethod
+    def from_llm(
+        cls,
+        llm: BaseLanguageModel,
+        *,
+        prompt: Optional[PromptTemplate] = None,
+        criteria: Optional[Union[CRITERIA_TYPE, str]] = None,
+        normalize_by: Optional[float] = None,
+        **kwargs: Any,
+    ) -> ScoreStringEvalChain:
+        """Initialize the ScoreStringEvalChain from an LLM.
+
+        Args:
+            llm (BaseChatModel): The LLM to use (GPT-4 recommended).
+            prompt (PromptTemplate, optional): The prompt to use.
+            **kwargs (Any): Additional keyword arguments.
+
+        Returns:
+            ScoreStringEvalChain: The initialized ScoreStringEvalChain.
+
+        Raises:
+            ValueError: If the input variables are not as expected.
+
+        """
+        # Warn only when the model does not self-identify as a GPT-4 variant.
+        if not (hasattr(llm, "model_name") and llm.model_name.startswith("gpt-4")):
+            logger.warning(
+                "This chain was only tested with GPT-4. \
+Performance may be significantly worse with other models."
+            )
+
+        expected_input_vars = {"prediction", "input", "criteria"}
+        prompt_ = prompt or SCORING_TEMPLATE.partial(reference="")
+        if expected_input_vars != set(prompt_.input_variables):
+            raise ValueError(
+                f"Input variables should be {expected_input_vars}, "
+                f"but got {prompt_.input_variables}"
+            )
+        criteria_ = resolve_criteria(criteria)
+        criteria_str = "\n".join(
+            f"{k}: {v}" if v else k for k, v in criteria_.items()
+        ).strip()
+        criteria_str = (
+            CRITERIA_INSTRUCTIONS + f"{criteria_str}\n"
+            if criteria_str
+            else DEFAULT_CRITERIA
+        )
+        return cls(
+            llm=llm,
+            prompt=prompt_.partial(criteria=criteria_str),
+            normalize_by=normalize_by,
+            criterion_name="-".join(criteria_),
+            **kwargs,
+        )
+
+    def _prepare_input(
+        self,
+        prediction: str,
+        input: Optional[str],
+        reference: Optional[str],
+    ) -> dict:
+        """Prepare the input for the chain.
+
+        Args:
+            prediction (str): The output string from the model.
+            input (str, optional): The input or task string.
+            reference (str, optional): The reference string, if any.
+
+        Returns:
+            dict: The prepared input for the chain.
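For reference, a minimal standalone sketch of the double-bracket convention that `ScoreStringResultOutputParser` enforces above; the sample completion text is invented for illustration:

```python
import re

# Mirrors _FIND_DOUBLE_BRACKETS in eval_chain.py: the judge model must emit
# its verdict as "[[n]]" with n between 1 and 10.
FIND_DOUBLE_BRACKETS = re.compile(r"\[\[(.*?)\]\]")

sample = "The answer is accurate but terse. Rating: [[8]]"  # hypothetical judge output
match = FIND_DOUBLE_BRACKETS.search(sample)
if match and match.group(1) in [str(n) for n in range(1, 11)]:
    print({"reasoning": sample, "score": int(match.group(1))})  # score: 8
else:
    raise ValueError(f"Invalid output: {sample}")
```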
+ + """ + input_ = { + "prediction": prediction, + "input": input, + } + if self.requires_reference: + input_["reference"] = reference + return input_ + + def _prepare_output(self, result: dict) -> dict: + """Prepare the output.""" + parsed = result[self.output_key] + if RUN_KEY in result: + parsed[RUN_KEY] = result[RUN_KEY] + if "score" in parsed and self.normalize_by is not None: + parsed["score"] = parsed["score"] / self.normalize_by + return parsed + + def _evaluate_strings( + self, + *, + prediction: str, + input: Optional[str] = None, + reference: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Score the output string. + + Args: + prediction (str): The output string from the first model. + input (str, optional): The input or task string. + callbacks (Callbacks, optional): The callbacks to use. + reference (str, optional): The reference string, if any. + **kwargs (Any): Additional keyword arguments. + + Returns: + dict: A dictionary containing: + - reasoning: The reasoning for the preference. + - score: A score between 1 and 10. + + """ + input_ = self._prepare_input(prediction, input, reference) + result = self( + inputs=input_, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + async def _aevaluate_string_pairs( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """Asynchronously score the output string. + + Args: + prediction (str): The output string from the first model. + input (str, optional): The input or task string. + callbacks (Callbacks, optional): The callbacks to use. + reference (str, optional): The reference string, if any. + **kwargs (Any): Additional keyword arguments. + + Returns: + dict: A dictionary containing: + - reasoning: The reasoning for the preference. + - score: A score between 1 and 10. + + """ + input_ = self._prepare_input(prediction, input, reference) + result = await self.acall( + inputs=input_, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + +class LabeledScoreStringEvalChain(ScoreStringEvalChain): + """A chain for scoring the output of a model on a scale of 1-10. + + Attributes: + output_parser (BaseOutputParser): The output parser for the chain. + + """ + + @property + def requires_reference(self) -> bool: + """Return whether the chain requires a reference. + + Returns: + bool: True if the chain requires a reference, False otherwise. + + """ + return True + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + *, + prompt: Optional[PromptTemplate] = None, + criteria: Optional[Union[CRITERIA_TYPE, str]] = None, + normalize_by: Optional[float] = None, + **kwargs: Any, + ) -> LabeledScoreStringEvalChain: + """Initialize the LabeledScoreStringEvalChain from an LLM. + + Args: + llm (BaseLanguageModel): The LLM to use. + prompt (PromptTemplate, optional): The prompt to use. + criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. + normalize_by (float, optional): The value to normalize the score by. + **kwargs (Any): Additional keyword arguments. 
+
+        Returns:
+            LabeledScoreStringEvalChain: The initialized LabeledScoreStringEvalChain.
+
+        Raises:
+            ValueError: If the input variables are not as expected.
+
+        """
+        expected_input_vars = {
+            "prediction",
+            "input",
+            "reference",
+            "criteria",
+        }
+        prompt_ = prompt or SCORING_TEMPLATE_WITH_REFERENCE
+        if expected_input_vars != set(prompt_.input_variables):
+            raise ValueError(
+                f"Input variables should be {expected_input_vars}, "
+                f"but got {prompt_.input_variables}"
+            )
+        criteria_ = resolve_criteria(criteria)
+        criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items()).strip()
+        criteria_str = (
+            CRITERIA_INSTRUCTIONS + f"{criteria_str}\n"
+            if criteria_str
+            else DEFAULT_CRITERIA
+        )
+        return cls(
+            llm=llm,
+            prompt=prompt_.partial(criteria=criteria_str),
+            normalize_by=normalize_by,
+            criterion_name="-".join(criteria_),
+            **kwargs,
+        )
diff --git a/venv/Lib/site-packages/langchain/evaluation/scoring/prompt.py b/venv/Lib/site-packages/langchain/evaluation/scoring/prompt.py
new file mode 100644
index 00000000..4181c93a
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/evaluation/scoring/prompt.py
@@ -0,0 +1,54 @@
+"""Prompts for scoring the output of a model for a given question.
+
+These prompts are used to score a response and evaluate how well it follows
+the instructions and answers the question. The prompt is based on the paper from
+Zheng et al. https://arxiv.org/abs/2306.05685
+"""
+
+# flake8: noqa
+from langchain_core.prompts.chat import ChatPromptTemplate
+
+SYSTEM_MESSAGE = "You are a helpful assistant."
+
+CRITERIA_INSTRUCTIONS = (
+    "For this evaluation, you should primarily consider the following criteria:\n"
+)
+
+DEFAULT_CRITERIA = " Your evaluation \
+should consider factors such as the helpfulness, relevance, accuracy, \
+depth, creativity, and level of detail of the response."
+
+SCORING_TEMPLATE = ChatPromptTemplate.from_messages(
+    [
+        ("system", SYSTEM_MESSAGE),
+        (
+            "human",
+            '[Instruction]\nPlease act as an impartial judge \
+and evaluate the quality of the response provided by an AI \
+assistant to the user question displayed below. {criteria}Begin your evaluation \
+by providing a short explanation. Be as objective as possible. \
+After providing your explanation, you must rate the response on a scale of 1 to 10 \
+by strictly following this format: "[[rating]]", for example: "Rating: [[5]]".\n\n\
+[Question]\n{input}\n\n[The Start of Assistant\'s Answer]\n{prediction}\n\
+[The End of Assistant\'s Answer]',
+        ),
+    ]
+)
+
+SCORING_TEMPLATE_WITH_REFERENCE = ChatPromptTemplate.from_messages(
+    [
+        ("system", SYSTEM_MESSAGE),
+        (
+            "human",
+            "[Instruction]\nPlease act as an impartial judge \
+and evaluate the quality of the response provided by an AI \
+assistant to the user question displayed below. {criteria}"
+            '[Ground truth]\n{reference}\nBegin your evaluation \
+by providing a short explanation. Be as objective as possible.
\ +After providing your explanation, you must rate the response on a scale of 1 to 10 \ +by strictly following this format: "[[rating]]", for example: "Rating: [[5]]".\n\n\ +[Question]\n{input}\n\n[The Start of Assistant\'s Answer]\n{prediction}\n\ +[The End of Assistant\'s Answer]', + ), + ] +) diff --git a/venv/Lib/site-packages/langchain/evaluation/string_distance/__init__.py b/venv/Lib/site-packages/langchain/evaluation/string_distance/__init__.py new file mode 100644 index 00000000..9f14a8af --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/string_distance/__init__.py @@ -0,0 +1,13 @@ +"""String distance evaluators.""" + +from langchain.evaluation.string_distance.base import ( + PairwiseStringDistanceEvalChain, + StringDistance, + StringDistanceEvalChain, +) + +__all__ = [ + "PairwiseStringDistanceEvalChain", + "StringDistance", + "StringDistanceEvalChain", +] diff --git a/venv/Lib/site-packages/langchain/evaluation/string_distance/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/string_distance/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..83539216 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/string_distance/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/string_distance/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/evaluation/string_distance/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..17e41767 Binary files /dev/null and b/venv/Lib/site-packages/langchain/evaluation/string_distance/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/evaluation/string_distance/base.py b/venv/Lib/site-packages/langchain/evaluation/string_distance/base.py new file mode 100644 index 00000000..a2413d41 --- /dev/null +++ b/venv/Lib/site-packages/langchain/evaluation/string_distance/base.py @@ -0,0 +1,461 @@ +"""String distance evaluators based on the RapidFuzz library.""" + +from enum import Enum +from typing import Any, Callable, Optional + +from langchain_core.callbacks.manager import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, + Callbacks, +) +from langchain_core.utils import pre_init +from pydantic import Field + +from langchain.chains.base import Chain +from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator +from langchain.schema import RUN_KEY + + +def _load_rapidfuzz() -> Any: + """ + Load the RapidFuzz library. + + Raises: + ImportError: If the rapidfuzz library is not installed. + + Returns: + Any: The rapidfuzz.distance module. + """ + try: + import rapidfuzz + except ImportError: + raise ImportError( + "Please install the rapidfuzz library to use the FuzzyMatchStringEvaluator." + "Please install it with `pip install rapidfuzz`." + ) + return rapidfuzz.distance + + +class StringDistance(str, Enum): + """Distance metric to use. + + Attributes: + DAMERAU_LEVENSHTEIN: The Damerau-Levenshtein distance. + LEVENSHTEIN: The Levenshtein distance. + JARO: The Jaro distance. + JARO_WINKLER: The Jaro-Winkler distance. + HAMMING: The Hamming distance. + INDEL: The Indel distance. 
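A small usage sketch, assuming `rapidfuzz` is installed; it shows how one of the enum members above is selected when loading the evaluator:

```python
from langchain.evaluation import load_evaluator
from langchain.evaluation.string_distance import StringDistance

# Jaro-Winkler is the default metric; pass `distance` to pick another one.
evaluator = load_evaluator("string_distance", distance=StringDistance.LEVENSHTEIN)
print(evaluator.evaluate_strings(prediction="The answer is three", reference="three"))
# -> {'score': <normalized Levenshtein distance; 0.0 means identical strings>}
```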
+ """ + + DAMERAU_LEVENSHTEIN = "damerau_levenshtein" + LEVENSHTEIN = "levenshtein" + JARO = "jaro" + JARO_WINKLER = "jaro_winkler" + HAMMING = "hamming" + INDEL = "indel" + + +class _RapidFuzzChainMixin(Chain): + """Shared methods for the rapidfuzz string distance evaluators.""" + + distance: StringDistance = Field(default=StringDistance.JARO_WINKLER) + normalize_score: bool = Field(default=True) + """Whether to normalize the score to a value between 0 and 1. + Applies only to the Levenshtein and Damerau-Levenshtein distances.""" + + @pre_init + def validate_dependencies(cls, values: dict[str, Any]) -> dict[str, Any]: + """ + Validate that the rapidfuzz library is installed. + + Args: + values (Dict[str, Any]): The input values. + + Returns: + Dict[str, Any]: The validated values. + """ + _load_rapidfuzz() + return values + + @property + def output_keys(self) -> list[str]: + """ + Get the output keys. + + Returns: + List[str]: The output keys. + """ + return ["score"] + + def _prepare_output(self, result: dict[str, Any]) -> dict[str, Any]: + """ + Prepare the output dictionary. + + Args: + result (Dict[str, Any]): The evaluation results. + + Returns: + Dict[str, Any]: The prepared output dictionary. + """ + result = {"score": result["score"]} + if RUN_KEY in result: + result[RUN_KEY] = result[RUN_KEY].dict() + return result + + @staticmethod + def _get_metric(distance: str, normalize_score: bool = False) -> Callable: + """ + Get the distance metric function based on the distance type. + + Args: + distance (str): The distance type. + + Returns: + Callable: The distance metric function. + + Raises: + ValueError: If the distance metric is invalid. + """ + from rapidfuzz import distance as rf_distance + + module_map: dict[str, Any] = { + StringDistance.DAMERAU_LEVENSHTEIN: rf_distance.DamerauLevenshtein, + StringDistance.LEVENSHTEIN: rf_distance.Levenshtein, + StringDistance.JARO: rf_distance.Jaro, + StringDistance.JARO_WINKLER: rf_distance.JaroWinkler, + StringDistance.HAMMING: rf_distance.Hamming, + StringDistance.INDEL: rf_distance.Indel, + } + if distance not in module_map: + raise ValueError( + f"Invalid distance metric: {distance}" + f"\nMust be one of: {list(StringDistance)}" + ) + module = module_map[distance] + if normalize_score: + return module.normalized_distance + else: + return module.distance + + @property + def metric(self) -> Callable: + """ + Get the distance metric function. + + Returns: + Callable: The distance metric function. + """ + return _RapidFuzzChainMixin._get_metric( + self.distance, normalize_score=self.normalize_score + ) + + def compute_metric(self, a: str, b: str) -> float: + """ + Compute the distance between two strings. + + Args: + a (str): The first string. + b (str): The second string. + + Returns: + float: The distance between the two strings. + """ + return self.metric(a, b) + + +class StringDistanceEvalChain(StringEvaluator, _RapidFuzzChainMixin): + """Compute string distances between the prediction and the reference. 
+ + Examples + ---------- + + >>> from langchain.evaluation import StringDistanceEvalChain + >>> evaluator = StringDistanceEvalChain() + >>> evaluator.evaluate_strings( + prediction="Mindy is the CTO", + reference="Mindy is the CEO", + ) + + Using the `load_evaluator` function: + + >>> from langchain.evaluation import load_evaluator + >>> evaluator = load_evaluator("string_distance") + >>> evaluator.evaluate_strings( + prediction="The answer is three", + reference="three", + ) + """ + + @property + def requires_input(self) -> bool: + """ + This evaluator does not require input. + """ + return False + + @property + def requires_reference(self) -> bool: + """ + This evaluator does not require a reference. + """ + return True + + @property + def input_keys(self) -> list[str]: + """ + Get the input keys. + + Returns: + List[str]: The input keys. + """ + return ["reference", "prediction"] + + @property + def evaluation_name(self) -> str: + """ + Get the evaluation name. + + Returns: + str: The evaluation name. + """ + return f"{self.distance.value}_distance" + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """ + Compute the string distance between the prediction and the reference. + + Args: + inputs (Dict[str, Any]): The input values. + run_manager (Optional[CallbackManagerForChainRun]): + The callback manager. + + Returns: + Dict[str, Any]: The evaluation results containing the score. + """ + return {"score": self.compute_metric(inputs["reference"], inputs["prediction"])} + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """ + Asynchronously compute the string distance between the prediction + and the reference. + + Args: + inputs (Dict[str, Any]): The input values. + run_manager (Optional[AsyncCallbackManagerForChainRun]: + The callback manager. + + Returns: + Dict[str, Any]: The evaluation results containing the score. + """ + return {"score": self.compute_metric(inputs["reference"], inputs["prediction"])} + + def _evaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """ + Evaluate the string distance between the prediction and the reference. + + Args: + prediction (str): The prediction string. + reference (Optional[str], optional): The reference string. + input (Optional[str], optional): The input string. + callbacks (Callbacks, optional): The callbacks to use. + kwargs: Additional keyword arguments. + + Returns: + dict: The evaluation results containing the score. + """ + result = self( + inputs={"prediction": prediction, "reference": reference}, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + + return self._prepare_output(result) + + async def _aevaluate_strings( + self, + *, + prediction: str, + reference: Optional[str] = None, + input: Optional[str] = None, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """ + Asynchronously evaluate the string distance between the + prediction and the reference. + + Args: + prediction (str): The prediction string. 
+ reference (Optional[str], optional): The reference string. + input (Optional[str], optional): The input string. + callbacks (Callbacks, optional): The callbacks to use. + kwargs: Additional keyword arguments. + + Returns: + dict: The evaluation results containing the score. + """ + result = await self.acall( + inputs={"prediction": prediction, "reference": reference}, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + +class PairwiseStringDistanceEvalChain(PairwiseStringEvaluator, _RapidFuzzChainMixin): + """Compute string edit distances between two predictions.""" + + @property + def input_keys(self) -> list[str]: + """ + Get the input keys. + + Returns: + List[str]: The input keys. + """ + return ["prediction", "prediction_b"] + + @property + def evaluation_name(self) -> str: + """ + Get the evaluation name. + + Returns: + str: The evaluation name. + """ + return f"pairwise_{self.distance.value}_distance" + + def _call( + self, + inputs: dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """ + Compute the string distance between two predictions. + + Args: + inputs (Dict[str, Any]): The input values. + run_manager (CallbackManagerForChainRun , optional): + The callback manager. + + Returns: + Dict[str, Any]: The evaluation results containing the score. + """ + return { + "score": self.compute_metric(inputs["prediction"], inputs["prediction_b"]) + } + + async def _acall( + self, + inputs: dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """ + Asynchronously compute the string distance between two predictions. + + Args: + inputs (Dict[str, Any]): The input values. + run_manager (AsyncCallbackManagerForChainRun , optional): + The callback manager. + + Returns: + Dict[str, Any]: The evaluation results containing the score. + """ + return { + "score": self.compute_metric(inputs["prediction"], inputs["prediction_b"]) + } + + def _evaluate_string_pairs( + self, + *, + prediction: str, + prediction_b: str, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """ + Evaluate the string distance between two predictions. + + Args: + prediction (str): The first prediction string. + prediction_b (str): The second prediction string. + callbacks (Callbacks, optional): The callbacks to use. + tags (List[str], optional): Tags to apply to traces. + metadata (Dict[str, Any], optional): Metadata to apply to traces. + kwargs: Additional keyword arguments. + + Returns: + dict: The evaluation results containing the score. + """ + result = self( + inputs={"prediction": prediction, "prediction_b": prediction_b}, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) + + async def _aevaluate_string_pairs( + self, + *, + prediction: str, + prediction_b: str, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + include_run_info: bool = False, + **kwargs: Any, + ) -> dict: + """ + Asynchronously evaluate the string distance between two predictions. + + Args: + prediction (str): The first prediction string. + prediction_b (str): The second prediction string. + callbacks (Callbacks, optional): The callbacks to use. 
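A hedged usage sketch for the pairwise variant; the two predictions are invented examples, and rapidfuzz must be installed:

```python
from langchain.evaluation import load_evaluator

# PairwiseStringDistanceEvalChain compares two model outputs to each other
# rather than to a reference.
evaluator = load_evaluator("pairwise_string_distance")
print(
    evaluator.evaluate_string_pairs(
        prediction="The courier arrived on Monday.",
        prediction_b="The courier arrived on Tuesday.",
    )
)
# -> {'score': <Jaro-Winkler distance between the two predictions>}
```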
+ tags (List[str], optional): Tags to apply to traces. + metadata (Dict[str, Any], optional): Metadata to apply to traces. + kwargs: Additional keyword arguments. + + Returns: + dict: The evaluation results containing the score. + """ + result = await self.acall( + inputs={"prediction": prediction, "prediction_b": prediction_b}, + callbacks=callbacks, + tags=tags, + metadata=metadata, + include_run_info=include_run_info, + ) + return self._prepare_output(result) diff --git a/venv/Lib/site-packages/langchain/example_generator.py b/venv/Lib/site-packages/langchain/example_generator.py new file mode 100644 index 00000000..a47e90e9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/example_generator.py @@ -0,0 +1,5 @@ +"""Keep here for backwards compatibility.""" + +from langchain.chains.example_generator import generate_example + +__all__ = ["generate_example"] diff --git a/venv/Lib/site-packages/langchain/formatting.py b/venv/Lib/site-packages/langchain/formatting.py new file mode 100644 index 00000000..158f74d0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/formatting.py @@ -0,0 +1,5 @@ +"""DEPRECATED: Kept for backwards compatibility.""" + +from langchain_core.utils.formatting import StrictFormatter, formatter + +__all__ = ["StrictFormatter", "formatter"] diff --git a/venv/Lib/site-packages/langchain/globals.py b/venv/Lib/site-packages/langchain/globals.py new file mode 100644 index 00000000..f60680ce --- /dev/null +++ b/venv/Lib/site-packages/langchain/globals.py @@ -0,0 +1,179 @@ +"""Global values and configuration that apply to all of LangChain.""" + +import warnings +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from langchain_core.caches import BaseCache + + +# DO NOT USE THESE VALUES DIRECTLY! +# Use them only via `get_()` and `set_()` below, +# or else your code may behave unexpectedly with other uses of these global settings: +# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 +_verbose: bool = False +_debug: bool = False +_llm_cache: Optional["BaseCache"] = None + + +def set_verbose(value: bool) -> None: + """Set a new value for the `verbose` global setting.""" + import langchain + + # We're about to run some deprecated code, don't report warnings from it. + # The user called the correct (non-deprecated) code path and shouldn't get warnings. + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=( + "Importing verbose from langchain root module is no longer supported" + ), + ) + # N.B.: This is a workaround for an unfortunate quirk of Python's + # module-level `__getattr__()` implementation: + # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 + # + # Remove it once `langchain.verbose` is no longer supported, and once all users + # have migrated to using `set_verbose()` here. + langchain.verbose = value + + global _verbose + _verbose = value + + +def get_verbose() -> bool: + """Get the value of the `verbose` global setting.""" + import langchain + + # We're about to run some deprecated code, don't report warnings from it. + # The user called the correct (non-deprecated) code path and shouldn't get warnings. 
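A short sketch of the intended calling pattern for these globals; always go through the setters rather than assigning `langchain.verbose` directly, so the deprecated module attribute and the new global stay in sync:

```python
from langchain.globals import get_verbose, set_debug, set_verbose

set_verbose(True)   # also updates the deprecated langchain.verbose attribute
set_debug(False)
assert get_verbose() is True  # True if either the old or the new flag is set
```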
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message=(
+                "Importing verbose from langchain root module is no longer supported"
+            ),
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.verbose` is no longer supported, and once all users
+        # have migrated to using `set_verbose()` here.
+        #
+        # In the meantime, the `verbose` setting is considered True if either the old
+        # or the new value are True. This accommodates users who haven't migrated
+        # to using `set_verbose()` yet. Those users are getting deprecation warnings
+        # directing them to use `set_verbose()` when they import `langchain.verbose`.
+        old_verbose = langchain.verbose
+
+    global _verbose
+    return _verbose or old_verbose
+
+
+def set_debug(value: bool) -> None:
+    """Set a new value for the `debug` global setting."""
+    import langchain
+
+    # We're about to run some deprecated code, don't report warnings from it.
+    # The user called the correct (non-deprecated) code path and shouldn't get warnings.
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message="Importing debug from langchain root module is no longer supported",
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.debug` is no longer supported, and once all users
+        # have migrated to using `set_debug()` here.
+        langchain.debug = value
+
+    global _debug
+    _debug = value
+
+
+def get_debug() -> bool:
+    """Get the value of the `debug` global setting."""
+    import langchain
+
+    # We're about to run some deprecated code, don't report warnings from it.
+    # The user called the correct (non-deprecated) code path and shouldn't get warnings.
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message="Importing debug from langchain root module is no longer supported",
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.debug` is no longer supported, and once all users
+        # have migrated to using `set_debug()` here.
+        #
+        # In the meantime, the `debug` setting is considered True if either the old
+        # or the new value are True. This accommodates users who haven't migrated
+        # to using `set_debug()` yet. Those users are getting deprecation warnings
+        # directing them to use `set_debug()` when they import `langchain.debug`.
+        old_debug = langchain.debug
+
+    global _debug
+    return _debug or old_debug
+
+
+def set_llm_cache(value: Optional["BaseCache"]) -> None:
+    """Set a new LLM cache, overwriting the previous value, if any."""
+    import langchain
+
+    # We're about to run some deprecated code, don't report warnings from it.
+    # The user called the correct (non-deprecated) code path and shouldn't get warnings.
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message=(
+                "Importing llm_cache from langchain root module is no longer supported"
+            ),
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.llm_cache` is no longer supported, and
+        # once all users have migrated to using `set_llm_cache()` here.
+        langchain.llm_cache = value
+
+    global _llm_cache
+    _llm_cache = value
+
+
+def get_llm_cache() -> "BaseCache":
+    """Get the value of the `llm_cache` global setting."""
+    import langchain
+
+    # We're about to run some deprecated code, don't report warnings from it.
+    # The user called the correct (non-deprecated) code path and shouldn't get warnings.
+    with warnings.catch_warnings():
+        warnings.filterwarnings(
+            "ignore",
+            message=(
+                "Importing llm_cache from langchain root module is no longer supported"
+            ),
+        )
+        # N.B.: This is a workaround for an unfortunate quirk of Python's
+        # module-level `__getattr__()` implementation:
+        # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
+        #
+        # Remove it once `langchain.llm_cache` is no longer supported, and
+        # once all users have migrated to using `set_llm_cache()` here.
+        #
+        # In the meantime, the `llm_cache` setting returns whichever of
+        # its two backing sources is truthy (not `None` and non-empty),
+        # or the old value if both are falsy. This accommodates users
+        # who haven't migrated to using `set_llm_cache()` yet.
+        # Those users are getting deprecation warnings directing them
+        # to use `set_llm_cache()` when they import `langchain.llm_cache`.
+        old_llm_cache = langchain.llm_cache
+
+    global _llm_cache
+    return _llm_cache or old_llm_cache
diff --git a/venv/Lib/site-packages/langchain/graphs/__init__.py b/venv/Lib/site-packages/langchain/graphs/__init__.py
new file mode 100644
index 00000000..58210016
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/graphs/__init__.py
@@ -0,0 +1,57 @@
+"""**Graphs** provide a natural language interface to graph databases."""
+
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.graphs import (
+        ArangoGraph,
+        FalkorDBGraph,
+        HugeGraph,
+        KuzuGraph,
+        MemgraphGraph,
+        NebulaGraph,
+        Neo4jGraph,
+        NeptuneGraph,
+        NetworkxEntityGraph,
+        RdfGraph,
+    )
+
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
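A hedged usage sketch for the cache setter defined just above; `InMemoryCache` from `langchain_core` is used here to keep the example dependency-free:

```python
from langchain_core.caches import InMemoryCache
from langchain.globals import get_llm_cache, set_llm_cache

# Install a process-wide LLM response cache; pass None to disable it again.
set_llm_cache(InMemoryCache())
assert get_llm_cache() is not None
```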
+DEPRECATED_LOOKUP = { + "MemgraphGraph": "langchain_community.graphs", + "NetworkxEntityGraph": "langchain_community.graphs", + "Neo4jGraph": "langchain_community.graphs", + "NebulaGraph": "langchain_community.graphs", + "NeptuneGraph": "langchain_community.graphs", + "KuzuGraph": "langchain_community.graphs", + "HugeGraph": "langchain_community.graphs", + "RdfGraph": "langchain_community.graphs", + "ArangoGraph": "langchain_community.graphs", + "FalkorDBGraph": "langchain_community.graphs", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MemgraphGraph", + "NetworkxEntityGraph", + "Neo4jGraph", + "NebulaGraph", + "NeptuneGraph", + "KuzuGraph", + "HugeGraph", + "RdfGraph", + "ArangoGraph", + "FalkorDBGraph", +] diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..ca8fff5c Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/arangodb_graph.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/arangodb_graph.cpython-312.pyc new file mode 100644 index 00000000..ea38c172 Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/arangodb_graph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/falkordb_graph.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/falkordb_graph.cpython-312.pyc new file mode 100644 index 00000000..83d7ffea Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/falkordb_graph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/graph_document.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/graph_document.cpython-312.pyc new file mode 100644 index 00000000..68bd8db2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/graph_document.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/graph_store.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/graph_store.cpython-312.pyc new file mode 100644 index 00000000..96f1cbfb Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/graph_store.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/hugegraph.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/hugegraph.cpython-312.pyc new file mode 100644 index 00000000..57000402 Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/hugegraph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/kuzu_graph.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/kuzu_graph.cpython-312.pyc new file mode 100644 index 00000000..8eab41a0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/kuzu_graph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/memgraph_graph.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/memgraph_graph.cpython-312.pyc new file mode 100644 index 00000000..422e4903 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/graphs/__pycache__/memgraph_graph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/nebula_graph.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/nebula_graph.cpython-312.pyc new file mode 100644 index 00000000..963b4b4e Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/nebula_graph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/neo4j_graph.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/neo4j_graph.cpython-312.pyc new file mode 100644 index 00000000..4299a722 Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/neo4j_graph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/neptune_graph.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/neptune_graph.cpython-312.pyc new file mode 100644 index 00000000..41dd3195 Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/neptune_graph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/networkx_graph.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/networkx_graph.cpython-312.pyc new file mode 100644 index 00000000..a57e7da7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/networkx_graph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/__pycache__/rdf_graph.cpython-312.pyc b/venv/Lib/site-packages/langchain/graphs/__pycache__/rdf_graph.cpython-312.pyc new file mode 100644 index 00000000..eff958ac Binary files /dev/null and b/venv/Lib/site-packages/langchain/graphs/__pycache__/rdf_graph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/graphs/arangodb_graph.py b/venv/Lib/site-packages/langchain/graphs/arangodb_graph.py new file mode 100644 index 00000000..bbfbf1ab --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/arangodb_graph.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs import ArangoGraph + from langchain_community.graphs.arangodb_graph import get_arangodb_client + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ArangoGraph": "langchain_community.graphs", + "get_arangodb_client": "langchain_community.graphs.arangodb_graph", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArangoGraph", + "get_arangodb_client", +] diff --git a/venv/Lib/site-packages/langchain/graphs/falkordb_graph.py b/venv/Lib/site-packages/langchain/graphs/falkordb_graph.py new file mode 100644 index 00000000..2f8edf58 --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/falkordb_graph.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs import FalkorDBGraph + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
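A sketch of what these shim modules do at import time, assuming `langchain_community` is installed; the exact warning text and category are version-dependent:

```python
import warnings

# Importing through the old path still works, but create_importer routes the
# lookup to langchain_community and emits a deprecation warning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from langchain.graphs import FalkorDBGraph  # resolved via __getattr__  # noqa: F401

print([str(w.message) for w in caught])  # typically points at langchain_community
```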
+DEPRECATED_LOOKUP = {"FalkorDBGraph": "langchain_community.graphs"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FalkorDBGraph", +] diff --git a/venv/Lib/site-packages/langchain/graphs/graph_document.py b/venv/Lib/site-packages/langchain/graphs/graph_document.py new file mode 100644 index 00000000..6848bca9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/graph_document.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs.graph_document import ( + GraphDocument, + Node, + Relationship, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "Node": "langchain_community.graphs.graph_document", + "Relationship": "langchain_community.graphs.graph_document", + "GraphDocument": "langchain_community.graphs.graph_document", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Node", + "Relationship", + "GraphDocument", +] diff --git a/venv/Lib/site-packages/langchain/graphs/graph_store.py b/venv/Lib/site-packages/langchain/graphs/graph_store.py new file mode 100644 index 00000000..8587d577 --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/graph_store.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs.graph_store import GraphStore + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GraphStore": "langchain_community.graphs.graph_store"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GraphStore", +] diff --git a/venv/Lib/site-packages/langchain/graphs/hugegraph.py b/venv/Lib/site-packages/langchain/graphs/hugegraph.py new file mode 100644 index 00000000..04435454 --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/hugegraph.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs import HugeGraph + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"HugeGraph": "langchain_community.graphs"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HugeGraph", +] diff --git a/venv/Lib/site-packages/langchain/graphs/kuzu_graph.py b/venv/Lib/site-packages/langchain/graphs/kuzu_graph.py new file mode 100644 index 00000000..f0c142ce --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/kuzu_graph.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs import KuzuGraph + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"KuzuGraph": "langchain_community.graphs"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "KuzuGraph", +] diff --git a/venv/Lib/site-packages/langchain/graphs/memgraph_graph.py b/venv/Lib/site-packages/langchain/graphs/memgraph_graph.py new file mode 100644 index 00000000..64962e69 --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/memgraph_graph.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs import MemgraphGraph + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MemgraphGraph": "langchain_community.graphs"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MemgraphGraph", +] diff --git a/venv/Lib/site-packages/langchain/graphs/nebula_graph.py b/venv/Lib/site-packages/langchain/graphs/nebula_graph.py new file mode 100644 index 00000000..7df6a266 --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/nebula_graph.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs import NebulaGraph + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NebulaGraph": "langchain_community.graphs"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NebulaGraph", +] diff --git a/venv/Lib/site-packages/langchain/graphs/neo4j_graph.py b/venv/Lib/site-packages/langchain/graphs/neo4j_graph.py new file mode 100644 index 00000000..abdff5b9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/neo4j_graph.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs import Neo4jGraph + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Neo4jGraph": "langchain_community.graphs"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Neo4jGraph", +] diff --git a/venv/Lib/site-packages/langchain/graphs/neptune_graph.py b/venv/Lib/site-packages/langchain/graphs/neptune_graph.py new file mode 100644 index 00000000..33989759 --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/neptune_graph.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs import NeptuneGraph + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NeptuneGraph": "langchain_community.graphs"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NeptuneGraph", +] diff --git a/venv/Lib/site-packages/langchain/graphs/networkx_graph.py b/venv/Lib/site-packages/langchain/graphs/networkx_graph.py new file mode 100644 index 00000000..1b360f15 --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/networkx_graph.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs import NetworkxEntityGraph + from langchain_community.graphs.networkx_graph import ( + KnowledgeTriple, + get_entities, + parse_triples, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "KnowledgeTriple": "langchain_community.graphs.networkx_graph", + "parse_triples": "langchain_community.graphs.networkx_graph", + "get_entities": "langchain_community.graphs.networkx_graph", + "NetworkxEntityGraph": "langchain_community.graphs", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "KnowledgeTriple", + "parse_triples", + "get_entities", + "NetworkxEntityGraph", +] diff --git a/venv/Lib/site-packages/langchain/graphs/rdf_graph.py b/venv/Lib/site-packages/langchain/graphs/rdf_graph.py new file mode 100644 index 00000000..7f85ffb8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/graphs/rdf_graph.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs import RdfGraph + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"RdfGraph": "langchain_community.graphs"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RdfGraph", +] diff --git a/venv/Lib/site-packages/langchain/hub.py b/venv/Lib/site-packages/langchain/hub.py new file mode 100644 index 00000000..d4323b9c --- /dev/null +++ b/venv/Lib/site-packages/langchain/hub.py @@ -0,0 +1,133 @@ +"""Interface with the LangChain Hub.""" + +from __future__ import annotations + +import json +from collections.abc import Sequence +from typing import Any, Optional + +from langchain_core.load.dump import dumps +from langchain_core.load.load import loads +from langchain_core.prompts import BasePromptTemplate + + +def _get_client( + api_key: Optional[str] = None, + api_url: Optional[str] = None, +) -> Any: + try: + from langsmith import Client as LangSmithClient + + ls_client = LangSmithClient(api_url, api_key=api_key) + if hasattr(ls_client, "push_prompt") and hasattr(ls_client, "pull_prompt"): + return ls_client + else: + from langchainhub import Client as LangChainHubClient + + return LangChainHubClient(api_url, api_key=api_key) + except ImportError: + try: + from langchainhub import Client as LangChainHubClient + + return LangChainHubClient(api_url, api_key=api_key) + except ImportError as e: + raise ImportError( + "Could not import langsmith or langchainhub (deprecated)," + "please install with `pip install langsmith`." + ) from e + + +def push( + repo_full_name: str, + object: Any, + *, + api_url: Optional[str] = None, + api_key: Optional[str] = None, + parent_commit_hash: Optional[str] = None, + new_repo_is_public: bool = False, + new_repo_description: Optional[str] = None, + readme: Optional[str] = None, + tags: Optional[Sequence[str]] = None, +) -> str: + """ + Push an object to the hub and returns the URL it can be viewed at in a browser. + + :param repo_full_name: The full name of the prompt to push to in the format of + `owner/prompt_name` or `prompt_name`. + :param object: The LangChain to serialize and push to the hub. + :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service + if you have an api key set, or a localhost instance if not. + :param api_key: The API key to use to authenticate with the LangChain Hub API. + :param parent_commit_hash: The commit hash of the parent commit to push to. Defaults + to the latest commit automatically. + :param new_repo_is_public: Whether the prompt should be public. Defaults to + False (Private by default). + :param new_repo_description: The description of the prompt. Defaults to an empty + string. 
+ """ + client = _get_client(api_key=api_key, api_url=api_url) + + # Then it's langsmith + if hasattr(client, "push_prompt"): + return client.push_prompt( + repo_full_name, + object=object, + parent_commit_hash=parent_commit_hash, + is_public=new_repo_is_public, + description=new_repo_description, + readme=readme, + tags=tags, + ) + + # Then it's langchainhub + manifest_json = dumps(object) + message = client.push( + repo_full_name, + manifest_json, + parent_commit_hash=parent_commit_hash, + new_repo_is_public=new_repo_is_public, + new_repo_description=new_repo_description, + ) + return message + + +def pull( + owner_repo_commit: str, + *, + include_model: Optional[bool] = None, + api_url: Optional[str] = None, + api_key: Optional[str] = None, +) -> Any: + """ + Pull an object from the hub and returns it as a LangChain object. + + :param owner_repo_commit: The full name of the prompt to pull from in the format of + `owner/prompt_name:commit_hash` or `owner/prompt_name` + or just `prompt_name` if it's your own prompt. + :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service + if you have an api key set, or a localhost instance if not. + :param api_key: The API key to use to authenticate with the LangChain Hub API. + """ + client = _get_client(api_key=api_key, api_url=api_url) + + # Then it's langsmith + if hasattr(client, "pull_prompt"): + response = client.pull_prompt(owner_repo_commit, include_model=include_model) + return response + + # Then it's langchainhub + if hasattr(client, "pull_repo"): + # >= 0.1.15 + res_dict = client.pull_repo(owner_repo_commit) + obj = loads(json.dumps(res_dict["manifest"])) + if isinstance(obj, BasePromptTemplate): + if obj.metadata is None: + obj.metadata = {} + obj.metadata["lc_hub_owner"] = res_dict["owner"] + obj.metadata["lc_hub_repo"] = res_dict["repo"] + obj.metadata["lc_hub_commit_hash"] = res_dict["commit_hash"] + return obj + + # Then it's < 0.1.15 langchainhub + resp: str = client.pull(owner_repo_commit) + return loads(resp) diff --git a/venv/Lib/site-packages/langchain/indexes/__init__.py b/venv/Lib/site-packages/langchain/indexes/__init__.py new file mode 100644 index 00000000..11b95a38 --- /dev/null +++ b/venv/Lib/site-packages/langchain/indexes/__init__.py @@ -0,0 +1,50 @@ +"""**Index** is used to avoid writing duplicated content +into the vectostore and to avoid over-writing content if it's unchanged. + +Indexes also : + +* Create knowledge graphs from data. + +* Support indexing workflows from LangChain data loaders to vectorstores. + +Importantly, Index keeps on working even if the content being written is derived +via a set of transformations from some source content (e.g., indexing children +documents that were derived from parent documents by chunking.) +""" + +from typing import TYPE_CHECKING, Any + +from langchain_core.indexing.api import IndexingResult, aindex, index + +from langchain._api import create_importer +from langchain.indexes._sql_record_manager import SQLRecordManager +from langchain.indexes.vectorstore import VectorstoreIndexCreator + +if TYPE_CHECKING: + from langchain_community.graphs.index_creator import GraphIndexCreator + + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {
+    "GraphIndexCreator": "langchain_community.graphs.index_creator",
+}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = [
+    # Keep sorted
+    "aindex",
+    "GraphIndexCreator",
+    "index",
+    "IndexingResult",
+    "SQLRecordManager",
+    "VectorstoreIndexCreator",
+]
diff --git a/venv/Lib/site-packages/langchain/indexes/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/indexes/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..1d40b8ea
Binary files /dev/null and b/venv/Lib/site-packages/langchain/indexes/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/indexes/__pycache__/_api.cpython-312.pyc b/venv/Lib/site-packages/langchain/indexes/__pycache__/_api.cpython-312.pyc
new file mode 100644
index 00000000..a40dd4d5
Binary files /dev/null and b/venv/Lib/site-packages/langchain/indexes/__pycache__/_api.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/indexes/__pycache__/_sql_record_manager.cpython-312.pyc b/venv/Lib/site-packages/langchain/indexes/__pycache__/_sql_record_manager.cpython-312.pyc
new file mode 100644
index 00000000..bf75c37f
Binary files /dev/null and b/venv/Lib/site-packages/langchain/indexes/__pycache__/_sql_record_manager.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/indexes/__pycache__/graph.cpython-312.pyc b/venv/Lib/site-packages/langchain/indexes/__pycache__/graph.cpython-312.pyc
new file mode 100644
index 00000000..fa7ccb5b
Binary files /dev/null and b/venv/Lib/site-packages/langchain/indexes/__pycache__/graph.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/indexes/__pycache__/vectorstore.cpython-312.pyc b/venv/Lib/site-packages/langchain/indexes/__pycache__/vectorstore.cpython-312.pyc
new file mode 100644
index 00000000..908879ce
Binary files /dev/null and b/venv/Lib/site-packages/langchain/indexes/__pycache__/vectorstore.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/indexes/_api.py b/venv/Lib/site-packages/langchain/indexes/_api.py
new file mode 100644
index 00000000..d5919af9
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/indexes/_api.py
@@ -0,0 +1,5 @@
+from langchain_core.indexing.api import _abatch, _batch, _HashedDocument
+
+# Please do not use these in your application. These are private APIs.
+# Here to avoid changing unit tests during a migration.
+__all__ = ["_HashedDocument", "_abatch", "_batch"]
diff --git a/venv/Lib/site-packages/langchain/indexes/_sql_record_manager.py b/venv/Lib/site-packages/langchain/indexes/_sql_record_manager.py
new file mode 100644
index 00000000..d18546f0
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/indexes/_sql_record_manager.py
@@ -0,0 +1,522 @@
+"""Implementation of a record management layer in SQLAlchemy.
+
+The management layer uses SQLAlchemy to track upserted records.
+
+Currently, this layer is only tested against SQLite; however, because it is
+built on SQLAlchemy, it should be adaptable to other SQL backends with
+minimal effort.
+
+* Each key is associated with an updated_at field.
+* This field is updated whenever the key is updated.
+* Keys can be listed based on the updated_at field.
+* Keys can be deleted.
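A hedged sketch of how this record manager is typically wired into the `index()` API re-exported from `langchain.indexes`; the in-memory vector store and deterministic fake embeddings are stand-ins to keep the example self-contained:

```python
from langchain.indexes import SQLRecordManager, index
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

# One namespace per (vector store, collection) pair is the usual convention.
record_manager = SQLRecordManager(
    "inmemory/demo", db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()

vector_store = InMemoryVectorStore(DeterministicFakeEmbedding(size=32))
docs = [Document(page_content="hello", metadata={"source": "a.txt"})]

# Re-running this with unchanged docs skips the write (num_skipped goes up).
print(index(docs, record_manager, vector_store, cleanup=None, source_id_key="source"))
```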
+""" + +import contextlib +import decimal +import uuid +from collections.abc import AsyncGenerator, Generator, Sequence +from typing import Any, Optional, Union + +from langchain_core.indexing import RecordManager +from sqlalchemy import ( + Column, + Float, + Index, + String, + UniqueConstraint, + and_, + create_engine, + delete, + select, + text, +) +from sqlalchemy.engine import URL, Engine +from sqlalchemy.ext.asyncio import ( + AsyncEngine, + AsyncSession, + create_async_engine, +) +from sqlalchemy.orm import Query, Session, declarative_base, sessionmaker + +try: + from sqlalchemy.ext.asyncio import async_sessionmaker +except ImportError: + # dummy for sqlalchemy < 2 + async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore[assignment,misc] + +Base = declarative_base() + + +class UpsertionRecord(Base): # type: ignore[valid-type,misc] + """Table used to keep track of when a key was last updated.""" + + # ATTENTION: + # Prior to modifying this table, please determine whether + # we should create migrations for this table to make sure + # users do not experience data loss. + __tablename__ = "upsertion_record" + + uuid = Column( + String, + index=True, + default=lambda: str(uuid.uuid4()), + primary_key=True, + nullable=False, + ) + key = Column(String, index=True) + # Using a non-normalized representation to handle `namespace` attribute. + # If the need arises, this attribute can be pulled into a separate Collection + # table at some time later. + namespace = Column(String, index=True, nullable=False) + group_id = Column(String, index=True, nullable=True) + + # The timestamp associated with the last record upsertion. + updated_at = Column(Float, index=True) + + __table_args__ = ( + UniqueConstraint("key", "namespace", name="uix_key_namespace"), + Index("ix_key_namespace", "key", "namespace"), + ) + + +class SQLRecordManager(RecordManager): + """A SQL Alchemy based implementation of the record manager.""" + + def __init__( + self, + namespace: str, + *, + engine: Optional[Union[Engine, AsyncEngine]] = None, + db_url: Union[None, str, URL] = None, + engine_kwargs: Optional[dict[str, Any]] = None, + async_mode: bool = False, + ) -> None: + """Initialize the SQLRecordManager. + + This class serves as a manager persistence layer that uses an SQL + backend to track upserted records. You should specify either a db_url + to create an engine or provide an existing engine. + + Args: + namespace: The namespace associated with this record manager. + engine: An already existing SQL Alchemy engine. + Default is None. + db_url: A database connection string used to create + an SQL Alchemy engine. Default is None. + engine_kwargs: Additional keyword arguments + to be passed when creating the engine. Default is an empty dictionary. + async_mode: Whether to create an async engine. + Driver should support async operations. + It only applies if db_url is provided. + Default is False. + + Raises: + ValueError: If both db_url and engine are provided or neither. + AssertionError: If something unexpected happens during engine configuration. 
+ """ + super().__init__(namespace=namespace) + if db_url is None and engine is None: + raise ValueError("Must specify either db_url or engine") + + if db_url is not None and engine is not None: + raise ValueError("Must specify either db_url or engine, not both") + + _engine: Union[Engine, AsyncEngine] + if db_url: + if async_mode: + _engine = create_async_engine(db_url, **(engine_kwargs or {})) + else: + _engine = create_engine(db_url, **(engine_kwargs or {})) + elif engine: + _engine = engine + + else: + raise AssertionError("Something went wrong with configuration of engine.") + + _session_factory: Union[sessionmaker[Session], async_sessionmaker[AsyncSession]] + if isinstance(_engine, AsyncEngine): + _session_factory = async_sessionmaker(bind=_engine) + else: + _session_factory = sessionmaker(bind=_engine) + + self.engine = _engine + self.dialect = _engine.dialect.name + self.session_factory = _session_factory + + def create_schema(self) -> None: + """Create the database schema.""" + if isinstance(self.engine, AsyncEngine): + raise AssertionError("This method is not supported for async engines.") + + Base.metadata.create_all(self.engine) + + async def acreate_schema(self) -> None: + """Create the database schema.""" + + if not isinstance(self.engine, AsyncEngine): + raise AssertionError("This method is not supported for sync engines.") + + async with self.engine.begin() as session: + await session.run_sync(Base.metadata.create_all) + + @contextlib.contextmanager + def _make_session(self) -> Generator[Session, None, None]: + """Create a session and close it after use.""" + + if isinstance(self.session_factory, async_sessionmaker): + raise AssertionError("This method is not supported for async engines.") + + session = self.session_factory() + try: + yield session + finally: + session.close() + + @contextlib.asynccontextmanager + async def _amake_session(self) -> AsyncGenerator[AsyncSession, None]: + """Create a session and close it after use.""" + + if not isinstance(self.session_factory, async_sessionmaker): + raise AssertionError("This method is not supported for sync engines.") + + async with self.session_factory() as session: + yield session + + def get_time(self) -> float: + """Get the current server time as a timestamp. + + Please note it's critical that time is obtained from the server since + we want a monotonic clock. + """ + with self._make_session() as session: + # * SQLite specific implementation, can be changed based on dialect. + # * For SQLite, unlike unixepoch it will work with older versions of SQLite. + # ---- + # julianday('now'): Julian day number for the current date and time. + # The Julian day is a continuous count of days, starting from a + # reference date (Julian day number 0). + # 2440587.5 - constant represents the Julian day number for January 1, 1970 + # 86400.0 - constant represents the number of seconds + # in a day (24 hours * 60 minutes * 60 seconds) + if self.dialect == "sqlite": + query = text("SELECT (julianday('now') - 2440587.5) * 86400.0;") + elif self.dialect == "postgresql": + query = text("SELECT EXTRACT (EPOCH FROM CURRENT_TIMESTAMP);") + else: + raise NotImplementedError(f"Not implemented for dialect {self.dialect}") + + dt = session.execute(query).scalar() + if isinstance(dt, decimal.Decimal): + dt = float(dt) + if not isinstance(dt, float): + raise AssertionError(f"Unexpected type for datetime: {type(dt)}") + return dt + + async def aget_time(self) -> float: + """Get the current server time as a timestamp. 
+
+        Please note it's critical that the time is obtained from the server,
+        since we want a monotonic clock.
+        """
+        async with self._amake_session() as session:
+            # * SQLite specific implementation, can be changed based on dialect.
+            # * For SQLite, julianday works with older versions of SQLite,
+            #   unlike unixepoch.
+            # ----
+            # julianday('now'): Julian day number for the current date and time.
+            # The Julian day is a continuous count of days, starting from a
+            # reference date (Julian day number 0).
+            # 2440587.5 - constant represents the Julian day number for January 1, 1970
+            # 86400.0 - constant represents the number of seconds
+            # in a day (24 hours * 60 minutes * 60 seconds)
+            if self.dialect == "sqlite":
+                query = text("SELECT (julianday('now') - 2440587.5) * 86400.0;")
+            elif self.dialect == "postgresql":
+                query = text("SELECT EXTRACT (EPOCH FROM CURRENT_TIMESTAMP);")
+            else:
+                raise NotImplementedError(f"Not implemented for dialect {self.dialect}")
+
+            dt = (await session.execute(query)).scalar_one_or_none()
+
+            if isinstance(dt, decimal.Decimal):
+                dt = float(dt)
+            if not isinstance(dt, float):
+                raise AssertionError(f"Unexpected type for datetime: {type(dt)}")
+            return dt
+
+    def update(
+        self,
+        keys: Sequence[str],
+        *,
+        group_ids: Optional[Sequence[Optional[str]]] = None,
+        time_at_least: Optional[float] = None,
+    ) -> None:
+        """Upsert records into the database."""
+        if group_ids is None:
+            group_ids = [None] * len(keys)
+
+        if len(keys) != len(group_ids):
+            raise ValueError(
+                f"Number of keys ({len(keys)}) does not match number of "
+                f"group_ids ({len(group_ids)})"
+            )
+
+        # Get the current time from the server.
+        # This makes an extra round trip to the server, which should not be a
+        # big deal if the batch size is large enough.
+        # Getting the time here helps us compare it against the time_at_least
+        # and raise an error if there is a time sync issue.
+        # Here, we're just being extra careful to minimize the chance of
+        # data loss due to incorrectly deleting records.
+        update_time = self.get_time()
+
+        if time_at_least and update_time < time_at_least:
+            # Safeguard against time sync issues
+            raise AssertionError(f"Time sync issue: {update_time} < {time_at_least}")
+
+        records_to_upsert = [
+            {
+                "key": key,
+                "namespace": self.namespace,
+                "updated_at": update_time,
+                "group_id": group_id,
+            }
+            for key, group_id in zip(keys, group_ids)
+        ]
+
+        with self._make_session() as session:
+            if self.dialect == "sqlite":
+                from sqlalchemy.dialects.sqlite import Insert as SqliteInsertType
+                from sqlalchemy.dialects.sqlite import insert as sqlite_insert
+
+                # Note: uses SQLite insert to make on_conflict_do_update work.
+                # This code needs to be generalized a bit to work with more dialects.
+                sqlite_insert_stmt: SqliteInsertType = sqlite_insert(
+                    UpsertionRecord
+                ).values(records_to_upsert)
+                stmt = sqlite_insert_stmt.on_conflict_do_update(
+                    [UpsertionRecord.key, UpsertionRecord.namespace],
+                    set_=dict(
+                        updated_at=sqlite_insert_stmt.excluded.updated_at,
+                        group_id=sqlite_insert_stmt.excluded.group_id,
+                    ),
+                )
+            elif self.dialect == "postgresql":
+                from sqlalchemy.dialects.postgresql import Insert as PgInsertType
+                from sqlalchemy.dialects.postgresql import insert as pg_insert
+
+                # Note: uses postgresql insert to make on_conflict_do_update work.
+                # This code needs to be generalized a bit to work with more dialects.
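+                # Roughly, the statement built below is (sketch):
+                #   INSERT INTO upsertion_record (key, namespace, updated_at, group_id)
+                #   VALUES (...)
+                #   ON CONFLICT ON CONSTRAINT uix_key_namespace
+                #   DO UPDATE SET updated_at = excluded.updated_at,
+                #                 group_id = excluded.group_id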
+                pg_insert_stmt: PgInsertType = pg_insert(UpsertionRecord).values(
+                    records_to_upsert
+                )
+                stmt = pg_insert_stmt.on_conflict_do_update(  # type: ignore[assignment]
+                    "uix_key_namespace",  # Name of constraint
+                    set_=dict(
+                        updated_at=pg_insert_stmt.excluded.updated_at,
+                        group_id=pg_insert_stmt.excluded.group_id,
+                    ),
+                )
+            else:
+                raise NotImplementedError(f"Unsupported dialect {self.dialect}")
+
+            session.execute(stmt)
+            session.commit()
+
+    async def aupdate(
+        self,
+        keys: Sequence[str],
+        *,
+        group_ids: Optional[Sequence[Optional[str]]] = None,
+        time_at_least: Optional[float] = None,
+    ) -> None:
+        """Upsert records into the database."""
+        if group_ids is None:
+            group_ids = [None] * len(keys)
+
+        if len(keys) != len(group_ids):
+            raise ValueError(
+                f"Number of keys ({len(keys)}) does not match number of "
+                f"group_ids ({len(group_ids)})"
+            )
+
+        # Get the current time from the server.
+        # This makes an extra round trip to the server, which should not be a
+        # big deal if the batch size is large enough.
+        # Getting the time here helps us compare it against the time_at_least
+        # and raise an error if there is a time sync issue.
+        # Here, we're just being extra careful to minimize the chance of
+        # data loss due to incorrectly deleting records.
+        update_time = await self.aget_time()
+
+        if time_at_least and update_time < time_at_least:
+            # Safeguard against time sync issues
+            raise AssertionError(f"Time sync issue: {update_time} < {time_at_least}")
+
+        records_to_upsert = [
+            {
+                "key": key,
+                "namespace": self.namespace,
+                "updated_at": update_time,
+                "group_id": group_id,
+            }
+            for key, group_id in zip(keys, group_ids)
+        ]
+
+        async with self._amake_session() as session:
+            if self.dialect == "sqlite":
+                from sqlalchemy.dialects.sqlite import Insert as SqliteInsertType
+                from sqlalchemy.dialects.sqlite import insert as sqlite_insert
+
+                # Note: uses SQLite insert to make on_conflict_do_update work.
+                # This code needs to be generalized a bit to work with more dialects.
+                sqlite_insert_stmt: SqliteInsertType = sqlite_insert(
+                    UpsertionRecord
+                ).values(records_to_upsert)
+                stmt = sqlite_insert_stmt.on_conflict_do_update(
+                    [UpsertionRecord.key, UpsertionRecord.namespace],
+                    set_=dict(
+                        updated_at=sqlite_insert_stmt.excluded.updated_at,
+                        group_id=sqlite_insert_stmt.excluded.group_id,
+                    ),
+                )
+            elif self.dialect == "postgresql":
+                from sqlalchemy.dialects.postgresql import Insert as PgInsertType
+                from sqlalchemy.dialects.postgresql import insert as pg_insert
+
+                # Note: uses postgresql insert to make on_conflict_do_update work.
+                # This code needs to be generalized a bit to work with more dialects.
+                pg_insert_stmt: PgInsertType = pg_insert(UpsertionRecord).values(
+                    records_to_upsert
+                )
+                stmt = pg_insert_stmt.on_conflict_do_update(  # type: ignore[assignment]
+                    "uix_key_namespace",  # Name of constraint
+                    set_=dict(
+                        updated_at=pg_insert_stmt.excluded.updated_at,
+                        group_id=pg_insert_stmt.excluded.group_id,
+                    ),
+                )
+            else:
+                raise NotImplementedError(f"Unsupported dialect {self.dialect}")
+
+            await session.execute(stmt)
+            await session.commit()
+
+    def exists(self, keys: Sequence[str]) -> list[bool]:
+        """Check if the given keys exist in the database."""
+        session: Session
+        with self._make_session() as session:
+            filtered_query: Query = session.query(UpsertionRecord.key).filter(
+                and_(
+                    UpsertionRecord.key.in_(keys),
+                    UpsertionRecord.namespace == self.namespace,
+                )
+            )
+            records = filtered_query.all()
+            found_keys = set(r.key for r in records)
+            return [k in found_keys for k in keys]
+
+    async def aexists(self, keys: Sequence[str]) -> list[bool]:
+        """Check if the given keys exist in the database."""
+        async with self._amake_session() as session:
+            records = (
+                (
+                    await session.execute(
+                        select(UpsertionRecord.key).where(
+                            and_(
+                                UpsertionRecord.key.in_(keys),
+                                UpsertionRecord.namespace == self.namespace,
+                            )
+                        )
+                    )
+                )
+                .scalars()
+                .all()
+            )
+            found_keys = set(records)
+            return [k in found_keys for k in keys]
+
+    def list_keys(
+        self,
+        *,
+        before: Optional[float] = None,
+        after: Optional[float] = None,
+        group_ids: Optional[Sequence[str]] = None,
+        limit: Optional[int] = None,
+    ) -> list[str]:
+        """List records in the database based on the provided date range."""
+        session: Session
+        with self._make_session() as session:
+            query: Query = session.query(UpsertionRecord).filter(
+                UpsertionRecord.namespace == self.namespace
+            )
+
+            if after:
+                query = query.filter(UpsertionRecord.updated_at > after)
+            if before:
+                query = query.filter(UpsertionRecord.updated_at < before)
+            if group_ids:
+                query = query.filter(UpsertionRecord.group_id.in_(group_ids))
+
+            if limit:
+                query = query.limit(limit)
+            records = query.all()
+            return [r.key for r in records]
+
+    async def alist_keys(
+        self,
+        *,
+        before: Optional[float] = None,
+        after: Optional[float] = None,
+        group_ids: Optional[Sequence[str]] = None,
+        limit: Optional[int] = None,
+    ) -> list[str]:
+        """List records in the database based on the provided date range."""
+        session: AsyncSession
+        async with self._amake_session() as session:
+            query: Query = select(UpsertionRecord.key).filter(  # type: ignore[assignment]
+                UpsertionRecord.namespace == self.namespace
+            )
+
+            # mypy does not recognize .all() or .filter()
+            if after:
+                query = query.filter(UpsertionRecord.updated_at > after)
+            if before:
+                query = query.filter(UpsertionRecord.updated_at < before)
+            if group_ids:
+                query = query.filter(UpsertionRecord.group_id.in_(group_ids))
+
+            if limit:
+                query = query.limit(limit)
+            records = (await session.execute(query)).scalars().all()
+            return list(records)
+
+    def delete_keys(self, keys: Sequence[str]) -> None:
+        """Delete records from the database."""
+        session: Session
+        with self._make_session() as session:
+            filtered_query: Query = session.query(UpsertionRecord).filter(
+                and_(
+                    UpsertionRecord.key.in_(keys),
+                    UpsertionRecord.namespace == self.namespace,
+                )
+            )
+
+            filtered_query.delete()
+            session.commit()
+
+    async def adelete_keys(self, keys: Sequence[str]) -> None:
+        """Delete records from the database."""
+        async with self._amake_session() as session:
+            await session.execute(
+                delete(UpsertionRecord).where(
+                    and_(
+                        UpsertionRecord.key.in_(keys),
+                        UpsertionRecord.namespace == self.namespace,
+                    )
+                )
+            )
+
+            await session.commit()
diff --git a/venv/Lib/site-packages/langchain/indexes/graph.py b/venv/Lib/site-packages/langchain/indexes/graph.py
new file mode 100644
index 00000000..e382c428
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/indexes/graph.py
@@ -0,0 +1,28 @@
+"""**Graphs** provide a natural language interface to graph databases."""
+
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.graphs.index_creator import GraphIndexCreator
+    from langchain_community.graphs.networkx_graph import NetworkxEntityGraph
+
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = {
+    "GraphIndexCreator": "langchain_community.graphs.index_creator",
+    "NetworkxEntityGraph": "langchain_community.graphs.networkx_graph",
+}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = ["GraphIndexCreator", "NetworkxEntityGraph"]
diff --git a/venv/Lib/site-packages/langchain/indexes/prompts/__init__.py b/venv/Lib/site-packages/langchain/indexes/prompts/__init__.py
new file mode 100644
index 00000000..fd8ff5d3
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/indexes/prompts/__init__.py
@@ -0,0 +1,13 @@
+"""Relevant prompts for constructing indexes."""
+
+from langchain_core._api import warn_deprecated
+
+warn_deprecated(
+    since="0.1.47",
+    message=(
+        "langchain.indexes.prompts will be removed in the future. "
+        "If you're relying on these prompts, please open an issue on "
+        "GitHub to explain your use case."
+ ), + pending=True, +) diff --git a/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..394c2a4a Binary files /dev/null and b/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/entity_extraction.cpython-312.pyc b/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/entity_extraction.cpython-312.pyc new file mode 100644 index 00000000..cbe3a206 Binary files /dev/null and b/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/entity_extraction.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/entity_summarization.cpython-312.pyc b/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/entity_summarization.cpython-312.pyc new file mode 100644 index 00000000..c0ee56ee Binary files /dev/null and b/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/entity_summarization.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/knowledge_triplet_extraction.cpython-312.pyc b/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/knowledge_triplet_extraction.cpython-312.pyc new file mode 100644 index 00000000..56eb39b9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/indexes/prompts/__pycache__/knowledge_triplet_extraction.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/indexes/prompts/entity_extraction.py b/venv/Lib/site-packages/langchain/indexes/prompts/entity_extraction.py new file mode 100644 index 00000000..416ba13e --- /dev/null +++ b/venv/Lib/site-packages/langchain/indexes/prompts/entity_extraction.py @@ -0,0 +1,40 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places. + +The conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line. + +Return the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation). + +EXAMPLE +Conversation history: +Person #1: how's it going today? +AI: "It's going great! How about you?" +Person #1: good! busy working on Langchain. lots to do. +AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?" +Last line: +Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. +Output: Langchain +END OF EXAMPLE + +EXAMPLE +Conversation history: +Person #1: how's it going today? +AI: "It's going great! How about you?" +Person #1: good! busy working on Langchain. lots to do. +AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?" +Last line: +Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Person #2. 
+Output: Langchain, Person #2 +END OF EXAMPLE + +Conversation history (for reference only): +{history} +Last line of conversation (for extraction): +Human: {input} + +Output:""" +ENTITY_EXTRACTION_PROMPT = PromptTemplate( + input_variables=["history", "input"], template=_DEFAULT_ENTITY_EXTRACTION_TEMPLATE +) diff --git a/venv/Lib/site-packages/langchain/indexes/prompts/entity_summarization.py b/venv/Lib/site-packages/langchain/indexes/prompts/entity_summarization.py new file mode 100644 index 00000000..aa8ec6ef --- /dev/null +++ b/venv/Lib/site-packages/langchain/indexes/prompts/entity_summarization.py @@ -0,0 +1,25 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +_DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE = """You are an AI assistant helping a human keep track of facts about relevant people, places, and concepts in their life. Update the summary of the provided entity in the "Entity" section based on the last line of your conversation with the human. If you are writing the summary for the first time, return a single sentence. +The update should only include facts that are relayed in the last line of conversation about the provided entity, and should only contain facts about the provided entity. + +If there is no new information about the provided entity or the information is not worth noting (not an important or relevant fact to remember long-term), return the existing summary unchanged. + +Full conversation history (for context): +{history} + +Entity to summarize: +{entity} + +Existing summary of {entity}: +{summary} + +Last line of conversation: +Human: {input} +Updated summary:""" + +ENTITY_SUMMARIZATION_PROMPT = PromptTemplate( + input_variables=["entity", "summary", "history", "input"], + template=_DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE, +) diff --git a/venv/Lib/site-packages/langchain/indexes/prompts/knowledge_triplet_extraction.py b/venv/Lib/site-packages/langchain/indexes/prompts/knowledge_triplet_extraction.py new file mode 100644 index 00000000..47f59d9d --- /dev/null +++ b/venv/Lib/site-packages/langchain/indexes/prompts/knowledge_triplet_extraction.py @@ -0,0 +1,38 @@ +# flake8: noqa + +from langchain_core.prompts.prompt import PromptTemplate + +KG_TRIPLE_DELIMITER = "<|>" + +_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = ( + "You are a networked intelligence helping a human track knowledge triples" + " about all relevant people, things, concepts, etc. and integrating" + " them with your knowledge stored within your weights" + " as well as that stored in a knowledge graph." + " Extract all of the knowledge triples from the text." + " A knowledge triple is a clause that contains a subject, a predicate," + " and an object. The subject is the entity being described," + " the predicate is the property of the subject that is being" + " described, and the object is the value of the property.\n\n" + "EXAMPLE\n" + "It's a state in the US. It's also the number 1 producer of gold in the US.\n\n" + f"Output: (Nevada, is a, state){KG_TRIPLE_DELIMITER}(Nevada, is in, US)" + f"{KG_TRIPLE_DELIMITER}(Nevada, is the number 1 producer of, gold)\n" + "END OF EXAMPLE\n\n" + "EXAMPLE\n" + "I'm going to the store.\n\n" + "Output: NONE\n" + "END OF EXAMPLE\n\n" + "EXAMPLE\n" + "Oh huh. 
I know Descartes likes to drive antique scooters and play the mandolin.\n" + f"Output: (Descartes, likes to drive, antique scooters){KG_TRIPLE_DELIMITER}(Descartes, plays, mandolin)\n" + "END OF EXAMPLE\n\n" + "EXAMPLE\n" + "{text}" + "Output:" +) + +KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT = PromptTemplate( + input_variables=["text"], + template=_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE, +) diff --git a/venv/Lib/site-packages/langchain/indexes/vectorstore.py b/venv/Lib/site-packages/langchain/indexes/vectorstore.py new file mode 100644 index 00000000..db2d8fc5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/indexes/vectorstore.py @@ -0,0 +1,253 @@ +"""Vectorstore stubs for the indexing api.""" + +from typing import Any, Optional + +from langchain_core.document_loaders import BaseLoader +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings +from langchain_core.language_models import BaseLanguageModel +from langchain_core.vectorstores import VectorStore +from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter +from pydantic import BaseModel, ConfigDict, Field + +from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain +from langchain.chains.retrieval_qa.base import RetrievalQA + + +def _get_default_text_splitter() -> TextSplitter: + """Return the default text splitter used for chunking documents.""" + return RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0) + + +class VectorStoreIndexWrapper(BaseModel): + """Wrapper around a vectorstore for easy access.""" + + vectorstore: VectorStore + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + def query( + self, + question: str, + llm: Optional[BaseLanguageModel] = None, + retriever_kwargs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> str: + """Query the vectorstore using the provided LLM. + + Args: + question: The question or prompt to query. + llm: The language model to use. Must not be None. + retriever_kwargs: Optional keyword arguments for the retriever. + **kwargs: Additional keyword arguments forwarded to the chain. + + Returns: + The result string from the RetrievalQA chain. + """ + if llm is None: + raise NotImplementedError( + "This API has been changed to require an LLM. " + "Please provide an llm to use for querying the vectorstore.\n" + "For example,\n" + "from langchain_openai import OpenAI\n" + "llm = OpenAI(temperature=0)" + ) + retriever_kwargs = retriever_kwargs or {} + chain = RetrievalQA.from_chain_type( + llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs + ) + return chain.invoke({chain.input_key: question})[chain.output_key] + + async def aquery( + self, + question: str, + llm: Optional[BaseLanguageModel] = None, + retriever_kwargs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> str: + """Asynchronously query the vectorstore using the provided LLM. + + Args: + question: The question or prompt to query. + llm: The language model to use. Must not be None. + retriever_kwargs: Optional keyword arguments for the retriever. + **kwargs: Additional keyword arguments forwarded to the chain. + + Returns: + The asynchronous result string from the RetrievalQA chain. + """ + if llm is None: + raise NotImplementedError( + "This API has been changed to require an LLM. 
" + "Please provide an llm to use for querying the vectorstore.\n" + "For example,\n" + "from langchain_openai import OpenAI\n" + "llm = OpenAI(temperature=0)" + ) + retriever_kwargs = retriever_kwargs or {} + chain = RetrievalQA.from_chain_type( + llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs + ) + return (await chain.ainvoke({chain.input_key: question}))[chain.output_key] + + def query_with_sources( + self, + question: str, + llm: Optional[BaseLanguageModel] = None, + retriever_kwargs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> dict: + """Query the vectorstore and retrieve the answer along with sources. + + Args: + question: The question or prompt to query. + llm: The language model to use. Must not be None. + retriever_kwargs: Optional keyword arguments for the retriever. + **kwargs: Additional keyword arguments forwarded to the chain. + + Returns: + A dictionary containing the answer and source documents. + """ + if llm is None: + raise NotImplementedError( + "This API has been changed to require an LLM. " + "Please provide an llm to use for querying the vectorstore.\n" + "For example,\n" + "from langchain_openai import OpenAI\n" + "llm = OpenAI(temperature=0)" + ) + retriever_kwargs = retriever_kwargs or {} + chain = RetrievalQAWithSourcesChain.from_chain_type( + llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs + ) + return chain.invoke({chain.question_key: question}) + + async def aquery_with_sources( + self, + question: str, + llm: Optional[BaseLanguageModel] = None, + retriever_kwargs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> dict: + """Asynchronously query the vectorstore and retrieve the answer and sources. + + Args: + question: The question or prompt to query. + llm: The language model to use. Must not be None. + retriever_kwargs: Optional keyword arguments for the retriever. + **kwargs: Additional keyword arguments forwarded to the chain. + + Returns: + A dictionary containing the answer and source documents. + """ + if llm is None: + raise NotImplementedError( + "This API has been changed to require an LLM. " + "Please provide an llm to use for querying the vectorstore.\n" + "For example,\n" + "from langchain_openai import OpenAI\n" + "llm = OpenAI(temperature=0)" + ) + retriever_kwargs = retriever_kwargs or {} + chain = RetrievalQAWithSourcesChain.from_chain_type( + llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs + ) + return await chain.ainvoke({chain.question_key: question}) + + +def _get_in_memory_vectorstore() -> type[VectorStore]: + """Get the InMemoryVectorStore.""" + import warnings + + try: + from langchain_community.vectorstores.inmemory import InMemoryVectorStore + except ImportError: + raise ImportError( + "Please install langchain-community to use the InMemoryVectorStore." + ) + warnings.warn( + "Using InMemoryVectorStore as the default vectorstore." + "This memory store won't persist data. 
You should explicitly" + "specify a vectorstore when using VectorstoreIndexCreator" + ) + return InMemoryVectorStore + + +class VectorstoreIndexCreator(BaseModel): + """Logic for creating indexes.""" + + vectorstore_cls: type[VectorStore] = Field( + default_factory=_get_in_memory_vectorstore + ) + embedding: Embeddings + text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter) + vectorstore_kwargs: dict = Field(default_factory=dict) + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + def from_loaders(self, loaders: list[BaseLoader]) -> VectorStoreIndexWrapper: + """Create a vectorstore index from a list of loaders. + + Args: + loaders: A list of `BaseLoader` instances to load documents. + + Returns: + A `VectorStoreIndexWrapper` containing the constructed vectorstore. + """ + docs = [] + for loader in loaders: + docs.extend(loader.load()) + return self.from_documents(docs) + + async def afrom_loaders(self, loaders: list[BaseLoader]) -> VectorStoreIndexWrapper: + """Asynchronously create a vectorstore index from a list of loaders. + + Args: + loaders: A list of `BaseLoader` instances to load documents. + + Returns: + A `VectorStoreIndexWrapper` containing the constructed vectorstore. + """ + docs = [] + for loader in loaders: + async for doc in loader.alazy_load(): + docs.append(doc) + return await self.afrom_documents(docs) + + def from_documents(self, documents: list[Document]) -> VectorStoreIndexWrapper: + """Create a vectorstore index from a list of documents. + + Args: + documents: A list of `Document` objects. + + Returns: + A `VectorStoreIndexWrapper` containing the constructed vectorstore. + """ + sub_docs = self.text_splitter.split_documents(documents) + vectorstore = self.vectorstore_cls.from_documents( + sub_docs, self.embedding, **self.vectorstore_kwargs + ) + return VectorStoreIndexWrapper(vectorstore=vectorstore) + + async def afrom_documents( + self, documents: list[Document] + ) -> VectorStoreIndexWrapper: + """Asynchronously create a vectorstore index from a list of documents. + + Args: + documents: A list of `Document` objects. + + Returns: + A `VectorStoreIndexWrapper` containing the constructed vectorstore. + """ + sub_docs = self.text_splitter.split_documents(documents) + vectorstore = await self.vectorstore_cls.afrom_documents( + sub_docs, self.embedding, **self.vectorstore_kwargs + ) + return VectorStoreIndexWrapper(vectorstore=vectorstore) diff --git a/venv/Lib/site-packages/langchain/input.py b/venv/Lib/site-packages/langchain/input.py new file mode 100644 index 00000000..91c1dbde --- /dev/null +++ b/venv/Lib/site-packages/langchain/input.py @@ -0,0 +1,15 @@ +"""DEPRECATED: Kept for backwards compatibility.""" + +from langchain_core.utils.input import ( + get_bolded_text, + get_color_mapping, + get_colored_text, + print_text, +) + +__all__ = [ + "get_bolded_text", + "get_color_mapping", + "get_colored_text", + "print_text", +] diff --git a/venv/Lib/site-packages/langchain/llms/__init__.py b/venv/Lib/site-packages/langchain/llms/__init__.py new file mode 100644 index 00000000..1666e48b --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/__init__.py @@ -0,0 +1,734 @@ +""" +**LLM** classes provide +access to the large language model (**LLM**) APIs and services. + +**Class hierarchy:** + +.. code-block:: + + BaseLanguageModel --> BaseLLM --> LLM --> # Examples: AI21, HuggingFaceHub, OpenAI + +**Main helpers:** + +.. 
code-block:: + + LLMResult, PromptValue, + CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun, + CallbackManager, AsyncCallbackManager, + AIMessage, BaseMessage +""" # noqa: E501 + +import warnings +from typing import Any, Callable + +from langchain_core._api import LangChainDeprecationWarning +from langchain_core.language_models.llms import BaseLLM + +from langchain._api.interactive_env import is_interactive_env + + +def _import_ai21() -> Any: + from langchain_community.llms.ai21 import AI21 + + return AI21 + + +def _import_aleph_alpha() -> Any: + from langchain_community.llms.aleph_alpha import AlephAlpha + + return AlephAlpha + + +def _import_amazon_api_gateway() -> Any: + from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway + + return AmazonAPIGateway + + +def _import_anthropic() -> Any: + from langchain_community.llms.anthropic import Anthropic + + return Anthropic + + +def _import_anyscale() -> Any: + from langchain_community.llms.anyscale import Anyscale + + return Anyscale + + +def _import_arcee() -> Any: + from langchain_community.llms.arcee import Arcee + + return Arcee + + +def _import_aviary() -> Any: + from langchain_community.llms.aviary import Aviary + + return Aviary + + +def _import_azureml_endpoint() -> Any: + from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint + + return AzureMLOnlineEndpoint + + +def _import_baidu_qianfan_endpoint() -> Any: + from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint + + return QianfanLLMEndpoint + + +def _import_bananadev() -> Any: + from langchain_community.llms.bananadev import Banana + + return Banana + + +def _import_baseten() -> Any: + from langchain_community.llms.baseten import Baseten + + return Baseten + + +def _import_beam() -> Any: + from langchain_community.llms.beam import Beam + + return Beam + + +def _import_bedrock() -> Any: + from langchain_community.llms.bedrock import Bedrock + + return Bedrock + + +def _import_bittensor() -> Any: + from langchain_community.llms.bittensor import NIBittensorLLM + + return NIBittensorLLM + + +def _import_cerebriumai() -> Any: + from langchain_community.llms.cerebriumai import CerebriumAI + + return CerebriumAI + + +def _import_chatglm() -> Any: + from langchain_community.llms.chatglm import ChatGLM + + return ChatGLM + + +def _import_clarifai() -> Any: + from langchain_community.llms.clarifai import Clarifai + + return Clarifai + + +def _import_cohere() -> Any: + from langchain_community.llms.cohere import Cohere + + return Cohere + + +def _import_ctransformers() -> Any: + from langchain_community.llms.ctransformers import CTransformers + + return CTransformers + + +def _import_ctranslate2() -> Any: + from langchain_community.llms.ctranslate2 import CTranslate2 + + return CTranslate2 + + +def _import_databricks() -> Any: + from langchain_community.llms.databricks import Databricks + + return Databricks + + +def _import_databricks_chat() -> Any: + from langchain_community.chat_models.databricks import ChatDatabricks + + return ChatDatabricks + + +def _import_deepinfra() -> Any: + from langchain_community.llms.deepinfra import DeepInfra + + return DeepInfra + + +def _import_deepsparse() -> Any: + from langchain_community.llms.deepsparse import DeepSparse + + return DeepSparse + + +def _import_edenai() -> Any: + from langchain_community.llms.edenai import EdenAI + + return EdenAI + + +def _import_fake() -> Any: + from langchain_core.language_models import FakeListLLM + + return FakeListLLM + + +def _import_fireworks() -> 
Any: + from langchain_community.llms.fireworks import Fireworks + + return Fireworks + + +def _import_forefrontai() -> Any: + from langchain_community.llms.forefrontai import ForefrontAI + + return ForefrontAI + + +def _import_gigachat() -> Any: + from langchain_community.llms.gigachat import GigaChat + + return GigaChat + + +def _import_google_palm() -> Any: + from langchain_community.llms.google_palm import GooglePalm + + return GooglePalm + + +def _import_gooseai() -> Any: + from langchain_community.llms.gooseai import GooseAI + + return GooseAI + + +def _import_gpt4all() -> Any: + from langchain_community.llms.gpt4all import GPT4All + + return GPT4All + + +def _import_gradient_ai() -> Any: + from langchain_community.llms.gradient_ai import GradientLLM + + return GradientLLM + + +def _import_huggingface_endpoint() -> Any: + from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint + + return HuggingFaceEndpoint + + +def _import_huggingface_hub() -> Any: + from langchain_community.llms.huggingface_hub import HuggingFaceHub + + return HuggingFaceHub + + +def _import_huggingface_pipeline() -> Any: + from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline + + return HuggingFacePipeline + + +def _import_huggingface_text_gen_inference() -> Any: + from langchain_community.llms.huggingface_text_gen_inference import ( + HuggingFaceTextGenInference, + ) + + return HuggingFaceTextGenInference + + +def _import_human() -> Any: + from langchain_community.llms.human import HumanInputLLM + + return HumanInputLLM + + +def _import_javelin_ai_gateway() -> Any: + from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway + + return JavelinAIGateway + + +def _import_koboldai() -> Any: + from langchain_community.llms.koboldai import KoboldApiLLM + + return KoboldApiLLM + + +def _import_llamacpp() -> Any: + from langchain_community.llms.llamacpp import LlamaCpp + + return LlamaCpp + + +def _import_manifest() -> Any: + from langchain_community.llms.manifest import ManifestWrapper + + return ManifestWrapper + + +def _import_minimax() -> Any: + from langchain_community.llms.minimax import Minimax + + return Minimax + + +def _import_mlflow() -> Any: + from langchain_community.llms.mlflow import Mlflow + + return Mlflow + + +def _import_mlflow_chat() -> Any: + from langchain_community.chat_models.mlflow import ChatMlflow + + return ChatMlflow + + +def _import_mlflow_ai_gateway() -> Any: + from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway + + return MlflowAIGateway + + +def _import_modal() -> Any: + from langchain_community.llms.modal import Modal + + return Modal + + +def _import_mosaicml() -> Any: + from langchain_community.llms.mosaicml import MosaicML + + return MosaicML + + +def _import_nlpcloud() -> Any: + from langchain_community.llms.nlpcloud import NLPCloud + + return NLPCloud + + +def _import_octoai_endpoint() -> Any: + from langchain_community.llms.octoai_endpoint import OctoAIEndpoint + + return OctoAIEndpoint + + +def _import_ollama() -> Any: + from langchain_community.llms.ollama import Ollama + + return Ollama + + +def _import_opaqueprompts() -> Any: + from langchain_community.llms.opaqueprompts import OpaquePrompts + + return OpaquePrompts + + +def _import_azure_openai() -> Any: + from langchain_community.llms.openai import AzureOpenAI + + return AzureOpenAI + + +def _import_openai() -> Any: + from langchain_community.llms.openai import OpenAI + + return OpenAI + + +def _import_openai_chat() -> Any: + from 
langchain_community.llms.openai import OpenAIChat + + return OpenAIChat + + +def _import_openllm() -> Any: + from langchain_community.llms.openllm import OpenLLM + + return OpenLLM + + +def _import_openlm() -> Any: + from langchain_community.llms.openlm import OpenLM + + return OpenLM + + +def _import_pai_eas_endpoint() -> Any: + from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint + + return PaiEasEndpoint + + +def _import_petals() -> Any: + from langchain_community.llms.petals import Petals + + return Petals + + +def _import_pipelineai() -> Any: + from langchain_community.llms.pipelineai import PipelineAI + + return PipelineAI + + +def _import_predibase() -> Any: + from langchain_community.llms.predibase import Predibase + + return Predibase + + +def _import_predictionguard() -> Any: + from langchain_community.llms.predictionguard import PredictionGuard + + return PredictionGuard + + +def _import_promptlayer() -> Any: + from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI + + return PromptLayerOpenAI + + +def _import_promptlayer_chat() -> Any: + from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat + + return PromptLayerOpenAIChat + + +def _import_replicate() -> Any: + from langchain_community.llms.replicate import Replicate + + return Replicate + + +def _import_rwkv() -> Any: + from langchain_community.llms.rwkv import RWKV + + return RWKV + + +def _import_sagemaker_endpoint() -> Any: + from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint + + return SagemakerEndpoint + + +def _import_self_hosted() -> Any: + from langchain_community.llms.self_hosted import SelfHostedPipeline + + return SelfHostedPipeline + + +def _import_self_hosted_hugging_face() -> Any: + from langchain_community.llms.self_hosted_hugging_face import ( + SelfHostedHuggingFaceLLM, + ) + + return SelfHostedHuggingFaceLLM + + +def _import_stochasticai() -> Any: + from langchain_community.llms.stochasticai import StochasticAI + + return StochasticAI + + +def _import_symblai_nebula() -> Any: + from langchain_community.llms.symblai_nebula import Nebula + + return Nebula + + +def _import_textgen() -> Any: + from langchain_community.llms.textgen import TextGen + + return TextGen + + +def _import_titan_takeoff() -> Any: + from langchain_community.llms.titan_takeoff import TitanTakeoff + + return TitanTakeoff + + +def _import_titan_takeoff_pro() -> Any: + from langchain_community.llms.titan_takeoff import TitanTakeoff + + return TitanTakeoff + + +def _import_together() -> Any: + from langchain_community.llms.together import Together + + return Together + + +def _import_tongyi() -> Any: + from langchain_community.llms.tongyi import Tongyi + + return Tongyi + + +def _import_vertex() -> Any: + from langchain_community.llms.vertexai import VertexAI + + return VertexAI + + +def _import_vertex_model_garden() -> Any: + from langchain_community.llms.vertexai import VertexAIModelGarden + + return VertexAIModelGarden + + +def _import_vllm() -> Any: + from langchain_community.llms.vllm import VLLM + + return VLLM + + +def _import_vllm_openai() -> Any: + from langchain_community.llms.vllm import VLLMOpenAI + + return VLLMOpenAI + + +def _import_watsonxllm() -> Any: + from langchain_community.llms.watsonxllm import WatsonxLLM + + return WatsonxLLM + + +def _import_writer() -> Any: + from langchain_community.llms.writer import Writer + + return Writer + + +def _import_xinference() -> Any: + from langchain_community.llms.xinference import Xinference + + return 
Xinference + + +def _import_yandex_gpt() -> Any: + from langchain_community.llms.yandex import YandexGPT + + return YandexGPT + + +def _import_volcengine_maas() -> Any: + from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM + + return VolcEngineMaasLLM + + +def __getattr__(name: str) -> Any: + from langchain_community import llms + + # If not in interactive env, raise warning. + if not is_interactive_env(): + warnings.warn( + "Importing LLMs from langchain is deprecated. Importing from " + "langchain will no longer be supported as of langchain==0.2.0. " + "Please import from langchain-community instead:\n\n" + f"`from langchain_community.llms import {name}`.\n\n" + "To install langchain-community run `pip install -U langchain-community`.", + category=LangChainDeprecationWarning, + ) + + if name == "type_to_cls_dict": + # for backwards compatibility + type_to_cls_dict: dict[str, type[BaseLLM]] = { + k: v() for k, v in get_type_to_cls_dict().items() + } + return type_to_cls_dict + else: + return getattr(llms, name) + + +__all__ = [ + "AI21", + "AlephAlpha", + "AmazonAPIGateway", + "Anthropic", + "Anyscale", + "Arcee", + "Aviary", + "AzureMLOnlineEndpoint", + "AzureOpenAI", + "Banana", + "Baseten", + "Beam", + "Bedrock", + "CTransformers", + "CTranslate2", + "CerebriumAI", + "ChatGLM", + "Clarifai", + "Cohere", + "Databricks", + "DeepInfra", + "DeepSparse", + "EdenAI", + "FakeListLLM", + "Fireworks", + "ForefrontAI", + "GigaChat", + "GPT4All", + "GooglePalm", + "GooseAI", + "GradientLLM", + "HuggingFaceEndpoint", + "HuggingFaceHub", + "HuggingFacePipeline", + "HuggingFaceTextGenInference", + "HumanInputLLM", + "KoboldApiLLM", + "LlamaCpp", + "TextGen", + "ManifestWrapper", + "Minimax", + "MlflowAIGateway", + "Modal", + "MosaicML", + "Nebula", + "NIBittensorLLM", + "NLPCloud", + "Ollama", + "OpenAI", + "OpenAIChat", + "OpenLLM", + "OpenLM", + "PaiEasEndpoint", + "Petals", + "PipelineAI", + "Predibase", + "PredictionGuard", + "PromptLayerOpenAI", + "PromptLayerOpenAIChat", + "OpaquePrompts", + "RWKV", + "Replicate", + "SagemakerEndpoint", + "SelfHostedHuggingFaceLLM", + "SelfHostedPipeline", + "StochasticAI", + "TitanTakeoff", + "TitanTakeoffPro", + "Tongyi", + "VertexAI", + "VertexAIModelGarden", + "VLLM", + "VLLMOpenAI", + "WatsonxLLM", + "Writer", + "OctoAIEndpoint", + "Xinference", + "JavelinAIGateway", + "QianfanLLMEndpoint", + "YandexGPT", + "VolcEngineMaasLLM", +] + + +def get_type_to_cls_dict() -> dict[str, Callable[[], type[BaseLLM]]]: + return { + "ai21": _import_ai21, + "aleph_alpha": _import_aleph_alpha, + "amazon_api_gateway": _import_amazon_api_gateway, + "amazon_bedrock": _import_bedrock, + "anthropic": _import_anthropic, + "anyscale": _import_anyscale, + "arcee": _import_arcee, + "aviary": _import_aviary, + "azure": _import_azure_openai, + "azureml_endpoint": _import_azureml_endpoint, + "bananadev": _import_bananadev, + "baseten": _import_baseten, + "beam": _import_beam, + "cerebriumai": _import_cerebriumai, + "chat_glm": _import_chatglm, + "clarifai": _import_clarifai, + "cohere": _import_cohere, + "ctransformers": _import_ctransformers, + "ctranslate2": _import_ctranslate2, + "databricks": _import_databricks, + "databricks-chat": _import_databricks_chat, + "deepinfra": _import_deepinfra, + "deepsparse": _import_deepsparse, + "edenai": _import_edenai, + "fake-list": _import_fake, + "forefrontai": _import_forefrontai, + "giga-chat-model": _import_gigachat, + "google_palm": _import_google_palm, + "gooseai": _import_gooseai, + "gradient": _import_gradient_ai, + 
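+        # Each value is a zero-argument callable, so the (potentially heavy)
+        # provider import only happens when an entry is actually requested.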
"gpt4all": _import_gpt4all, + "huggingface_endpoint": _import_huggingface_endpoint, + "huggingface_hub": _import_huggingface_hub, + "huggingface_pipeline": _import_huggingface_pipeline, + "huggingface_textgen_inference": _import_huggingface_text_gen_inference, + "human-input": _import_human, + "koboldai": _import_koboldai, + "llamacpp": _import_llamacpp, + "textgen": _import_textgen, + "minimax": _import_minimax, + "mlflow": _import_mlflow, + "mlflow-chat": _import_mlflow_chat, + "mlflow-ai-gateway": _import_mlflow_ai_gateway, + "modal": _import_modal, + "mosaic": _import_mosaicml, + "nebula": _import_symblai_nebula, + "nibittensor": _import_bittensor, + "nlpcloud": _import_nlpcloud, + "ollama": _import_ollama, + "openai": _import_openai, + "openlm": _import_openlm, + "pai_eas_endpoint": _import_pai_eas_endpoint, + "petals": _import_petals, + "pipelineai": _import_pipelineai, + "predibase": _import_predibase, + "opaqueprompts": _import_opaqueprompts, + "replicate": _import_replicate, + "rwkv": _import_rwkv, + "sagemaker_endpoint": _import_sagemaker_endpoint, + "self_hosted": _import_self_hosted, + "self_hosted_hugging_face": _import_self_hosted_hugging_face, + "stochasticai": _import_stochasticai, + "together": _import_together, + "tongyi": _import_tongyi, + "titan_takeoff": _import_titan_takeoff, + "titan_takeoff_pro": _import_titan_takeoff_pro, + "vertexai": _import_vertex, + "vertexai_model_garden": _import_vertex_model_garden, + "openllm": _import_openllm, + "openllm_client": _import_openllm, + "vllm": _import_vllm, + "vllm_openai": _import_vllm_openai, + "watsonxllm": _import_watsonxllm, + "writer": _import_writer, + "xinference": _import_xinference, + "javelin-ai-gateway": _import_javelin_ai_gateway, + "qianfan_endpoint": _import_baidu_qianfan_endpoint, + "yandex_gpt": _import_yandex_gpt, + "VolcEngineMaasLLM": _import_volcengine_maas, + } diff --git a/venv/Lib/site-packages/langchain/llms/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/llms/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..c691e87a Binary files /dev/null and b/venv/Lib/site-packages/langchain/llms/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/llms/__pycache__/ai21.cpython-312.pyc b/venv/Lib/site-packages/langchain/llms/__pycache__/ai21.cpython-312.pyc new file mode 100644 index 00000000..92a88f1b Binary files /dev/null and b/venv/Lib/site-packages/langchain/llms/__pycache__/ai21.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/llms/__pycache__/aleph_alpha.cpython-312.pyc b/venv/Lib/site-packages/langchain/llms/__pycache__/aleph_alpha.cpython-312.pyc new file mode 100644 index 00000000..f8af0887 Binary files /dev/null and b/venv/Lib/site-packages/langchain/llms/__pycache__/aleph_alpha.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/llms/__pycache__/amazon_api_gateway.cpython-312.pyc b/venv/Lib/site-packages/langchain/llms/__pycache__/amazon_api_gateway.cpython-312.pyc new file mode 100644 index 00000000..ef68373d Binary files /dev/null and b/venv/Lib/site-packages/langchain/llms/__pycache__/amazon_api_gateway.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/llms/__pycache__/anthropic.cpython-312.pyc b/venv/Lib/site-packages/langchain/llms/__pycache__/anthropic.cpython-312.pyc new file mode 100644 index 00000000..33f4ff24 Binary files /dev/null and b/venv/Lib/site-packages/langchain/llms/__pycache__/anthropic.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/llms/__pycache__/anyscale.cpython-312.pyc b/venv/Lib/site-packages/langchain/llms/__pycache__/anyscale.cpython-312.pyc new file mode 100644 index 00000000..76274cf2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/llms/__pycache__/anyscale.cpython-312.pyc differ
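The vendored modules that follow all repeat one pattern: the real imports live only under `TYPE_CHECKING` (so static type checkers still resolve the names), a `DEPRECATED_LOOKUP` table maps each public name to its new `langchain_community` home, and a module-level `__getattr__` (PEP 562) delegates to the importer returned by `langchain._api.create_importer`. A minimal sketch of the mechanism follows; it is an illustrative reimplementation of the idea, not the actual `create_importer` helper, and it assumes `langchain_community` is importable:

```python
# Illustrative reimplementation of the shim pattern used by the modules
# below; langchain's real create_importer centralizes this warning logic.
import importlib
import warnings
from typing import Any

DEPRECATED_LOOKUP = {"AI21": "langchain_community.llms"}  # name -> new module


def __getattr__(name: str) -> Any:
    """Module-level __getattr__ (PEP 562): runs only for missing attributes."""
    if name in DEPRECATED_LOOKUP:
        new_module = DEPRECATED_LOOKUP[name]
        warnings.warn(
            f"Importing {name} from {__name__!r} is deprecated; "
            f"import it from {new_module!r} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        # Lazily import the relocated name from its new home.
        return getattr(importlib.import_module(new_module), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

Because the lookup is lazy, merely importing one of these shim modules stays cheap; the optional provider package is only needed once the attribute is actually touched.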
diff --git a/venv/Lib/site-packages/langchain/llms/ai21.py b/venv/Lib/site-packages/langchain/llms/ai21.py new file mode 100644 index 00000000..7bdb2168 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/ai21.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import AI21 + from langchain_community.llms.ai21 import AI21PenaltyData + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AI21PenaltyData": "langchain_community.llms.ai21", + "AI21": "langchain_community.llms", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AI21PenaltyData", + "AI21", +] diff --git a/venv/Lib/site-packages/langchain/llms/aleph_alpha.py b/venv/Lib/site-packages/langchain/llms/aleph_alpha.py new file mode 100644 index 00000000..b6ec1858 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/aleph_alpha.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import AlephAlpha + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AlephAlpha": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AlephAlpha", +] diff --git a/venv/Lib/site-packages/langchain/llms/amazon_api_gateway.py b/venv/Lib/site-packages/langchain/llms/amazon_api_gateway.py new file mode 100644 index 00000000..341954ec --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/amazon_api_gateway.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import AmazonAPIGateway + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports.
+DEPRECATED_LOOKUP = {"AmazonAPIGateway": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AmazonAPIGateway", +] diff --git a/venv/Lib/site-packages/langchain/llms/anthropic.py b/venv/Lib/site-packages/langchain/llms/anthropic.py new file mode 100644 index 00000000..3880d5c5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/anthropic.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Anthropic + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Anthropic": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Anthropic", +] diff --git a/venv/Lib/site-packages/langchain/llms/anyscale.py b/venv/Lib/site-packages/langchain/llms/anyscale.py new file mode 100644 index 00000000..b0b8359a --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/anyscale.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Anyscale + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Anyscale": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Anyscale", +] diff --git a/venv/Lib/site-packages/langchain/llms/arcee.py b/venv/Lib/site-packages/langchain/llms/arcee.py new file mode 100644 index 00000000..723b4e13 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/arcee.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Arcee + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Arcee": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Arcee", +] diff --git a/venv/Lib/site-packages/langchain/llms/aviary.py b/venv/Lib/site-packages/langchain/llms/aviary.py new file mode 100644 index 00000000..b8d706f8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/aviary.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Aviary + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Aviary": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Aviary", +] diff --git a/venv/Lib/site-packages/langchain/llms/azureml_endpoint.py b/venv/Lib/site-packages/langchain/llms/azureml_endpoint.py new file mode 100644 index 00000000..6b47cfbb --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/azureml_endpoint.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import AzureMLOnlineEndpoint + from langchain_community.llms.azureml_endpoint import ( + AzureMLEndpointClient, + ContentFormatterBase, + CustomOpenAIContentFormatter, + DollyContentFormatter, + GPT2ContentFormatter, + HFContentFormatter, + OSSContentFormatter, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AzureMLEndpointClient": "langchain_community.llms.azureml_endpoint", + "ContentFormatterBase": "langchain_community.llms.azureml_endpoint", + "GPT2ContentFormatter": "langchain_community.llms.azureml_endpoint", + "OSSContentFormatter": "langchain_community.llms.azureml_endpoint", + "HFContentFormatter": "langchain_community.llms.azureml_endpoint", + "DollyContentFormatter": "langchain_community.llms.azureml_endpoint", + "CustomOpenAIContentFormatter": "langchain_community.llms.azureml_endpoint", + "AzureMLOnlineEndpoint": "langchain_community.llms", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureMLEndpointClient", + "ContentFormatterBase", + "GPT2ContentFormatter", + "OSSContentFormatter", + "HFContentFormatter", + "DollyContentFormatter", + "CustomOpenAIContentFormatter", + "AzureMLOnlineEndpoint", +] diff --git a/venv/Lib/site-packages/langchain/llms/baidu_qianfan_endpoint.py b/venv/Lib/site-packages/langchain/llms/baidu_qianfan_endpoint.py new file mode 100644 index 00000000..b44ba195 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/baidu_qianfan_endpoint.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import QianfanLLMEndpoint + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"QianfanLLMEndpoint": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "QianfanLLMEndpoint", +] diff --git a/venv/Lib/site-packages/langchain/llms/bananadev.py b/venv/Lib/site-packages/langchain/llms/bananadev.py new file mode 100644 index 00000000..6c4489cd --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/bananadev.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Banana + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Banana": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Banana", +] diff --git a/venv/Lib/site-packages/langchain/llms/base.py b/venv/Lib/site-packages/langchain/llms/base.py new file mode 100644 index 00000000..85bd0dd2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/base.py @@ -0,0 +1,12 @@ +# Backwards compatibility. +from langchain_core.language_models import BaseLanguageModel +from langchain_core.language_models.llms import ( + LLM, + BaseLLM, +) + +__all__ = [ + "BaseLanguageModel", + "BaseLLM", + "LLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/baseten.py b/venv/Lib/site-packages/langchain/llms/baseten.py new file mode 100644 index 00000000..983079f4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/baseten.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Baseten + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Baseten": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Baseten", +] diff --git a/venv/Lib/site-packages/langchain/llms/beam.py b/venv/Lib/site-packages/langchain/llms/beam.py new file mode 100644 index 00000000..c0c8601d --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/beam.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Beam + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Beam": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Beam", +] diff --git a/venv/Lib/site-packages/langchain/llms/bedrock.py b/venv/Lib/site-packages/langchain/llms/bedrock.py new file mode 100644 index 00000000..052bc8a7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/bedrock.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Bedrock + from langchain_community.llms.bedrock import BedrockBase + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BedrockBase": "langchain_community.llms.bedrock", + "Bedrock": "langchain_community.llms", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BedrockBase", + "Bedrock", +] diff --git a/venv/Lib/site-packages/langchain/llms/bittensor.py b/venv/Lib/site-packages/langchain/llms/bittensor.py new file mode 100644 index 00000000..6b9f295a --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/bittensor.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import NIBittensorLLM + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NIBittensorLLM": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NIBittensorLLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/cerebriumai.py b/venv/Lib/site-packages/langchain/llms/cerebriumai.py new file mode 100644 index 00000000..c9ab943b --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/cerebriumai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import CerebriumAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"CerebriumAI": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CerebriumAI", +] diff --git a/venv/Lib/site-packages/langchain/llms/chatglm.py b/venv/Lib/site-packages/langchain/llms/chatglm.py new file mode 100644 index 00000000..2879ea51 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/chatglm.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import ChatGLM + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChatGLM": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatGLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/clarifai.py b/venv/Lib/site-packages/langchain/llms/clarifai.py new file mode 100644 index 00000000..8c7c6975 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/clarifai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Clarifai + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Clarifai": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Clarifai", +] diff --git a/venv/Lib/site-packages/langchain/llms/cloudflare_workersai.py b/venv/Lib/site-packages/langchain/llms/cloudflare_workersai.py new file mode 100644 index 00000000..13b008e0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/cloudflare_workersai.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CloudflareWorkersAI": "langchain_community.llms.cloudflare_workersai" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CloudflareWorkersAI", +] diff --git a/venv/Lib/site-packages/langchain/llms/cohere.py b/venv/Lib/site-packages/langchain/llms/cohere.py new file mode 100644 index 00000000..7b069862 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/cohere.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Cohere + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Cohere": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Cohere", +] diff --git a/venv/Lib/site-packages/langchain/llms/ctransformers.py b/venv/Lib/site-packages/langchain/llms/ctransformers.py new file mode 100644 index 00000000..1d1d8b64 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/ctransformers.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import CTransformers + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"CTransformers": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CTransformers", +] diff --git a/venv/Lib/site-packages/langchain/llms/ctranslate2.py b/venv/Lib/site-packages/langchain/llms/ctranslate2.py new file mode 100644 index 00000000..ec586f1e --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/ctranslate2.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import CTranslate2 + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"CTranslate2": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CTranslate2", +] diff --git a/venv/Lib/site-packages/langchain/llms/databricks.py b/venv/Lib/site-packages/langchain/llms/databricks.py new file mode 100644 index 00000000..f9a79417 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/databricks.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Databricks + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Databricks": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Databricks", +] diff --git a/venv/Lib/site-packages/langchain/llms/deepinfra.py b/venv/Lib/site-packages/langchain/llms/deepinfra.py new file mode 100644 index 00000000..6f15884f --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/deepinfra.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import DeepInfra + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DeepInfra": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DeepInfra", +] diff --git a/venv/Lib/site-packages/langchain/llms/deepsparse.py b/venv/Lib/site-packages/langchain/llms/deepsparse.py new file mode 100644 index 00000000..6de9260c --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/deepsparse.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import DeepSparse + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DeepSparse": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DeepSparse", +] diff --git a/venv/Lib/site-packages/langchain/llms/edenai.py b/venv/Lib/site-packages/langchain/llms/edenai.py new file mode 100644 index 00000000..c57c078b --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/edenai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import EdenAI + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"EdenAI": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenAI", +] diff --git a/venv/Lib/site-packages/langchain/llms/fake.py b/venv/Lib/site-packages/langchain/llms/fake.py new file mode 100644 index 00000000..f26ccbe7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/fake.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms.fake import FakeStreamingListLLM + from langchain_core.language_models import FakeListLLM + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "FakeListLLM": "langchain_community.llms", + "FakeStreamingListLLM": "langchain_community.llms.fake", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FakeListLLM", + "FakeStreamingListLLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/fireworks.py b/venv/Lib/site-packages/langchain/llms/fireworks.py new file mode 100644 index 00000000..1b779f4a --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/fireworks.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Fireworks + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Fireworks": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Fireworks", +] diff --git a/venv/Lib/site-packages/langchain/llms/forefrontai.py b/venv/Lib/site-packages/langchain/llms/forefrontai.py new file mode 100644 index 00000000..7fafd6fa --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/forefrontai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import ForefrontAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ForefrontAI": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ForefrontAI", +] diff --git a/venv/Lib/site-packages/langchain/llms/gigachat.py b/venv/Lib/site-packages/langchain/llms/gigachat.py new file mode 100644 index 00000000..07723da3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/gigachat.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import GigaChat + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GigaChat": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GigaChat", +] diff --git a/venv/Lib/site-packages/langchain/llms/google_palm.py b/venv/Lib/site-packages/langchain/llms/google_palm.py new file mode 100644 index 00000000..946113e9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/google_palm.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import GooglePalm + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GooglePalm": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GooglePalm", +] diff --git a/venv/Lib/site-packages/langchain/llms/gooseai.py b/venv/Lib/site-packages/langchain/llms/gooseai.py new file mode 100644 index 00000000..52efd50f --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/gooseai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import GooseAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GooseAI": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GooseAI", +] diff --git a/venv/Lib/site-packages/langchain/llms/gpt4all.py b/venv/Lib/site-packages/langchain/llms/gpt4all.py new file mode 100644 index 00000000..4a89d823 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/gpt4all.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import GPT4All + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GPT4All": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GPT4All", +] diff --git a/venv/Lib/site-packages/langchain/llms/gradient_ai.py b/venv/Lib/site-packages/langchain/llms/gradient_ai.py new file mode 100644 index 00000000..83dbd686 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/gradient_ai.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import GradientLLM + from langchain_community.llms.gradient_ai import TrainResult + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "TrainResult": "langchain_community.llms.gradient_ai", + "GradientLLM": "langchain_community.llms", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TrainResult", + "GradientLLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/grammars/json.gbnf b/venv/Lib/site-packages/langchain/llms/grammars/json.gbnf new file mode 100644 index 00000000..61bd2b2e --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/grammars/json.gbnf @@ -0,0 +1,29 @@ +# Grammar for subset of JSON - doesn't support full string or number syntax + +root ::= object +value ::= object | array | string | number | boolean | "null" + +object ::= + "{" ws ( + string ":" ws value + ("," ws string ":" ws value)* + )? "}" + +array ::= + "[" ws ( + value + ("," ws value)* + )? "]" + +string ::= + "\"" ( + [^"\\] | + "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes + )* "\"" ws + +# Only plain integers currently +number ::= "-"? [0-9]+ ws +boolean ::= ("true" | "false") ws + +# Optional space: by convention, applied in this grammar after literal chars when allowed +ws ::= ([ \t\n] ws)? \ No newline at end of file diff --git a/venv/Lib/site-packages/langchain/llms/grammars/list.gbnf b/venv/Lib/site-packages/langchain/llms/grammars/list.gbnf new file mode 100644 index 00000000..30ea6e0c --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/grammars/list.gbnf @@ -0,0 +1,14 @@ +root ::= "[" items "]" EOF + +items ::= item ("," ws* item)* + +item ::= string + +string ::= + "\"" word (ws+ word)* "\"" ws* + +word ::= [a-zA-Z]+ + +ws ::= " " + +EOF ::= "\n" \ No newline at end of file diff --git a/venv/Lib/site-packages/langchain/llms/huggingface_endpoint.py b/venv/Lib/site-packages/langchain/llms/huggingface_endpoint.py new file mode 100644 index 00000000..2faa5c32 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/huggingface_endpoint.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import HuggingFaceEndpoint + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"HuggingFaceEndpoint": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HuggingFaceEndpoint", +] diff --git a/venv/Lib/site-packages/langchain/llms/huggingface_hub.py b/venv/Lib/site-packages/langchain/llms/huggingface_hub.py new file mode 100644 index 00000000..1e2d9f5e --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/huggingface_hub.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import HuggingFaceHub + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"HuggingFaceHub": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HuggingFaceHub", +] diff --git a/venv/Lib/site-packages/langchain/llms/huggingface_pipeline.py b/venv/Lib/site-packages/langchain/llms/huggingface_pipeline.py new file mode 100644 index 00000000..1919595c --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/huggingface_pipeline.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import HuggingFacePipeline + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"HuggingFacePipeline": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HuggingFacePipeline", +] diff --git a/venv/Lib/site-packages/langchain/llms/huggingface_text_gen_inference.py b/venv/Lib/site-packages/langchain/llms/huggingface_text_gen_inference.py new file mode 100644 index 00000000..524ee817 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/huggingface_text_gen_inference.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import HuggingFaceTextGenInference + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"HuggingFaceTextGenInference": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HuggingFaceTextGenInference", +] diff --git a/venv/Lib/site-packages/langchain/llms/human.py b/venv/Lib/site-packages/langchain/llms/human.py new file mode 100644 index 00000000..b5346067 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/human.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import HumanInputLLM + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"HumanInputLLM": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HumanInputLLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/javelin_ai_gateway.py b/venv/Lib/site-packages/langchain/llms/javelin_ai_gateway.py new file mode 100644 index 00000000..952e968a --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/javelin_ai_gateway.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import JavelinAIGateway + from langchain_community.llms.javelin_ai_gateway import Params + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "JavelinAIGateway": "langchain_community.llms", + "Params": "langchain_community.llms.javelin_ai_gateway", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JavelinAIGateway", + "Params", +] diff --git a/venv/Lib/site-packages/langchain/llms/koboldai.py b/venv/Lib/site-packages/langchain/llms/koboldai.py new file mode 100644 index 00000000..e2b94b99 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/koboldai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import KoboldApiLLM + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"KoboldApiLLM": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "KoboldApiLLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/llamacpp.py b/venv/Lib/site-packages/langchain/llms/llamacpp.py new file mode 100644 index 00000000..bcfbf49b --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/llamacpp.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import LlamaCpp + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"LlamaCpp": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LlamaCpp", +] diff --git a/venv/Lib/site-packages/langchain/llms/loading.py b/venv/Lib/site-packages/langchain/llms/loading.py new file mode 100644 index 00000000..7517da6b --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/loading.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms.loading import load_llm, load_llm_from_config + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "load_llm_from_config": "langchain_community.llms.loading", + "load_llm": "langchain_community.llms.loading", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "load_llm_from_config", + "load_llm", +] diff --git a/venv/Lib/site-packages/langchain/llms/manifest.py b/venv/Lib/site-packages/langchain/llms/manifest.py new file mode 100644 index 00000000..51b8e3c4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/manifest.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import ManifestWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ManifestWrapper": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ManifestWrapper", +] diff --git a/venv/Lib/site-packages/langchain/llms/minimax.py b/venv/Lib/site-packages/langchain/llms/minimax.py new file mode 100644 index 00000000..92f78d6f --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/minimax.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Minimax + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Minimax": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Minimax", +] diff --git a/venv/Lib/site-packages/langchain/llms/mlflow.py b/venv/Lib/site-packages/langchain/llms/mlflow.py new file mode 100644 index 00000000..8cda29aa --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/mlflow.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Mlflow + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Mlflow": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Mlflow", +] diff --git a/venv/Lib/site-packages/langchain/llms/mlflow_ai_gateway.py b/venv/Lib/site-packages/langchain/llms/mlflow_ai_gateway.py new file mode 100644 index 00000000..889ba221 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/mlflow_ai_gateway.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import MlflowAIGateway + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MlflowAIGateway": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MlflowAIGateway", +] diff --git a/venv/Lib/site-packages/langchain/llms/modal.py b/venv/Lib/site-packages/langchain/llms/modal.py new file mode 100644 index 00000000..f6c47427 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/modal.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Modal + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Modal": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Modal", +] diff --git a/venv/Lib/site-packages/langchain/llms/mosaicml.py b/venv/Lib/site-packages/langchain/llms/mosaicml.py new file mode 100644 index 00000000..f4ecc601 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/mosaicml.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import MosaicML + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"MosaicML": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MosaicML", +] diff --git a/venv/Lib/site-packages/langchain/llms/nlpcloud.py b/venv/Lib/site-packages/langchain/llms/nlpcloud.py new file mode 100644 index 00000000..a2ed7afa --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/nlpcloud.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import NLPCloud + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NLPCloud": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NLPCloud", +] diff --git a/venv/Lib/site-packages/langchain/llms/octoai_endpoint.py b/venv/Lib/site-packages/langchain/llms/octoai_endpoint.py new file mode 100644 index 00000000..b10a6650 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/octoai_endpoint.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import OctoAIEndpoint + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OctoAIEndpoint": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OctoAIEndpoint", +] diff --git a/venv/Lib/site-packages/langchain/llms/ollama.py b/venv/Lib/site-packages/langchain/llms/ollama.py new file mode 100644 index 00000000..12e828e7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/ollama.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Ollama + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Ollama": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Ollama", +] diff --git a/venv/Lib/site-packages/langchain/llms/opaqueprompts.py b/venv/Lib/site-packages/langchain/llms/opaqueprompts.py new file mode 100644 index 00000000..2135d7f8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/opaqueprompts.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import OpaquePrompts + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"OpaquePrompts": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpaquePrompts", +] diff --git a/venv/Lib/site-packages/langchain/llms/openai.py b/venv/Lib/site-packages/langchain/llms/openai.py new file mode 100644 index 00000000..8f88b4b0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/openai.py @@ -0,0 +1,32 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import AzureOpenAI, OpenAI, OpenAIChat + from langchain_community.llms.openai import BaseOpenAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "BaseOpenAI": "langchain_community.llms.openai", + "OpenAI": "langchain_community.llms", + "AzureOpenAI": "langchain_community.llms", + "OpenAIChat": "langchain_community.llms", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseOpenAI", + "OpenAI", + "AzureOpenAI", + "OpenAIChat", +] diff --git a/venv/Lib/site-packages/langchain/llms/openllm.py b/venv/Lib/site-packages/langchain/llms/openllm.py new file mode 100644 index 00000000..061af8b5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/openllm.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import OpenLLM + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OpenLLM": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenLLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/openlm.py b/venv/Lib/site-packages/langchain/llms/openlm.py new file mode 100644 index 00000000..9b30131a --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/openlm.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import OpenLM + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"OpenLM": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/pai_eas_endpoint.py b/venv/Lib/site-packages/langchain/llms/pai_eas_endpoint.py new file mode 100644 index 00000000..c8b3e9fc --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/pai_eas_endpoint.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import PaiEasEndpoint + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PaiEasEndpoint": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PaiEasEndpoint", +] diff --git a/venv/Lib/site-packages/langchain/llms/petals.py b/venv/Lib/site-packages/langchain/llms/petals.py new file mode 100644 index 00000000..836fdd86 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/petals.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Petals + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Petals": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Petals", +] diff --git a/venv/Lib/site-packages/langchain/llms/pipelineai.py b/venv/Lib/site-packages/langchain/llms/pipelineai.py new file mode 100644 index 00000000..e8130aac --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/pipelineai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import PipelineAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PipelineAI": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PipelineAI", +] diff --git a/venv/Lib/site-packages/langchain/llms/predibase.py b/venv/Lib/site-packages/langchain/llms/predibase.py new file mode 100644 index 00000000..8e262a43 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/predibase.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Predibase + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Predibase": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Predibase", +] diff --git a/venv/Lib/site-packages/langchain/llms/predictionguard.py b/venv/Lib/site-packages/langchain/llms/predictionguard.py new file mode 100644 index 00000000..2a519e60 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/predictionguard.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import PredictionGuard + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PredictionGuard": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PredictionGuard", +] diff --git a/venv/Lib/site-packages/langchain/llms/promptlayer_openai.py b/venv/Lib/site-packages/langchain/llms/promptlayer_openai.py new file mode 100644 index 00000000..41025078 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/promptlayer_openai.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import PromptLayerOpenAI, PromptLayerOpenAIChat + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "PromptLayerOpenAI": "langchain_community.llms", + "PromptLayerOpenAIChat": "langchain_community.llms", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PromptLayerOpenAI", + "PromptLayerOpenAIChat", +] diff --git a/venv/Lib/site-packages/langchain/llms/replicate.py b/venv/Lib/site-packages/langchain/llms/replicate.py new file mode 100644 index 00000000..05d168b3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/replicate.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Replicate + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Replicate": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Replicate", +] diff --git a/venv/Lib/site-packages/langchain/llms/rwkv.py b/venv/Lib/site-packages/langchain/llms/rwkv.py new file mode 100644 index 00000000..153e10cd --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/rwkv.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import RWKV + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"RWKV": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RWKV", +] diff --git a/venv/Lib/site-packages/langchain/llms/sagemaker_endpoint.py b/venv/Lib/site-packages/langchain/llms/sagemaker_endpoint.py new file mode 100644 index 00000000..101f6e66 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/sagemaker_endpoint.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import SagemakerEndpoint + from langchain_community.llms.sagemaker_endpoint import LLMContentHandler + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SagemakerEndpoint": "langchain_community.llms", + "LLMContentHandler": "langchain_community.llms.sagemaker_endpoint", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SagemakerEndpoint", + "LLMContentHandler", +] diff --git a/venv/Lib/site-packages/langchain/llms/self_hosted.py b/venv/Lib/site-packages/langchain/llms/self_hosted.py new file mode 100644 index 00000000..0bf213a7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/self_hosted.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import SelfHostedPipeline + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SelfHostedPipeline": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SelfHostedPipeline", +] diff --git a/venv/Lib/site-packages/langchain/llms/self_hosted_hugging_face.py b/venv/Lib/site-packages/langchain/llms/self_hosted_hugging_face.py new file mode 100644 index 00000000..13e663e1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/self_hosted_hugging_face.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import SelfHostedHuggingFaceLLM + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SelfHostedHuggingFaceLLM": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SelfHostedHuggingFaceLLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/stochasticai.py b/venv/Lib/site-packages/langchain/llms/stochasticai.py new file mode 100644 index 00000000..844d1f61 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/stochasticai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import StochasticAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"StochasticAI": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "StochasticAI", +] diff --git a/venv/Lib/site-packages/langchain/llms/symblai_nebula.py b/venv/Lib/site-packages/langchain/llms/symblai_nebula.py new file mode 100644 index 00000000..de0f217e --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/symblai_nebula.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Nebula + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Nebula": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Nebula", +] diff --git a/venv/Lib/site-packages/langchain/llms/textgen.py b/venv/Lib/site-packages/langchain/llms/textgen.py new file mode 100644 index 00000000..d5f73351 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/textgen.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import TextGen + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"TextGen": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TextGen", +] diff --git a/venv/Lib/site-packages/langchain/llms/titan_takeoff.py b/venv/Lib/site-packages/langchain/llms/titan_takeoff.py new file mode 100644 index 00000000..b34af8ce --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/titan_takeoff.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import TitanTakeoff + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"TitanTakeoff": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TitanTakeoff", +] diff --git a/venv/Lib/site-packages/langchain/llms/titan_takeoff_pro.py b/venv/Lib/site-packages/langchain/llms/titan_takeoff_pro.py new file mode 100644 index 00000000..fde6a12c --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/titan_takeoff_pro.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import TitanTakeoffPro + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"TitanTakeoffPro": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TitanTakeoffPro", +] diff --git a/venv/Lib/site-packages/langchain/llms/together.py b/venv/Lib/site-packages/langchain/llms/together.py new file mode 100644 index 00000000..fd2bd8d8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/together.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Together + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Together": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Together", +] diff --git a/venv/Lib/site-packages/langchain/llms/tongyi.py b/venv/Lib/site-packages/langchain/llms/tongyi.py new file mode 100644 index 00000000..98dfdded --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/tongyi.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Tongyi + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Tongyi": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Tongyi", +] diff --git a/venv/Lib/site-packages/langchain/llms/utils.py b/venv/Lib/site-packages/langchain/llms/utils.py new file mode 100644 index 00000000..0e561fd6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/utils.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms.utils import enforce_stop_tokens + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"enforce_stop_tokens": "langchain_community.llms.utils"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "enforce_stop_tokens", +] diff --git a/venv/Lib/site-packages/langchain/llms/vertexai.py b/venv/Lib/site-packages/langchain/llms/vertexai.py new file mode 100644 index 00000000..e113bed9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/vertexai.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import VertexAI, VertexAIModelGarden + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "VertexAI": "langchain_community.llms", + "VertexAIModelGarden": "langchain_community.llms", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "VertexAI", + "VertexAIModelGarden", +] diff --git a/venv/Lib/site-packages/langchain/llms/vllm.py b/venv/Lib/site-packages/langchain/llms/vllm.py new file mode 100644 index 00000000..d26c0e3b --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/vllm.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import VLLM, VLLMOpenAI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "VLLM": "langchain_community.llms", + "VLLMOpenAI": "langchain_community.llms", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "VLLM", + "VLLMOpenAI", +] diff --git a/venv/Lib/site-packages/langchain/llms/volcengine_maas.py b/venv/Lib/site-packages/langchain/llms/volcengine_maas.py new file mode 100644 index 00000000..cfee8d47 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/volcengine_maas.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import VolcEngineMaasLLM + from langchain_community.llms.volcengine_maas import VolcEngineMaasBase + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "VolcEngineMaasBase": "langchain_community.llms.volcengine_maas", + "VolcEngineMaasLLM": "langchain_community.llms", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "VolcEngineMaasBase", + "VolcEngineMaasLLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/watsonxllm.py b/venv/Lib/site-packages/langchain/llms/watsonxllm.py new file mode 100644 index 00000000..5bbc3772 --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/watsonxllm.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import WatsonxLLM + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"WatsonxLLM": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WatsonxLLM", +] diff --git a/venv/Lib/site-packages/langchain/llms/writer.py b/venv/Lib/site-packages/langchain/llms/writer.py new file mode 100644 index 00000000..091a225d --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/writer.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Writer + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Writer": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Writer", +] diff --git a/venv/Lib/site-packages/langchain/llms/xinference.py b/venv/Lib/site-packages/langchain/llms/xinference.py new file mode 100644 index 00000000..01c27d7d --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/xinference.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import Xinference + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Xinference": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Xinference", +] diff --git a/venv/Lib/site-packages/langchain/llms/yandex.py b/venv/Lib/site-packages/langchain/llms/yandex.py new file mode 100644 index 00000000..9c25312d --- /dev/null +++ b/venv/Lib/site-packages/langchain/llms/yandex.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import YandexGPT + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"YandexGPT": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "YandexGPT", +] diff --git a/venv/Lib/site-packages/langchain/load/__init__.py b/venv/Lib/site-packages/langchain/load/__init__.py new file mode 100644 index 00000000..c6dd88dd --- /dev/null +++ b/venv/Lib/site-packages/langchain/load/__init__.py @@ -0,0 +1,11 @@ +"""Serialization and deserialization.""" + +from langchain_core.load.dump import dumpd, dumps +from langchain_core.load.load import load, loads + +__all__ = [ + "dumpd", + "dumps", + "load", + "loads", +] diff --git a/venv/Lib/site-packages/langchain/load/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/load/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d59c2434 Binary files /dev/null and b/venv/Lib/site-packages/langchain/load/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/load/__pycache__/dump.cpython-312.pyc b/venv/Lib/site-packages/langchain/load/__pycache__/dump.cpython-312.pyc new file mode 100644 index 00000000..47413f31 Binary files /dev/null and b/venv/Lib/site-packages/langchain/load/__pycache__/dump.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/load/__pycache__/load.cpython-312.pyc b/venv/Lib/site-packages/langchain/load/__pycache__/load.cpython-312.pyc new file mode 100644 index 00000000..1cd6de9b Binary files /dev/null and b/venv/Lib/site-packages/langchain/load/__pycache__/load.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/load/__pycache__/serializable.cpython-312.pyc b/venv/Lib/site-packages/langchain/load/__pycache__/serializable.cpython-312.pyc new file mode 100644 index 00000000..f1559c8f Binary files /dev/null and b/venv/Lib/site-packages/langchain/load/__pycache__/serializable.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/load/dump.py b/venv/Lib/site-packages/langchain/load/dump.py new file mode 100644 index 00000000..fd955201 --- /dev/null +++ b/venv/Lib/site-packages/langchain/load/dump.py @@ -0,0 +1,3 @@ +from langchain_core.load.dump import default, dumpd, dumps + +__all__ = ["default", "dumps", "dumpd"] diff --git a/venv/Lib/site-packages/langchain/load/load.py b/venv/Lib/site-packages/langchain/load/load.py new file mode 100644 index 00000000..5a32ea08 --- /dev/null +++ b/venv/Lib/site-packages/langchain/load/load.py @@ -0,0 +1,3 @@ +from langchain_core.load.load import Reviver, load, loads + +__all__ = ["Reviver", "loads", "load"] diff --git a/venv/Lib/site-packages/langchain/load/serializable.py b/venv/Lib/site-packages/langchain/load/serializable.py new file mode 100644 index 00000000..d1c0772d --- /dev/null +++ b/venv/Lib/site-packages/langchain/load/serializable.py @@ -0,0 +1,19 @@ +from langchain_core.load.serializable import ( + BaseSerialized, + Serializable, + SerializedConstructor, + SerializedNotImplemented, + SerializedSecret, + to_json_not_implemented, + try_neq_default, +) + +__all__ = [ + "BaseSerialized", + "SerializedConstructor", + "SerializedSecret", + "SerializedNotImplemented", + "try_neq_default", + "Serializable", + "to_json_not_implemented", +] diff --git a/venv/Lib/site-packages/langchain/memory/__init__.py b/venv/Lib/site-packages/langchain/memory/__init__.py new file mode 100644 index 00000000..de0281c8 --- /dev/null +++ 
b/venv/Lib/site-packages/langchain/memory/__init__.py
@@ -0,0 +1,153 @@
+"""**Memory** maintains Chain state, incorporating context from past runs.
+
+**Class hierarchy for Memory:**
+
+.. code-block::
+
+    BaseMemory --> BaseChatMemory --> <name>Memory  # Examples: ZepMemory, MotorheadMemory
+
+**Main helpers:**
+
+.. code-block::
+
+    BaseChatMessageHistory
+
+**Chat Message History** stores the chat message history in different stores.
+
+**Class hierarchy for ChatMessageHistory:**
+
+.. code-block::
+
+    BaseChatMessageHistory --> <name>ChatMessageHistory  # Example: ZepChatMessageHistory
+
+**Main helpers:**
+
+.. code-block::
+
+    AIMessage, BaseMessage, HumanMessage
+"""  # noqa: E501
+
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+from langchain.memory.buffer import (
+    ConversationBufferMemory,
+    ConversationStringBufferMemory,
+)
+from langchain.memory.buffer_window import ConversationBufferWindowMemory
+from langchain.memory.combined import CombinedMemory
+from langchain.memory.entity import (
+    ConversationEntityMemory,
+    InMemoryEntityStore,
+    RedisEntityStore,
+    SQLiteEntityStore,
+    UpstashRedisEntityStore,
+)
+from langchain.memory.readonly import ReadOnlySharedMemory
+from langchain.memory.simple import SimpleMemory
+from langchain.memory.summary import ConversationSummaryMemory
+from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
+from langchain.memory.token_buffer import ConversationTokenBufferMemory
+from langchain.memory.vectorstore import VectorStoreRetrieverMemory
+from langchain.memory.vectorstore_token_buffer_memory import (
+    ConversationVectorStoreTokenBufferMemory,  # avoid circular import
+)
+
+if TYPE_CHECKING:
+    from langchain_community.chat_message_histories import (
+        AstraDBChatMessageHistory,
+        CassandraChatMessageHistory,
+        ChatMessageHistory,
+        CosmosDBChatMessageHistory,
+        DynamoDBChatMessageHistory,
+        ElasticsearchChatMessageHistory,
+        FileChatMessageHistory,
+        MomentoChatMessageHistory,
+        MongoDBChatMessageHistory,
+        PostgresChatMessageHistory,
+        RedisChatMessageHistory,
+        SingleStoreDBChatMessageHistory,
+        SQLChatMessageHistory,
+        StreamlitChatMessageHistory,
+        UpstashRedisChatMessageHistory,
+        XataChatMessageHistory,
+        ZepChatMessageHistory,
+    )
+    from langchain_community.memory.kg import ConversationKGMemory
+    from langchain_community.memory.motorhead_memory import MotorheadMemory
+    from langchain_community.memory.zep_memory import ZepMemory
+
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
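+# A sketch of the assumed behaviour, mirroring the other shims in this
+# package: the names mapped below are not imported eagerly. For example,
+#
+#   from langchain.memory import MotorheadMemory
+#
+# goes through module-level __getattr__ and ``create_importer``, which imports
+# the class from ``langchain_community.memory.motorhead_memory`` and emits a
+# deprecation warning pointing at that new location.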
+DEPRECATED_LOOKUP = { + "MotorheadMemory": "langchain_community.memory.motorhead_memory", + "ConversationKGMemory": "langchain_community.memory.kg", + "ZepMemory": "langchain_community.memory.zep_memory", + "AstraDBChatMessageHistory": "langchain_community.chat_message_histories", + "CassandraChatMessageHistory": "langchain_community.chat_message_histories", + "ChatMessageHistory": "langchain_community.chat_message_histories", + "CosmosDBChatMessageHistory": "langchain_community.chat_message_histories", + "DynamoDBChatMessageHistory": "langchain_community.chat_message_histories", + "ElasticsearchChatMessageHistory": "langchain_community.chat_message_histories", + "FileChatMessageHistory": "langchain_community.chat_message_histories", + "MomentoChatMessageHistory": "langchain_community.chat_message_histories", + "MongoDBChatMessageHistory": "langchain_community.chat_message_histories", + "PostgresChatMessageHistory": "langchain_community.chat_message_histories", + "RedisChatMessageHistory": "langchain_community.chat_message_histories", + "SingleStoreDBChatMessageHistory": "langchain_community.chat_message_histories", + "SQLChatMessageHistory": "langchain_community.chat_message_histories", + "StreamlitChatMessageHistory": "langchain_community.chat_message_histories", + "UpstashRedisChatMessageHistory": "langchain_community.chat_message_histories", + "XataChatMessageHistory": "langchain_community.chat_message_histories", + "ZepChatMessageHistory": "langchain_community.chat_message_histories", +} + + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AstraDBChatMessageHistory", + "CassandraChatMessageHistory", + "ChatMessageHistory", + "CombinedMemory", + "ConversationBufferMemory", + "ConversationBufferWindowMemory", + "ConversationEntityMemory", + "ConversationKGMemory", + "ConversationStringBufferMemory", + "ConversationSummaryBufferMemory", + "ConversationSummaryMemory", + "ConversationTokenBufferMemory", + "ConversationVectorStoreTokenBufferMemory", + "CosmosDBChatMessageHistory", + "DynamoDBChatMessageHistory", + "ElasticsearchChatMessageHistory", + "FileChatMessageHistory", + "InMemoryEntityStore", + "MomentoChatMessageHistory", + "MongoDBChatMessageHistory", + "MotorheadMemory", + "PostgresChatMessageHistory", + "ReadOnlySharedMemory", + "RedisChatMessageHistory", + "RedisEntityStore", + "SingleStoreDBChatMessageHistory", + "SQLChatMessageHistory", + "SQLiteEntityStore", + "SimpleMemory", + "StreamlitChatMessageHistory", + "VectorStoreRetrieverMemory", + "XataChatMessageHistory", + "ZepChatMessageHistory", + "ZepMemory", + "UpstashRedisEntityStore", + "UpstashRedisChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..c4b4b94b Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/buffer.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/buffer.cpython-312.pyc new file mode 100644 index 00000000..00cc404e Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/buffer.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/buffer_window.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/memory/__pycache__/buffer_window.cpython-312.pyc new file mode 100644 index 00000000..d0ce528f Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/buffer_window.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/chat_memory.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/chat_memory.cpython-312.pyc new file mode 100644 index 00000000..757c2c7b Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/chat_memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/combined.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/combined.cpython-312.pyc new file mode 100644 index 00000000..256df327 Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/combined.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/entity.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/entity.cpython-312.pyc new file mode 100644 index 00000000..1ce4cb0c Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/entity.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/kg.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/kg.cpython-312.pyc new file mode 100644 index 00000000..091ced75 Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/kg.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/motorhead_memory.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/motorhead_memory.cpython-312.pyc new file mode 100644 index 00000000..5f5abfff Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/motorhead_memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..bbd14bd9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/readonly.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/readonly.cpython-312.pyc new file mode 100644 index 00000000..9a0b3d20 Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/readonly.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/simple.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/simple.cpython-312.pyc new file mode 100644 index 00000000..5cc8d5b9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/simple.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/summary.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/summary.cpython-312.pyc new file mode 100644 index 00000000..b1fee98a Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/summary.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/summary_buffer.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/summary_buffer.cpython-312.pyc new file mode 100644 index 00000000..3e8b36f7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/summary_buffer.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/memory/__pycache__/token_buffer.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/token_buffer.cpython-312.pyc new file mode 100644 index 00000000..d3d72bee Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/token_buffer.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..81a69c87 Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/vectorstore.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/vectorstore.cpython-312.pyc new file mode 100644 index 00000000..d6da49c1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/vectorstore.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/vectorstore_token_buffer_memory.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/vectorstore_token_buffer_memory.cpython-312.pyc new file mode 100644 index 00000000..5d8ed1f7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/vectorstore_token_buffer_memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/__pycache__/zep_memory.cpython-312.pyc b/venv/Lib/site-packages/langchain/memory/__pycache__/zep_memory.cpython-312.pyc new file mode 100644 index 00000000..a30a806f Binary files /dev/null and b/venv/Lib/site-packages/langchain/memory/__pycache__/zep_memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/memory/buffer.py b/venv/Lib/site-packages/langchain/memory/buffer.py new file mode 100644 index 00000000..7d6ec764 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/buffer.py @@ -0,0 +1,170 @@ +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.utils import pre_init + +from langchain.memory.chat_memory import BaseChatMemory, BaseMemory +from langchain.memory.utils import get_prompt_input_key + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationBufferMemory(BaseChatMemory): + """A basic memory implementation that simply stores the conversation history. + + This stores the entire conversation history in memory without any + additional processing. + + Note that additional processing may be required in some situations when the + conversation history is too large to fit in the context window of the model. 
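+
+    A minimal usage sketch (illustrative only; assumes the default
+    ``memory_key`` of ``"history"``)::
+
+        memory = ConversationBufferMemory()
+        memory.save_context({"input": "hi"}, {"output": "hello"})
+        memory.load_memory_variables({})
+        # -> {"history": "Human: hi\nAI: hello"}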
+ """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + memory_key: str = "history" #: :meta private: + + @property + def buffer(self) -> Any: + """String buffer of memory.""" + return self.buffer_as_messages if self.return_messages else self.buffer_as_str + + async def abuffer(self) -> Any: + """String buffer of memory.""" + return ( + await self.abuffer_as_messages() + if self.return_messages + else await self.abuffer_as_str() + ) + + def _buffer_as_str(self, messages: list[BaseMessage]) -> str: + return get_buffer_string( + messages, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + @property + def buffer_as_str(self) -> str: + """Exposes the buffer as a string in case return_messages is True.""" + return self._buffer_as_str(self.chat_memory.messages) + + async def abuffer_as_str(self) -> str: + """Exposes the buffer as a string in case return_messages is True.""" + messages = await self.chat_memory.aget_messages() + return self._buffer_as_str(messages) + + @property + def buffer_as_messages(self) -> list[BaseMessage]: + """Exposes the buffer as a list of messages in case return_messages is False.""" + return self.chat_memory.messages + + async def abuffer_as_messages(self) -> list[BaseMessage]: + """Exposes the buffer as a list of messages in case return_messages is False.""" + return await self.chat_memory.aget_messages() + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. + + :meta private: + """ + return [self.memory_key] + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer.""" + return {self.memory_key: self.buffer} + + async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return key-value pairs given the text input to the chain.""" + buffer = await self.abuffer() + return {self.memory_key: buffer} + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationStringBufferMemory(BaseMemory): + """A basic memory implementation that simply stores the conversation history. + + This stores the entire conversation history in memory without any + additional processing. + + Equivalent to ConversationBufferMemory but tailored more specifically + for string-based conversations rather than chat models. + + Note that additional processing may be required in some situations when the + conversation history is too large to fit in the context window of the model. + """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + """Prefix to use for AI generated responses.""" + buffer: str = "" + output_key: Optional[str] = None + input_key: Optional[str] = None + memory_key: str = "history" #: :meta private: + + @pre_init + def validate_chains(cls, values: dict) -> dict: + """Validate that return messages is not True.""" + if values.get("return_messages", False): + raise ValueError( + "return_messages must be False for ConversationStringBufferMemory" + ) + return values + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. 
+ :meta private: + """ + return [self.memory_key] + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: + """Return history buffer.""" + return {self.memory_key: self.buffer} + + async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: + """Return history buffer.""" + return self.load_memory_variables(inputs) + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer.""" + if self.input_key is None: + prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) + else: + prompt_input_key = self.input_key + if self.output_key is None: + if len(outputs) != 1: + raise ValueError(f"One output key expected, got {outputs.keys()}") + output_key = list(outputs.keys())[0] + else: + output_key = self.output_key + human = f"{self.human_prefix}: " + inputs[prompt_input_key] + ai = f"{self.ai_prefix}: " + outputs[output_key] + self.buffer += "\n" + "\n".join([human, ai]) + + async def asave_context( + self, inputs: dict[str, Any], outputs: dict[str, str] + ) -> None: + """Save context from this conversation to buffer.""" + return self.save_context(inputs, outputs) + + def clear(self) -> None: + """Clear memory contents.""" + self.buffer = "" + + async def aclear(self) -> None: + self.clear() diff --git a/venv/Lib/site-packages/langchain/memory/buffer_window.py b/venv/Lib/site-packages/langchain/memory/buffer_window.py new file mode 100644 index 00000000..8e586bdb --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/buffer_window.py @@ -0,0 +1,60 @@ +from typing import Any, Union + +from langchain_core._api import deprecated +from langchain_core.messages import BaseMessage, get_buffer_string + +from langchain.memory.chat_memory import BaseChatMemory + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationBufferWindowMemory(BaseChatMemory): + """Use to keep track of the last k turns of a conversation. + + If the number of messages in the conversation is more than the maximum number + of messages to keep, the oldest messages are dropped. + """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + memory_key: str = "history" #: :meta private: + k: int = 5 + """Number of messages to store in buffer.""" + + @property + def buffer(self) -> Union[str, list[BaseMessage]]: + """String buffer of memory.""" + return self.buffer_as_messages if self.return_messages else self.buffer_as_str + + @property + def buffer_as_str(self) -> str: + """Exposes the buffer as a string in case return_messages is False.""" + messages = self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else [] + return get_buffer_string( + messages, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + @property + def buffer_as_messages(self) -> list[BaseMessage]: + """Exposes the buffer as a list of messages in case return_messages is True.""" + return self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else [] + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. 
+ + :meta private: + """ + return [self.memory_key] + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer.""" + return {self.memory_key: self.buffer} diff --git a/venv/Lib/site-packages/langchain/memory/chat_memory.py b/venv/Lib/site-packages/langchain/memory/chat_memory.py new file mode 100644 index 00000000..da14dd00 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_memory.py @@ -0,0 +1,98 @@ +import warnings +from abc import ABC +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.chat_history import ( + BaseChatMessageHistory, + InMemoryChatMessageHistory, +) +from langchain_core.memory import BaseMemory +from langchain_core.messages import AIMessage, HumanMessage +from pydantic import Field + +from langchain.memory.utils import get_prompt_input_key + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class BaseChatMemory(BaseMemory, ABC): + """Abstract base class for chat memory. + + **ATTENTION** This abstraction was created prior to when chat models had + native tool calling capabilities. + It does **NOT** support native tool calling capabilities for chat models and + will fail SILENTLY if used with a chat model that has native tool calling. + + DO NOT USE THIS ABSTRACTION FOR NEW CODE. + """ + + chat_memory: BaseChatMessageHistory = Field( + default_factory=InMemoryChatMessageHistory + ) + output_key: Optional[str] = None + input_key: Optional[str] = None + return_messages: bool = False + + def _get_input_output( + self, inputs: dict[str, Any], outputs: dict[str, str] + ) -> tuple[str, str]: + if self.input_key is None: + prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) + else: + prompt_input_key = self.input_key + if self.output_key is None: + if len(outputs) == 1: + output_key = list(outputs.keys())[0] + elif "output" in outputs: + output_key = "output" + warnings.warn( + f"'{self.__class__.__name__}' got multiple output keys:" + f" {outputs.keys()}. The default 'output' key is being used." + f" If this is not desired, please manually set 'output_key'." + ) + else: + raise ValueError( + f"Got multiple output keys: {outputs.keys()}, cannot " + f"determine which to store in memory. Please set the " + f"'output_key' explicitly." 
+ ) + else: + output_key = self.output_key + return inputs[prompt_input_key], outputs[output_key] + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer.""" + input_str, output_str = self._get_input_output(inputs, outputs) + self.chat_memory.add_messages( + [ + HumanMessage(content=input_str), + AIMessage(content=output_str), + ] + ) + + async def asave_context( + self, inputs: dict[str, Any], outputs: dict[str, str] + ) -> None: + """Save context from this conversation to buffer.""" + input_str, output_str = self._get_input_output(inputs, outputs) + await self.chat_memory.aadd_messages( + [ + HumanMessage(content=input_str), + AIMessage(content=output_str), + ] + ) + + def clear(self) -> None: + """Clear memory contents.""" + self.chat_memory.clear() + + async def aclear(self) -> None: + """Clear memory contents.""" + await self.chat_memory.aclear() diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/__init__.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/__init__.py new file mode 100644 index 00000000..91910137 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/__init__.py @@ -0,0 +1,84 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import ( + AstraDBChatMessageHistory, + CassandraChatMessageHistory, + ChatMessageHistory, + CosmosDBChatMessageHistory, + DynamoDBChatMessageHistory, + ElasticsearchChatMessageHistory, + FileChatMessageHistory, + FirestoreChatMessageHistory, + MomentoChatMessageHistory, + MongoDBChatMessageHistory, + Neo4jChatMessageHistory, + PostgresChatMessageHistory, + RedisChatMessageHistory, + RocksetChatMessageHistory, + SingleStoreDBChatMessageHistory, + SQLChatMessageHistory, + StreamlitChatMessageHistory, + UpstashRedisChatMessageHistory, + XataChatMessageHistory, + ZepChatMessageHistory, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
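+#
+# Illustrative sketch (an editorial addition, not upstream code): with the
+# lookup below in place, a deprecated import such as
+#
+#     from langchain.memory.chat_message_histories import RedisChatMessageHistory
+#
+# is resolved lazily by the module-level __getattr__, which raises a
+# deprecation warning and forwards to the langchain_community implementation.
+# The per-backend shim modules that follow repeat this same pattern.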
+DEPRECATED_LOOKUP = {
+    "AstraDBChatMessageHistory": "langchain_community.chat_message_histories",
+    "CassandraChatMessageHistory": "langchain_community.chat_message_histories",
+    "ChatMessageHistory": "langchain_community.chat_message_histories",
+    "CosmosDBChatMessageHistory": "langchain_community.chat_message_histories",
+    "DynamoDBChatMessageHistory": "langchain_community.chat_message_histories",
+    "ElasticsearchChatMessageHistory": "langchain_community.chat_message_histories",
+    "FileChatMessageHistory": "langchain_community.chat_message_histories",
+    "FirestoreChatMessageHistory": "langchain_community.chat_message_histories",
+    "MomentoChatMessageHistory": "langchain_community.chat_message_histories",
+    "MongoDBChatMessageHistory": "langchain_community.chat_message_histories",
+    "Neo4jChatMessageHistory": "langchain_community.chat_message_histories",
+    "PostgresChatMessageHistory": "langchain_community.chat_message_histories",
+    "RedisChatMessageHistory": "langchain_community.chat_message_histories",
+    "RocksetChatMessageHistory": "langchain_community.chat_message_histories",
+    "SQLChatMessageHistory": "langchain_community.chat_message_histories",
+    "SingleStoreDBChatMessageHistory": "langchain_community.chat_message_histories",
+    "StreamlitChatMessageHistory": "langchain_community.chat_message_histories",
+    "UpstashRedisChatMessageHistory": "langchain_community.chat_message_histories",
+    "XataChatMessageHistory": "langchain_community.chat_message_histories",
+    "ZepChatMessageHistory": "langchain_community.chat_message_histories",
+}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = [
+    "AstraDBChatMessageHistory",
+    "CassandraChatMessageHistory",
+    "ChatMessageHistory",
+    "CosmosDBChatMessageHistory",
+    "DynamoDBChatMessageHistory",
+    "ElasticsearchChatMessageHistory",
+    "FileChatMessageHistory",
+    "FirestoreChatMessageHistory",
+    "MomentoChatMessageHistory",
+    "MongoDBChatMessageHistory",
+    "Neo4jChatMessageHistory",
+    "PostgresChatMessageHistory",
+    "RedisChatMessageHistory",
+    "RocksetChatMessageHistory",
+    "SingleStoreDBChatMessageHistory",
+    "SQLChatMessageHistory",
+    "StreamlitChatMessageHistory",
+    "UpstashRedisChatMessageHistory",
+    "XataChatMessageHistory",
+    "ZepChatMessageHistory",
+]
diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/astradb.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/astradb.py
new file mode 100644
index 00000000..3895f063
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/astradb.py
@@ -0,0 +1,25 @@
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.chat_message_histories import AstraDBChatMessageHistory
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = {
+    "AstraDBChatMessageHistory": "langchain_community.chat_message_histories"
+}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = [
+    "AstraDBChatMessageHistory",
+]
diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/cassandra.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/cassandra.py
new file mode 100644
index 00000000..e0c61cd2
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/cassandra.py
@@ -0,0 +1,25 @@
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.chat_message_histories import CassandraChatMessageHistory
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = {
+    "CassandraChatMessageHistory": "langchain_community.chat_message_histories"
+}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = [
+    "CassandraChatMessageHistory",
+]
diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/cosmos_db.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/cosmos_db.py
new file mode 100644
index 00000000..6e9c8bf2
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/cosmos_db.py
@@ -0,0 +1,25 @@
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.chat_message_histories import CosmosDBChatMessageHistory
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = { + "CosmosDBChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CosmosDBChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/dynamodb.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/dynamodb.py new file mode 100644 index 00000000..e0c5df1a --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/dynamodb.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import DynamoDBChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DynamoDBChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DynamoDBChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/elasticsearch.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/elasticsearch.py new file mode 100644 index 00000000..8845e67a --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/elasticsearch.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import ( + ElasticsearchChatMessageHistory, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ElasticsearchChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ElasticsearchChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/file.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/file.py new file mode 100644 index 00000000..2f9a0a9e --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/file.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import FileChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "FileChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FileChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/firestore.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/firestore.py new file mode 100644 index 00000000..0d81d24d --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/firestore.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import FirestoreChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "FirestoreChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FirestoreChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/in_memory.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/in_memory.py new file mode 100644 index 00000000..679c9ce6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/in_memory.py @@ -0,0 +1,5 @@ +from langchain_core.chat_history import InMemoryChatMessageHistory as ChatMessageHistory + +__all__ = [ + "ChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/momento.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/momento.py new file mode 100644 index 00000000..65d7cce3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/momento.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import MomentoChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "MomentoChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MomentoChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/mongodb.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/mongodb.py new file mode 100644 index 00000000..f0f7db2c --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/mongodb.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import MongoDBChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "MongoDBChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MongoDBChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/neo4j.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/neo4j.py new file mode 100644 index 00000000..ac60cd7c --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/neo4j.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import Neo4jChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "Neo4jChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Neo4jChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/postgres.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/postgres.py new file mode 100644 index 00000000..eb05b7db --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/postgres.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import PostgresChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "PostgresChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PostgresChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/redis.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/redis.py new file mode 100644 index 00000000..a6dc08e2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/redis.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import RedisChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "RedisChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RedisChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/rocksetdb.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/rocksetdb.py new file mode 100644 index 00000000..64116d5b --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/rocksetdb.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import RocksetChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "RocksetChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RocksetChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/singlestoredb.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/singlestoredb.py new file mode 100644 index 00000000..dc6b7952 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/singlestoredb.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import ( + SingleStoreDBChatMessageHistory, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SingleStoreDBChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SingleStoreDBChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/sql.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/sql.py new file mode 100644 index 00000000..5ff23ce2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/sql.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import SQLChatMessageHistory + from langchain_community.chat_message_histories.sql import ( + BaseMessageConverter, + DefaultMessageConverter, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BaseMessageConverter": "langchain_community.chat_message_histories.sql", + "DefaultMessageConverter": "langchain_community.chat_message_histories.sql", + "SQLChatMessageHistory": "langchain_community.chat_message_histories", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseMessageConverter", + "DefaultMessageConverter", + "SQLChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/streamlit.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/streamlit.py new file mode 100644 index 00000000..b1a74824 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/streamlit.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import StreamlitChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "StreamlitChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "StreamlitChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/upstash_redis.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/upstash_redis.py new file mode 100644 index 00000000..af4599f7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/upstash_redis.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import ( + UpstashRedisChatMessageHistory, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "UpstashRedisChatMessageHistory": "langchain_community.chat_message_histories" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UpstashRedisChatMessageHistory", +] diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/xata.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/xata.py new file mode 100644 index 00000000..43cfe672 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/xata.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.chat_message_histories import XataChatMessageHistory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {
+    "XataChatMessageHistory": "langchain_community.chat_message_histories"
+}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = [
+    "XataChatMessageHistory",
+]
diff --git a/venv/Lib/site-packages/langchain/memory/chat_message_histories/zep.py b/venv/Lib/site-packages/langchain/memory/chat_message_histories/zep.py
new file mode 100644
index 00000000..9b63e0fc
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/memory/chat_message_histories/zep.py
@@ -0,0 +1,25 @@
+from typing import TYPE_CHECKING, Any
+
+from langchain._api import create_importer
+
+if TYPE_CHECKING:
+    from langchain_community.chat_message_histories import ZepChatMessageHistory
+
+# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and
+# handling optional imports.
+DEPRECATED_LOOKUP = {
+    "ZepChatMessageHistory": "langchain_community.chat_message_histories"
+}
+
+_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
+
+
+def __getattr__(name: str) -> Any:
+    """Look up attributes dynamically."""
+    return _import_attribute(name)
+
+
+__all__ = [
+    "ZepChatMessageHistory",
+]
diff --git a/venv/Lib/site-packages/langchain/memory/combined.py b/venv/Lib/site-packages/langchain/memory/combined.py
new file mode 100644
index 00000000..8f737eaa
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/memory/combined.py
@@ -0,0 +1,84 @@
+import warnings
+from typing import Any
+
+from langchain_core.memory import BaseMemory
+from pydantic import field_validator
+
+from langchain.memory.chat_memory import BaseChatMemory
+
+
+class CombinedMemory(BaseMemory):
+    """Combining multiple memories' data together."""
+
+    memories: list[BaseMemory]
+    """For tracking all the memories that should be accessed."""
+
+    @field_validator("memories")
+    @classmethod
+    def check_repeated_memory_variable(
+        cls, value: list[BaseMemory]
+    ) -> list[BaseMemory]:
+        all_variables: set[str] = set()
+        for val in value:
+            overlap = all_variables.intersection(val.memory_variables)
+            if overlap:
+                raise ValueError(
+                    f"The same variables {overlap} are found in multiple "
+                    "memory objects, which is not allowed by CombinedMemory."
+                )
+            all_variables |= set(val.memory_variables)
+
+        return value
+
+    @field_validator("memories")
+    @classmethod
+    def check_input_key(cls, value: list[BaseMemory]) -> list[BaseMemory]:
+        """Check that if memories are of type BaseChatMemory that input keys exist."""
+        for val in value:
+            if isinstance(val, BaseChatMemory):
+                if val.input_key is None:
+                    warnings.warn(
+                        "When using CombinedMemory, "
+                        "input keys should be set so the input is known. "
+                        f"input_key was not set on {val}."
+                    )
+        return value
+
+    @property
+    def memory_variables(self) -> list[str]:
+        """All the memory variables that this instance provides,
+        collected from all the linked memories."""
+
+        memory_variables = []
+
+        for memory in self.memories:
+            memory_variables.extend(memory.memory_variables)
+
+        return memory_variables
+
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
+        """Load all vars from sub-memories."""
+        memory_data: dict[str, Any] = {}
+
+        # Collect vars from all sub-memories
+        for memory in self.memories:
+            data = memory.load_memory_variables(inputs)
+            for key, value in data.items():
+                if key in memory_data:
+                    raise ValueError(
+                        f"The variable {key} is repeated in the CombinedMemory."
+                    )
+                memory_data[key] = value
+
+        return memory_data
+
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
+        """Save context from this session for every memory."""
+        # Save context for all sub-memories
+        for memory in self.memories:
+            memory.save_context(inputs, outputs)
+
+    def clear(self) -> None:
+        """Clear context from this session for every memory."""
+        for memory in self.memories:
+            memory.clear()
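+
+# A minimal usage sketch (editorial addition; names are hypothetical): two
+# sub-memories can be combined as long as their memory variables do not
+# overlap, which the validators above enforce.
+#
+#     from langchain.memory import ConversationBufferMemory
+#
+#     combined = CombinedMemory(
+#         memories=[
+#             ConversationBufferMemory(memory_key="chat_history", input_key="input"),
+#             ConversationBufferMemory(memory_key="recent_turns", input_key="input"),
+#         ]
+#     )
+#     combined.save_context({"input": "hi"}, {"output": "hello"})
+#     combined.load_memory_variables({})
+#     # -> {"chat_history": "Human: hi\nAI: hello", "recent_turns": "..."}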
" + f" Was not set on {val}" + ) + return value + + @property + def memory_variables(self) -> list[str]: + """All the memory variables that this instance provides.""" + """Collected from the all the linked memories.""" + + memory_variables = [] + + for memory in self.memories: + memory_variables.extend(memory.memory_variables) + + return memory_variables + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: + """Load all vars from sub-memories.""" + memory_data: dict[str, Any] = {} + + # Collect vars from all sub-memories + for memory in self.memories: + data = memory.load_memory_variables(inputs) + for key, value in data.items(): + if key in memory_data: + raise ValueError( + f"The variable {key} is repeated in the CombinedMemory." + ) + memory_data[key] = value + + return memory_data + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this session for every memory.""" + # Save context for all sub-memories + for memory in self.memories: + memory.save_context(inputs, outputs) + + def clear(self) -> None: + """Clear context from this session for every memory.""" + for memory in self.memories: + memory.clear() diff --git a/venv/Lib/site-packages/langchain/memory/entity.py b/venv/Lib/site-packages/langchain/memory/entity.py new file mode 100644 index 00000000..8c09f4f2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/entity.py @@ -0,0 +1,541 @@ +"""Deprecated as of LangChain v0.3.4 and will be removed in LangChain v1.0.0.""" + +import logging +from abc import ABC, abstractmethod +from collections.abc import Iterable +from itertools import islice +from typing import Any, Optional + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.prompts import BasePromptTemplate +from pydantic import BaseModel, ConfigDict, Field + +from langchain.chains.llm import LLMChain +from langchain.memory.chat_memory import BaseChatMemory +from langchain.memory.prompt import ( + ENTITY_EXTRACTION_PROMPT, + ENTITY_SUMMARIZATION_PROMPT, +) +from langchain.memory.utils import get_prompt_input_key + +logger = logging.getLogger(__name__) + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class BaseEntityStore(BaseModel, ABC): + """Abstract base class for Entity store.""" + + @abstractmethod + def get(self, key: str, default: Optional[str] = None) -> Optional[str]: + """Get entity value from store.""" + pass + + @abstractmethod + def set(self, key: str, value: Optional[str]) -> None: + """Set entity value in store.""" + pass + + @abstractmethod + def delete(self, key: str) -> None: + """Delete entity value from store.""" + pass + + @abstractmethod + def exists(self, key: str) -> bool: + """Check if entity exists in store.""" + pass + + @abstractmethod + def clear(self) -> None: + """Delete all entities from store.""" + pass + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class InMemoryEntityStore(BaseEntityStore): + """In-memory Entity store.""" + + store: dict[str, Optional[str]] = {} + + def get(self, key: str, default: Optional[str] = None) -> Optional[str]: + return self.store.get(key, default) + + def set(self, key: str, value: 
+
+
+@deprecated(
+    since="0.3.1",
+    removal="1.0.0",
+    message=(
+        "Please see the migration guide at: "
+        "https://python.langchain.com/docs/versions/migrating_memory/"
+    ),
+)
+class UpstashRedisEntityStore(BaseEntityStore):
+    """Upstash Redis backed Entity store.
+
+    Entities get a TTL of 1 day by default, and
+    that TTL is extended by 3 days every time the entity is read back.
+    """
+
+    def __init__(
+        self,
+        session_id: str = "default",
+        url: str = "",
+        token: str = "",
+        key_prefix: str = "memory_store",
+        ttl: Optional[int] = 60 * 60 * 24,
+        recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
+        *args: Any,
+        **kwargs: Any,
+    ):
+        try:
+            from upstash_redis import Redis
+        except ImportError:
+            raise ImportError(
+                "Could not import upstash_redis python package. "
+                "Please install it with `pip install upstash_redis`."
+            )
+
+        super().__init__(*args, **kwargs)
+
+        try:
+            self.redis_client = Redis(url=url, token=token)
+        except Exception:
+            logger.error("Upstash Redis instance could not be initialized.")
+
+        self.session_id = session_id
+        self.key_prefix = key_prefix
+        self.ttl = ttl
+        self.recall_ttl = recall_ttl or ttl
+
+    @property
+    def full_key_prefix(self) -> str:
+        return f"{self.key_prefix}:{self.session_id}"
+
+    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
+        res = (
+            self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
+            or default
+            or ""
+        )
+        logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'")
+        return res
+
+    def set(self, key: str, value: Optional[str]) -> None:
+        if not value:
+            return self.delete(key)
+        self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
+        logger.debug(
+            f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
+        )
+
+    def delete(self, key: str) -> None:
+        self.redis_client.delete(f"{self.full_key_prefix}:{key}")
+
+    def exists(self, key: str) -> bool:
+        return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
+
+    def clear(self) -> None:
+        def scan_and_delete(cursor: int) -> int:
+            cursor, keys_to_delete = self.redis_client.scan(
+                cursor, f"{self.full_key_prefix}:*"
+            )
+            self.redis_client.delete(*keys_to_delete)
+            return cursor
+
+        # SCAN signals completion by returning cursor 0, so the cursor must be
+        # re-assigned on every pass; otherwise this loop never terminates.
+        cursor = scan_and_delete(0)
+        while cursor != 0:
+            cursor = scan_and_delete(cursor)
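+
+# Editorial note on the TTL scheme shared with RedisEntityStore below: reads go
+# through GETEX with ex=recall_ttl, so every successful `get` re-arms the key's
+# expiry (3 days by default), while `set` re-arms it with `ttl` (1 day by
+# default). A hypothetical session (URL and TOKEN are placeholders):
+#
+#     store = UpstashRedisEntityStore(session_id="s1", url=URL, token=TOKEN)
+#     store.set("Alice", "Alice is a software engineer.")
+#     store.get("Alice")  # returns the summary and extends its TTL by 3 days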
+
+
+@deprecated(
+    since="0.3.1",
+    removal="1.0.0",
+    message=(
+        "Please see the migration guide at: "
+        "https://python.langchain.com/docs/versions/migrating_memory/"
+    ),
+)
+class RedisEntityStore(BaseEntityStore):
+    """Redis-backed Entity store.
+
+    Entities get a TTL of 1 day by default, and
+    that TTL is extended by 3 days every time the entity is read back.
+    """
+
+    redis_client: Any
+    session_id: str = "default"
+    key_prefix: str = "memory_store"
+    ttl: Optional[int] = 60 * 60 * 24
+    recall_ttl: Optional[int] = 60 * 60 * 24 * 3
+
+    def __init__(
+        self,
+        session_id: str = "default",
+        url: str = "redis://localhost:6379/0",
+        key_prefix: str = "memory_store",
+        ttl: Optional[int] = 60 * 60 * 24,
+        recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
+        *args: Any,
+        **kwargs: Any,
+    ):
+        try:
+            import redis
+        except ImportError:
+            raise ImportError(
+                "Could not import redis python package. "
+                "Please install it with `pip install redis`."
+            )
+
+        super().__init__(*args, **kwargs)
+
+        try:
+            from langchain_community.utilities.redis import get_client
+        except ImportError:
+            raise ImportError(
+                "Could not import langchain_community.utilities.redis.get_client. "
+                "Please install it with `pip install langchain-community`."
+            )
+
+        try:
+            self.redis_client = get_client(redis_url=url, decode_responses=True)
+        except redis.exceptions.ConnectionError as error:
+            logger.error(error)
+
+        self.session_id = session_id
+        self.key_prefix = key_prefix
+        self.ttl = ttl
+        self.recall_ttl = recall_ttl or ttl
+
+    @property
+    def full_key_prefix(self) -> str:
+        return f"{self.key_prefix}:{self.session_id}"
+
+    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
+        res = (
+            self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
+            or default
+            or ""
+        )
+        logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
+        return res
+
+    def set(self, key: str, value: Optional[str]) -> None:
+        if not value:
+            return self.delete(key)
+        self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
+        logger.debug(
+            f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
+        )
+
+    def delete(self, key: str) -> None:
+        self.redis_client.delete(f"{self.full_key_prefix}:{key}")
+
+    def exists(self, key: str) -> bool:
+        return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
+
+    def clear(self) -> None:
+        # iterate a list in batches of size batch_size
+        def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]:
+            iterator = iter(iterable)
+            while batch := list(islice(iterator, batch_size)):
+                yield batch
+
+        for keybatch in batched(
+            self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500
+        ):
+            self.redis_client.delete(*keybatch)
+
+
+@deprecated(
+    since="0.3.1",
+    removal="1.0.0",
+    message=(
+        "Please see the migration guide at: "
+        "https://python.langchain.com/docs/versions/migrating_memory/"
+    ),
+)
+class SQLiteEntityStore(BaseEntityStore):
+    """SQLite-backed Entity store."""
+
+    session_id: str = "default"
+    table_name: str = "memory_store"
+    conn: Any = None
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )
+
+    def __init__(
+        self,
+        session_id: str = "default",
+        db_file: str = "entities.db",
+        table_name: str = "memory_store",
+        *args: Any,
+        **kwargs: Any,
+    ):
+        try:
+            import sqlite3
+        except ImportError:
+            raise ImportError(
+                "Could not import the sqlite3 module. It ships with the Python "
+                "standard library, so this Python build appears to lack it."
+            )
+        super().__init__(*args, **kwargs)
+
+        self.conn = sqlite3.connect(db_file)
+        self.session_id = session_id
+        self.table_name = table_name
+        self._create_table_if_not_exists()
+
+    @property
+    def full_table_name(self) -> str:
+        return f"{self.table_name}_{self.session_id}"
+
+    def _create_table_if_not_exists(self) -> None:
+        create_table_query = f"""
+            CREATE TABLE IF NOT EXISTS {self.full_table_name} (
+                key TEXT PRIMARY KEY,
+                value TEXT
+            )
+        """
+        with self.conn:
+            self.conn.execute(create_table_query)
+
+    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
+        query = f"""
+            SELECT value
+            FROM {self.full_table_name}
+            WHERE key = ?
+        """
+        cursor = self.conn.execute(query, (key,))
+        result = cursor.fetchone()
+        if result is not None:
+            value = result[0]
+            return value
+        return default
+
+    def set(self, key: str, value: Optional[str]) -> None:
+        if not value:
+            return self.delete(key)
+        query = f"""
+            INSERT OR REPLACE INTO {self.full_table_name} (key, value)
+            VALUES (?, ?)
+        """
+        with self.conn:
+            self.conn.execute(query, (key, value))
+
+    def delete(self, key: str) -> None:
+        query = f"""
+            DELETE FROM {self.full_table_name}
+            WHERE key = ?
+        """
+        with self.conn:
+            self.conn.execute(query, (key,))
+
+    def exists(self, key: str) -> bool:
+        query = f"""
+            SELECT 1
+            FROM {self.full_table_name}
+            WHERE key = ?
+            LIMIT 1
+        """
+        cursor = self.conn.execute(query, (key,))
+        result = cursor.fetchone()
+        return result is not None
+
+    def clear(self) -> None:
+        query = f"""
+            DELETE FROM {self.full_table_name}
+        """
+        with self.conn:
+            self.conn.execute(query)
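+
+# A minimal usage sketch (editorial addition): the store keeps one table per
+# session, named f"{table_name}_{session_id}", inside the given SQLite file.
+#
+#     store = SQLiteEntityStore(session_id="demo", db_file="entities.db")
+#     store.set("Bob", "Bob runs the weekly standup.")
+#     assert store.exists("Bob")
+#     store.get("Bob")  # -> "Bob runs the weekly standup."
+#     store.clear()     # empties table memory_store_demo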
+ """ + cursor = self.conn.execute(query, (key,)) + result = cursor.fetchone() + if result is not None: + value = result[0] + return value + return default + + def set(self, key: str, value: Optional[str]) -> None: + if not value: + return self.delete(key) + query = f""" + INSERT OR REPLACE INTO {self.full_table_name} (key, value) + VALUES (?, ?) + """ + with self.conn: + self.conn.execute(query, (key, value)) + + def delete(self, key: str) -> None: + query = f""" + DELETE FROM {self.full_table_name} + WHERE key = ? + """ + with self.conn: + self.conn.execute(query, (key,)) + + def exists(self, key: str) -> bool: + query = f""" + SELECT 1 + FROM {self.full_table_name} + WHERE key = ? + LIMIT 1 + """ + cursor = self.conn.execute(query, (key,)) + result = cursor.fetchone() + return result is not None + + def clear(self) -> None: + query = f""" + DELETE FROM {self.full_table_name} + """ + with self.conn: + self.conn.execute(query) + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationEntityMemory(BaseChatMemory): + """Entity extractor & summarizer memory. + + Extracts named entities from the recent chat history and generates summaries. + With a swappable entity store, persisting entities across conversations. + Defaults to an in-memory entity store, and can be swapped out for a Redis, + SQLite, or other entity store. + """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + llm: BaseLanguageModel + entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT + entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT + + # Cache of recently detected entity names, if any + # It is updated when load_memory_variables is called: + entity_cache: list[str] = [] + + # Number of recent message pairs to consider when updating entities: + k: int = 3 + + chat_history_key: str = "history" + + # Store to manage entity-related data: + entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore) + + @property + def buffer(self) -> list[BaseMessage]: + """Access chat memory messages.""" + return self.chat_memory.messages + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. + + :meta private: + """ + return ["entities", self.chat_history_key] + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """ + Returns chat history and all generated entities with summaries if available, + and updates or clears the recent entity cache. + + New entity name can be found when calling this method, before the entity + summaries are generated, so the entity cache values may be empty if no entity + descriptions are generated yet. + """ + + # Create an LLMChain for predicting entity names from the recent chat history: + chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) + + if self.input_key is None: + prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) + else: + prompt_input_key = self.input_key + + # Extract an arbitrary window of the last message pairs from + # the chat history, where the hyperparameter k is the + # number of message pairs: + buffer_string = get_buffer_string( + self.buffer[-self.k * 2 :], + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + # Generates a comma-separated list of named entities, + # e.g. 
"Jane, White House, UFO" + # or "NONE" if no named entities are extracted: + output = chain.predict( + history=buffer_string, + input=inputs[prompt_input_key], + ) + + # If no named entities are extracted, assigns an empty list. + if output.strip() == "NONE": + entities = [] + else: + # Make a list of the extracted entities: + entities = [w.strip() for w in output.split(",")] + + # Make a dictionary of entities with summary if exists: + entity_summaries = {} + + for entity in entities: + entity_summaries[entity] = self.entity_store.get(entity, "") + + # Replaces the entity name cache with the most recently discussed entities, + # or if no entities were extracted, clears the cache: + self.entity_cache = entities + + # Should we return as message objects or as a string? + if self.return_messages: + # Get last `k` pair of chat messages: + buffer: Any = self.buffer[-self.k * 2 :] + else: + # Reuse the string we made earlier: + buffer = buffer_string + + return { + self.chat_history_key: buffer, + "entities": entity_summaries, + } + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """ + Save context from this conversation history to the entity store. + + Generates a summary for each entity in the entity cache by prompting + the model, and saves these summaries to the entity store. + """ + + super().save_context(inputs, outputs) + + if self.input_key is None: + prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) + else: + prompt_input_key = self.input_key + + # Extract an arbitrary window of the last message pairs from + # the chat history, where the hyperparameter k is the + # number of message pairs: + buffer_string = get_buffer_string( + self.buffer[-self.k * 2 :], + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + input_data = inputs[prompt_input_key] + + # Create an LLMChain for predicting entity summarization from the context + chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt) + + # Generate new summaries for entities and save them in the entity store + for entity in self.entity_cache: + # Get existing summary if it exists + existing_summary = self.entity_store.get(entity, "") + output = chain.predict( + summary=existing_summary, + entity=entity, + history=buffer_string, + input=input_data, + ) + # Save the updated summary to the entity store + self.entity_store.set(entity, output.strip()) + + def clear(self) -> None: + """Clear memory contents.""" + self.chat_memory.clear() + self.entity_cache.clear() + self.entity_store.clear() diff --git a/venv/Lib/site-packages/langchain/memory/kg.py b/venv/Lib/site-packages/langchain/memory/kg.py new file mode 100644 index 00000000..ae8ed07f --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/kg.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.memory.kg import ConversationKGMemory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ConversationKGMemory": "langchain_community.memory.kg"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ConversationKGMemory", +] diff --git a/venv/Lib/site-packages/langchain/memory/motorhead_memory.py b/venv/Lib/site-packages/langchain/memory/motorhead_memory.py new file mode 100644 index 00000000..744aa743 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/motorhead_memory.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.memory.motorhead_memory import MotorheadMemory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MotorheadMemory": "langchain_community.memory.motorhead_memory"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MotorheadMemory", +] diff --git a/venv/Lib/site-packages/langchain/memory/prompt.py b/venv/Lib/site-packages/langchain/memory/prompt.py new file mode 100644 index 00000000..c16e8e24 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/prompt.py @@ -0,0 +1,165 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +_DEFAULT_ENTITY_MEMORY_CONVERSATION_TEMPLATE = """You are an assistant to a human, powered by a large language model trained by OpenAI. + +You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. + +You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics. + +Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist. + +Context: +{entities} + +Current conversation: +{history} +Last line: +Human: {input} +You:""" + +ENTITY_MEMORY_CONVERSATION_TEMPLATE = PromptTemplate( + input_variables=["entities", "history", "input"], + template=_DEFAULT_ENTITY_MEMORY_CONVERSATION_TEMPLATE, +) + +_DEFAULT_SUMMARIZER_TEMPLATE = """Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary. + +EXAMPLE +Current summary: +The human asks what the AI thinks of artificial intelligence. 
The AI thinks artificial intelligence is a force for good. + +New lines of conversation: +Human: Why do you think artificial intelligence is a force for good? +AI: Because artificial intelligence will help humans reach their full potential. + +New summary: +The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential. +END OF EXAMPLE + +Current summary: +{summary} + +New lines of conversation: +{new_lines} + +New summary:""" +SUMMARY_PROMPT = PromptTemplate( + input_variables=["summary", "new_lines"], template=_DEFAULT_SUMMARIZER_TEMPLATE +) + +_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places. + +The conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line. + +Return the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation). + +EXAMPLE +Conversation history: +Person #1: how's it going today? +AI: "It's going great! How about you?" +Person #1: good! busy working on Langchain. lots to do. +AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?" +Last line: +Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. +Output: Langchain +END OF EXAMPLE + +EXAMPLE +Conversation history: +Person #1: how's it going today? +AI: "It's going great! How about you?" +Person #1: good! busy working on Langchain. lots to do. +AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?" +Last line: +Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Person #2. +Output: Langchain, Person #2 +END OF EXAMPLE + +Conversation history (for reference only): +{history} +Last line of conversation (for extraction): +Human: {input} + +Output:""" +ENTITY_EXTRACTION_PROMPT = PromptTemplate( + input_variables=["history", "input"], template=_DEFAULT_ENTITY_EXTRACTION_TEMPLATE +) + +_DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE = """You are an AI assistant helping a human keep track of facts about relevant people, places, and concepts in their life. Update the summary of the provided entity in the "Entity" section based on the last line of your conversation with the human. If you are writing the summary for the first time, return a single sentence. +The update should only include facts that are relayed in the last line of conversation about the provided entity, and should only contain facts about the provided entity. + +If there is no new information about the provided entity or the information is not worth noting (not an important or relevant fact to remember long-term), return the existing summary unchanged. 
+ +Full conversation history (for context): +{history} + +Entity to summarize: +{entity} + +Existing summary of {entity}: +{summary} + +Last line of conversation: +Human: {input} +Updated summary:""" + +ENTITY_SUMMARIZATION_PROMPT = PromptTemplate( + input_variables=["entity", "summary", "history", "input"], + template=_DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE, +) + + +KG_TRIPLE_DELIMITER = "<|>" +_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = ( + "You are a networked intelligence helping a human track knowledge triples" + " about all relevant people, things, concepts, etc. and integrating" + " them with your knowledge stored within your weights" + " as well as that stored in a knowledge graph." + " Extract all of the knowledge triples from the last line of conversation." + " A knowledge triple is a clause that contains a subject, a predicate," + " and an object. The subject is the entity being described," + " the predicate is the property of the subject that is being" + " described, and the object is the value of the property.\n\n" + "EXAMPLE\n" + "Conversation history:\n" + "Person #1: Did you hear aliens landed in Area 51?\n" + "AI: No, I didn't hear that. What do you know about Area 51?\n" + "Person #1: It's a secret military base in Nevada.\n" + "AI: What do you know about Nevada?\n" + "Last line of conversation:\n" + "Person #1: It's a state in the US. It's also the number 1 producer of gold in the US.\n\n" + f"Output: (Nevada, is a, state){KG_TRIPLE_DELIMITER}(Nevada, is in, US)" + f"{KG_TRIPLE_DELIMITER}(Nevada, is the number 1 producer of, gold)\n" + "END OF EXAMPLE\n\n" + "EXAMPLE\n" + "Conversation history:\n" + "Person #1: Hello.\n" + "AI: Hi! How are you?\n" + "Person #1: I'm good. How are you?\n" + "AI: I'm good too.\n" + "Last line of conversation:\n" + "Person #1: I'm going to the store.\n\n" + "Output: NONE\n" + "END OF EXAMPLE\n\n" + "EXAMPLE\n" + "Conversation history:\n" + "Person #1: What do you know about Descartes?\n" + "AI: Descartes was a French philosopher, mathematician, and scientist who lived in the 17th century.\n" + "Person #1: The Descartes I'm referring to is a standup comedian and interior designer from Montreal.\n" + "AI: Oh yes, He is a comedian and an interior designer. He has been in the industry for 30 years. His favorite food is baked bean pie.\n" + "Last line of conversation:\n" + "Person #1: Oh huh. 
I know Descartes likes to drive antique scooters and play the mandolin.\n" + f"Output: (Descartes, likes to drive, antique scooters){KG_TRIPLE_DELIMITER}(Descartes, plays, mandolin)\n" + "END OF EXAMPLE\n\n" + "Conversation history (for reference only):\n" + "{history}" + "\nLast line of conversation (for extraction):\n" + "Human: {input}\n\n" + "Output:" +) + +KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT = PromptTemplate( + input_variables=["history", "input"], + template=_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE, +) diff --git a/venv/Lib/site-packages/langchain/memory/readonly.py b/venv/Lib/site-packages/langchain/memory/readonly.py new file mode 100644 index 00000000..0e03c924 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/readonly.py @@ -0,0 +1,26 @@ +from typing import Any + +from langchain_core.memory import BaseMemory + + +class ReadOnlySharedMemory(BaseMemory): + """Memory wrapper that is read-only and cannot be changed.""" + + memory: BaseMemory + + @property + def memory_variables(self) -> list[str]: + """Return memory variables.""" + return self.memory.memory_variables + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: + """Load memory variables from memory.""" + return self.memory.load_memory_variables(inputs) + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Nothing should be saved or changed""" + pass + + def clear(self) -> None: + """Nothing to clear, got a memory like a vault.""" + pass diff --git a/venv/Lib/site-packages/langchain/memory/simple.py b/venv/Lib/site-packages/langchain/memory/simple.py new file mode 100644 index 00000000..c6da1f89 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/simple.py @@ -0,0 +1,26 @@ +from typing import Any + +from langchain_core.memory import BaseMemory + + +class SimpleMemory(BaseMemory): + """Simple memory for storing context or other information that shouldn't + ever change between prompts. 
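+
+    Example (a minimal sketch; the key and value are arbitrary placeholders):
+
+        .. code-block:: python
+
+            from langchain.memory.simple import SimpleMemory
+
+            memory = SimpleMemory(memories={"app_name": "Sunbird AI Assistant"})
+            memory.load_memory_variables({})
+            # -> {"app_name": "Sunbird AI Assistant"}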
+ """ + + memories: dict[str, Any] = dict() + + @property + def memory_variables(self) -> list[str]: + return list(self.memories.keys()) + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: + return self.memories + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Nothing should be saved or changed, my memory is set in stone.""" + pass + + def clear(self) -> None: + """Nothing to clear, got a memory like a vault.""" + pass diff --git a/venv/Lib/site-packages/langchain/memory/summary.py b/venv/Lib/site-packages/langchain/memory/summary.py new file mode 100644 index 00000000..c2bb083c --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/summary.py @@ -0,0 +1,138 @@ +from __future__ import annotations + +from typing import Any + +from langchain_core._api import deprecated +from langchain_core.caches import BaseCache as BaseCache # For model_rebuild +from langchain_core.callbacks import Callbacks as Callbacks # For model_rebuild +from langchain_core.chat_history import BaseChatMessageHistory +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string +from langchain_core.prompts import BasePromptTemplate +from langchain_core.utils import pre_init +from pydantic import BaseModel + +from langchain.chains.llm import LLMChain +from langchain.memory.chat_memory import BaseChatMemory +from langchain.memory.prompt import SUMMARY_PROMPT + + +@deprecated( + since="0.2.12", + removal="1.0", + message=( + "Refer here for how to incorporate summaries of conversation history: " + "https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/" # noqa: E501 + ), +) +class SummarizerMixin(BaseModel): + """Mixin for summarizer.""" + + human_prefix: str = "Human" + ai_prefix: str = "AI" + llm: BaseLanguageModel + prompt: BasePromptTemplate = SUMMARY_PROMPT + summary_message_cls: type[BaseMessage] = SystemMessage + + def predict_new_summary( + self, messages: list[BaseMessage], existing_summary: str + ) -> str: + new_lines = get_buffer_string( + messages, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + chain = LLMChain(llm=self.llm, prompt=self.prompt) + return chain.predict(summary=existing_summary, new_lines=new_lines) + + async def apredict_new_summary( + self, messages: list[BaseMessage], existing_summary: str + ) -> str: + new_lines = get_buffer_string( + messages, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + chain = LLMChain(llm=self.llm, prompt=self.prompt) + return await chain.apredict(summary=existing_summary, new_lines=new_lines) + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin): + """Continually summarizes the conversation history. + + The summary is updated after each conversation turn. + The implementations returns a summary of the conversation history which + can be used to provide context to the model. 
+ """ + + buffer: str = "" + memory_key: str = "history" #: :meta private: + + @classmethod + def from_messages( + cls, + llm: BaseLanguageModel, + chat_memory: BaseChatMessageHistory, + *, + summarize_step: int = 2, + **kwargs: Any, + ) -> ConversationSummaryMemory: + obj = cls(llm=llm, chat_memory=chat_memory, **kwargs) + for i in range(0, len(obj.chat_memory.messages), summarize_step): + obj.buffer = obj.predict_new_summary( + obj.chat_memory.messages[i : i + summarize_step], obj.buffer + ) + return obj + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. + + :meta private: + """ + return [self.memory_key] + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer.""" + if self.return_messages: + buffer: Any = [self.summary_message_cls(content=self.buffer)] + else: + buffer = self.buffer + return {self.memory_key: buffer} + + @pre_init + def validate_prompt_input_variables(cls, values: dict) -> dict: + """Validate that prompt input variables are consistent.""" + prompt_variables = values["prompt"].input_variables + expected_keys = {"summary", "new_lines"} + if expected_keys != set(prompt_variables): + raise ValueError( + "Got unexpected prompt input variables. The prompt expects " + f"{prompt_variables}, but it should have {expected_keys}." + ) + return values + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer.""" + super().save_context(inputs, outputs) + self.buffer = self.predict_new_summary( + self.chat_memory.messages[-2:], self.buffer + ) + + def clear(self) -> None: + """Clear memory contents.""" + super().clear() + self.buffer = "" + + +ConversationSummaryMemory.model_rebuild() diff --git a/venv/Lib/site-packages/langchain/memory/summary_buffer.py b/venv/Lib/site-packages/langchain/memory/summary_buffer.py new file mode 100644 index 00000000..ed5b7910 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/summary_buffer.py @@ -0,0 +1,139 @@ +from typing import Any, Union + +from langchain_core._api import deprecated +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.utils import pre_init + +from langchain.memory.chat_memory import BaseChatMemory +from langchain.memory.summary import SummarizerMixin + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin): + """Buffer with summarizer for storing conversation memory. + + Provides a running summary of the conversation together with the most recent + messages in the conversation under the constraint that the total number of + tokens in the conversation does not exceed a certain limit. + """ + + max_token_limit: int = 2000 + moving_summary_buffer: str = "" + memory_key: str = "history" + + @property + def buffer(self) -> Union[str, list[BaseMessage]]: + """String buffer of memory.""" + return self.load_memory_variables({})[self.memory_key] + + async def abuffer(self) -> Union[str, list[BaseMessage]]: + """Async memory buffer.""" + memory_variables = await self.aload_memory_variables({}) + return memory_variables[self.memory_key] + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. 
+ + :meta private: + """ + return [self.memory_key] + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer.""" + buffer = self.chat_memory.messages + if self.moving_summary_buffer != "": + first_messages: list[BaseMessage] = [ + self.summary_message_cls(content=self.moving_summary_buffer) + ] + buffer = first_messages + buffer + if self.return_messages: + final_buffer: Any = buffer + else: + final_buffer = get_buffer_string( + buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix + ) + return {self.memory_key: final_buffer} + + async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Asynchronously return key-value pairs given the text input to the chain.""" + buffer = await self.chat_memory.aget_messages() + if self.moving_summary_buffer != "": + first_messages: list[BaseMessage] = [ + self.summary_message_cls(content=self.moving_summary_buffer) + ] + buffer = first_messages + buffer + if self.return_messages: + final_buffer: Any = buffer + else: + final_buffer = get_buffer_string( + buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix + ) + return {self.memory_key: final_buffer} + + @pre_init + def validate_prompt_input_variables(cls, values: dict) -> dict: + """Validate that prompt input variables are consistent.""" + prompt_variables = values["prompt"].input_variables + expected_keys = {"summary", "new_lines"} + if expected_keys != set(prompt_variables): + raise ValueError( + "Got unexpected prompt input variables. The prompt expects " + f"{prompt_variables}, but it should have {expected_keys}." + ) + return values + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer.""" + super().save_context(inputs, outputs) + self.prune() + + async def asave_context( + self, inputs: dict[str, Any], outputs: dict[str, str] + ) -> None: + """Asynchronously save context from this conversation to buffer.""" + await super().asave_context(inputs, outputs) + await self.aprune() + + def prune(self) -> None: + """Prune buffer if it exceeds max token limit""" + buffer = self.chat_memory.messages + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + if curr_buffer_length > self.max_token_limit: + pruned_memory = [] + while curr_buffer_length > self.max_token_limit: + pruned_memory.append(buffer.pop(0)) + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + self.moving_summary_buffer = self.predict_new_summary( + pruned_memory, self.moving_summary_buffer + ) + + async def aprune(self) -> None: + """Asynchronously prune buffer if it exceeds max token limit""" + buffer = self.chat_memory.messages + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + if curr_buffer_length > self.max_token_limit: + pruned_memory = [] + while curr_buffer_length > self.max_token_limit: + pruned_memory.append(buffer.pop(0)) + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + self.moving_summary_buffer = await self.apredict_new_summary( + pruned_memory, self.moving_summary_buffer + ) + + def clear(self) -> None: + """Clear memory contents.""" + super().clear() + self.moving_summary_buffer = "" + + async def aclear(self) -> None: + """Asynchronously clear memory contents.""" + await super().aclear() + self.moving_summary_buffer = "" diff --git a/venv/Lib/site-packages/langchain/memory/token_buffer.py b/venv/Lib/site-packages/langchain/memory/token_buffer.py new file mode 100644 index 
00000000..527ac7eb --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/token_buffer.py @@ -0,0 +1,72 @@ +from typing import Any + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage, get_buffer_string + +from langchain.memory.chat_memory import BaseChatMemory + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationTokenBufferMemory(BaseChatMemory): + """Conversation chat memory with token limit. + + Keeps only the most recent messages in the conversation under the constraint + that the total number of tokens in the conversation does not exceed a certain limit. + """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + llm: BaseLanguageModel + memory_key: str = "history" + max_token_limit: int = 2000 + + @property + def buffer(self) -> Any: + """String buffer of memory.""" + return self.buffer_as_messages if self.return_messages else self.buffer_as_str + + @property + def buffer_as_str(self) -> str: + """Exposes the buffer as a string in case return_messages is False.""" + return get_buffer_string( + self.chat_memory.messages, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + @property + def buffer_as_messages(self) -> list[BaseMessage]: + """Exposes the buffer as a list of messages in case return_messages is True.""" + return self.chat_memory.messages + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. + + :meta private: + """ + return [self.memory_key] + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer.""" + return {self.memory_key: self.buffer} + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer. Pruned.""" + super().save_context(inputs, outputs) + # Prune buffer if it exceeds max token limit + buffer = self.chat_memory.messages + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + if curr_buffer_length > self.max_token_limit: + pruned_memory = [] + while curr_buffer_length > self.max_token_limit: + pruned_memory.append(buffer.pop(0)) + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) diff --git a/venv/Lib/site-packages/langchain/memory/utils.py b/venv/Lib/site-packages/langchain/memory/utils.py new file mode 100644 index 00000000..b3c8a5bb --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/utils.py @@ -0,0 +1,20 @@ +from typing import Any + + +def get_prompt_input_key(inputs: dict[str, Any], memory_variables: list[str]) -> str: + """ + Get the prompt input key. + + Args: + inputs: Dict[str, Any] + memory_variables: List[str] + + Returns: + A prompt input key. + """ + # "stop" is a special key that can be passed as input but is not used to + # format the prompt. 
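+    # Hypothetical illustration: with memory_variables == ["history"] and
+    # inputs == {"input": "hi", "history": "...", "stop": ["\n"]}, the set
+    # difference below leaves only "input", which is returned as the key.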
+ prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"])) + if len(prompt_input_keys) != 1: + raise ValueError(f"One input key expected got {prompt_input_keys}") + return prompt_input_keys[0] diff --git a/venv/Lib/site-packages/langchain/memory/vectorstore.py b/venv/Lib/site-packages/langchain/memory/vectorstore.py new file mode 100644 index 00000000..de4dad54 --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/vectorstore.py @@ -0,0 +1,113 @@ +"""Class for a VectorStore-backed memory object.""" + +from collections.abc import Sequence +from typing import Any, Optional, Union + +from langchain_core._api import deprecated +from langchain_core.documents import Document +from langchain_core.vectorstores import VectorStoreRetriever +from pydantic import Field + +from langchain.memory.chat_memory import BaseMemory +from langchain.memory.utils import get_prompt_input_key + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class VectorStoreRetrieverMemory(BaseMemory): + """Store the conversation history in a vector store and retrieves the relevant + parts of past conversation based on the input. + """ + + retriever: VectorStoreRetriever = Field(exclude=True) + """VectorStoreRetriever object to connect to.""" + + memory_key: str = "history" #: :meta private: + """Key name to locate the memories in the result of load_memory_variables.""" + + input_key: Optional[str] = None + """Key name to index the inputs to load_memory_variables.""" + + return_docs: bool = False + """Whether or not to return the result of querying the database directly.""" + + exclude_input_keys: Sequence[str] = Field(default_factory=tuple) + """Input keys to exclude in addition to memory key when constructing the document""" + + @property + def memory_variables(self) -> list[str]: + """The list of keys emitted from the load_memory_variables method.""" + return [self.memory_key] + + def _get_prompt_input_key(self, inputs: dict[str, Any]) -> str: + """Get the input key for the prompt.""" + if self.input_key is None: + return get_prompt_input_key(inputs, self.memory_variables) + return self.input_key + + def _documents_to_memory_variables( + self, docs: list[Document] + ) -> dict[str, Union[list[Document], str]]: + result: Union[list[Document], str] + if not self.return_docs: + result = "\n".join([doc.page_content for doc in docs]) + else: + result = docs + return {self.memory_key: result} + + def load_memory_variables( + self, inputs: dict[str, Any] + ) -> dict[str, Union[list[Document], str]]: + """Return history buffer.""" + input_key = self._get_prompt_input_key(inputs) + query = inputs[input_key] + docs = self.retriever.invoke(query) + return self._documents_to_memory_variables(docs) + + async def aload_memory_variables( + self, inputs: dict[str, Any] + ) -> dict[str, Union[list[Document], str]]: + """Return history buffer.""" + input_key = self._get_prompt_input_key(inputs) + query = inputs[input_key] + docs = await self.retriever.ainvoke(query) + return self._documents_to_memory_variables(docs) + + def _form_documents( + self, inputs: dict[str, Any], outputs: dict[str, str] + ) -> list[Document]: + """Format context from this conversation to buffer.""" + # Each document should only include the current turn, not the chat history + exclude = set(self.exclude_input_keys) + exclude.add(self.memory_key) + filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude} 
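+        # Hypothetical illustration: inputs == {"input": "hi"} and
+        # outputs == {"response": "hello"} produce a single Document with
+        # page_content == "input: hi\nresponse: hello".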
+ texts = [ + f"{k}: {v}" + for k, v in list(filtered_inputs.items()) + list(outputs.items()) + ] + page_content = "\n".join(texts) + return [Document(page_content=page_content)] + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer.""" + documents = self._form_documents(inputs, outputs) + self.retriever.add_documents(documents) + + async def asave_context( + self, inputs: dict[str, Any], outputs: dict[str, str] + ) -> None: + """Save context from this conversation to buffer.""" + documents = self._form_documents(inputs, outputs) + await self.retriever.aadd_documents(documents) + + def clear(self) -> None: + """Nothing to clear.""" + + async def aclear(self) -> None: + """Nothing to clear.""" diff --git a/venv/Lib/site-packages/langchain/memory/vectorstore_token_buffer_memory.py b/venv/Lib/site-packages/langchain/memory/vectorstore_token_buffer_memory.py new file mode 100644 index 00000000..e0c04f7a --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/vectorstore_token_buffer_memory.py @@ -0,0 +1,184 @@ +""" +Class for a conversation memory buffer with older messages stored in a vectorstore . + +This implements a conversation memory in which the messages are stored in a memory +buffer up to a specified token limit. When the limit is exceeded, older messages are +saved to a vectorstore backing database. The vectorstore can be made persistent across +sessions. +""" + +import warnings +from datetime import datetime +from typing import Any + +from langchain_core.messages import BaseMessage +from langchain_core.prompts.chat import SystemMessagePromptTemplate +from langchain_core.vectorstores import VectorStoreRetriever +from pydantic import Field, PrivateAttr + +from langchain.memory import ConversationTokenBufferMemory, VectorStoreRetrieverMemory +from langchain.memory.chat_memory import BaseChatMemory +from langchain.text_splitter import RecursiveCharacterTextSplitter + +DEFAULT_HISTORY_TEMPLATE = """ +Current date and time: {current_time}. + +Potentially relevant timestamped excerpts of previous conversations (you +do not need to use these if irrelevant): +{previous_history} + +""" + +TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S %Z" + + +class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory): + """Conversation chat memory with token limit and vectordb backing. + + load_memory_variables() will return a dict with the key "history". + It contains background information retrieved from the vector store + plus recent lines of the current conversation. + + To help the LLM understand the part of the conversation stored in the + vectorstore, each interaction is timestamped and the current date and + time is also provided in the history. A side effect of this is that the + LLM will have access to the current date and time. + + Initialization arguments: + + This class accepts all the initialization arguments of + ConversationTokenBufferMemory, such as `llm`. In addition, it + accepts the following additional arguments + + retriever: (required) A VectorStoreRetriever object to use + as the vector backing store + + split_chunk_size: (optional, 1000) Token chunk split size + for long messages generated by the AI + + previous_history_template: (optional) Template used to format + the contents of the prompt history + + + Example using ChromaDB: + + .. 
code-block:: python
+
+            from langchain.memory.vectorstore_token_buffer_memory import (
+                ConversationVectorStoreTokenBufferMemory
+            )
+            from langchain_chroma import Chroma
+            from langchain_community.embeddings import HuggingFaceInstructEmbeddings
+            from langchain_openai import OpenAI
+
+            embedder = HuggingFaceInstructEmbeddings(
+                query_instruction="Represent the query for retrieval: "
+            )
+            chroma = Chroma(collection_name="demo",
+                            embedding_function=embedder,
+                            collection_metadata={"hnsw:space": "cosine"},
+                            )
+
+            retriever = chroma.as_retriever(
+                search_type="similarity_score_threshold",
+                search_kwargs={
+                    'k': 5,
+                    'score_threshold': 0.75,
+                },
+            )
+
+            conversation_memory = ConversationVectorStoreTokenBufferMemory(
+                return_messages=True,
+                llm=OpenAI(),
+                retriever=retriever,
+                max_token_limit=1000,
+            )
+
+            conversation_memory.save_context({"Human": "Hi there"},
+                                             {"AI": "Nice to meet you!"}
+                                             )
+            conversation_memory.save_context({"Human": "Nice day isn't it?"},
+                                             {"AI": "I love Wednesdays."}
+                                             )
+            conversation_memory.load_memory_variables({"input": "What time is it?"})
+
+    """
+
+    retriever: VectorStoreRetriever = Field(exclude=True)
+    memory_key: str = "history"
+    previous_history_template: str = DEFAULT_HISTORY_TEMPLATE
+    split_chunk_size: int = 1000
+
+    _memory_retriever: VectorStoreRetrieverMemory = PrivateAttr(default=None)  # type: ignore[assignment]
+    _timestamps: list[datetime] = PrivateAttr(default_factory=list)
+
+    @property
+    def memory_retriever(self) -> VectorStoreRetrieverMemory:
+        """Return a memory retriever from the passed retriever object."""
+        if self._memory_retriever is not None:
+            return self._memory_retriever
+        self._memory_retriever = VectorStoreRetrieverMemory(retriever=self.retriever)
+        return self._memory_retriever
+
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
+        """Return history and memory buffer."""
+        try:
+            with warnings.catch_warnings():
+                warnings.simplefilter("ignore")
+                memory_variables = self.memory_retriever.load_memory_variables(inputs)
+                previous_history = memory_variables[self.memory_retriever.memory_key]
+        except AssertionError:  # happens when db is empty
+            previous_history = ""
+        current_history = super().load_memory_variables(inputs)
+        template = SystemMessagePromptTemplate.from_template(
+            self.previous_history_template
+        )
+        messages = [
+            template.format(
+                previous_history=previous_history,
+                current_time=datetime.now().astimezone().strftime(TIMESTAMP_FORMAT),
+            )
+        ]
+        messages.extend(current_history[self.memory_key])
+        return {self.memory_key: messages}
+
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
+        """Save context from this conversation to buffer. Pruned."""
+        BaseChatMemory.save_context(self, inputs, outputs)
+        self._timestamps.append(datetime.now().astimezone())
+        # Prune buffer if it exceeds max token limit
+        buffer = self.chat_memory.messages
+        curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
+        if curr_buffer_length > self.max_token_limit:
+            while curr_buffer_length > self.max_token_limit:
+                self._pop_and_store_interaction(buffer)
+                curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
+
+    def save_remainder(self) -> None:
+        """
+        Save the remainder of the conversation buffer to the vector store.
+
+        This is useful if you have made the vectorstore persistent, in which
+        case this can be called before the end of the session to store the
+        remainder of the conversation.
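+
+        Example (a sketch, assuming ``conversation_memory`` was built as in
+        the class-level example above, against a persistent vectorstore):
+
+        .. code-block:: python
+
+            # Flush any turns still in the token buffer into the store.
+            conversation_memory.save_remainder()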
+ """ + buffer = self.chat_memory.messages + while len(buffer) > 0: + self._pop_and_store_interaction(buffer) + + def _pop_and_store_interaction(self, buffer: list[BaseMessage]) -> None: + input = buffer.pop(0) + output = buffer.pop(0) + timestamp = self._timestamps.pop(0).strftime(TIMESTAMP_FORMAT) + # Split AI output into smaller chunks to avoid creating documents + # that will overflow the context window + ai_chunks = self._split_long_ai_text(str(output.content)) + for index, chunk in enumerate(ai_chunks): + self.memory_retriever.save_context( + {"Human": f"<{timestamp}/00> {str(input.content)}"}, + {"AI": f"<{timestamp}/{index:02}> {chunk}"}, + ) + + def _split_long_ai_text(self, text: str) -> list[str]: + splitter = RecursiveCharacterTextSplitter(chunk_size=self.split_chunk_size) + return [chunk.page_content for chunk in splitter.create_documents([text])] diff --git a/venv/Lib/site-packages/langchain/memory/zep_memory.py b/venv/Lib/site-packages/langchain/memory/zep_memory.py new file mode 100644 index 00000000..17b0d42b --- /dev/null +++ b/venv/Lib/site-packages/langchain/memory/zep_memory.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.memory.zep_memory import ZepMemory + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ZepMemory": "langchain_community.memory.zep_memory"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ZepMemory", +] diff --git a/venv/Lib/site-packages/langchain/model_laboratory.py b/venv/Lib/site-packages/langchain/model_laboratory.py new file mode 100644 index 00000000..6ade9dbc --- /dev/null +++ b/venv/Lib/site-packages/langchain/model_laboratory.py @@ -0,0 +1,98 @@ +"""Experiment with different models.""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import Optional + +from langchain_core.language_models.llms import BaseLLM +from langchain_core.prompts.prompt import PromptTemplate +from langchain_core.utils.input import get_color_mapping, print_text + +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain + + +class ModelLaboratory: + """A utility to experiment with and compare the performance of different models.""" + + def __init__(self, chains: Sequence[Chain], names: Optional[list[str]] = None): + """Initialize the ModelLaboratory with chains to experiment with. + + Args: + chains (Sequence[Chain]): A sequence of chains to experiment with. + Each chain must have exactly one input and one output variable. + names (Optional[List[str]]): Optional list of names corresponding to each chain. + If provided, its length must match the number of chains. + + + Raises: + ValueError: If any chain is not an instance of `Chain`. + ValueError: If a chain does not have exactly one input variable. + ValueError: If a chain does not have exactly one output variable. + ValueError: If the length of `names` does not match the number of chains. + """ + for chain in chains: + if not isinstance(chain, Chain): + raise ValueError( + "ModelLaboratory should now be initialized with Chains. 
" + "If you want to initialize with LLMs, use the `from_llms` method " + "instead (`ModelLaboratory.from_llms(...)`)" + ) + if len(chain.input_keys) != 1: + raise ValueError( + "Currently only support chains with one input variable, " + f"got {chain.input_keys}" + ) + if len(chain.output_keys) != 1: + raise ValueError( + "Currently only support chains with one output variable, " + f"got {chain.output_keys}" + ) + if names is not None: + if len(names) != len(chains): + raise ValueError("Length of chains does not match length of names.") + self.chains = chains + chain_range = [str(i) for i in range(len(self.chains))] + self.chain_colors = get_color_mapping(chain_range) + self.names = names + + @classmethod + def from_llms( + cls, llms: list[BaseLLM], prompt: Optional[PromptTemplate] = None + ) -> ModelLaboratory: + """Initialize the ModelLaboratory with LLMs and an optional prompt. + + Args: + llms (List[BaseLLM]): A list of LLMs to experiment with. + prompt (Optional[PromptTemplate]): An optional prompt to use with the LLMs. + If provided, the prompt must contain exactly one input variable. + + Returns: + ModelLaboratory: An instance of `ModelLaboratory` initialized with LLMs. + """ + if prompt is None: + prompt = PromptTemplate(input_variables=["_input"], template="{_input}") + chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms] + names = [str(llm) for llm in llms] + return cls(chains, names=names) + + def compare(self, text: str) -> None: + """Compare model outputs on an input text. + + If a prompt was provided with starting the laboratory, then this text will be + fed into the prompt. If no prompt was provided, then the input text is the + entire prompt. + + Args: + text: input text to run all models on. + """ + print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201 + for i, chain in enumerate(self.chains): + if self.names is not None: + name = self.names[i] + else: + name = str(chain) + print_text(name, end="\n") + output = chain.run(text) + print_text(output, color=self.chain_colors[str(i)], end="\n\n") diff --git a/venv/Lib/site-packages/langchain/output_parsers/__init__.py b/venv/Lib/site-packages/langchain/output_parsers/__init__.py new file mode 100644 index 00000000..c06b7713 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/__init__.py @@ -0,0 +1,87 @@ +"""**OutputParser** classes parse the output of an LLM call. + +**Class hierarchy:** + +.. code-block:: + + BaseLLMOutputParser --> BaseOutputParser --> OutputParser # ListOutputParser, PydanticOutputParser + +**Main helpers:** + +.. 
code-block:: + + Serializable, Generation, PromptValue +""" # noqa: E501 + +from typing import TYPE_CHECKING, Any + +from langchain_core.output_parsers import ( + CommaSeparatedListOutputParser, + ListOutputParser, + MarkdownListOutputParser, + NumberedListOutputParser, + PydanticOutputParser, + XMLOutputParser, +) +from langchain_core.output_parsers.openai_tools import ( + JsonOutputKeyToolsParser, + JsonOutputToolsParser, + PydanticToolsParser, +) + +from langchain._api import create_importer +from langchain.output_parsers.boolean import BooleanOutputParser +from langchain.output_parsers.combining import CombiningOutputParser +from langchain.output_parsers.datetime import DatetimeOutputParser +from langchain.output_parsers.enum import EnumOutputParser +from langchain.output_parsers.fix import OutputFixingParser +from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser +from langchain.output_parsers.regex import RegexParser +from langchain.output_parsers.regex_dict import RegexDictParser +from langchain.output_parsers.retry import RetryOutputParser, RetryWithErrorOutputParser +from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser +from langchain.output_parsers.yaml import YamlOutputParser + +if TYPE_CHECKING: + from langchain_community.output_parsers.rail_parser import GuardrailsOutputParser + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BooleanOutputParser", + "CombiningOutputParser", + "CommaSeparatedListOutputParser", + "DatetimeOutputParser", + "EnumOutputParser", + "GuardrailsOutputParser", + "ListOutputParser", + "MarkdownListOutputParser", + "NumberedListOutputParser", + "OutputFixingParser", + "PandasDataFrameOutputParser", + "PydanticOutputParser", + "RegexDictParser", + "RegexParser", + "ResponseSchema", + "RetryOutputParser", + "RetryWithErrorOutputParser", + "StructuredOutputParser", + "XMLOutputParser", + "JsonOutputToolsParser", + "PydanticToolsParser", + "JsonOutputKeyToolsParser", + "YamlOutputParser", +] diff --git a/venv/Lib/site-packages/langchain/output_parsers/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/output_parsers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..b6b5d387 Binary files /dev/null and b/venv/Lib/site-packages/langchain/output_parsers/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/output_parsers/__pycache__/boolean.cpython-312.pyc b/venv/Lib/site-packages/langchain/output_parsers/__pycache__/boolean.cpython-312.pyc new file mode 100644 index 00000000..e4f328ec Binary files /dev/null and b/venv/Lib/site-packages/langchain/output_parsers/__pycache__/boolean.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/output_parsers/__pycache__/combining.cpython-312.pyc b/venv/Lib/site-packages/langchain/output_parsers/__pycache__/combining.cpython-312.pyc new file mode 100644 index 00000000..23d3db2a Binary files /dev/null and b/venv/Lib/site-packages/langchain/output_parsers/__pycache__/combining.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/output_parsers/boolean.py b/venv/Lib/site-packages/langchain/output_parsers/boolean.py new file mode 100644 index 00000000..5114b975 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/boolean.py @@ -0,0 +1,51 @@ +import re + +from langchain_core.output_parsers import BaseOutputParser + + +class BooleanOutputParser(BaseOutputParser[bool]): + """Parse the output of an LLM call to a boolean.""" + + true_val: str = "YES" + """The string value that should be parsed as True.""" + false_val: str = "NO" + """The string value that should be parsed as False.""" + + def parse(self, text: str) -> bool: + """Parse the output of an LLM call to a boolean. + + Args: + text: output of a language model + + Returns: + boolean + """ + regexp = rf"\b({self.true_val}|{self.false_val})\b" + + truthy = { + val.upper() + for val in re.findall(regexp, text, flags=re.IGNORECASE | re.MULTILINE) + } + if self.true_val.upper() in truthy: + if self.false_val.upper() in truthy: + raise ValueError( + f"Ambiguous response. Both {self.true_val} and {self.false_val} " + f"in received: {text}." + ) + return True + elif self.false_val.upper() in truthy: + if self.true_val.upper() in truthy: + raise ValueError( + f"Ambiguous response. Both {self.true_val} and {self.false_val} " + f"in received: {text}." + ) + return False + raise ValueError( + f"BooleanOutputParser expected output value to include either " + f"{self.true_val} or {self.false_val}. Received {text}." + ) + + @property + def _type(self) -> str: + """Snake-case string identifier for an output parser type.""" + return "boolean_output_parser" diff --git a/venv/Lib/site-packages/langchain/output_parsers/combining.py b/venv/Lib/site-packages/langchain/output_parsers/combining.py new file mode 100644 index 00000000..295bd55f --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/combining.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +from typing import Any + +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.utils import pre_init + + +class CombiningOutputParser(BaseOutputParser[dict[str, Any]]): + """Combine multiple output parsers into one.""" + + parsers: list[BaseOutputParser] + + @classmethod + def is_lc_serializable(cls) -> bool: + return True + + @pre_init + def validate_parsers(cls, values: dict[str, Any]) -> dict[str, Any]: + """Validate the parsers.""" + parsers = values["parsers"] + if len(parsers) < 2: + raise ValueError("Must have at least two parsers") + for parser in parsers: + if parser._type == "combining": + raise ValueError("Cannot nest combining parsers") + if parser._type == "list": + raise ValueError("Cannot combine list parsers") + return values + + @property + def _type(self) -> str: + """Return the type key.""" + return "combining" + + def get_format_instructions(self) -> str: + """Instructions on how the LLM output should be formatted.""" + + initial = f"For your first output: {self.parsers[0].get_format_instructions()}" + subsequent = "\n".join( + f"Complete that output fully. 
Then produce another output, separated by two newline characters: {p.get_format_instructions()}" # noqa: E501 + for p in self.parsers[1:] + ) + return f"{initial}\n{subsequent}" + + def parse(self, text: str) -> dict[str, Any]: + """Parse the output of an LLM call.""" + texts = text.split("\n\n") + output = dict() + for txt, parser in zip(texts, self.parsers): + output.update(parser.parse(txt.strip())) + return output diff --git a/venv/Lib/site-packages/langchain/output_parsers/datetime.py b/venv/Lib/site-packages/langchain/output_parsers/datetime.py new file mode 100644 index 00000000..a2fd1944 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/datetime.py @@ -0,0 +1,57 @@ +import random +from datetime import datetime, timedelta + +from langchain_core.exceptions import OutputParserException +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.utils import comma_list + + +def _generate_random_datetime_strings( + pattern: str, + n: int = 3, + start_date: datetime = datetime(1, 1, 1), + end_date: datetime = datetime.now() + timedelta(days=3650), +) -> list[str]: + """Generates n random datetime strings conforming to the + given pattern within the specified date range. + + Pattern should be a string containing the desired format codes. + start_date and end_date should be datetime objects representing + the start and end of the date range. + """ + examples = [] + delta = end_date - start_date + for i in range(n): + random_delta = random.uniform(0, delta.total_seconds()) + dt = start_date + timedelta(seconds=random_delta) + date_string = dt.strftime(pattern) + examples.append(date_string) + return examples + + +class DatetimeOutputParser(BaseOutputParser[datetime]): + """Parse the output of an LLM call to a datetime.""" + + format: str = "%Y-%m-%dT%H:%M:%S.%fZ" + """The string value that used as the datetime format.""" + + def get_format_instructions(self) -> str: + examples = comma_list(_generate_random_datetime_strings(self.format)) + return ( + f"Write a datetime string that matches the " + f"following pattern: '{self.format}'.\n\n" + f"Examples: {examples}\n\n" + f"Return ONLY this string, no other words!" + ) + + def parse(self, response: str) -> datetime: + try: + return datetime.strptime(response.strip(), self.format) + except ValueError as e: + raise OutputParserException( + f"Could not parse datetime string: {response}" + ) from e + + @property + def _type(self) -> str: + return "datetime" diff --git a/venv/Lib/site-packages/langchain/output_parsers/enum.py b/venv/Lib/site-packages/langchain/output_parsers/enum.py new file mode 100644 index 00000000..7100872d --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/enum.py @@ -0,0 +1,39 @@ +from enum import Enum + +from langchain_core.exceptions import OutputParserException +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.utils import pre_init + + +class EnumOutputParser(BaseOutputParser[Enum]): + """Parse an output that is one of a set of values.""" + + enum: type[Enum] + """The enum to parse. 
Its values must be strings.""" + + @pre_init + def raise_deprecation(cls, values: dict) -> dict: + enum = values["enum"] + if not all(isinstance(e.value, str) for e in enum): + raise ValueError("Enum values must be strings") + return values + + @property + def _valid_values(self) -> list[str]: + return [e.value for e in self.enum] + + def parse(self, response: str) -> Enum: + try: + return self.enum(response.strip()) + except ValueError: + raise OutputParserException( + f"Response '{response}' is not one of the " + f"expected values: {self._valid_values}" + ) + + def get_format_instructions(self) -> str: + return f"Select one of the following options: {', '.join(self._valid_values)}" + + @property + def OutputType(self) -> type[Enum]: + return self.enum diff --git a/venv/Lib/site-packages/langchain/output_parsers/ernie_functions.py b/venv/Lib/site-packages/langchain/output_parsers/ernie_functions.py new file mode 100644 index 00000000..bfe0b590 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/ernie_functions.py @@ -0,0 +1,45 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.output_parsers.ernie_functions import ( + JsonKeyOutputFunctionsParser, + JsonOutputFunctionsParser, + OutputFunctionsParser, + PydanticAttrOutputFunctionsParser, + PydanticOutputFunctionsParser, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "JsonKeyOutputFunctionsParser": ( + "langchain_community.output_parsers.ernie_functions" + ), + "JsonOutputFunctionsParser": "langchain_community.output_parsers.ernie_functions", + "OutputFunctionsParser": "langchain_community.output_parsers.ernie_functions", + "PydanticAttrOutputFunctionsParser": ( + "langchain_community.output_parsers.ernie_functions" + ), + "PydanticOutputFunctionsParser": ( + "langchain_community.output_parsers.ernie_functions" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JsonKeyOutputFunctionsParser", + "JsonOutputFunctionsParser", + "OutputFunctionsParser", + "PydanticAttrOutputFunctionsParser", + "PydanticOutputFunctionsParser", +] diff --git a/venv/Lib/site-packages/langchain/output_parsers/fix.py b/venv/Lib/site-packages/langchain/output_parsers/fix.py new file mode 100644 index 00000000..c5c3921a --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/fix.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +from typing import Annotated, Any, TypeVar, Union + +from langchain_core.exceptions import OutputParserException +from langchain_core.output_parsers import BaseOutputParser, StrOutputParser +from langchain_core.prompts import BasePromptTemplate +from langchain_core.runnables import Runnable, RunnableSerializable +from pydantic import SkipValidation +from typing_extensions import TypedDict + +from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT + +T = TypeVar("T") + + +class OutputFixingParserRetryChainInput(TypedDict, total=False): + instructions: str + completion: str + error: str + + +class OutputFixingParser(BaseOutputParser[T]): + """Wrap a parser and try to fix parsing errors.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + return True + + parser: Annotated[Any, SkipValidation()] + 
"""The parser to use to parse the output.""" + # Should be an LLMChain but we want to avoid top-level imports from langchain.chains + retry_chain: Annotated[ + Union[RunnableSerializable[OutputFixingParserRetryChainInput, str], Any], + SkipValidation(), + ] + """The RunnableSerializable to use to retry the completion (Legacy: LLMChain).""" + max_retries: int = 1 + """The maximum number of times to retry the parse.""" + legacy: bool = True + """Whether to use the run or arun method of the retry_chain.""" + + @classmethod + def from_llm( + cls, + llm: Runnable, + parser: BaseOutputParser[T], + prompt: BasePromptTemplate = NAIVE_FIX_PROMPT, + max_retries: int = 1, + ) -> OutputFixingParser[T]: + """Create an OutputFixingParser from a language model and a parser. + + Args: + llm: llm to use for fixing + parser: parser to use for parsing + prompt: prompt to use for fixing + max_retries: Maximum number of retries to parse. + + Returns: + OutputFixingParser + """ + chain = prompt | llm | StrOutputParser() + return cls(parser=parser, retry_chain=chain, max_retries=max_retries) + + def parse(self, completion: str) -> T: + retries = 0 + + while retries <= self.max_retries: + try: + return self.parser.parse(completion) + except OutputParserException as e: + if retries == self.max_retries: + raise e + else: + retries += 1 + if self.legacy and hasattr(self.retry_chain, "run"): + completion = self.retry_chain.run( + instructions=self.parser.get_format_instructions(), + completion=completion, + error=repr(e), + ) + else: + try: + completion = self.retry_chain.invoke( + dict( + instructions=self.parser.get_format_instructions(), + completion=completion, + error=repr(e), + ) + ) + except (NotImplementedError, AttributeError): + # Case: self.parser does not have get_format_instructions + completion = self.retry_chain.invoke( + dict( + completion=completion, + error=repr(e), + ) + ) + + raise OutputParserException("Failed to parse") + + async def aparse(self, completion: str) -> T: + retries = 0 + + while retries <= self.max_retries: + try: + return await self.parser.aparse(completion) + except OutputParserException as e: + if retries == self.max_retries: + raise e + else: + retries += 1 + if self.legacy and hasattr(self.retry_chain, "arun"): + completion = await self.retry_chain.arun( + instructions=self.parser.get_format_instructions(), + completion=completion, + error=repr(e), + ) + else: + try: + completion = await self.retry_chain.ainvoke( + dict( + instructions=self.parser.get_format_instructions(), + completion=completion, + error=repr(e), + ) + ) + except (NotImplementedError, AttributeError): + # Case: self.parser does not have get_format_instructions + completion = await self.retry_chain.ainvoke( + dict( + completion=completion, + error=repr(e), + ) + ) + + raise OutputParserException("Failed to parse") + + def get_format_instructions(self) -> str: + return self.parser.get_format_instructions() + + @property + def _type(self) -> str: + return "output_fixing" + + @property + def OutputType(self) -> type[T]: + return self.parser.OutputType diff --git a/venv/Lib/site-packages/langchain/output_parsers/format_instructions.py b/venv/Lib/site-packages/langchain/output_parsers/format_instructions.py new file mode 100644 index 00000000..d9e892f2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/format_instructions.py @@ -0,0 +1,81 @@ +# flake8: noqa + +STRUCTURED_FORMAT_INSTRUCTIONS = """The output should be a markdown code snippet formatted in the following schema, including the leading 
and trailing "```json" and "```": + +```json +{{ +{format} +}} +```""" + +STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS = """ +```json +{{ +{format} +}} +```""" + + +PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below. + +As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}} +the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted. + +Here is the output schema: +``` +{schema} +```""" + +YAML_FORMAT_INSTRUCTIONS = """The output should be formatted as a YAML instance that conforms to the given JSON schema below. + +# Examples +## Schema +``` +{{"title": "Players", "description": "A list of players", "type": "array", "items": {{"$ref": "#/definitions/Player"}}, "definitions": {{"Player": {{"title": "Player", "type": "object", "properties": {{"name": {{"title": "Name", "description": "Player name", "type": "string"}}, "avg": {{"title": "Avg", "description": "Batting average", "type": "number"}}}}, "required": ["name", "avg"]}}}}}} +``` +## Well formatted instance +``` +- name: John Doe + avg: 0.3 +- name: Jane Maxfield + avg: 1.4 +``` + +## Schema +``` +{{"properties": {{"habit": {{ "description": "A common daily habit", "type": "string" }}, "sustainable_alternative": {{ "description": "An environmentally friendly alternative to the habit", "type": "string"}}}}, "required": ["habit", "sustainable_alternative"]}} +``` +## Well formatted instance +``` +habit: Using disposable water bottles for daily hydration. +sustainable_alternative: Switch to a reusable water bottle to reduce plastic waste and decrease your environmental footprint. +``` + +Please follow the standard YAML formatting conventions with an indent of 2 spaces and make sure that the data types adhere strictly to the following JSON schema: +``` +{schema} +``` + +Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!""" + + +PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS = """The output should be formatted as a string as the operation, followed by a colon, followed by the column or row to be queried on, followed by optional array parameters. +1. The column names are limited to the possible columns below. +2. Arrays must either be a comma-separated list of numbers formatted as [1,3,5], or it must be in range of numbers formatted as [0..4]. +3. Remember that arrays are optional and not necessarily required. +4. If the column is not in the possible columns or the operation is not a valid Pandas DataFrame operation, return why it is invalid as a sentence starting with either "Invalid column" or "Invalid operation". + +As an example, for the formats: +1. String "column:num_legs" is a well-formatted instance which gets the column num_legs, where num_legs is a possible column. +2. String "row:1" is a well-formatted instance which gets row 1. +3. String "column:num_legs[1,2]" is a well-formatted instance which gets the column num_legs for rows 1 and 2, where num_legs is a possible column. +4. String "row:1[num_legs]" is a well-formatted instance which gets row 1, but for just column num_legs, where num_legs is a possible column. +5. 
String "mean:num_legs[1..3]" is a well-formatted instance which takes the mean of num_legs from rows 1 to 3, where num_legs is a possible column and mean is a valid Pandas DataFrame operation. +6. String "do_something:num_legs" is a badly-formatted instance, where do_something is not a valid Pandas DataFrame operation. +7. String "mean:invalid_col" is a badly-formatted instance, where invalid_col is not a possible column. + +Here are the possible columns: +``` +{columns} +``` +""" diff --git a/venv/Lib/site-packages/langchain/output_parsers/json.py b/venv/Lib/site-packages/langchain/output_parsers/json.py new file mode 100644 index 00000000..20b06e3b --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/json.py @@ -0,0 +1,15 @@ +from langchain_core.output_parsers.json import ( + SimpleJsonOutputParser, +) +from langchain_core.utils.json import ( + parse_and_check_json_markdown, + parse_json_markdown, + parse_partial_json, +) + +__all__ = [ + "SimpleJsonOutputParser", + "parse_partial_json", + "parse_json_markdown", + "parse_and_check_json_markdown", +] diff --git a/venv/Lib/site-packages/langchain/output_parsers/list.py b/venv/Lib/site-packages/langchain/output_parsers/list.py new file mode 100644 index 00000000..b5ffd8a3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/list.py @@ -0,0 +1,13 @@ +from langchain_core.output_parsers.list import ( + CommaSeparatedListOutputParser, + ListOutputParser, + MarkdownListOutputParser, + NumberedListOutputParser, +) + +__all__ = [ + "ListOutputParser", + "CommaSeparatedListOutputParser", + "NumberedListOutputParser", + "MarkdownListOutputParser", +] diff --git a/venv/Lib/site-packages/langchain/output_parsers/loading.py b/venv/Lib/site-packages/langchain/output_parsers/loading.py new file mode 100644 index 00000000..b25710c1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/loading.py @@ -0,0 +1,22 @@ +from langchain.output_parsers.regex import RegexParser + + +def load_output_parser(config: dict) -> dict: + """Load an output parser. 
+ + Args: + config: config dict + + Returns: + config dict with output parser loaded + """ + if "output_parsers" in config: + if config["output_parsers"] is not None: + _config = config["output_parsers"] + output_parser_type = _config["_type"] + if output_parser_type == "regex_parser": + output_parser = RegexParser(**_config) + else: + raise ValueError(f"Unsupported output parser {output_parser_type}") + config["output_parsers"] = output_parser + return config diff --git a/venv/Lib/site-packages/langchain/output_parsers/openai_functions.py b/venv/Lib/site-packages/langchain/output_parsers/openai_functions.py new file mode 100644 index 00000000..800b66b6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/openai_functions.py @@ -0,0 +1,13 @@ +from langchain_core.output_parsers.openai_functions import ( + JsonKeyOutputFunctionsParser, + JsonOutputFunctionsParser, + PydanticAttrOutputFunctionsParser, + PydanticOutputFunctionsParser, +) + +__all__ = [ + "PydanticOutputFunctionsParser", + "PydanticAttrOutputFunctionsParser", + "JsonOutputFunctionsParser", + "JsonKeyOutputFunctionsParser", +] diff --git a/venv/Lib/site-packages/langchain/output_parsers/openai_tools.py b/venv/Lib/site-packages/langchain/output_parsers/openai_tools.py new file mode 100644 index 00000000..57a1a667 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/openai_tools.py @@ -0,0 +1,7 @@ +from langchain_core.output_parsers.openai_tools import ( + JsonOutputKeyToolsParser, + JsonOutputToolsParser, + PydanticToolsParser, +) + +__all__ = ["PydanticToolsParser", "JsonOutputToolsParser", "JsonOutputKeyToolsParser"] diff --git a/venv/Lib/site-packages/langchain/output_parsers/pandas_dataframe.py b/venv/Lib/site-packages/langchain/output_parsers/pandas_dataframe.py new file mode 100644 index 00000000..5afc80d6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/pandas_dataframe.py @@ -0,0 +1,160 @@ +import re +from typing import Any, Union + +from langchain_core.exceptions import OutputParserException +from langchain_core.output_parsers.base import BaseOutputParser +from pydantic import field_validator + +from langchain.output_parsers.format_instructions import ( + PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS, +) + + +class PandasDataFrameOutputParser(BaseOutputParser[dict[str, Any]]): + """Parse an output using Pandas DataFrame format.""" + + """The Pandas DataFrame to parse.""" + dataframe: Any + + @field_validator("dataframe") + @classmethod + def validate_dataframe(cls, val: Any) -> Any: + import pandas as pd + + if issubclass(type(val), pd.DataFrame): + return val + if pd.DataFrame(val).empty: + raise ValueError("DataFrame cannot be empty.") + + raise TypeError( + "Wrong type for 'dataframe', must be a subclass \ + of Pandas DataFrame (pd.DataFrame)" + ) + + def parse_array( + self, array: str, original_request_params: str + ) -> tuple[list[Union[int, str]], str]: + parsed_array: list[Union[int, str]] = [] + + # Check if the format is [1,3,5] + if re.match(r"\[\d+(,\s*\d+)*\]", array): + parsed_array = [int(i) for i in re.findall(r"\d+", array)] + # Check if the format is [1..5] + elif re.match(r"\[(\d+)\.\.(\d+)\]", array): + match = re.match(r"\[(\d+)\.\.(\d+)\]", array) + if match: + start, end = map(int, match.groups()) + parsed_array = list(range(start, end + 1)) + else: + raise OutputParserException( + f"Unable to parse the array provided in {array}. \ + Please check the format instructions." 
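
`load_output_parser` above only knows how to revive a `RegexParser`; a round-trip sketch, with an illustrative config (note the function stores the parser back under the same `"output_parsers"` key):

```python
from langchain.output_parsers.loading import load_output_parser

config = {
    "output_parsers": {
        "_type": "regex_parser",
        "regex": r"Score:\s*(\d+)",
        "output_keys": ["score"],
    }
}
config = load_output_parser(config)  # replaces the dict with a RegexParser
print(config["output_parsers"].parse("Score: 9"))  # {'score': '9'}
```
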
+ ) + # Check if the format is ["column_name"] + elif re.match(r"\[[a-zA-Z0-9_]+(?:,[a-zA-Z0-9_]+)*\]", array): + match = re.match(r"\[[a-zA-Z0-9_]+(?:,[a-zA-Z0-9_]+)*\]", array) + if match: + parsed_array = list(map(str, match.group().strip("[]").split(","))) + else: + raise OutputParserException( + f"Unable to parse the array provided in {array}. \ + Please check the format instructions." + ) + + # Validate the array + if not parsed_array: + raise OutputParserException( + f"Invalid array format in '{original_request_params}'. \ + Please check the format instructions." + ) + elif ( + isinstance(parsed_array[0], int) + and parsed_array[-1] > self.dataframe.index.max() + ): + raise OutputParserException( + f"The maximum index {parsed_array[-1]} exceeds the maximum index of \ + the Pandas DataFrame {self.dataframe.index.max()}." + ) + + return parsed_array, original_request_params.split("[")[0] + + def parse(self, request: str) -> dict[str, Any]: + stripped_request_params = None + splitted_request = request.strip().split(":") + if len(splitted_request) != 2: + raise OutputParserException( + f"Request '{request}' is not correctly formatted. \ + Please refer to the format instructions." + ) + result = {} + try: + request_type, request_params = splitted_request + if request_type in {"Invalid column", "Invalid operation"}: + raise OutputParserException( + f"{request}. Please check the format instructions." + ) + array_exists = re.search(r"(\[.*?\])", request_params) + if array_exists: + parsed_array, stripped_request_params = self.parse_array( + array_exists.group(1), request_params + ) + if request_type == "column": + filtered_df = self.dataframe[ + self.dataframe.index.isin(parsed_array) + ] + if len(parsed_array) == 1: + result[stripped_request_params] = filtered_df[ + stripped_request_params + ].iloc[parsed_array[0]] + else: + result[stripped_request_params] = filtered_df[ + stripped_request_params + ] + elif request_type == "row": + filtered_df = self.dataframe[ + self.dataframe.columns.intersection(parsed_array) + ] + if len(parsed_array) == 1: + result[stripped_request_params] = filtered_df.iloc[ + int(stripped_request_params) + ][parsed_array[0]] + else: + result[stripped_request_params] = filtered_df.iloc[ + int(stripped_request_params) + ] + else: + filtered_df = self.dataframe[ + self.dataframe.index.isin(parsed_array) + ] + result[request_type] = getattr( + filtered_df[stripped_request_params], request_type + )() + else: + if request_type == "column": + result[request_params] = self.dataframe[request_params] + elif request_type == "row": + result[request_params] = self.dataframe.iloc[int(request_params)] + else: + result[request_type] = getattr( + self.dataframe[request_params], request_type + )() + except (AttributeError, IndexError, KeyError): + if request_type not in {"column", "row"}: + raise OutputParserException( + f"Unsupported request type '{request_type}'. \ + Please check the format instructions." 
+ ) + raise OutputParserException( + f"""Requested index { + request_params + if stripped_request_params is None + else stripped_request_params + } is out of bounds.""" + ) + + return result + + def get_format_instructions(self) -> str: + return PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS.format( + columns=", ".join(self.dataframe.columns) + ) diff --git a/venv/Lib/site-packages/langchain/output_parsers/prompts.py b/venv/Lib/site-packages/langchain/output_parsers/prompts.py new file mode 100644 index 00000000..dd06a70c --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/prompts.py @@ -0,0 +1,22 @@ +# flake8: noqa +from langchain_core.prompts.prompt import PromptTemplate + +NAIVE_FIX = """Instructions: +-------------- +{instructions} +-------------- +Completion: +-------------- +{completion} +-------------- + +Above, the Completion did not satisfy the constraints given in the Instructions. +Error: +-------------- +{error} +-------------- + +Please try again. Please only respond with an answer that satisfies the constraints laid out in the Instructions:""" + + +NAIVE_FIX_PROMPT = PromptTemplate.from_template(NAIVE_FIX) diff --git a/venv/Lib/site-packages/langchain/output_parsers/pydantic.py b/venv/Lib/site-packages/langchain/output_parsers/pydantic.py new file mode 100644 index 00000000..3d8dc727 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/pydantic.py @@ -0,0 +1,3 @@ +from langchain_core.output_parsers import PydanticOutputParser + +__all__ = ["PydanticOutputParser"] diff --git a/venv/Lib/site-packages/langchain/output_parsers/rail_parser.py b/venv/Lib/site-packages/langchain/output_parsers/rail_parser.py new file mode 100644 index 00000000..79637931 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/rail_parser.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.output_parsers.rail_parser import GuardrailsOutputParser + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
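
A usage sketch for the `PandasDataFrameOutputParser` defined above, exercising the `column:`, `row:`, and aggregate request forms described in its format instructions (the DataFrame is illustrative):

```python
import pandas as pd

from langchain.output_parsers import PandasDataFrameOutputParser

df = pd.DataFrame({"num_legs": [2, 4, 8, 0], "num_wings": [2, 0, 0, 0]})
parser = PandasDataFrameOutputParser(dataframe=df)

print(parser.parse("column:num_legs"))      # {'num_legs': the full column as a Series}
print(parser.parse("row:1"))                # {'1': row 1 as a Series}
print(parser.parse("mean:num_legs[1..3]"))  # {'mean': 4.0} over rows 1 through 3
print(parser.get_format_instructions())     # lists num_legs, num_wings as columns
```
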
+DEPRECATED_LOOKUP = { + "GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GuardrailsOutputParser", +] diff --git a/venv/Lib/site-packages/langchain/output_parsers/regex.py b/venv/Lib/site-packages/langchain/output_parsers/regex.py new file mode 100644 index 00000000..1c469db2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/regex.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import re +from typing import Optional + +from langchain_core.output_parsers import BaseOutputParser + + +class RegexParser(BaseOutputParser[dict[str, str]]): + """Parse the output of an LLM call using a regex.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + return True + + regex: str + """The regex to use to parse the output.""" + output_keys: list[str] + """The keys to use for the output.""" + default_output_key: Optional[str] = None + """The default key to use for the output.""" + + @property + def _type(self) -> str: + """Return the type key.""" + return "regex_parser" + + def parse(self, text: str) -> dict[str, str]: + """Parse the output of an LLM call.""" + match = re.search(self.regex, text) + if match: + return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)} + else: + if self.default_output_key is None: + raise ValueError(f"Could not parse output: {text}") + else: + return { + key: text if key == self.default_output_key else "" + for key in self.output_keys + } diff --git a/venv/Lib/site-packages/langchain/output_parsers/regex_dict.py b/venv/Lib/site-packages/langchain/output_parsers/regex_dict.py new file mode 100644 index 00000000..755de405 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/regex_dict.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +import re +from typing import Optional + +from langchain_core.output_parsers import BaseOutputParser + + +class RegexDictParser(BaseOutputParser[dict[str, str]]): + """Parse the output of an LLM call into a Dictionary using a regex.""" + + regex_pattern: str = r"{}:\s?([^.'\n']*)\.?" 
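
A short sketch of the `RegexParser` above; each capture group maps positionally onto `output_keys` (pattern and text are illustrative):

```python
from langchain.output_parsers import RegexParser

parser = RegexParser(
    regex=r"Grade:\s*(\w+)\s*Confidence:\s*(\d+)",
    output_keys=["grade", "confidence"],
)
print(parser.parse("Grade: A Confidence: 9"))
# {'grade': 'A', 'confidence': '9'}
```
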
# : :meta private: + """The regex pattern to use to parse the output.""" + output_key_to_format: dict[str, str] + """The keys to use for the output.""" + no_update_value: Optional[str] = None + """The default key to use for the output.""" + + @property + def _type(self) -> str: + """Return the type key.""" + return "regex_dict_parser" + + def parse(self, text: str) -> dict[str, str]: + """Parse the output of an LLM call.""" + result = {} + for output_key, expected_format in self.output_key_to_format.items(): + specific_regex = self.regex_pattern.format(re.escape(expected_format)) + matches = re.findall(specific_regex, text) + if not matches: + raise ValueError( + f"No match found for output key: {output_key} with expected format \ + {expected_format} on text {text}" + ) + elif len(matches) > 1: + raise ValueError( + f"Multiple matches found for output key: {output_key} with \ + expected format {expected_format} on text {text}" + ) + elif ( + self.no_update_value is not None and matches[0] == self.no_update_value + ): + continue + else: + result[output_key] = matches[0] + return result diff --git a/venv/Lib/site-packages/langchain/output_parsers/retry.py b/venv/Lib/site-packages/langchain/output_parsers/retry.py new file mode 100644 index 00000000..db20dd2d --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/retry.py @@ -0,0 +1,297 @@ +from __future__ import annotations + +from typing import Annotated, Any, TypeVar, Union + +from langchain_core.exceptions import OutputParserException +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import BaseOutputParser, StrOutputParser +from langchain_core.prompt_values import PromptValue +from langchain_core.prompts import BasePromptTemplate, PromptTemplate +from langchain_core.runnables import RunnableSerializable +from pydantic import SkipValidation +from typing_extensions import TypedDict + +NAIVE_COMPLETION_RETRY = """Prompt: +{prompt} +Completion: +{completion} + +Above, the Completion did not satisfy the constraints given in the Prompt. +Please try again:""" + +NAIVE_COMPLETION_RETRY_WITH_ERROR = """Prompt: +{prompt} +Completion: +{completion} + +Above, the Completion did not satisfy the constraints given in the Prompt. +Details: {error} +Please try again:""" + +NAIVE_RETRY_PROMPT = PromptTemplate.from_template(NAIVE_COMPLETION_RETRY) +NAIVE_RETRY_WITH_ERROR_PROMPT = PromptTemplate.from_template( + NAIVE_COMPLETION_RETRY_WITH_ERROR +) + +T = TypeVar("T") + + +class RetryOutputParserRetryChainInput(TypedDict): + prompt: str + completion: str + + +class RetryWithErrorOutputParserRetryChainInput(TypedDict): + prompt: str + completion: str + error: str + + +class RetryOutputParser(BaseOutputParser[T]): + """Wrap a parser and try to fix parsing errors. + + Does this by passing the original prompt and the completion to another + LLM, and telling it the completion did not satisfy criteria in the prompt. 
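
The `RegexDictParser` above searches for exactly one `label: value` pair per output key, where `output_key_to_format` maps each key to the label expected in the text; a sketch with illustrative labels:

```python
from langchain.output_parsers import RegexDictParser

parser = RegexDictParser(
    output_key_to_format={"action": "Action", "action_input": "Action Input"}
)
text = "Action: search\nAction Input: weather in Paris\n"
print(parser.parse(text))
# {'action': 'search', 'action_input': 'weather in Paris'}
```
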
+ """ + + parser: Annotated[BaseOutputParser[T], SkipValidation()] + """The parser to use to parse the output.""" + # Should be an LLMChain but we want to avoid top-level imports from langchain.chains + retry_chain: Annotated[ + Union[RunnableSerializable[RetryOutputParserRetryChainInput, str], Any], + SkipValidation(), + ] + """The RunnableSerializable to use to retry the completion (Legacy: LLMChain).""" + max_retries: int = 1 + """The maximum number of times to retry the parse.""" + legacy: bool = True + """Whether to use the run or arun method of the retry_chain.""" + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + parser: BaseOutputParser[T], + prompt: BasePromptTemplate = NAIVE_RETRY_PROMPT, + max_retries: int = 1, + ) -> RetryOutputParser[T]: + """Create an RetryOutputParser from a language model and a parser. + + Args: + llm: llm to use for fixing + parser: parser to use for parsing + prompt: prompt to use for fixing + max_retries: Maximum number of retries to parse. + + Returns: + RetryOutputParser + """ + chain = prompt | llm | StrOutputParser() + return cls(parser=parser, retry_chain=chain, max_retries=max_retries) + + def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: + """Parse the output of an LLM call using a wrapped parser. + + Args: + completion: The chain completion to parse. + prompt_value: The prompt to use to parse the completion. + + Returns: + The parsed completion. + """ + retries = 0 + + while retries <= self.max_retries: + try: + return self.parser.parse(completion) + except OutputParserException as e: + if retries == self.max_retries: + raise e + else: + retries += 1 + if self.legacy and hasattr(self.retry_chain, "run"): + completion = self.retry_chain.run( + prompt=prompt_value.to_string(), + completion=completion, + ) + else: + completion = self.retry_chain.invoke( + dict( + prompt=prompt_value.to_string(), + completion=completion, + ) + ) + + raise OutputParserException("Failed to parse") + + async def aparse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: + """Parse the output of an LLM call using a wrapped parser. + + Args: + completion: The chain completion to parse. + prompt_value: The prompt to use to parse the completion. + + Returns: + The parsed completion. + """ + retries = 0 + + while retries <= self.max_retries: + try: + return await self.parser.aparse(completion) + except OutputParserException as e: + if retries == self.max_retries: + raise e + else: + retries += 1 + if self.legacy and hasattr(self.retry_chain, "arun"): + completion = await self.retry_chain.arun( + prompt=prompt_value.to_string(), + completion=completion, + error=repr(e), + ) + else: + completion = await self.retry_chain.ainvoke( + dict( + prompt=prompt_value.to_string(), + completion=completion, + ) + ) + + raise OutputParserException("Failed to parse") + + def parse(self, completion: str) -> T: + raise NotImplementedError( + "This OutputParser can only be called by the `parse_with_prompt` method." + ) + + def get_format_instructions(self) -> str: + return self.parser.get_format_instructions() + + @property + def _type(self) -> str: + return "retry" + + @property + def OutputType(self) -> type[T]: + return self.parser.OutputType + + +class RetryWithErrorOutputParser(BaseOutputParser[T]): + """Wrap a parser and try to fix parsing errors. 
+ + Does this by passing the original prompt, the completion, AND the error + that was raised to another language model and telling it that the completion + did not work, and raised the given error. Differs from RetryOutputParser + in that this implementation provides the error that was raised back to the + LLM, which in theory should give it more information on how to fix it. + """ + + parser: Annotated[BaseOutputParser[T], SkipValidation()] + """The parser to use to parse the output.""" + # Should be an LLMChain but we want to avoid top-level imports from langchain.chains + retry_chain: Annotated[ + Union[ + RunnableSerializable[RetryWithErrorOutputParserRetryChainInput, str], Any + ], + SkipValidation(), + ] + """The RunnableSerializable to use to retry the completion (Legacy: LLMChain).""" + max_retries: int = 1 + """The maximum number of times to retry the parse.""" + legacy: bool = True + """Whether to use the run or arun method of the retry_chain.""" + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + parser: BaseOutputParser[T], + prompt: BasePromptTemplate = NAIVE_RETRY_WITH_ERROR_PROMPT, + max_retries: int = 1, + ) -> RetryWithErrorOutputParser[T]: + """Create a RetryWithErrorOutputParser from an LLM. + + Args: + llm: The LLM to use to retry the completion. + parser: The parser to use to parse the output. + prompt: The prompt to use to retry the completion. + max_retries: The maximum number of times to retry the completion. + + Returns: + A RetryWithErrorOutputParser. + """ + chain = prompt | llm | StrOutputParser() + return cls(parser=parser, retry_chain=chain, max_retries=max_retries) + + def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: + retries = 0 + + while retries <= self.max_retries: + try: + return self.parser.parse(completion) + except OutputParserException as e: + if retries == self.max_retries: + raise e + else: + retries += 1 + if self.legacy and hasattr(self.retry_chain, "run"): + completion = self.retry_chain.run( + prompt=prompt_value.to_string(), + completion=completion, + error=repr(e), + ) + else: + completion = self.retry_chain.invoke( + dict( + completion=completion, + prompt=prompt_value.to_string(), + error=repr(e), + ) + ) + + raise OutputParserException("Failed to parse") + + async def aparse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: + retries = 0 + + while retries <= self.max_retries: + try: + return await self.parser.aparse(completion) + except OutputParserException as e: + if retries == self.max_retries: + raise e + else: + retries += 1 + if self.legacy and hasattr(self.retry_chain, "arun"): + completion = await self.retry_chain.arun( + prompt=prompt_value.to_string(), + completion=completion, + error=repr(e), + ) + else: + completion = await self.retry_chain.ainvoke( + dict( + prompt=prompt_value.to_string(), + completion=completion, + error=repr(e), + ) + ) + + raise OutputParserException("Failed to parse") + + def parse(self, completion: str) -> T: + raise NotImplementedError( + "This OutputParser can only be called by the `parse_with_prompt` method." 
+ ) + + def get_format_instructions(self) -> str: + return self.parser.get_format_instructions() + + @property + def _type(self) -> str: + return "retry_with_error" + + @property + def OutputType(self) -> type[T]: + return self.parser.OutputType diff --git a/venv/Lib/site-packages/langchain/output_parsers/structured.py b/venv/Lib/site-packages/langchain/output_parsers/structured.py new file mode 100644 index 00000000..181acd7b --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/structured.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +from typing import Any + +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.output_parsers.json import parse_and_check_json_markdown +from pydantic import BaseModel + +from langchain.output_parsers.format_instructions import ( + STRUCTURED_FORMAT_INSTRUCTIONS, + STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS, +) + +line_template = '\t"{name}": {type} // {description}' + + +class ResponseSchema(BaseModel): + """Schema for a response from a structured output parser.""" + + name: str + """The name of the schema.""" + description: str + """The description of the schema.""" + type: str = "string" + """The type of the response.""" + + +def _get_sub_string(schema: ResponseSchema) -> str: + return line_template.format( + name=schema.name, description=schema.description, type=schema.type + ) + + +class StructuredOutputParser(BaseOutputParser[dict[str, Any]]): + """Parse the output of an LLM call to a structured output.""" + + response_schemas: list[ResponseSchema] + """The schemas for the response.""" + + @classmethod + def from_response_schemas( + cls, response_schemas: list[ResponseSchema] + ) -> StructuredOutputParser: + return cls(response_schemas=response_schemas) + + def get_format_instructions(self, only_json: bool = False) -> str: + """Get format instructions for the output parser. + + example: + ```python + from langchain.output_parsers.structured import ( + StructuredOutputParser, ResponseSchema + ) + + response_schemas = [ + ResponseSchema( + name="foo", + description="a list of strings", + type="List[string]" + ), + ResponseSchema( + name="bar", + description="a string", + type="string" + ), + ] + + parser = StructuredOutputParser.from_response_schemas(response_schemas) + + print(parser.get_format_instructions()) # noqa: T201 + + output: + # The output should be a Markdown code snippet formatted in the following + # schema, including the leading and trailing "```json" and "```": + # + # ```json + # { + # "foo": List[string] // a list of strings + # "bar": string // a string + # } + # ``` + + Args: + only_json (bool): If True, only the json in the Markdown code snippet + will be returned, without the introducing text. Defaults to False. 
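
A sketch of the `RetryWithErrorOutputParser` defined above, again with a canned `FakeListLLM` stand-in so it runs offline; a real model would normally be passed to `from_llm`:

```python
from pydantic import BaseModel

from langchain.output_parsers import PydanticOutputParser, RetryWithErrorOutputParser
from langchain_community.llms.fake import FakeListLLM
from langchain_core.prompts import PromptTemplate


class Answer(BaseModel):
    value: int


base = PydanticOutputParser(pydantic_object=Answer)
prompt_value = PromptTemplate.from_template(
    "Answer as JSON with an integer `value`: {question}"
).format_prompt(question="What is 6 * 7?")

# Stand-in LLM that returns a corrected completion on the retry pass.
llm = FakeListLLM(responses=['{"value": 42}'])
retry_parser = RetryWithErrorOutputParser.from_llm(llm=llm, parser=base)

print(retry_parser.parse_with_prompt("forty-two", prompt_value))  # value=42
```
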
+ """ + schema_str = "\n".join( + [_get_sub_string(schema) for schema in self.response_schemas] + ) + if only_json: + return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str) + else: + return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str) + + def parse(self, text: str) -> dict[str, Any]: + expected_keys = [rs.name for rs in self.response_schemas] + return parse_and_check_json_markdown(text, expected_keys) + + @property + def _type(self) -> str: + return "structured" diff --git a/venv/Lib/site-packages/langchain/output_parsers/xml.py b/venv/Lib/site-packages/langchain/output_parsers/xml.py new file mode 100644 index 00000000..655dc65b --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/xml.py @@ -0,0 +1,3 @@ +from langchain_core.output_parsers.xml import XMLOutputParser + +__all__ = ["XMLOutputParser"] diff --git a/venv/Lib/site-packages/langchain/output_parsers/yaml.py b/venv/Lib/site-packages/langchain/output_parsers/yaml.py new file mode 100644 index 00000000..1dfab768 --- /dev/null +++ b/venv/Lib/site-packages/langchain/output_parsers/yaml.py @@ -0,0 +1,69 @@ +import json +import re +from typing import TypeVar + +import yaml +from langchain_core.exceptions import OutputParserException +from langchain_core.output_parsers import BaseOutputParser +from pydantic import BaseModel, ValidationError + +from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS + +T = TypeVar("T", bound=BaseModel) + + +class YamlOutputParser(BaseOutputParser[T]): + """Parse YAML output using a pydantic model.""" + + pydantic_object: type[T] + """The pydantic model to parse.""" + pattern: re.Pattern = re.compile( + r"^```(?:ya?ml)?(?P[^`]*)", re.MULTILINE | re.DOTALL + ) + """Regex pattern to match yaml code blocks + within triple backticks with optional yaml or yml prefix.""" + + def parse(self, text: str) -> T: + try: + # Greedy search for 1st yaml candidate. + match = re.search(self.pattern, text.strip()) + yaml_str = "" + if match: + yaml_str = match.group("yaml") + else: + # If no backticks were present, try to parse the entire output as yaml. + yaml_str = text + + json_object = yaml.safe_load(yaml_str) + if hasattr(self.pydantic_object, "model_validate"): + return self.pydantic_object.model_validate(json_object) + else: + return self.pydantic_object.parse_obj(json_object) + + except (yaml.YAMLError, ValidationError) as e: + name = self.pydantic_object.__name__ + msg = f"Failed to parse {name} from completion {text}. Got: {e}" + raise OutputParserException(msg, llm_output=text) from e + + def get_format_instructions(self) -> str: + # Copy schema to avoid altering original Pydantic schema. + schema = {k: v for k, v in self.pydantic_object.schema().items()} + + # Remove extraneous fields. + reduced_schema = schema + if "title" in reduced_schema: + del reduced_schema["title"] + if "type" in reduced_schema: + del reduced_schema["type"] + # Ensure yaml in context is well-formed with double quotes. + schema_str = json.dumps(reduced_schema) + + return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str) + + @property + def _type(self) -> str: + return "yaml" + + @property + def OutputType(self) -> type[T]: + return self.pydantic_object diff --git a/venv/Lib/site-packages/langchain/prompts/__init__.py b/venv/Lib/site-packages/langchain/prompts/__init__.py new file mode 100644 index 00000000..63ace4e2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/__init__.py @@ -0,0 +1,100 @@ +"""**Prompt** is the input to the model. 
+ +Prompt is often constructed +from multiple components. Prompt classes and functions make constructing + and working with prompts easy. + +**Class hierarchy:** + +.. code-block:: + + BasePromptTemplate --> PipelinePromptTemplate + StringPromptTemplate --> PromptTemplate + FewShotPromptTemplate + FewShotPromptWithTemplates + BaseChatPromptTemplate --> AutoGPTPrompt + ChatPromptTemplate --> AgentScratchPadChatPromptTemplate + + + + BaseMessagePromptTemplate --> MessagesPlaceholder + BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate + HumanMessagePromptTemplate + AIMessagePromptTemplate + SystemMessagePromptTemplate + + PromptValue --> StringPromptValue + ChatPromptValue + +""" # noqa: E501 + +from typing import TYPE_CHECKING, Any + +from langchain_core.example_selectors import ( + LengthBasedExampleSelector, + MaxMarginalRelevanceExampleSelector, + SemanticSimilarityExampleSelector, +) +from langchain_core.prompts import ( + AIMessagePromptTemplate, + BaseChatPromptTemplate, + BasePromptTemplate, + ChatMessagePromptTemplate, + ChatPromptTemplate, + FewShotChatMessagePromptTemplate, + FewShotPromptTemplate, + FewShotPromptWithTemplates, + HumanMessagePromptTemplate, + MessagesPlaceholder, + PipelinePromptTemplate, + PromptTemplate, + StringPromptTemplate, + SystemMessagePromptTemplate, + load_prompt, +) + +from langchain._api import create_importer +from langchain.prompts.prompt import Prompt + +if TYPE_CHECKING: + from langchain_community.example_selectors.ngram_overlap import ( + NGramOverlapExampleSelector, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +MODULE_LOOKUP = { + "NGramOverlapExampleSelector": "langchain_community.example_selectors.ngram_overlap" +} + +_import_attribute = create_importer(__file__, module_lookup=MODULE_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AIMessagePromptTemplate", + "BaseChatPromptTemplate", + "BasePromptTemplate", + "ChatMessagePromptTemplate", + "ChatPromptTemplate", + "FewShotPromptTemplate", + "FewShotPromptWithTemplates", + "HumanMessagePromptTemplate", + "LengthBasedExampleSelector", + "MaxMarginalRelevanceExampleSelector", + "MessagesPlaceholder", + "NGramOverlapExampleSelector", + "PipelinePromptTemplate", + "PromptTemplate", + "SemanticSimilarityExampleSelector", + "StringPromptTemplate", + "SystemMessagePromptTemplate", + "load_prompt", + "FewShotChatMessagePromptTemplate", + "Prompt", +] diff --git a/venv/Lib/site-packages/langchain/prompts/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..efc007ef Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..170ee04f Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/__pycache__/chat.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/__pycache__/chat.cpython-312.pyc new file mode 100644 index 00000000..c6b70ff6 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/prompts/__pycache__/chat.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/__pycache__/few_shot.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/__pycache__/few_shot.cpython-312.pyc new file mode 100644 index 00000000..dd2ac484 Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/__pycache__/few_shot.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/__pycache__/few_shot_with_templates.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/__pycache__/few_shot_with_templates.cpython-312.pyc new file mode 100644 index 00000000..28f8aa6e Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/__pycache__/few_shot_with_templates.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/__pycache__/loading.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/__pycache__/loading.cpython-312.pyc new file mode 100644 index 00000000..222a0bdd Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/__pycache__/loading.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/__pycache__/pipeline.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/__pycache__/pipeline.cpython-312.pyc new file mode 100644 index 00000000..3300c560 Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/__pycache__/pipeline.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..9c8abf4a Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/base.py b/venv/Lib/site-packages/langchain/prompts/base.py new file mode 100644 index 00000000..a315ec92 --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/base.py @@ -0,0 +1,21 @@ +from langchain_core.prompt_values import StringPromptValue +from langchain_core.prompts import ( + BasePromptTemplate, + StringPromptTemplate, + check_valid_template, + get_template_variables, + jinja2_formatter, + validate_jinja2, +) +from langchain_core.prompts.string import _get_jinja2_variables_from_template + +__all__ = [ + "jinja2_formatter", + "validate_jinja2", + "check_valid_template", + "get_template_variables", + "StringPromptTemplate", + "BasePromptTemplate", + "StringPromptValue", + "_get_jinja2_variables_from_template", +] diff --git a/venv/Lib/site-packages/langchain/prompts/chat.py b/venv/Lib/site-packages/langchain/prompts/chat.py new file mode 100644 index 00000000..35de60b4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/chat.py @@ -0,0 +1,36 @@ +from langchain_core.prompt_values import ChatPromptValue, ChatPromptValueConcrete +from langchain_core.prompts.chat import ( + AIMessagePromptTemplate, + BaseChatPromptTemplate, + BaseMessagePromptTemplate, + BaseStringMessagePromptTemplate, + ChatMessagePromptTemplate, + ChatPromptTemplate, + HumanMessagePromptTemplate, + MessageLike, + MessageLikeRepresentation, + MessagePromptTemplateT, + MessagesPlaceholder, + SystemMessagePromptTemplate, + _convert_to_message, + _create_template_from_message_type, +) + +__all__ = [ + "BaseMessagePromptTemplate", + "MessagesPlaceholder", + "BaseStringMessagePromptTemplate", + "ChatMessagePromptTemplate", + "HumanMessagePromptTemplate", + "AIMessagePromptTemplate", + "SystemMessagePromptTemplate", + 
"BaseChatPromptTemplate", + "ChatPromptTemplate", + "ChatPromptValue", + "ChatPromptValueConcrete", + "_convert_to_message", + "_create_template_from_message_type", + "MessagePromptTemplateT", + "MessageLike", + "MessageLikeRepresentation", +] diff --git a/venv/Lib/site-packages/langchain/prompts/example_selector/__init__.py b/venv/Lib/site-packages/langchain/prompts/example_selector/__init__.py new file mode 100644 index 00000000..8e56a21b --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/example_selector/__init__.py @@ -0,0 +1,40 @@ +"""Logic for selecting examples to include in prompts.""" + +from typing import TYPE_CHECKING, Any + +from langchain_core.example_selectors.length_based import ( + LengthBasedExampleSelector, +) +from langchain_core.example_selectors.semantic_similarity import ( + MaxMarginalRelevanceExampleSelector, + SemanticSimilarityExampleSelector, +) + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.example_selectors.ngram_overlap import ( + NGramOverlapExampleSelector, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUPS = { + "NGramOverlapExampleSelector": "langchain_community.example_selectors.ngram_overlap" +} + +_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUPS) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LengthBasedExampleSelector", + "MaxMarginalRelevanceExampleSelector", + "NGramOverlapExampleSelector", + "SemanticSimilarityExampleSelector", +] diff --git a/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..7e2e7e1c Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..539544fa Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/length_based.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/length_based.cpython-312.pyc new file mode 100644 index 00000000..00f54051 Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/length_based.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/ngram_overlap.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/ngram_overlap.cpython-312.pyc new file mode 100644 index 00000000..55ed509d Binary files /dev/null and b/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/ngram_overlap.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/semantic_similarity.cpython-312.pyc b/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/semantic_similarity.cpython-312.pyc new file mode 100644 index 00000000..445e239a Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/prompts/example_selector/__pycache__/semantic_similarity.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/prompts/example_selector/base.py b/venv/Lib/site-packages/langchain/prompts/example_selector/base.py new file mode 100644 index 00000000..d6c9e074 --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/example_selector/base.py @@ -0,0 +1,3 @@ +from langchain_core.example_selectors.base import BaseExampleSelector + +__all__ = ["BaseExampleSelector"] diff --git a/venv/Lib/site-packages/langchain/prompts/example_selector/length_based.py b/venv/Lib/site-packages/langchain/prompts/example_selector/length_based.py new file mode 100644 index 00000000..1bb38d8a --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/example_selector/length_based.py @@ -0,0 +1,5 @@ +from langchain_core.example_selectors.length_based import ( + LengthBasedExampleSelector, +) + +__all__ = ["LengthBasedExampleSelector"] diff --git a/venv/Lib/site-packages/langchain/prompts/example_selector/ngram_overlap.py b/venv/Lib/site-packages/langchain/prompts/example_selector/ngram_overlap.py new file mode 100644 index 00000000..ae2e1417 --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/example_selector/ngram_overlap.py @@ -0,0 +1,32 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.example_selectors.ngram_overlap import ( + NGramOverlapExampleSelector, + ngram_overlap_score, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +MODULE_LOOKUP = { + "NGramOverlapExampleSelector": ( + "langchain_community.example_selectors.ngram_overlap" + ), + "ngram_overlap_score": "langchain_community.example_selectors.ngram_overlap", +} + +_import_attribute = create_importer(__file__, deprecated_lookups=MODULE_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NGramOverlapExampleSelector", + "ngram_overlap_score", +] diff --git a/venv/Lib/site-packages/langchain/prompts/example_selector/semantic_similarity.py b/venv/Lib/site-packages/langchain/prompts/example_selector/semantic_similarity.py new file mode 100644 index 00000000..e921236d --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/example_selector/semantic_similarity.py @@ -0,0 +1,11 @@ +from langchain_core.example_selectors.semantic_similarity import ( + MaxMarginalRelevanceExampleSelector, + SemanticSimilarityExampleSelector, + sorted_values, +) + +__all__ = [ + "sorted_values", + "SemanticSimilarityExampleSelector", + "MaxMarginalRelevanceExampleSelector", +] diff --git a/venv/Lib/site-packages/langchain/prompts/few_shot.py b/venv/Lib/site-packages/langchain/prompts/few_shot.py new file mode 100644 index 00000000..67b31062 --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/few_shot.py @@ -0,0 +1,11 @@ +from langchain_core.prompts.few_shot import ( + FewShotChatMessagePromptTemplate, + FewShotPromptTemplate, + _FewShotPromptTemplateMixin, +) + +__all__ = [ + "FewShotPromptTemplate", + "FewShotChatMessagePromptTemplate", + "_FewShotPromptTemplateMixin", +] diff --git a/venv/Lib/site-packages/langchain/prompts/few_shot_with_templates.py b/venv/Lib/site-packages/langchain/prompts/few_shot_with_templates.py new file mode 100644 index 00000000..7e530dbe --- /dev/null +++ 
b/venv/Lib/site-packages/langchain/prompts/few_shot_with_templates.py @@ -0,0 +1,3 @@ +from langchain_core.prompts.few_shot_with_templates import FewShotPromptWithTemplates + +__all__ = ["FewShotPromptWithTemplates"] diff --git a/venv/Lib/site-packages/langchain/prompts/loading.py b/venv/Lib/site-packages/langchain/prompts/loading.py new file mode 100644 index 00000000..b1dfcd90 --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/loading.py @@ -0,0 +1,23 @@ +from langchain_core.prompts.loading import ( + _load_examples, + _load_few_shot_prompt, + _load_output_parser, + _load_prompt, + _load_prompt_from_file, + _load_template, + load_prompt, + load_prompt_from_config, +) +from langchain_core.utils.loading import try_load_from_hub + +__all__ = [ + "load_prompt_from_config", + "load_prompt", + "try_load_from_hub", + "_load_examples", + "_load_few_shot_prompt", + "_load_output_parser", + "_load_prompt", + "_load_prompt_from_file", + "_load_template", +] diff --git a/venv/Lib/site-packages/langchain/prompts/pipeline.py b/venv/Lib/site-packages/langchain/prompts/pipeline.py new file mode 100644 index 00000000..88e2cc79 --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/pipeline.py @@ -0,0 +1,3 @@ +from langchain_core.prompts.pipeline import PipelinePromptTemplate, _get_inputs + +__all__ = ["PipelinePromptTemplate", "_get_inputs"] diff --git a/venv/Lib/site-packages/langchain/prompts/prompt.py b/venv/Lib/site-packages/langchain/prompts/prompt.py new file mode 100644 index 00000000..5e35f878 --- /dev/null +++ b/venv/Lib/site-packages/langchain/prompts/prompt.py @@ -0,0 +1,6 @@ +from langchain_core.prompts.prompt import PromptTemplate + +# For backwards compatibility. +Prompt = PromptTemplate + +__all__ = ["PromptTemplate", "Prompt"] diff --git a/venv/Lib/site-packages/langchain/py.typed b/venv/Lib/site-packages/langchain/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/pydantic_v1/__init__.py b/venv/Lib/site-packages/langchain/pydantic_v1/__init__.py new file mode 100644 index 00000000..2cabc662 --- /dev/null +++ b/venv/Lib/site-packages/langchain/pydantic_v1/__init__.py @@ -0,0 +1,38 @@ +from importlib import metadata + +from langchain_core._api import warn_deprecated + +## Create namespaces for pydantic v1 and v2. +# This code must stay at the top of the file before other modules may +# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules. +# +# This hack is done for the following reasons: +# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since +# both dependencies and dependents may be stuck on either version of v1 or v2. +# * Creating namespaces for pydantic v1 and v2 should allow us to write code that +# unambiguously uses either v1 or v2 API. +# * This change is easier to roll out and roll back. +from pydantic.v1 import * # noqa: F403 + +try: + _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0]) +except metadata.PackageNotFoundError: + _PYDANTIC_MAJOR_VERSION = 0 + +warn_deprecated( + "0.3.0", + removal="1.0.0", + alternative="pydantic.v1 or pydantic", + message=( + "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. " + "The langchain.pydantic_v1 module was a " + "compatibility shim for pydantic v1, and should no longer be used. 
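
The `prompt.py` module above keeps `Prompt` as a backwards-compatible alias for `PromptTemplate`; equivalent usage (template text is illustrative):

```python
from langchain.prompts.prompt import Prompt, PromptTemplate

assert Prompt is PromptTemplate  # same class, legacy name
template = Prompt.from_template("Translate to French: {text}")
print(template.format(text="Good morning"))  # Translate to French: Good morning
```
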
" + "Please update the code to import from Pydantic directly.\n\n" + "For example, replace imports like: " + "`from langchain.pydantic_v1 import BaseModel`\n" + "with: `from pydantic import BaseModel`\n" + "or the v1 compatibility namespace if you are working in a code base " + "that has not been fully upgraded to pydantic 2 yet. " + "\tfrom pydantic.v1 import BaseModel\n" + ), +) diff --git a/venv/Lib/site-packages/langchain/pydantic_v1/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/pydantic_v1/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..43f2331e Binary files /dev/null and b/venv/Lib/site-packages/langchain/pydantic_v1/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/pydantic_v1/__pycache__/dataclasses.cpython-312.pyc b/venv/Lib/site-packages/langchain/pydantic_v1/__pycache__/dataclasses.cpython-312.pyc new file mode 100644 index 00000000..d0bf6d11 Binary files /dev/null and b/venv/Lib/site-packages/langchain/pydantic_v1/__pycache__/dataclasses.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/pydantic_v1/__pycache__/main.cpython-312.pyc b/venv/Lib/site-packages/langchain/pydantic_v1/__pycache__/main.cpython-312.pyc new file mode 100644 index 00000000..4cab79cd Binary files /dev/null and b/venv/Lib/site-packages/langchain/pydantic_v1/__pycache__/main.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/pydantic_v1/dataclasses.py b/venv/Lib/site-packages/langchain/pydantic_v1/dataclasses.py new file mode 100644 index 00000000..cdadd11d --- /dev/null +++ b/venv/Lib/site-packages/langchain/pydantic_v1/dataclasses.py @@ -0,0 +1,20 @@ +from langchain_core._api import warn_deprecated +from pydantic.v1.dataclasses import * # noqa: F403 + +warn_deprecated( + "0.3.0", + removal="1.0.0", + alternative="pydantic.v1 or pydantic", + message=( + "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. " + "The langchain.pydantic_v1 module was a " + "compatibility shim for pydantic v1, and should no longer be used. " + "Please update the code to import from Pydantic directly.\n\n" + "For example, replace imports like: " + "`from langchain.pydantic_v1 import BaseModel`\n" + "with: `from pydantic import BaseModel`\n" + "or the v1 compatibility namespace if you are working in a code base " + "that has not been fully upgraded to pydantic 2 yet. " + "\tfrom pydantic.v1 import BaseModel\n" + ), +) diff --git a/venv/Lib/site-packages/langchain/pydantic_v1/main.py b/venv/Lib/site-packages/langchain/pydantic_v1/main.py new file mode 100644 index 00000000..d8630658 --- /dev/null +++ b/venv/Lib/site-packages/langchain/pydantic_v1/main.py @@ -0,0 +1,20 @@ +from langchain_core._api import warn_deprecated +from pydantic.v1.main import * # noqa: F403 + +warn_deprecated( + "0.3.0", + removal="1.0.0", + alternative="pydantic.v1 or pydantic", + message=( + "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. " + "The langchain.pydantic_v1 module was a " + "compatibility shim for pydantic v1, and should no longer be used. " + "Please update the code to import from Pydantic directly.\n\n" + "For example, replace imports like: " + "`from langchain.pydantic_v1 import BaseModel`\n" + "with: `from pydantic import BaseModel`\n" + "or the v1 compatibility namespace if you are working in a code base " + "that has not been fully upgraded to pydantic 2 yet. 
" + "\tfrom pydantic.v1 import BaseModel\n" + ), +) diff --git a/venv/Lib/site-packages/langchain/python.py b/venv/Lib/site-packages/langchain/python.py new file mode 100644 index 00000000..2a077fcf --- /dev/null +++ b/venv/Lib/site-packages/langchain/python.py @@ -0,0 +1,19 @@ +"""For backwards compatibility.""" + +from typing import Any + +from langchain._api import create_importer + +# Code has been removed from the community package as well. +# We'll proxy to community package, which will raise an appropriate exception, +# but we'll not include this in __all__, so it won't be listed as importable. + +_importer = create_importer( + __package__, + deprecated_lookups={"PythonREPL": "langchain_community.utilities.python"}, +) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _importer(name) diff --git a/venv/Lib/site-packages/langchain/requests.py b/venv/Lib/site-packages/langchain/requests.py new file mode 100644 index 00000000..90034f2e --- /dev/null +++ b/venv/Lib/site-packages/langchain/requests.py @@ -0,0 +1,35 @@ +"""DEPRECATED: Kept for backwards compatibility.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import ( + Requests, + RequestsWrapper, + TextRequestsWrapper, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "Requests": "langchain_community.utilities", + "RequestsWrapper": "langchain_community.utilities", + "TextRequestsWrapper": "langchain_community.utilities", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Requests", + "RequestsWrapper", + "TextRequestsWrapper", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/__init__.py b/venv/Lib/site-packages/langchain/retrievers/__init__.py new file mode 100644 index 00000000..ba28b14c --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/__init__.py @@ -0,0 +1,178 @@ +"""**Retriever** class returns Documents given a text **query**. + +It is more general than a vector store. A retriever does not need to be able to +store documents, only to return (or retrieve) it. Vector stores can be used as +the backbone of a retriever, but there are other types of retrievers as well. + +**Class hierarchy:** + +.. code-block:: + + BaseRetriever --> Retriever # Examples: ArxivRetriever, MergerRetriever + +**Main helpers:** + +.. 
code-block:: + + Document, Serializable, Callbacks, + CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun +""" + +from typing import TYPE_CHECKING, Any + +from langchain._api.module_import import create_importer +from langchain.retrievers.contextual_compression import ContextualCompressionRetriever +from langchain.retrievers.ensemble import EnsembleRetriever +from langchain.retrievers.merger_retriever import MergerRetriever +from langchain.retrievers.multi_query import MultiQueryRetriever +from langchain.retrievers.multi_vector import MultiVectorRetriever +from langchain.retrievers.parent_document_retriever import ParentDocumentRetriever +from langchain.retrievers.re_phraser import RePhraseQueryRetriever +from langchain.retrievers.self_query.base import SelfQueryRetriever +from langchain.retrievers.time_weighted_retriever import ( + TimeWeightedVectorStoreRetriever, +) + +if TYPE_CHECKING: + from langchain_community.retrievers import ( + AmazonKendraRetriever, + AmazonKnowledgeBasesRetriever, + ArceeRetriever, + ArxivRetriever, + AzureAISearchRetriever, + AzureCognitiveSearchRetriever, + BM25Retriever, + ChaindeskRetriever, + ChatGPTPluginRetriever, + CohereRagRetriever, + DocArrayRetriever, + DriaRetriever, + ElasticSearchBM25Retriever, + EmbedchainRetriever, + GoogleCloudEnterpriseSearchRetriever, + GoogleDocumentAIWarehouseRetriever, + GoogleVertexAIMultiTurnSearchRetriever, + GoogleVertexAISearchRetriever, + KayAiRetriever, + KNNRetriever, + LlamaIndexGraphRetriever, + LlamaIndexRetriever, + MetalRetriever, + MilvusRetriever, + NeuralDBRetriever, + OutlineRetriever, + PineconeHybridSearchRetriever, + PubMedRetriever, + RemoteLangChainRetriever, + SVMRetriever, + TavilySearchAPIRetriever, + TFIDFRetriever, + VespaRetriever, + WeaviateHybridSearchRetriever, + WebResearchRetriever, + WikipediaRetriever, + ZepRetriever, + ZillizRetriever, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
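
The `create_importer` / module-level `__getattr__` combination used throughout these shims is PEP 562 lazy attribute lookup. A minimal standalone sketch of the same pattern (the `my_pkg` names are hypothetical, and this is not the actual `create_importer` implementation):

```python
import importlib
import warnings
from typing import Any

# Hypothetical mapping, mirroring the DEPRECATED_LOOKUP dicts above.
_LOOKUP = {"WidgetRetriever": "my_pkg_community.retrievers"}


def __getattr__(name: str) -> Any:
    """Resolve deprecated names lazily on first access (PEP 562)."""
    if name in _LOOKUP:
        target = _LOOKUP[name]
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {target} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(target), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```
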
+DEPRECATED_LOOKUP = { + "AmazonKendraRetriever": "langchain_community.retrievers", + "AmazonKnowledgeBasesRetriever": "langchain_community.retrievers", + "ArceeRetriever": "langchain_community.retrievers", + "ArxivRetriever": "langchain_community.retrievers", + "AzureAISearchRetriever": "langchain_community.retrievers", + "AzureCognitiveSearchRetriever": "langchain_community.retrievers", + "ChatGPTPluginRetriever": "langchain_community.retrievers", + "ChaindeskRetriever": "langchain_community.retrievers", + "CohereRagRetriever": "langchain_community.retrievers", + "ElasticSearchBM25Retriever": "langchain_community.retrievers", + "EmbedchainRetriever": "langchain_community.retrievers", + "GoogleDocumentAIWarehouseRetriever": "langchain_community.retrievers", + "GoogleCloudEnterpriseSearchRetriever": "langchain_community.retrievers", + "GoogleVertexAIMultiTurnSearchRetriever": "langchain_community.retrievers", + "GoogleVertexAISearchRetriever": "langchain_community.retrievers", + "KayAiRetriever": "langchain_community.retrievers", + "KNNRetriever": "langchain_community.retrievers", + "LlamaIndexGraphRetriever": "langchain_community.retrievers", + "LlamaIndexRetriever": "langchain_community.retrievers", + "MetalRetriever": "langchain_community.retrievers", + "MilvusRetriever": "langchain_community.retrievers", + "OutlineRetriever": "langchain_community.retrievers", + "PineconeHybridSearchRetriever": "langchain_community.retrievers", + "PubMedRetriever": "langchain_community.retrievers", + "RemoteLangChainRetriever": "langchain_community.retrievers", + "SVMRetriever": "langchain_community.retrievers", + "TavilySearchAPIRetriever": "langchain_community.retrievers", + "BM25Retriever": "langchain_community.retrievers", + "DriaRetriever": "langchain_community.retrievers", + "NeuralDBRetriever": "langchain_community.retrievers", + "TFIDFRetriever": "langchain_community.retrievers", + "VespaRetriever": "langchain_community.retrievers", + "WeaviateHybridSearchRetriever": "langchain_community.retrievers", + "WebResearchRetriever": "langchain_community.retrievers", + "WikipediaRetriever": "langchain_community.retrievers", + "ZepRetriever": "langchain_community.retrievers", + "ZillizRetriever": "langchain_community.retrievers", + "DocArrayRetriever": "langchain_community.retrievers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AmazonKendraRetriever", + "AmazonKnowledgeBasesRetriever", + "ArceeRetriever", + "ArxivRetriever", + "AzureAISearchRetriever", + "AzureCognitiveSearchRetriever", + "BM25Retriever", + "ChaindeskRetriever", + "ChatGPTPluginRetriever", + "CohereRagRetriever", + "ContextualCompressionRetriever", + "DocArrayRetriever", + "DriaRetriever", + "ElasticSearchBM25Retriever", + "EmbedchainRetriever", + "EnsembleRetriever", + "GoogleCloudEnterpriseSearchRetriever", + "GoogleDocumentAIWarehouseRetriever", + "GoogleVertexAIMultiTurnSearchRetriever", + "GoogleVertexAISearchRetriever", + "KayAiRetriever", + "KNNRetriever", + "LlamaIndexGraphRetriever", + "LlamaIndexRetriever", + "MergerRetriever", + "MetalRetriever", + "MilvusRetriever", + "MultiQueryRetriever", + "MultiVectorRetriever", + "OutlineRetriever", + "ParentDocumentRetriever", + "PineconeHybridSearchRetriever", + "PubMedRetriever", + "RemoteLangChainRetriever", + "RePhraseQueryRetriever", + "SelfQueryRetriever", + "SVMRetriever", + 
"TavilySearchAPIRetriever", + "TFIDFRetriever", + "TimeWeightedVectorStoreRetriever", + "VespaRetriever", + "WeaviateHybridSearchRetriever", + "WebResearchRetriever", + "WikipediaRetriever", + "ZepRetriever", + "NeuralDBRetriever", + "ZillizRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..2e447e3d Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/arcee.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/arcee.cpython-312.pyc new file mode 100644 index 00000000..ebe51847 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/arcee.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/arxiv.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/arxiv.cpython-312.pyc new file mode 100644 index 00000000..d6761a82 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/arxiv.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/azure_ai_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/azure_ai_search.cpython-312.pyc new file mode 100644 index 00000000..a9956bb6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/azure_ai_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/bedrock.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/bedrock.cpython-312.pyc new file mode 100644 index 00000000..630330c2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/bedrock.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/bm25.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/bm25.cpython-312.pyc new file mode 100644 index 00000000..1bed8d73 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/bm25.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/chaindesk.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/chaindesk.cpython-312.pyc new file mode 100644 index 00000000..41b7d816 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/chaindesk.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/chatgpt_plugin_retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/chatgpt_plugin_retriever.cpython-312.pyc new file mode 100644 index 00000000..a8e59b5f Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/chatgpt_plugin_retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/cohere_rag_retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/cohere_rag_retriever.cpython-312.pyc new file mode 100644 index 00000000..f3613e8a Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/cohere_rag_retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/contextual_compression.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/retrievers/__pycache__/contextual_compression.cpython-312.pyc new file mode 100644 index 00000000..6dd7fed3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/contextual_compression.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/databerry.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/databerry.cpython-312.pyc new file mode 100644 index 00000000..c638f299 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/databerry.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/docarray.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/docarray.cpython-312.pyc new file mode 100644 index 00000000..2a60ff8b Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/docarray.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/elastic_search_bm25.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/elastic_search_bm25.cpython-312.pyc new file mode 100644 index 00000000..5119ce9f Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/elastic_search_bm25.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/embedchain.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/embedchain.cpython-312.pyc new file mode 100644 index 00000000..33b74ce6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/embedchain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/ensemble.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/ensemble.cpython-312.pyc new file mode 100644 index 00000000..a2206778 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/ensemble.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/google_cloud_documentai_warehouse.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/google_cloud_documentai_warehouse.cpython-312.pyc new file mode 100644 index 00000000..3f32ebbe Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/google_cloud_documentai_warehouse.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/google_vertex_ai_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/google_vertex_ai_search.cpython-312.pyc new file mode 100644 index 00000000..3f032d3b Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/google_vertex_ai_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/kay.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/kay.cpython-312.pyc new file mode 100644 index 00000000..538f5bbc Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/kay.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/kendra.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/kendra.cpython-312.pyc new file mode 100644 index 00000000..1463d862 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/kendra.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/retrievers/__pycache__/knn.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/knn.cpython-312.pyc new file mode 100644 index 00000000..fb6b7ac6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/knn.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/llama_index.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/llama_index.cpython-312.pyc new file mode 100644 index 00000000..7bec52dc Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/llama_index.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/merger_retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/merger_retriever.cpython-312.pyc new file mode 100644 index 00000000..f0a98f32 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/merger_retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/metal.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/metal.cpython-312.pyc new file mode 100644 index 00000000..853d0683 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/metal.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/milvus.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/milvus.cpython-312.pyc new file mode 100644 index 00000000..78ede9fd Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/milvus.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/multi_query.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/multi_query.cpython-312.pyc new file mode 100644 index 00000000..2621d00d Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/multi_query.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/multi_vector.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/multi_vector.cpython-312.pyc new file mode 100644 index 00000000..30c35fce Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/multi_vector.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/outline.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/outline.cpython-312.pyc new file mode 100644 index 00000000..59395e35 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/outline.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/parent_document_retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/parent_document_retriever.cpython-312.pyc new file mode 100644 index 00000000..715d85c6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/parent_document_retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/pinecone_hybrid_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/pinecone_hybrid_search.cpython-312.pyc new file mode 100644 index 00000000..d2d3a2d8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/pinecone_hybrid_search.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/retrievers/__pycache__/pubmed.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/pubmed.cpython-312.pyc new file mode 100644 index 00000000..f95dbeca Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/pubmed.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/pupmed.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/pupmed.cpython-312.pyc new file mode 100644 index 00000000..ef4f035f Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/pupmed.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/re_phraser.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/re_phraser.cpython-312.pyc new file mode 100644 index 00000000..4a9e837f Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/re_phraser.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/remote_retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/remote_retriever.cpython-312.pyc new file mode 100644 index 00000000..c58249ff Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/remote_retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/svm.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/svm.cpython-312.pyc new file mode 100644 index 00000000..04c4d80b Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/svm.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/tavily_search_api.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/tavily_search_api.cpython-312.pyc new file mode 100644 index 00000000..d61cb1ac Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/tavily_search_api.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/tfidf.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/tfidf.cpython-312.pyc new file mode 100644 index 00000000..2c035479 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/tfidf.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/time_weighted_retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/time_weighted_retriever.cpython-312.pyc new file mode 100644 index 00000000..f307343a Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/time_weighted_retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/vespa_retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/vespa_retriever.cpython-312.pyc new file mode 100644 index 00000000..229a17f9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/vespa_retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/weaviate_hybrid_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/weaviate_hybrid_search.cpython-312.pyc new file mode 100644 index 00000000..f7cd4cc2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/weaviate_hybrid_search.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/retrievers/__pycache__/web_research.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/web_research.cpython-312.pyc new file mode 100644 index 00000000..381882d8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/web_research.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/wikipedia.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/wikipedia.cpython-312.pyc new file mode 100644 index 00000000..2bb603da Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/wikipedia.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/you.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/you.cpython-312.pyc new file mode 100644 index 00000000..b3efb7fb Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/you.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/zep.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/zep.cpython-312.pyc new file mode 100644 index 00000000..c7c01947 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/zep.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/__pycache__/zilliz.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/__pycache__/zilliz.cpython-312.pyc new file mode 100644 index 00000000..ef7ab9bd Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/__pycache__/zilliz.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/arcee.py b/venv/Lib/site-packages/langchain/retrievers/arcee.py new file mode 100644 index 00000000..5ba3b563 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/arcee.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import ArceeRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ArceeRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArceeRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/arxiv.py b/venv/Lib/site-packages/langchain/retrievers/arxiv.py new file mode 100644 index 00000000..3fce2d20 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/arxiv.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import ArxivRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
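+# Only ArxivRetriever ever lived in this module, so the shim proxies a single
+# name. The forward-compatible import is:
+#
+#     from langchain_community.retrievers import ArxivRetriever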
+DEPRECATED_LOOKUP = {"ArxivRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArxivRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/azure_ai_search.py b/venv/Lib/site-packages/langchain/retrievers/azure_ai_search.py new file mode 100644 index 00000000..fd061535 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/azure_ai_search.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import ( + AzureAISearchRetriever, + AzureCognitiveSearchRetriever, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AzureAISearchRetriever": "langchain_community.retrievers", + "AzureCognitiveSearchRetriever": "langchain_community.retrievers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureAISearchRetriever", + "AzureCognitiveSearchRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/bedrock.py b/venv/Lib/site-packages/langchain/retrievers/bedrock.py new file mode 100644 index 00000000..25558d92 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/bedrock.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import AmazonKnowledgeBasesRetriever + from langchain_community.retrievers.bedrock import ( + RetrievalConfig, + VectorSearchConfig, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "VectorSearchConfig": "langchain_community.retrievers.bedrock", + "RetrievalConfig": "langchain_community.retrievers.bedrock", + "AmazonKnowledgeBasesRetriever": "langchain_community.retrievers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "VectorSearchConfig", + "RetrievalConfig", + "AmazonKnowledgeBasesRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/bm25.py b/venv/Lib/site-packages/langchain/retrievers/bm25.py new file mode 100644 index 00000000..e5dc4195 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/bm25.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import BM25Retriever + from langchain_community.retrievers.bm25 import default_preprocessing_func + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "default_preprocessing_func": "langchain_community.retrievers.bm25", + "BM25Retriever": "langchain_community.retrievers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "default_preprocessing_func", + "BM25Retriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/chaindesk.py b/venv/Lib/site-packages/langchain/retrievers/chaindesk.py new file mode 100644 index 00000000..a0a9f6ee --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/chaindesk.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import ChaindeskRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChaindeskRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChaindeskRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/chatgpt_plugin_retriever.py b/venv/Lib/site-packages/langchain/retrievers/chatgpt_plugin_retriever.py new file mode 100644 index 00000000..6258c462 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/chatgpt_plugin_retriever.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import ChatGPTPluginRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ChatGPTPluginRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ChatGPTPluginRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/cohere_rag_retriever.py b/venv/Lib/site-packages/langchain/retrievers/cohere_rag_retriever.py new file mode 100644 index 00000000..7c2e1bed --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/cohere_rag_retriever.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import CohereRagRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"CohereRagRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CohereRagRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/contextual_compression.py b/venv/Lib/site-packages/langchain/retrievers/contextual_compression.py new file mode 100644 index 00000000..98da4e6e --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/contextual_compression.py @@ -0,0 +1,79 @@ +from typing import Any + +from langchain_core.callbacks import ( + AsyncCallbackManagerForRetrieverRun, + CallbackManagerForRetrieverRun, +) +from langchain_core.documents import Document +from langchain_core.retrievers import BaseRetriever, RetrieverLike +from pydantic import ConfigDict + +from langchain.retrievers.document_compressors.base import ( + BaseDocumentCompressor, +) + + +class ContextualCompressionRetriever(BaseRetriever): + """Retriever that wraps a base retriever and compresses the results.""" + + base_compressor: BaseDocumentCompressor + """Compressor for compressing retrieved documents.""" + + base_retriever: RetrieverLike + """Base Retriever to use for getting relevant documents.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def _get_relevant_documents( + self, + query: str, + *, + run_manager: CallbackManagerForRetrieverRun, + **kwargs: Any, + ) -> list[Document]: + """Get documents relevant for a query. + + Args: + query: string to find relevant documents for + + Returns: + Sequence of relevant documents + """ + docs = self.base_retriever.invoke( + query, config={"callbacks": run_manager.get_child()}, **kwargs + ) + if docs: + compressed_docs = self.base_compressor.compress_documents( + docs, query, callbacks=run_manager.get_child() + ) + return list(compressed_docs) + else: + return [] + + async def _aget_relevant_documents( + self, + query: str, + *, + run_manager: AsyncCallbackManagerForRetrieverRun, + **kwargs: Any, + ) -> list[Document]: + """Get documents relevant for a query. + + Args: + query: string to find relevant documents for + + Returns: + List of relevant documents + """ + docs = await self.base_retriever.ainvoke( + query, config={"callbacks": run_manager.get_child()}, **kwargs + ) + if docs: + compressed_docs = await self.base_compressor.acompress_documents( + docs, query, callbacks=run_manager.get_child() + ) + return list(compressed_docs) + else: + return [] diff --git a/venv/Lib/site-packages/langchain/retrievers/databerry.py b/venv/Lib/site-packages/langchain/retrievers/databerry.py new file mode 100644 index 00000000..3eeca776 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/databerry.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers.databerry import DataberryRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"DataberryRetriever": "langchain_community.retrievers.databerry"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DataberryRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/docarray.py b/venv/Lib/site-packages/langchain/retrievers/docarray.py new file mode 100644 index 00000000..7f433c58 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/docarray.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import DocArrayRetriever + from langchain_community.retrievers.docarray import SearchType + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SearchType": "langchain_community.retrievers.docarray", + "DocArrayRetriever": "langchain_community.retrievers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearchType", + "DocArrayRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__init__.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__init__.py new file mode 100644 index 00000000..de0710bf --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__init__.py @@ -0,0 +1,42 @@ +import importlib +from typing import Any + +from langchain.retrievers.document_compressors.base import DocumentCompressorPipeline +from langchain.retrievers.document_compressors.chain_extract import ( + LLMChainExtractor, +) +from langchain.retrievers.document_compressors.chain_filter import ( + LLMChainFilter, +) +from langchain.retrievers.document_compressors.cohere_rerank import CohereRerank +from langchain.retrievers.document_compressors.cross_encoder_rerank import ( + CrossEncoderReranker, +) +from langchain.retrievers.document_compressors.embeddings_filter import ( + EmbeddingsFilter, +) +from langchain.retrievers.document_compressors.listwise_rerank import ( + LLMListwiseRerank, +) + +_module_lookup = { + "FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank", +} + + +def __getattr__(name: str) -> Any: + if name in _module_lookup: + module = importlib.import_module(_module_lookup[name]) + return getattr(module, name) + raise AttributeError(f"module {__name__} has no attribute {name}") + + +__all__ = [ + "DocumentCompressorPipeline", + "EmbeddingsFilter", + "LLMListwiseRerank", + "LLMChainExtractor", + "LLMChainFilter", + "CohereRerank", + "CrossEncoderReranker", +] + list(_module_lookup.keys()) diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..750f77c5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/base.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..99491130 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_extract.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_extract.cpython-312.pyc new file mode 100644 index 00000000..3be03649 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_extract.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_extract_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_extract_prompt.cpython-312.pyc new file mode 100644 index 00000000..85c30b80 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_extract_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_filter.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_filter.cpython-312.pyc new file mode 100644 index 00000000..53b2f63e Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_filter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_filter_prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_filter_prompt.cpython-312.pyc new file mode 100644 index 00000000..a070f759 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/chain_filter_prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/cohere_rerank.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/cohere_rerank.cpython-312.pyc new file mode 100644 index 00000000..30ec96c2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/cohere_rerank.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/cross_encoder.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/cross_encoder.cpython-312.pyc new file mode 100644 index 00000000..b5ce2d0f Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/cross_encoder.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/cross_encoder_rerank.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/cross_encoder_rerank.cpython-312.pyc new file mode 100644 index 00000000..2787b5e1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/cross_encoder_rerank.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/embeddings_filter.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/embeddings_filter.cpython-312.pyc new file mode 100644 index 00000000..bb184617 Binary 
files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/embeddings_filter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/flashrank_rerank.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/flashrank_rerank.cpython-312.pyc new file mode 100644 index 00000000..ced17e40 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/flashrank_rerank.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/listwise_rerank.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/listwise_rerank.cpython-312.pyc new file mode 100644 index 00000000..43b70b1f Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/document_compressors/__pycache__/listwise_rerank.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/base.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/base.py new file mode 100644 index 00000000..7a2ca567 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/base.py @@ -0,0 +1,76 @@ +from collections.abc import Sequence +from inspect import signature +from typing import Optional, Union + +from langchain_core.callbacks.manager import Callbacks +from langchain_core.documents import ( + BaseDocumentCompressor, + BaseDocumentTransformer, + Document, +) +from pydantic import ConfigDict + + +class DocumentCompressorPipeline(BaseDocumentCompressor): + """Document compressor that uses a pipeline of Transformers.""" + + transformers: list[Union[BaseDocumentTransformer, BaseDocumentCompressor]] + """List of document filters that are chained together and run in sequence.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def compress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Transform a list of documents.""" + for _transformer in self.transformers: + if isinstance(_transformer, BaseDocumentCompressor): + accepts_callbacks = ( + signature(_transformer.compress_documents).parameters.get( + "callbacks" + ) + is not None + ) + if accepts_callbacks: + documents = _transformer.compress_documents( + documents, query, callbacks=callbacks + ) + else: + documents = _transformer.compress_documents(documents, query) + elif isinstance(_transformer, BaseDocumentTransformer): + documents = _transformer.transform_documents(documents) + else: + raise ValueError(f"Got unexpected transformer type: {_transformer}") + return documents + + async def acompress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Compress retrieved documents given the query context.""" + for _transformer in self.transformers: + if isinstance(_transformer, BaseDocumentCompressor): + accepts_callbacks = ( + signature(_transformer.acompress_documents).parameters.get( + "callbacks" + ) + is not None + ) + if accepts_callbacks: + documents = await _transformer.acompress_documents( + documents, query, callbacks=callbacks + ) + else: + documents = await _transformer.acompress_documents(documents, query) + elif isinstance(_transformer, BaseDocumentTransformer): + documents = await _transformer.atransform_documents(documents) + else: + raise 
ValueError(f"Got unexpected transformer type: {_transformer}") + return documents diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_extract.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_extract.py new file mode 100644 index 00000000..a0892504 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_extract.py @@ -0,0 +1,122 @@ +"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents.""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any, Callable, Optional, cast + +from langchain_core.callbacks.manager import Callbacks +from langchain_core.documents import Document +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import BaseOutputParser, StrOutputParser +from langchain_core.prompts import PromptTemplate +from langchain_core.runnables import Runnable +from pydantic import ConfigDict + +from langchain.chains.llm import LLMChain +from langchain.retrievers.document_compressors.base import BaseDocumentCompressor +from langchain.retrievers.document_compressors.chain_extract_prompt import ( + prompt_template, +) + + +def default_get_input(query: str, doc: Document) -> dict[str, Any]: + """Return the compression chain input.""" + return {"question": query, "context": doc.page_content} + + +class NoOutputParser(BaseOutputParser[str]): + """Parse outputs that could return a null string of some sort.""" + + no_output_str: str = "NO_OUTPUT" + + def parse(self, text: str) -> str: + cleaned_text = text.strip() + if cleaned_text == self.no_output_str: + return "" + return cleaned_text + + +def _get_default_chain_prompt() -> PromptTemplate: + output_parser = NoOutputParser() + template = prompt_template.format(no_output_str=output_parser.no_output_str) + return PromptTemplate( + template=template, + input_variables=["question", "context"], + output_parser=output_parser, + ) + + +class LLMChainExtractor(BaseDocumentCompressor): + """Document compressor that uses an LLM chain to extract + the relevant parts of documents.""" + + llm_chain: Runnable + """LLM wrapper to use for compressing documents.""" + + get_input: Callable[[str, Document], dict] = default_get_input + """Callable for constructing the chain input from the query and a Document.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def compress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Compress page content of raw documents.""" + compressed_docs = [] + for doc in documents: + _input = self.get_input(query, doc) + output_ = self.llm_chain.invoke(_input, config={"callbacks": callbacks}) + if isinstance(self.llm_chain, LLMChain): + output = output_[self.llm_chain.output_key] + if self.llm_chain.prompt.output_parser is not None: + output = self.llm_chain.prompt.output_parser.parse(output) + else: + output = output_ + if len(output) == 0: + continue + compressed_docs.append( + Document(page_content=cast(str, output), metadata=doc.metadata) + ) + return compressed_docs + + async def acompress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Compress page content of raw documents asynchronously.""" + inputs = [self.get_input(query, doc) for doc in documents] + outputs = await self.llm_chain.abatch(inputs, {"callbacks": 
callbacks}) + compressed_docs = [] + for i, doc in enumerate(documents): + if len(outputs[i]) == 0: + continue + compressed_docs.append( + Document(page_content=outputs[i], metadata=doc.metadata) + ) + return compressed_docs + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + prompt: Optional[PromptTemplate] = None, + get_input: Optional[Callable[[str, Document], str]] = None, + llm_chain_kwargs: Optional[dict] = None, + ) -> LLMChainExtractor: + """Initialize from LLM.""" + _prompt = prompt if prompt is not None else _get_default_chain_prompt() + _get_input = get_input if get_input is not None else default_get_input + if _prompt.output_parser is not None: + parser = _prompt.output_parser + else: + parser = StrOutputParser() + llm_chain = _prompt | llm | parser + return cls(llm_chain=llm_chain, get_input=_get_input) # type: ignore[arg-type] diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_extract_prompt.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_extract_prompt.py new file mode 100644 index 00000000..c27b8770 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_extract_prompt.py @@ -0,0 +1,11 @@ +# flake8: noqa +prompt_template = """Given the following question and context, extract any part of the context *AS IS* that is relevant to answer the question. If none of the context is relevant return {no_output_str}. + +Remember, *DO NOT* edit the extracted parts of the context. + +> Question: {{question}} +> Context: +>>> +{{context}} +>>> +Extracted relevant parts:""" diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_filter.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_filter.py new file mode 100644 index 00000000..a696c288 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_filter.py @@ -0,0 +1,134 @@ +"""Filter that uses an LLM to drop documents that aren't relevant to the query.""" + +from collections.abc import Sequence +from typing import Any, Callable, Optional + +from langchain_core.callbacks.manager import Callbacks +from langchain_core.documents import Document +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts import BasePromptTemplate, PromptTemplate +from langchain_core.runnables import Runnable +from langchain_core.runnables.config import RunnableConfig +from pydantic import ConfigDict + +from langchain.chains import LLMChain +from langchain.output_parsers.boolean import BooleanOutputParser +from langchain.retrievers.document_compressors.base import BaseDocumentCompressor +from langchain.retrievers.document_compressors.chain_filter_prompt import ( + prompt_template, +) + + +def _get_default_chain_prompt() -> PromptTemplate: + return PromptTemplate( + template=prompt_template, + input_variables=["question", "context"], + output_parser=BooleanOutputParser(), + ) + + +def default_get_input(query: str, doc: Document) -> dict[str, Any]: + """Return the compression chain input.""" + return {"question": query, "context": doc.page_content} + + +class LLMChainFilter(BaseDocumentCompressor): + """Filter that drops documents that aren't relevant to the query.""" + + llm_chain: Runnable + """LLM wrapper to use for filtering documents. 
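+    Invoked once per document with inputs built by get_input, by default
+    {"question": <query>, "context": <doc.page_content>}.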
+ The chain prompt is expected to have a BooleanOutputParser.""" + + get_input: Callable[[str, Document], dict] = default_get_input + """Callable for constructing the chain input from the query and a Document.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def compress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Filter down documents based on their relevance to the query.""" + filtered_docs = [] + + config = RunnableConfig(callbacks=callbacks) + outputs = zip( + self.llm_chain.batch( + [self.get_input(query, doc) for doc in documents], config=config + ), + documents, + ) + + for output_, doc in outputs: + include_doc = None + if isinstance(self.llm_chain, LLMChain): + output = output_[self.llm_chain.output_key] + if self.llm_chain.prompt.output_parser is not None: + include_doc = self.llm_chain.prompt.output_parser.parse(output) + else: + if isinstance(output_, bool): + include_doc = output_ + if include_doc: + filtered_docs.append(doc) + + return filtered_docs + + async def acompress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Filter down documents based on their relevance to the query.""" + filtered_docs = [] + + config = RunnableConfig(callbacks=callbacks) + outputs = zip( + await self.llm_chain.abatch( + [self.get_input(query, doc) for doc in documents], config=config + ), + documents, + ) + for output_, doc in outputs: + include_doc = None + if isinstance(self.llm_chain, LLMChain): + output = output_[self.llm_chain.output_key] + if self.llm_chain.prompt.output_parser is not None: + include_doc = self.llm_chain.prompt.output_parser.parse(output) + else: + if isinstance(output_, bool): + include_doc = output_ + if include_doc: + filtered_docs.append(doc) + + return filtered_docs + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + prompt: Optional[BasePromptTemplate] = None, + **kwargs: Any, + ) -> "LLMChainFilter": + """Create a LLMChainFilter from a language model. + + Args: + llm: The language model to use for filtering. + prompt: The prompt to use for the filter. + kwargs: Additional arguments to pass to the constructor. + + Returns: + A LLMChainFilter that uses the given language model. + """ + _prompt = prompt if prompt is not None else _get_default_chain_prompt() + if _prompt.output_parser is not None: + parser = _prompt.output_parser + else: + parser = StrOutputParser() + llm_chain = _prompt | llm | parser + return cls(llm_chain=llm_chain, **kwargs) diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_filter_prompt.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_filter_prompt.py new file mode 100644 index 00000000..5376dfa2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/chain_filter_prompt.py @@ -0,0 +1,9 @@ +# flake8: noqa +prompt_template = """Given the following question and context, return YES if the context is relevant to the question and NO if it isn't. 
+ +> Question: {question} +> Context: +>>> +{context} +>>> +> Relevant (YES / NO):""" diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/cohere_rerank.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/cohere_rerank.py new file mode 100644 index 00000000..c8c4c06f --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/cohere_rerank.py @@ -0,0 +1,126 @@ +from __future__ import annotations + +from collections.abc import Sequence +from copy import deepcopy +from typing import Any, Optional, Union + +from langchain_core._api.deprecation import deprecated +from langchain_core.callbacks.manager import Callbacks +from langchain_core.documents import Document +from langchain_core.utils import get_from_dict_or_env +from pydantic import ConfigDict, model_validator + +from langchain.retrievers.document_compressors.base import BaseDocumentCompressor + + +@deprecated( + since="0.0.30", removal="1.0", alternative_import="langchain_cohere.CohereRerank" +) +class CohereRerank(BaseDocumentCompressor): + """Document compressor that uses `Cohere Rerank API`.""" + + client: Any = None + """Cohere client to use for compressing documents.""" + top_n: Optional[int] = 3 + """Number of documents to return.""" + model: str = "rerank-english-v2.0" + """Model to use for reranking.""" + cohere_api_key: Optional[str] = None + """Cohere API key. Must be specified directly or via environment variable + COHERE_API_KEY.""" + user_agent: str = "langchain" + """Identifier for the application making the request.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @model_validator(mode="before") + @classmethod + def validate_environment(cls, values: dict) -> Any: + """Validate that api key and python package exist in environment.""" + if not values.get("client"): + try: + import cohere + except ImportError: + raise ImportError( + "Could not import cohere python package. " + "Please install it with `pip install cohere`." + ) + cohere_api_key = get_from_dict_or_env( + values, "cohere_api_key", "COHERE_API_KEY" + ) + client_name = values.get("user_agent", "langchain") + values["client"] = cohere.Client(cohere_api_key, client_name=client_name) + return values + + def rerank( + self, + documents: Sequence[Union[str, Document, dict]], + query: str, + *, + model: Optional[str] = None, + top_n: Optional[int] = -1, + max_chunks_per_doc: Optional[int] = None, + ) -> list[dict[str, Any]]: + """Return a list of documents ordered by their relevance to the provided query. + + Args: + query: The query to use for reranking. + documents: A sequence of documents to rerank. + model: The model to use for re-ranking. Defaults to self.model. + top_n : The number of results to return. If None, returns all results. + Defaults to self.top_n. + max_chunks_per_doc : The maximum number of chunks derived from a document. 
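+        Returns:
+            A list of {"index": int, "relevance_score": float} dicts, where
+            index points back into the input documents sequence.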
+ """ # noqa: E501 + if len(documents) == 0: # to avoid empty api call + return [] + docs = [ + doc.page_content if isinstance(doc, Document) else doc for doc in documents + ] + model = model or self.model + top_n = top_n if (top_n is None or top_n > 0) else self.top_n + results = self.client.rerank( + query=query, + documents=docs, + model=model, + top_n=top_n, + max_chunks_per_doc=max_chunks_per_doc, + ) + if hasattr(results, "results"): + results = getattr(results, "results") + result_dicts = [] + for res in results: + result_dicts.append( + { + "index": res.index, + "relevance_score": res.relevance_score, + } + ) + return result_dicts + + def compress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """ + Compress documents using Cohere's rerank API. + + Args: + documents: A sequence of documents to compress. + query: The query to use for compressing the documents. + callbacks: Callbacks to run during the compression process. + + Returns: + A sequence of compressed documents. + """ + compressed = [] + for res in self.rerank(documents, query): + doc = documents[res["index"]] + doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata)) + doc_copy.metadata["relevance_score"] = res["relevance_score"] + compressed.append(doc_copy) + return compressed diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/cross_encoder.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/cross_encoder.py new file mode 100644 index 00000000..7a26ceb5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/cross_encoder.py @@ -0,0 +1,16 @@ +from abc import ABC, abstractmethod + + +class BaseCrossEncoder(ABC): + """Interface for cross encoder models.""" + + @abstractmethod + def score(self, text_pairs: list[tuple[str, str]]) -> list[float]: + """Score pairs' similarity. + + Args: + text_pairs: List of pairs of texts. + + Returns: + List of scores. + """ diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/cross_encoder_rerank.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/cross_encoder_rerank.py new file mode 100644 index 00000000..f786279e --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/cross_encoder_rerank.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +import operator +from collections.abc import Sequence +from typing import Optional + +from langchain_core.callbacks import Callbacks +from langchain_core.documents import BaseDocumentCompressor, Document +from pydantic import ConfigDict + +from langchain.retrievers.document_compressors.cross_encoder import BaseCrossEncoder + + +class CrossEncoderReranker(BaseDocumentCompressor): + """Document compressor that uses CrossEncoder for reranking.""" + + model: BaseCrossEncoder + """CrossEncoder model to use for scoring similarity + between the query and documents.""" + top_n: int = 3 + """Number of documents to return.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + def compress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """ + Rerank documents using CrossEncoder. + + Args: + documents: A sequence of documents to compress. + query: The query to use for compressing the documents. + callbacks: Callbacks to run during the compression process. 
+ + Returns: + A sequence of compressed documents. + """ + scores = self.model.score([(query, doc.page_content) for doc in documents]) + docs_with_scores = list(zip(documents, scores)) + result = sorted(docs_with_scores, key=operator.itemgetter(1), reverse=True) + return [doc for doc, _ in result[: self.top_n]] diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/embeddings_filter.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/embeddings_filter.py new file mode 100644 index 00000000..3915d0ed --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/embeddings_filter.py @@ -0,0 +1,137 @@ +from collections.abc import Sequence +from typing import Callable, Optional + +from langchain_core.callbacks.manager import Callbacks +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings +from langchain_core.utils import pre_init +from pydantic import ConfigDict, Field + +from langchain.retrievers.document_compressors.base import ( + BaseDocumentCompressor, +) + + +def _get_similarity_function() -> Callable: + try: + from langchain_community.utils.math import cosine_similarity + except ImportError: + raise ImportError( + "To use please install langchain-community " + "with `pip install langchain-community`." + ) + return cosine_similarity + + +class EmbeddingsFilter(BaseDocumentCompressor): + """Document compressor that uses embeddings to drop documents + unrelated to the query.""" + + embeddings: Embeddings + """Embeddings to use for embedding document contents and queries.""" + similarity_fn: Callable = Field(default_factory=_get_similarity_function) + """Similarity function for comparing documents. Function expected to take as input + two matrices (List[List[float]]) and return a matrix of scores where higher values + indicate greater similarity.""" + k: Optional[int] = 20 + """The number of relevant documents to return. Can be set to None, in which case + `similarity_threshold` must be specified. Defaults to 20.""" + similarity_threshold: Optional[float] = None + """Threshold for determining when two documents are similar enough + to be considered redundant. Defaults to None, must be specified if `k` is set + to None.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @pre_init + def validate_params(cls, values: dict) -> dict: + """Validate similarity parameters.""" + if values["k"] is None and values["similarity_threshold"] is None: + raise ValueError("Must specify one of `k` or `similarity_threshold`.") + return values + + def compress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Filter documents based on similarity of their embeddings to the query.""" + try: + from langchain_community.document_transformers.embeddings_redundant_filter import ( # noqa: E501 + _get_embeddings_from_stateful_docs, + get_stateful_documents, + ) + except ImportError: + raise ImportError( + "To use please install langchain-community " + "with `pip install langchain-community`." + ) + + try: + import numpy as np + except ImportError as e: + raise ImportError( + "Could not import numpy, please install with `pip install numpy`." 
+ ) from e + stateful_documents = get_stateful_documents(documents) + embedded_documents = _get_embeddings_from_stateful_docs( + self.embeddings, stateful_documents + ) + embedded_query = self.embeddings.embed_query(query) + similarity = self.similarity_fn([embedded_query], embedded_documents)[0] + included_idxs: np.ndarray = np.arange(len(embedded_documents)) + if self.k is not None: + included_idxs = np.argsort(similarity)[::-1][: self.k] + if self.similarity_threshold is not None: + similar_enough = np.where( + similarity[included_idxs] > self.similarity_threshold + ) + included_idxs = included_idxs[similar_enough] + for i in included_idxs: + stateful_documents[i].state["query_similarity_score"] = similarity[i] + return [stateful_documents[i] for i in included_idxs] + + async def acompress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Filter documents based on similarity of their embeddings to the query.""" + try: + from langchain_community.document_transformers.embeddings_redundant_filter import ( # noqa: E501 + _aget_embeddings_from_stateful_docs, + get_stateful_documents, + ) + except ImportError: + raise ImportError( + "To use please install langchain-community " + "with `pip install langchain-community`." + ) + + try: + import numpy as np + except ImportError as e: + raise ImportError( + "Could not import numpy, please install with `pip install numpy`." + ) from e + stateful_documents = get_stateful_documents(documents) + embedded_documents = await _aget_embeddings_from_stateful_docs( + self.embeddings, stateful_documents + ) + embedded_query = await self.embeddings.aembed_query(query) + similarity = self.similarity_fn([embedded_query], embedded_documents)[0] + included_idxs: np.ndarray = np.arange(len(embedded_documents)) + if self.k is not None: + included_idxs = np.argsort(similarity)[::-1][: self.k] + if self.similarity_threshold is not None: + similar_enough = np.where( + similarity[included_idxs] > self.similarity_threshold + ) + included_idxs = included_idxs[similar_enough] + for i in included_idxs: + stateful_documents[i].state["query_similarity_score"] = similarity[i] + return [stateful_documents[i] for i in included_idxs] diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/flashrank_rerank.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/flashrank_rerank.py new file mode 100644 index 00000000..f2196fa6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/flashrank_rerank.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_compressors.flashrank_rerank import ( + FlashrankRerank, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
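The three compressors introduced above (`CohereRerank`, `CrossEncoderReranker`, `EmbeddingsFilter`) all plug into the same `ContextualCompressionRetriever` slot. A minimal sketch using `EmbeddingsFilter`, assuming `langchain-openai`, `langchain-community`, and `faiss-cpu` are installed; the corpus and the 0.76 threshold are illustrative only:

```python
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors.embeddings_filter import (
    EmbeddingsFilter,
)
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_texts(
    ["Course: Intro to Python", "Cafeteria menu for Monday"],  # placeholder corpus
    embeddings,
)

# Drop retrieved chunks whose similarity to the query falls below 0.76.
compressor = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)
retriever = ContextualCompressionRetriever(
    base_compressor=compressor,
    base_retriever=vectorstore.as_retriever(),
)
docs = retriever.invoke("Which programming courses are offered?")
```

Swapping in `CrossEncoderReranker` or `CohereRerank` only changes the `base_compressor` argument; the retriever wiring stays the same.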
+DEPRECATED_LOOKUP = { + "FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FlashrankRerank", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/document_compressors/listwise_rerank.py b/venv/Lib/site-packages/langchain/retrievers/document_compressors/listwise_rerank.py new file mode 100644 index 00000000..4b83073b --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/document_compressors/listwise_rerank.py @@ -0,0 +1,139 @@ +"""Filter that uses an LLM to rerank documents listwise and select top-k.""" + +from collections.abc import Sequence +from typing import Any, Optional + +from langchain_core.callbacks import Callbacks +from langchain_core.documents import BaseDocumentCompressor, Document +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate +from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough +from pydantic import BaseModel, ConfigDict, Field + +_default_system_tmpl = """{context} + +Sort the Documents by their relevance to the Query.""" +_DEFAULT_PROMPT = ChatPromptTemplate.from_messages( + [("system", _default_system_tmpl), ("human", "{query}")], +) + + +def _get_prompt_input(input_: dict) -> dict[str, Any]: + """Return the compression chain input.""" + documents = input_["documents"] + context = "" + for index, doc in enumerate(documents): + context += f"Document ID: {index}\n```{doc.page_content}```\n\n" + context += f"Documents = [Document ID: 0, ..., Document ID: {len(documents) - 1}]" + return {"query": input_["query"], "context": context} + + +def _parse_ranking(results: dict) -> list[Document]: + ranking = results["ranking"] + docs = results["documents"] + return [docs[i] for i in ranking.ranked_document_ids] + + +class LLMListwiseRerank(BaseDocumentCompressor): + """Document compressor that uses `Zero-Shot Listwise Document Reranking`. + + Adapted from: https://arxiv.org/pdf/2305.02156.pdf + + ``LLMListwiseRerank`` uses a language model to rerank a list of documents based on + their relevance to a query. + + **NOTE**: requires that underlying model implement ``with_structured_output``. + + Example usage: + .. code-block:: python + + from langchain.retrievers.document_compressors.listwise_rerank import ( + LLMListwiseRerank, + ) + from langchain_core.documents import Document + from langchain_openai import ChatOpenAI + + documents = [ + Document("Sally is my friend from school"), + Document("Steve is my friend from home"), + Document("I didn't always like yogurt"), + Document("I wonder why it's called football"), + Document("Where's waldo"), + ] + + reranker = LLMListwiseRerank.from_llm( + llm=ChatOpenAI(model="gpt-3.5-turbo"), top_n=3 + ) + compressed_docs = reranker.compress_documents(documents, "Who is steve") + assert len(compressed_docs) == 3 + assert "Steve" in compressed_docs[0].page_content + """ + + reranker: Runnable[dict, list[Document]] + """LLM-based reranker to use for filtering documents. 
Expected to take in a dict + with 'documents: Sequence[Document]' and 'query: str' keys and output a + List[Document].""" + + top_n: int = 3 + """Number of documents to return.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def compress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Filter down documents based on their relevance to the query.""" + results = self.reranker.invoke( + {"documents": documents, "query": query}, config={"callbacks": callbacks} + ) + return results[: self.top_n] + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + *, + prompt: Optional[BasePromptTemplate] = None, + **kwargs: Any, + ) -> "LLMListwiseRerank": + """Create a LLMListwiseRerank document compressor from a language model. + + Args: + llm: The language model to use for filtering. **Must implement + BaseLanguageModel.with_structured_output().** + prompt: The prompt to use for the filter. + kwargs: Additional arguments to pass to the constructor. + + Returns: + A LLMListwiseRerank document compressor that uses the given language model. + """ + + if llm.with_structured_output == BaseLanguageModel.with_structured_output: + raise ValueError( + f"llm of type {type(llm)} does not implement `with_structured_output`." + ) + + class RankDocuments(BaseModel): + """Rank the documents by their relevance to the user question. + Rank from most to least relevant.""" + + ranked_document_ids: list[int] = Field( + ..., + description=( + "The integer IDs of the documents, sorted from most to least " + "relevant to the user question." + ), + ) + + _prompt = prompt if prompt is not None else _DEFAULT_PROMPT + reranker = RunnablePassthrough.assign( + ranking=RunnableLambda(_get_prompt_input) + | _prompt + | llm.with_structured_output(RankDocuments) + ) | RunnableLambda(_parse_ranking) + return cls(reranker=reranker, **kwargs) diff --git a/venv/Lib/site-packages/langchain/retrievers/elastic_search_bm25.py b/venv/Lib/site-packages/langchain/retrievers/elastic_search_bm25.py new file mode 100644 index 00000000..52d5ffae --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/elastic_search_bm25.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import ElasticSearchBM25Retriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ElasticSearchBM25Retriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ElasticSearchBM25Retriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/embedchain.py b/venv/Lib/site-packages/langchain/retrievers/embedchain.py new file mode 100644 index 00000000..5e2958db --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/embedchain.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import EmbedchainRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
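`flashrank_rerank.py` above and most of the small retriever modules that follow (`elastic_search_bm25.py`, `embedchain.py`, and so on) share the same deprecation-shim pattern. Conceptually, `create_importer` reduces to a module-level `__getattr__` (PEP 562) that lazily imports the moved class and warns; a hand-rolled sketch of the idea, with a hypothetical class name:

```python
import importlib
import warnings
from typing import Any

# Hypothetical mapping, in the style of the DEPRECATED_LOOKUP tables below.
DEPRECATED_LOOKUP = {"ExampleRetriever": "langchain_community.retrievers"}


def __getattr__(name: str) -> Any:
    """Consulted only when `name` is not found by normal module lookup."""
    if name in DEPRECATED_LOOKUP:
        new_module = DEPRECATED_LOOKUP[name]
        warnings.warn(
            f"Importing {name} from here is deprecated; "
            f"import it from {new_module} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(new_module), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

The real `create_importer` handles a few more cases (fallback modules, error messages), but old import paths keep working in roughly this way while the implementations live in `langchain_community`.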
+DEPRECATED_LOOKUP = {"EmbedchainRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EmbedchainRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/ensemble.py b/venv/Lib/site-packages/langchain/retrievers/ensemble.py new file mode 100644 index 00000000..1fe344c0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/ensemble.py @@ -0,0 +1,333 @@ +""" +Ensemble retriever that ensemble the results of +multiple retrievers by using weighted Reciprocal Rank Fusion +""" + +import asyncio +from collections import defaultdict +from collections.abc import Hashable, Iterable, Iterator +from itertools import chain +from typing import ( + Any, + Callable, + Optional, + TypeVar, + cast, +) + +from langchain_core.callbacks import ( + AsyncCallbackManagerForRetrieverRun, + CallbackManagerForRetrieverRun, +) +from langchain_core.documents import Document +from langchain_core.retrievers import BaseRetriever, RetrieverLike +from langchain_core.runnables import RunnableConfig +from langchain_core.runnables.config import ensure_config, patch_config +from langchain_core.runnables.utils import ( + ConfigurableFieldSpec, + get_unique_config_specs, +) +from pydantic import model_validator + +T = TypeVar("T") +H = TypeVar("H", bound=Hashable) + + +def unique_by_key(iterable: Iterable[T], key: Callable[[T], H]) -> Iterator[T]: + """Yield unique elements of an iterable based on a key function. + + Args: + iterable: The iterable to filter. + key: A function that returns a hashable key for each element. + + Yields: + Unique elements of the iterable based on the key function. + """ + seen = set() + for e in iterable: + if (k := key(e)) not in seen: + seen.add(k) + yield e + + +class EnsembleRetriever(BaseRetriever): + """Retriever that ensembles the multiple retrievers. + + It uses a rank fusion. + + Args: + retrievers: A list of retrievers to ensemble. + weights: A list of weights corresponding to the retrievers. Defaults to equal + weighting for all retrievers. + c: A constant added to the rank, controlling the balance between the importance + of high-ranked items and the consideration given to lower-ranked items. + Default is 60. + id_key: The key in the document's metadata used to determine unique documents. + If not specified, page_content is used. 
+ """ + + retrievers: list[RetrieverLike] + weights: list[float] + c: int = 60 + id_key: Optional[str] = None + + @property + def config_specs(self) -> list[ConfigurableFieldSpec]: + """List configurable fields for this runnable.""" + return get_unique_config_specs( + spec for retriever in self.retrievers for spec in retriever.config_specs + ) + + @model_validator(mode="before") + @classmethod + def set_weights(cls, values: dict[str, Any]) -> Any: + if not values.get("weights"): + n_retrievers = len(values["retrievers"]) + values["weights"] = [1 / n_retrievers] * n_retrievers + return values + + def invoke( + self, input: str, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> list[Document]: + from langchain_core.callbacks import CallbackManager + + config = ensure_config(config) + callback_manager = CallbackManager.configure( + config.get("callbacks"), + None, + verbose=kwargs.get("verbose", False), + inheritable_tags=config.get("tags", []), + local_tags=self.tags, + inheritable_metadata=config.get("metadata", {}), + local_metadata=self.metadata, + ) + run_manager = callback_manager.on_retriever_start( + None, + input, + name=config.get("run_name") or self.get_name(), + **kwargs, + ) + try: + result = self.rank_fusion(input, run_manager=run_manager, config=config) + except Exception as e: + run_manager.on_retriever_error(e) + raise e + else: + run_manager.on_retriever_end( + result, + **kwargs, + ) + return result + + async def ainvoke( + self, input: str, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> list[Document]: + from langchain_core.callbacks import AsyncCallbackManager + + config = ensure_config(config) + callback_manager = AsyncCallbackManager.configure( + config.get("callbacks"), + None, + verbose=kwargs.get("verbose", False), + inheritable_tags=config.get("tags", []), + local_tags=self.tags, + inheritable_metadata=config.get("metadata", {}), + local_metadata=self.metadata, + ) + run_manager = await callback_manager.on_retriever_start( + None, + input, + name=config.get("run_name") or self.get_name(), + **kwargs, + ) + try: + result = await self.arank_fusion( + input, run_manager=run_manager, config=config + ) + except Exception as e: + await run_manager.on_retriever_error(e) + raise e + else: + await run_manager.on_retriever_end( + result, + **kwargs, + ) + return result + + def _get_relevant_documents( + self, + query: str, + *, + run_manager: CallbackManagerForRetrieverRun, + ) -> list[Document]: + """ + Get the relevant documents for a given query. + + Args: + query: The query to search for. + + Returns: + A list of reranked documents. + """ + + # Get fused result of the retrievers. + fused_documents = self.rank_fusion(query, run_manager) + + return fused_documents + + async def _aget_relevant_documents( + self, + query: str, + *, + run_manager: AsyncCallbackManagerForRetrieverRun, + ) -> list[Document]: + """ + Asynchronously get the relevant documents for a given query. + + Args: + query: The query to search for. + + Returns: + A list of reranked documents. + """ + + # Get fused result of the retrievers. + fused_documents = await self.arank_fusion(query, run_manager) + + return fused_documents + + def rank_fusion( + self, + query: str, + run_manager: CallbackManagerForRetrieverRun, + *, + config: Optional[RunnableConfig] = None, + ) -> list[Document]: + """ + Retrieve the results of the retrievers and use rank_fusion_func to get + the final result. + + Args: + query: The query to search for. + + Returns: + A list of reranked documents. 
+ """ + + # Get the results of all retrievers. + retriever_docs = [ + retriever.invoke( + query, + patch_config( + config, callbacks=run_manager.get_child(tag=f"retriever_{i + 1}") + ), + ) + for i, retriever in enumerate(self.retrievers) + ] + + # Enforce that retrieved docs are Documents for each list in retriever_docs + for i in range(len(retriever_docs)): + retriever_docs[i] = [ + Document(page_content=cast(str, doc)) if isinstance(doc, str) else doc + for doc in retriever_docs[i] + ] + + # apply rank fusion + fused_documents = self.weighted_reciprocal_rank(retriever_docs) + + return fused_documents + + async def arank_fusion( + self, + query: str, + run_manager: AsyncCallbackManagerForRetrieverRun, + *, + config: Optional[RunnableConfig] = None, + ) -> list[Document]: + """ + Asynchronously retrieve the results of the retrievers + and use rank_fusion_func to get the final result. + + Args: + query: The query to search for. + + Returns: + A list of reranked documents. + """ + + # Get the results of all retrievers. + retriever_docs = await asyncio.gather( + *[ + retriever.ainvoke( + query, + patch_config( + config, + callbacks=run_manager.get_child(tag=f"retriever_{i + 1}"), + ), + ) + for i, retriever in enumerate(self.retrievers) + ] + ) + + # Enforce that retrieved docs are Documents for each list in retriever_docs + for i in range(len(retriever_docs)): + retriever_docs[i] = [ + Document(page_content=doc) if not isinstance(doc, Document) else doc + for doc in retriever_docs[i] + ] + + # apply rank fusion + fused_documents = self.weighted_reciprocal_rank(retriever_docs) + + return fused_documents + + def weighted_reciprocal_rank( + self, doc_lists: list[list[Document]] + ) -> list[Document]: + """ + Perform weighted Reciprocal Rank Fusion on multiple rank lists. + You can find more details about RRF here: + https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf + + Args: + doc_lists: A list of rank lists, where each rank list contains unique items. + + Returns: + list: The final aggregated list of items sorted by their weighted RRF + scores in descending order. + """ + if len(doc_lists) != len(self.weights): + raise ValueError( + "Number of rank lists must be equal to the number of weights." 
+ ) + + # Associate each doc's content with its RRF score for later sorting by it + # Duplicated contents across retrievers are collapsed & scored cumulatively + rrf_score: dict[str, float] = defaultdict(float) + for doc_list, weight in zip(doc_lists, self.weights): + for rank, doc in enumerate(doc_list, start=1): + rrf_score[ + ( + doc.page_content + if self.id_key is None + else doc.metadata[self.id_key] + ) + ] += weight / (rank + self.c) + + # Docs are deduplicated by their contents then sorted by their scores + all_docs = chain.from_iterable(doc_lists) + sorted_docs = sorted( + unique_by_key( + all_docs, + lambda doc: ( + doc.page_content + if self.id_key is None + else doc.metadata[self.id_key] + ), + ), + reverse=True, + key=lambda doc: rrf_score[ + doc.page_content if self.id_key is None else doc.metadata[self.id_key] + ], + ) + return sorted_docs diff --git a/venv/Lib/site-packages/langchain/retrievers/google_cloud_documentai_warehouse.py b/venv/Lib/site-packages/langchain/retrievers/google_cloud_documentai_warehouse.py new file mode 100644 index 00000000..f310037e --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/google_cloud_documentai_warehouse.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import GoogleDocumentAIWarehouseRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GoogleDocumentAIWarehouseRetriever": "langchain_community.retrievers" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleDocumentAIWarehouseRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/google_vertex_ai_search.py b/venv/Lib/site-packages/langchain/retrievers/google_vertex_ai_search.py new file mode 100644 index 00000000..1f8a71de --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/google_vertex_ai_search.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import ( + GoogleCloudEnterpriseSearchRetriever, + GoogleVertexAIMultiTurnSearchRetriever, + GoogleVertexAISearchRetriever, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
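A sketch of wiring the `EnsembleRetriever` above over one lexical and one vector retriever; assumes `rank_bm25`, `faiss-cpu`, `langchain-community`, and `langchain-openai` are installed, with an illustrative corpus and weighting:

```python
from langchain.retrievers.ensemble import EnsembleRetriever
from langchain_community.retrievers import BM25Retriever
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

texts = [
    "Enrollment for the data science course opens in June.",
    "The platform supports self-paced and mentor-led courses.",
]

bm25 = BM25Retriever.from_texts(texts)                               # lexical
vector = FAISS.from_texts(texts, OpenAIEmbeddings()).as_retriever()  # semantic

# Results are fused with the weighted Reciprocal Rank Fusion defined above;
# `weights` defaults to an equal split when omitted.
ensemble = EnsembleRetriever(retrievers=[bm25, vector], weights=[0.6, 0.4])
docs = ensemble.invoke("When does enrollment open?")
```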
+DEPRECATED_LOOKUP = { + "GoogleVertexAISearchRetriever": "langchain_community.retrievers", + "GoogleVertexAIMultiTurnSearchRetriever": "langchain_community.retrievers", + "GoogleCloudEnterpriseSearchRetriever": "langchain_community.retrievers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleVertexAISearchRetriever", + "GoogleVertexAIMultiTurnSearchRetriever", + "GoogleCloudEnterpriseSearchRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/kay.py b/venv/Lib/site-packages/langchain/retrievers/kay.py new file mode 100644 index 00000000..ddc45323 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/kay.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import KayAiRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"KayAiRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "KayAiRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/kendra.py b/venv/Lib/site-packages/langchain/retrievers/kendra.py new file mode 100644 index 00000000..824c6569 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/kendra.py @@ -0,0 +1,66 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import AmazonKendraRetriever + from langchain_community.retrievers.kendra import ( + AdditionalResultAttribute, + AdditionalResultAttributeValue, + DocumentAttribute, + DocumentAttributeValue, + Highlight, + QueryResult, + QueryResultItem, + ResultItem, + RetrieveResult, + RetrieveResultItem, + TextWithHighLights, + clean_excerpt, + combined_text, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
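To make `weighted_reciprocal_rank` concrete: ranks are 1-based (`enumerate(..., start=1)`) and every appearance of a document contributes `weight / (rank + c)`. A toy calculation with two equally weighted retrievers and the default `c = 60`:

```python
c = 60

# Document X: ranked 1st by retriever A and 3rd by retriever B.
score_x = 0.5 / (1 + c) + 0.5 / (3 + c)  # ~0.00820 + 0.00794 = 0.01614

# Document Y: ranked 2nd by retriever A, absent from retriever B.
score_y = 0.5 / (2 + c)                  # ~0.00806

assert score_x > score_y
```

Because `c` is large relative to typical ranks, agreement across retrievers moves the score far more than a slightly better rank in a single list.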
+DEPRECATED_LOOKUP = { + "clean_excerpt": "langchain_community.retrievers.kendra", + "combined_text": "langchain_community.retrievers.kendra", + "Highlight": "langchain_community.retrievers.kendra", + "TextWithHighLights": "langchain_community.retrievers.kendra", + "AdditionalResultAttributeValue": "langchain_community.retrievers.kendra", + "AdditionalResultAttribute": "langchain_community.retrievers.kendra", + "DocumentAttributeValue": "langchain_community.retrievers.kendra", + "DocumentAttribute": "langchain_community.retrievers.kendra", + "ResultItem": "langchain_community.retrievers.kendra", + "QueryResultItem": "langchain_community.retrievers.kendra", + "RetrieveResultItem": "langchain_community.retrievers.kendra", + "QueryResult": "langchain_community.retrievers.kendra", + "RetrieveResult": "langchain_community.retrievers.kendra", + "AmazonKendraRetriever": "langchain_community.retrievers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "clean_excerpt", + "combined_text", + "Highlight", + "TextWithHighLights", + "AdditionalResultAttributeValue", + "AdditionalResultAttribute", + "DocumentAttributeValue", + "DocumentAttribute", + "ResultItem", + "QueryResultItem", + "RetrieveResultItem", + "QueryResult", + "RetrieveResult", + "AmazonKendraRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/knn.py b/venv/Lib/site-packages/langchain/retrievers/knn.py new file mode 100644 index 00000000..6e10d9a7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/knn.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import KNNRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"KNNRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "KNNRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/llama_index.py b/venv/Lib/site-packages/langchain/retrievers/llama_index.py new file mode 100644 index 00000000..a86b9e0d --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/llama_index.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import ( + LlamaIndexGraphRetriever, + LlamaIndexRetriever, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
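Circling back to `cross_encoder.py` earlier in this diff: `CrossEncoderReranker` needs a concrete `BaseCrossEncoder`. A sketch backed by the third-party `sentence-transformers` package (an assumption; `langchain_community` also ships a ready-made `HuggingFaceCrossEncoder`), with an illustrative model name:

```python
from sentence_transformers import CrossEncoder

from langchain.retrievers.document_compressors.cross_encoder import BaseCrossEncoder
from langchain.retrievers.document_compressors.cross_encoder_rerank import (
    CrossEncoderReranker,
)


class SentenceTransformersCrossEncoder(BaseCrossEncoder):
    """Score (query, passage) pairs with a locally loaded cross-encoder model."""

    def __init__(self, model_name: str = "cross-encoder/ms-marco-MiniLM-L-6-v2"):
        self._model = CrossEncoder(model_name)

    def score(self, text_pairs: list[tuple[str, str]]) -> list[float]:
        # predict() returns an array of relevance scores, one per pair.
        return self._model.predict(text_pairs).tolist()


reranker = CrossEncoderReranker(model=SentenceTransformersCrossEncoder(), top_n=3)
```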
+DEPRECATED_LOOKUP = { + "LlamaIndexRetriever": "langchain_community.retrievers", + "LlamaIndexGraphRetriever": "langchain_community.retrievers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LlamaIndexRetriever", + "LlamaIndexGraphRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/merger_retriever.py b/venv/Lib/site-packages/langchain/retrievers/merger_retriever.py new file mode 100644 index 00000000..5a192ef8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/merger_retriever.py @@ -0,0 +1,123 @@ +import asyncio + +from langchain_core.callbacks import ( + AsyncCallbackManagerForRetrieverRun, + CallbackManagerForRetrieverRun, +) +from langchain_core.documents import Document +from langchain_core.retrievers import BaseRetriever + + +class MergerRetriever(BaseRetriever): + """Retriever that merges the results of multiple retrievers.""" + + retrievers: list[BaseRetriever] + """A list of retrievers to merge.""" + + def _get_relevant_documents( + self, + query: str, + *, + run_manager: CallbackManagerForRetrieverRun, + ) -> list[Document]: + """ + Get the relevant documents for a given query. + + Args: + query: The query to search for. + + Returns: + A list of relevant documents. + """ + + # Merge the results of the retrievers. + merged_documents = self.merge_documents(query, run_manager) + + return merged_documents + + async def _aget_relevant_documents( + self, + query: str, + *, + run_manager: AsyncCallbackManagerForRetrieverRun, + ) -> list[Document]: + """ + Asynchronously get the relevant documents for a given query. + + Args: + query: The query to search for. + + Returns: + A list of relevant documents. + """ + + # Merge the results of the retrievers. + merged_documents = await self.amerge_documents(query, run_manager) + + return merged_documents + + def merge_documents( + self, query: str, run_manager: CallbackManagerForRetrieverRun + ) -> list[Document]: + """ + Merge the results of the retrievers. + + Args: + query: The query to search for. + + Returns: + A list of merged documents. + """ + + # Get the results of all retrievers. + retriever_docs = [ + retriever.invoke( + query, + config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")}, + ) + for i, retriever in enumerate(self.retrievers) + ] + + # Merge the results of the retrievers. + merged_documents = [] + max_docs = max(map(len, retriever_docs), default=0) + for i in range(max_docs): + for retriever, doc in zip(self.retrievers, retriever_docs): + if i < len(doc): + merged_documents.append(doc[i]) + + return merged_documents + + async def amerge_documents( + self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun + ) -> list[Document]: + """ + Asynchronously merge the results of the retrievers. + + Args: + query: The query to search for. + + Returns: + A list of merged documents. + """ + + # Get the results of all retrievers. + retriever_docs = await asyncio.gather( + *( + retriever.ainvoke( + query, + config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")}, + ) + for i, retriever in enumerate(self.retrievers) + ) + ) + + # Merge the results of the retrievers. 
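+        # Round-robin interleave (illustrative): if retriever A returns
+        # [a1, a2, a3] and retriever B returns [b1, b2], the merged list is
+        # [a1, b1, a2, b2, a3]; retrievers that run out are simply skipped.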
+ merged_documents = [] + max_docs = max(map(len, retriever_docs), default=0) + for i in range(max_docs): + for retriever, doc in zip(self.retrievers, retriever_docs): + if i < len(doc): + merged_documents.append(doc[i]) + + return merged_documents diff --git a/venv/Lib/site-packages/langchain/retrievers/metal.py b/venv/Lib/site-packages/langchain/retrievers/metal.py new file mode 100644 index 00000000..9d4927de --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/metal.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import MetalRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MetalRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MetalRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/milvus.py b/venv/Lib/site-packages/langchain/retrievers/milvus.py new file mode 100644 index 00000000..bd9fd853 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/milvus.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import MilvusRetriever + from langchain_community.retrievers.milvus import MilvusRetreiver + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "MilvusRetriever": "langchain_community.retrievers", + "MilvusRetreiver": "langchain_community.retrievers.milvus", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MilvusRetriever", + "MilvusRetreiver", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/multi_query.py b/venv/Lib/site-packages/langchain/retrievers/multi_query.py new file mode 100644 index 00000000..ccea321b --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/multi_query.py @@ -0,0 +1,225 @@ +import asyncio +import logging +from collections.abc import Sequence +from typing import Optional + +from langchain_core.callbacks import ( + AsyncCallbackManagerForRetrieverRun, + CallbackManagerForRetrieverRun, +) +from langchain_core.documents import Document +from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import BaseOutputParser +from langchain_core.prompts import BasePromptTemplate +from langchain_core.prompts.prompt import PromptTemplate +from langchain_core.retrievers import BaseRetriever +from langchain_core.runnables import Runnable + +from langchain.chains.llm import LLMChain + +logger = logging.getLogger(__name__) + + +class LineListOutputParser(BaseOutputParser[list[str]]): + """Output parser for a list of lines.""" + + def parse(self, text: str) -> list[str]: + lines = text.strip().split("\n") + return list(filter(None, lines)) # Remove empty lines + + +# Default prompt +DEFAULT_QUERY_PROMPT = PromptTemplate( + input_variables=["question"], + template="""You are an AI language model assistant. 
Your task is + to generate 3 different versions of the given user + question to retrieve relevant documents from a vector database. + By generating multiple perspectives on the user question, + your goal is to help the user overcome some of the limitations + of distance-based similarity search. Provide these alternative + questions separated by newlines. Original question: {question}""", +) + + +def _unique_documents(documents: Sequence[Document]) -> list[Document]: + return [doc for i, doc in enumerate(documents) if doc not in documents[:i]] + + +class MultiQueryRetriever(BaseRetriever): + """Given a query, use an LLM to write a set of queries. + + Retrieve docs for each query. Return the unique union of all retrieved docs. + """ + + retriever: BaseRetriever + llm_chain: Runnable + verbose: bool = True + parser_key: str = "lines" + """DEPRECATED. parser_key is no longer used and should not be specified.""" + include_original: bool = False + """Whether to include the original query in the list of generated queries.""" + + @classmethod + def from_llm( + cls, + retriever: BaseRetriever, + llm: BaseLanguageModel, + prompt: BasePromptTemplate = DEFAULT_QUERY_PROMPT, + parser_key: Optional[str] = None, + include_original: bool = False, + ) -> "MultiQueryRetriever": + """Initialize from llm using default template. + + Args: + retriever: retriever to query documents from + llm: llm for query generation using DEFAULT_QUERY_PROMPT + prompt: The prompt which aims to generate several different versions + of the given user query + include_original: Whether to include the original query in the list of + generated queries. + + Returns: + MultiQueryRetriever + """ + output_parser = LineListOutputParser() + llm_chain = prompt | llm | output_parser + return cls( + retriever=retriever, + llm_chain=llm_chain, + include_original=include_original, + ) + + async def _aget_relevant_documents( + self, + query: str, + *, + run_manager: AsyncCallbackManagerForRetrieverRun, + ) -> list[Document]: + """Get relevant documents given a user query. + + Args: + query: user query + + Returns: + Unique union of relevant documents from all generated queries + """ + queries = await self.agenerate_queries(query, run_manager) + if self.include_original: + queries.append(query) + documents = await self.aretrieve_documents(queries, run_manager) + return self.unique_union(documents) + + async def agenerate_queries( + self, question: str, run_manager: AsyncCallbackManagerForRetrieverRun + ) -> list[str]: + """Generate queries based upon user input. + + Args: + question: user query + + Returns: + List of LLM generated queries that are similar to the user input + """ + response = await self.llm_chain.ainvoke( + {"question": question}, config={"callbacks": run_manager.get_child()} + ) + if isinstance(self.llm_chain, LLMChain): + lines = response["text"] + else: + lines = response + if self.verbose: + logger.info(f"Generated queries: {lines}") + return lines + + async def aretrieve_documents( + self, queries: list[str], run_manager: AsyncCallbackManagerForRetrieverRun + ) -> list[Document]: + """Run all LLM generated queries. 
+ + Args: + queries: query list + + Returns: + List of retrieved Documents + """ + document_lists = await asyncio.gather( + *( + self.retriever.ainvoke( + query, config={"callbacks": run_manager.get_child()} + ) + for query in queries + ) + ) + return [doc for docs in document_lists for doc in docs] + + def _get_relevant_documents( + self, + query: str, + *, + run_manager: CallbackManagerForRetrieverRun, + ) -> list[Document]: + """Get relevant documents given a user query. + + Args: + query: user query + + Returns: + Unique union of relevant documents from all generated queries + """ + queries = self.generate_queries(query, run_manager) + if self.include_original: + queries.append(query) + documents = self.retrieve_documents(queries, run_manager) + return self.unique_union(documents) + + def generate_queries( + self, question: str, run_manager: CallbackManagerForRetrieverRun + ) -> list[str]: + """Generate queries based upon user input. + + Args: + question: user query + + Returns: + List of LLM generated queries that are similar to the user input + """ + response = self.llm_chain.invoke( + {"question": question}, config={"callbacks": run_manager.get_child()} + ) + if isinstance(self.llm_chain, LLMChain): + lines = response["text"] + else: + lines = response + if self.verbose: + logger.info(f"Generated queries: {lines}") + return lines + + def retrieve_documents( + self, queries: list[str], run_manager: CallbackManagerForRetrieverRun + ) -> list[Document]: + """Run all LLM generated queries. + + Args: + queries: query list + + Returns: + List of retrieved Documents + """ + documents = [] + for query in queries: + docs = self.retriever.invoke( + query, config={"callbacks": run_manager.get_child()} + ) + documents.extend(docs) + return documents + + def unique_union(self, documents: list[Document]) -> list[Document]: + """Get unique Documents. 
+ + Args: + documents: List of retrieved Documents + + Returns: + List of unique retrieved Documents + """ + return _unique_documents(documents) diff --git a/venv/Lib/site-packages/langchain/retrievers/multi_vector.py b/venv/Lib/site-packages/langchain/retrievers/multi_vector.py new file mode 100644 index 00000000..e4b491b8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/multi_vector.py @@ -0,0 +1,120 @@ +from enum import Enum +from typing import Any, Optional + +from langchain_core.callbacks import ( + AsyncCallbackManagerForRetrieverRun, + CallbackManagerForRetrieverRun, +) +from langchain_core.documents import Document +from langchain_core.retrievers import BaseRetriever +from langchain_core.stores import BaseStore, ByteStore +from langchain_core.vectorstores import VectorStore +from pydantic import Field, model_validator + +from langchain.storage._lc_store import create_kv_docstore + + +class SearchType(str, Enum): + """Enumerator of the types of search to perform.""" + + similarity = "similarity" + """Similarity search.""" + similarity_score_threshold = "similarity_score_threshold" + """Similarity search with a score threshold.""" + mmr = "mmr" + """Maximal Marginal Relevance reranking of similarity search.""" + + +class MultiVectorRetriever(BaseRetriever): + """Retrieve from a set of multiple embeddings for the same document.""" + + vectorstore: VectorStore + """The underlying vectorstore to use to store small chunks + and their embedding vectors""" + byte_store: Optional[ByteStore] = None + """The lower-level backing storage layer for the parent documents""" + docstore: BaseStore[str, Document] + """The storage interface for the parent documents""" + id_key: str = "doc_id" + search_kwargs: dict = Field(default_factory=dict) + """Keyword arguments to pass to the search function.""" + search_type: SearchType = SearchType.similarity + """Type of search to perform (similarity / mmr)""" + + @model_validator(mode="before") + @classmethod + def shim_docstore(cls, values: dict) -> Any: + byte_store = values.get("byte_store") + docstore = values.get("docstore") + if byte_store is not None: + docstore = create_kv_docstore(byte_store) + elif docstore is None: + raise Exception("You must pass a `byte_store` parameter.") + values["docstore"] = docstore + return values + + def _get_relevant_documents( + self, query: str, *, run_manager: CallbackManagerForRetrieverRun + ) -> list[Document]: + """Get documents relevant to a query. 
+ Args: + query: String to find relevant documents for + run_manager: The callbacks handler to use + Returns: + List of relevant documents + """ + if self.search_type == SearchType.mmr: + sub_docs = self.vectorstore.max_marginal_relevance_search( + query, **self.search_kwargs + ) + elif self.search_type == SearchType.similarity_score_threshold: + sub_docs_and_similarities = ( + self.vectorstore.similarity_search_with_relevance_scores( + query, **self.search_kwargs + ) + ) + sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities] + else: + sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs) + + # We do this to maintain the order of the ids that are returned + ids = [] + for d in sub_docs: + if self.id_key in d.metadata and d.metadata[self.id_key] not in ids: + ids.append(d.metadata[self.id_key]) + docs = self.docstore.mget(ids) + return [d for d in docs if d is not None] + + async def _aget_relevant_documents( + self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun + ) -> list[Document]: + """Asynchronously get documents relevant to a query. + Args: + query: String to find relevant documents for + run_manager: The callbacks handler to use + Returns: + List of relevant documents + """ + if self.search_type == SearchType.mmr: + sub_docs = await self.vectorstore.amax_marginal_relevance_search( + query, **self.search_kwargs + ) + elif self.search_type == SearchType.similarity_score_threshold: + sub_docs_and_similarities = ( + await self.vectorstore.asimilarity_search_with_relevance_scores( + query, **self.search_kwargs + ) + ) + sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities] + else: + sub_docs = await self.vectorstore.asimilarity_search( + query, **self.search_kwargs + ) + + # We do this to maintain the order of the ids that are returned + ids = [] + for d in sub_docs: + if self.id_key in d.metadata and d.metadata[self.id_key] not in ids: + ids.append(d.metadata[self.id_key]) + docs = await self.docstore.amget(ids) + return [d for d in docs if d is not None] diff --git a/venv/Lib/site-packages/langchain/retrievers/outline.py b/venv/Lib/site-packages/langchain/retrievers/outline.py new file mode 100644 index 00000000..be0e4451 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/outline.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import OutlineRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
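A sketch of the `MultiQueryRetriever` defined earlier, built with `from_llm` and its default prompt; the corpus is a placeholder, and `langchain-openai` plus `faiss-cpu` are assumed:

```python
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["Mentors can review learner progress from the weekly report."],  # placeholder
    OpenAIEmbeddings(),
)

retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(),
    llm=ChatOpenAI(model="gpt-4", temperature=0),
)

# The LLM writes 3 paraphrases of the question (DEFAULT_QUERY_PROMPT above),
# each one is run against the vectorstore, and the unique union of all
# retrieved documents comes back.
docs = retriever.invoke("How do mentors track student progress?")
```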
+DEPRECATED_LOOKUP = {"OutlineRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OutlineRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/parent_document_retriever.py b/venv/Lib/site-packages/langchain/retrievers/parent_document_retriever.py new file mode 100644 index 00000000..c3b3d1fe --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/parent_document_retriever.py @@ -0,0 +1,148 @@ +import uuid +from collections.abc import Sequence +from typing import Any, Optional + +from langchain_core.documents import Document +from langchain_text_splitters import TextSplitter + +from langchain.retrievers import MultiVectorRetriever + + +class ParentDocumentRetriever(MultiVectorRetriever): + """Retrieve small chunks then retrieve their parent documents. + + When splitting documents for retrieval, there are often conflicting desires: + + 1. You may want to have small documents, so that their embeddings can most + accurately reflect their meaning. If too long, then the embeddings can + lose meaning. + 2. You want to have long enough documents that the context of each chunk is + retained. + + The ParentDocumentRetriever strikes that balance by splitting and storing + small chunks of data. During retrieval, it first fetches the small chunks + but then looks up the parent ids for those chunks and returns those larger + documents. + + Note that "parent document" refers to the document that a small chunk + originated from. This can either be the whole raw document OR a larger + chunk. + + Examples: + + .. code-block:: python + + from langchain_chroma import Chroma + from langchain_community.embeddings import OpenAIEmbeddings + from langchain_text_splitters import RecursiveCharacterTextSplitter + from langchain.storage import InMemoryStore + + # This text splitter is used to create the parent documents + parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, add_start_index=True) + # This text splitter is used to create the child documents + # It should create documents smaller than the parent + child_splitter = RecursiveCharacterTextSplitter(chunk_size=400, add_start_index=True) + # The vectorstore to use to index the child chunks + vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) + # The storage layer for the parent documents + store = InMemoryStore() + + # Initialize the retriever + retriever = ParentDocumentRetriever( + vectorstore=vectorstore, + docstore=store, + child_splitter=child_splitter, + parent_splitter=parent_splitter, + ) + """ # noqa: E501 + + child_splitter: TextSplitter + """The text splitter to use to create child documents.""" + + """The key to use to track the parent id. This will be stored in the + metadata of child documents.""" + parent_splitter: Optional[TextSplitter] = None + """The text splitter to use to create parent documents. + If none, then the parent documents will be the raw documents passed in.""" + + child_metadata_fields: Optional[Sequence[str]] = None + """Metadata fields to leave in child documents. If None, leave all parent document + metadata. 
+ """ + + def _split_docs_for_adding( + self, + documents: list[Document], + ids: Optional[list[str]] = None, + add_to_docstore: bool = True, + ) -> tuple[list[Document], list[tuple[str, Document]]]: + if self.parent_splitter is not None: + documents = self.parent_splitter.split_documents(documents) + if ids is None: + doc_ids = [str(uuid.uuid4()) for _ in documents] + if not add_to_docstore: + raise ValueError( + "If ids are not passed in, `add_to_docstore` MUST be True" + ) + else: + if len(documents) != len(ids): + raise ValueError( + "Got uneven list of documents and ids. " + "If `ids` is provided, should be same length as `documents`." + ) + doc_ids = ids + + docs = [] + full_docs = [] + for i, doc in enumerate(documents): + _id = doc_ids[i] + sub_docs = self.child_splitter.split_documents([doc]) + if self.child_metadata_fields is not None: + for _doc in sub_docs: + _doc.metadata = { + k: _doc.metadata[k] for k in self.child_metadata_fields + } + for _doc in sub_docs: + _doc.metadata[self.id_key] = _id + docs.extend(sub_docs) + full_docs.append((_id, doc)) + + return docs, full_docs + + def add_documents( + self, + documents: list[Document], + ids: Optional[list[str]] = None, + add_to_docstore: bool = True, + **kwargs: Any, + ) -> None: + """Adds documents to the docstore and vectorstores. + + Args: + documents: List of documents to add + ids: Optional list of ids for documents. If provided should be the same + length as the list of documents. Can be provided if parent documents + are already in the document store and you don't want to re-add + to the docstore. If not provided, random UUIDs will be used as + ids. + add_to_docstore: Boolean of whether to add documents to docstore. + This can be false if and only if `ids` are provided. You may want + to set this to False if the documents are already in the docstore + and you don't want to re-add them. + """ + docs, full_docs = self._split_docs_for_adding(documents, ids, add_to_docstore) + self.vectorstore.add_documents(docs, **kwargs) + if add_to_docstore: + self.docstore.mset(full_docs) + + async def aadd_documents( + self, + documents: list[Document], + ids: Optional[list[str]] = None, + add_to_docstore: bool = True, + **kwargs: Any, + ) -> None: + docs, full_docs = self._split_docs_for_adding(documents, ids, add_to_docstore) + await self.vectorstore.aadd_documents(docs, **kwargs) + if add_to_docstore: + await self.docstore.amset(full_docs) diff --git a/venv/Lib/site-packages/langchain/retrievers/pinecone_hybrid_search.py b/venv/Lib/site-packages/langchain/retrievers/pinecone_hybrid_search.py new file mode 100644 index 00000000..e27acd05 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/pinecone_hybrid_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import PineconeHybridSearchRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
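Continuing the docstring example in `ParentDocumentRetriever` above: `add_documents` splits each parent, indexes the child chunks in the vectorstore, and stores `(id, parent)` pairs in the docstore. A hedged sketch with a placeholder document, reusing the `retriever` built in that example:

```python
from langchain_core.documents import Document

docs = [Document(page_content="...full course syllabus text...")]  # placeholder

# Child chunks go to the vectorstore; parents go to the docstore keyed by
# the generated UUIDs written into each child's metadata under `doc_id`.
retriever.add_documents(docs)

# Search matches the small child chunks, then returns their parent documents.
parents = retriever.invoke("grading policy")
```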
+DEPRECATED_LOOKUP = {"PineconeHybridSearchRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PineconeHybridSearchRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/pubmed.py b/venv/Lib/site-packages/langchain/retrievers/pubmed.py new file mode 100644 index 00000000..ae03d4b1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/pubmed.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import PubMedRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PubMedRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PubMedRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/pupmed.py b/venv/Lib/site-packages/langchain/retrievers/pupmed.py new file mode 100644 index 00000000..ae03d4b1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/pupmed.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import PubMedRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PubMedRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PubMedRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/re_phraser.py b/venv/Lib/site-packages/langchain/retrievers/re_phraser.py new file mode 100644 index 00000000..9f8fc643 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/re_phraser.py @@ -0,0 +1,88 @@ +import logging + +from langchain_core.callbacks import ( + AsyncCallbackManagerForRetrieverRun, + CallbackManagerForRetrieverRun, +) +from langchain_core.documents import Document +from langchain_core.language_models import BaseLLM +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts import BasePromptTemplate +from langchain_core.prompts.prompt import PromptTemplate +from langchain_core.retrievers import BaseRetriever +from langchain_core.runnables import Runnable + +logger = logging.getLogger(__name__) + +# Default template +DEFAULT_TEMPLATE = """You are an assistant tasked with taking a natural language \ +query from a user and converting it into a query for a vectorstore. \ +In this process, you strip out information that is not relevant for \ +the retrieval task. Here is the user query: {question}""" + +# Default prompt +DEFAULT_QUERY_PROMPT = PromptTemplate.from_template(DEFAULT_TEMPLATE) + + +class RePhraseQueryRetriever(BaseRetriever): + """Given a query, use an LLM to re-phrase it. 
+ Then, retrieve docs for the re-phrased query.""" + + retriever: BaseRetriever + llm_chain: Runnable + + @classmethod + def from_llm( + cls, + retriever: BaseRetriever, + llm: BaseLLM, + prompt: BasePromptTemplate = DEFAULT_QUERY_PROMPT, + ) -> "RePhraseQueryRetriever": + """Initialize from llm using default template. + + The prompt used here expects a single input: `question` + + Args: + retriever: retriever to query documents from + llm: llm for query generation using DEFAULT_QUERY_PROMPT + prompt: prompt template for query generation + + Returns: + RePhraseQueryRetriever + """ + llm_chain = prompt | llm | StrOutputParser() + return cls( + retriever=retriever, + llm_chain=llm_chain, + ) + + def _get_relevant_documents( + self, + query: str, + *, + run_manager: CallbackManagerForRetrieverRun, + ) -> list[Document]: + """Get relevant documents given a user question. + + Args: + query: user question + + Returns: + Relevant documents for re-phrased question + """ + re_phrased_question = self.llm_chain.invoke( + query, {"callbacks": run_manager.get_child()} + ) + logger.info(f"Re-phrased question: {re_phrased_question}") + docs = self.retriever.invoke( + re_phrased_question, config={"callbacks": run_manager.get_child()} + ) + return docs + + async def _aget_relevant_documents( + self, + query: str, + *, + run_manager: AsyncCallbackManagerForRetrieverRun, + ) -> list[Document]: + raise NotImplementedError diff --git a/venv/Lib/site-packages/langchain/retrievers/remote_retriever.py b/venv/Lib/site-packages/langchain/retrievers/remote_retriever.py new file mode 100644 index 00000000..d8384e2d --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/remote_retriever.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import RemoteLangChainRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
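A sketch of the `RePhraseQueryRetriever` above. `from_llm` is typed against a completion-style `BaseLLM`, so this assumes `langchain-openai`'s `OpenAI` wrapper and a placeholder vectorstore:

```python
from langchain.retrievers.re_phraser import RePhraseQueryRetriever
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAI, OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["Admins can deactivate user accounts from the dashboard."],  # placeholder
    OpenAIEmbeddings(),
)

retriever = RePhraseQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(),
    llm=OpenAI(temperature=0),
)

# The chatty question is first stripped down to a clean vectorstore query by
# the LLM (DEFAULT_TEMPLATE above), then retrieval proceeds as usual.
docs = retriever.invoke("hey quick q, how do i remove someone's account??")
```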
+DEPRECATED_LOOKUP = {"RemoteLangChainRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RemoteLangChainRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__init__.py b/venv/Lib/site-packages/langchain/retrievers/self_query/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..32546219 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/astradb.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/astradb.cpython-312.pyc new file mode 100644 index 00000000..4888c84d Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/astradb.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..c3878571 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/chroma.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/chroma.cpython-312.pyc new file mode 100644 index 00000000..80dd970e Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/chroma.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/dashvector.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/dashvector.cpython-312.pyc new file mode 100644 index 00000000..d0840391 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/dashvector.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/databricks_vector_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/databricks_vector_search.cpython-312.pyc new file mode 100644 index 00000000..e30a3659 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/databricks_vector_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/deeplake.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/deeplake.cpython-312.pyc new file mode 100644 index 00000000..23249d54 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/deeplake.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/dingo.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/dingo.cpython-312.pyc new file mode 100644 index 00000000..5149ace5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/dingo.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/elasticsearch.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/elasticsearch.cpython-312.pyc new file mode 100644 index 00000000..53adcccc Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/elasticsearch.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/milvus.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/milvus.cpython-312.pyc new file mode 100644 index 00000000..99e4268b Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/milvus.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/mongodb_atlas.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/mongodb_atlas.cpython-312.pyc new file mode 100644 index 00000000..2c2f7290 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/mongodb_atlas.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/myscale.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/myscale.cpython-312.pyc new file mode 100644 index 00000000..ea9eb636 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/myscale.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/opensearch.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/opensearch.cpython-312.pyc new file mode 100644 index 00000000..8f4962c2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/opensearch.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/pgvector.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/pgvector.cpython-312.pyc new file mode 100644 index 00000000..2483c009 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/pgvector.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/pinecone.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/pinecone.cpython-312.pyc new file mode 100644 index 00000000..39cf732b Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/pinecone.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/qdrant.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/qdrant.cpython-312.pyc new file mode 100644 index 00000000..0e5121b7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/qdrant.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/redis.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/redis.cpython-312.pyc new file mode 100644 index 00000000..8965d203 Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/redis.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/supabase.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/supabase.cpython-312.pyc new file mode 100644 index 00000000..577b5c2e Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/supabase.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/tencentvectordb.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/tencentvectordb.cpython-312.pyc new file mode 100644 index 00000000..725048cf Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/tencentvectordb.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/timescalevector.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/timescalevector.cpython-312.pyc new file mode 100644 index 00000000..fb333cfc Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/timescalevector.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/vectara.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/vectara.cpython-312.pyc new file mode 100644 index 00000000..b81a729b Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/vectara.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/weaviate.cpython-312.pyc b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/weaviate.cpython-312.pyc new file mode 100644 index 00000000..f73626ef Binary files /dev/null and b/venv/Lib/site-packages/langchain/retrievers/self_query/__pycache__/weaviate.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/astradb.py b/venv/Lib/site-packages/langchain/retrievers/self_query/astradb.py new file mode 100644 index 00000000..a0d7b4b8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/astradb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.astradb import AstraDBTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
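+#
+# Unlike the retriever shims above, the `self_query` shims forward to
+# `langchain_community.query_constructors`, where the translator classes
+# (which turn a structured query into store-specific search parameters) moved.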
+DEPRECATED_LOOKUP = { + "AstraDBTranslator": "langchain_community.query_constructors.astradb", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["AstraDBTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/base.py b/venv/Lib/site-packages/langchain/retrievers/self_query/base.py new file mode 100644 index 00000000..801990d1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/base.py @@ -0,0 +1,378 @@ +"""Retriever that generates and executes structured queries over its own data source.""" + +import logging +from collections.abc import Sequence +from typing import Any, Optional, Union + +from langchain_core.callbacks.manager import ( + AsyncCallbackManagerForRetrieverRun, + CallbackManagerForRetrieverRun, +) +from langchain_core.documents import Document +from langchain_core.language_models import BaseLanguageModel +from langchain_core.retrievers import BaseRetriever +from langchain_core.runnables import Runnable +from langchain_core.structured_query import StructuredQuery, Visitor +from langchain_core.vectorstores import VectorStore +from pydantic import ConfigDict, Field, model_validator + +from langchain.chains.query_constructor.base import load_query_constructor_runnable +from langchain.chains.query_constructor.schema import AttributeInfo + +logger = logging.getLogger(__name__) +QUERY_CONSTRUCTOR_RUN_NAME = "query_constructor" + + +def _get_builtin_translator(vectorstore: VectorStore) -> Visitor: + """Get the translator class corresponding to the vector store class.""" + try: + import langchain_community # noqa: F401 + except ImportError: + raise ImportError( + "The langchain-community package must be installed to use this feature." + " Please install it using `pip install langchain-community`." 
+ ) + + from langchain_community.query_constructors.astradb import AstraDBTranslator + from langchain_community.query_constructors.chroma import ChromaTranslator + from langchain_community.query_constructors.dashvector import DashvectorTranslator + from langchain_community.query_constructors.databricks_vector_search import ( + DatabricksVectorSearchTranslator, + ) + from langchain_community.query_constructors.deeplake import DeepLakeTranslator + from langchain_community.query_constructors.dingo import DingoDBTranslator + from langchain_community.query_constructors.elasticsearch import ( + ElasticsearchTranslator, + ) + from langchain_community.query_constructors.milvus import MilvusTranslator + from langchain_community.query_constructors.mongodb_atlas import ( + MongoDBAtlasTranslator, + ) + from langchain_community.query_constructors.myscale import MyScaleTranslator + from langchain_community.query_constructors.neo4j import Neo4jTranslator + from langchain_community.query_constructors.opensearch import OpenSearchTranslator + from langchain_community.query_constructors.pgvector import PGVectorTranslator + from langchain_community.query_constructors.pinecone import PineconeTranslator + from langchain_community.query_constructors.qdrant import QdrantTranslator + from langchain_community.query_constructors.redis import RedisTranslator + from langchain_community.query_constructors.supabase import SupabaseVectorTranslator + from langchain_community.query_constructors.tencentvectordb import ( + TencentVectorDBTranslator, + ) + from langchain_community.query_constructors.timescalevector import ( + TimescaleVectorTranslator, + ) + from langchain_community.query_constructors.vectara import VectaraTranslator + from langchain_community.query_constructors.weaviate import WeaviateTranslator + from langchain_community.vectorstores import ( + AstraDB, + DashVector, + DatabricksVectorSearch, + DeepLake, + Dingo, + Milvus, + MyScale, + Neo4jVector, + OpenSearchVectorSearch, + PGVector, + Qdrant, + Redis, + SupabaseVectorStore, + TencentVectorDB, + TimescaleVector, + Vectara, + Weaviate, + ) + from langchain_community.vectorstores import ( + Chroma as CommunityChroma, + ) + from langchain_community.vectorstores import ( + ElasticsearchStore as ElasticsearchStoreCommunity, + ) + from langchain_community.vectorstores import ( + MongoDBAtlasVectorSearch as CommunityMongoDBAtlasVectorSearch, + ) + from langchain_community.vectorstores import ( + Pinecone as CommunityPinecone, + ) + + BUILTIN_TRANSLATORS: dict[type[VectorStore], type[Visitor]] = { + AstraDB: AstraDBTranslator, + PGVector: PGVectorTranslator, + CommunityPinecone: PineconeTranslator, + CommunityChroma: ChromaTranslator, + DashVector: DashvectorTranslator, + Dingo: DingoDBTranslator, + Weaviate: WeaviateTranslator, + Vectara: VectaraTranslator, + Qdrant: QdrantTranslator, + MyScale: MyScaleTranslator, + DeepLake: DeepLakeTranslator, + ElasticsearchStoreCommunity: ElasticsearchTranslator, + Milvus: MilvusTranslator, + SupabaseVectorStore: SupabaseVectorTranslator, + TimescaleVector: TimescaleVectorTranslator, + OpenSearchVectorSearch: OpenSearchTranslator, + CommunityMongoDBAtlasVectorSearch: MongoDBAtlasTranslator, + Neo4jVector: Neo4jTranslator, + } + if isinstance(vectorstore, DatabricksVectorSearch): + return DatabricksVectorSearchTranslator() + elif isinstance(vectorstore, MyScale): + return MyScaleTranslator(metadata_key=vectorstore.metadata_column) + elif isinstance(vectorstore, Redis): + return RedisTranslator.from_vectorstore(vectorstore) + 
elif isinstance(vectorstore, TencentVectorDB): + fields = [ + field.name for field in (vectorstore.meta_fields or []) if field.index + ] + return TencentVectorDBTranslator(fields) + elif vectorstore.__class__ in BUILTIN_TRANSLATORS: + return BUILTIN_TRANSLATORS[vectorstore.__class__]() + else: + try: + from langchain_astradb.vectorstores import AstraDBVectorStore + except ImportError: + pass + else: + if isinstance(vectorstore, AstraDBVectorStore): + return AstraDBTranslator() + + try: + from langchain_elasticsearch.vectorstores import ElasticsearchStore + except ImportError: + pass + else: + if isinstance(vectorstore, ElasticsearchStore): + return ElasticsearchTranslator() + + try: + from langchain_pinecone import PineconeVectorStore + except ImportError: + pass + else: + if isinstance(vectorstore, PineconeVectorStore): + return PineconeTranslator() + + try: + from langchain_mongodb import MongoDBAtlasVectorSearch + except ImportError: + pass + else: + if isinstance(vectorstore, MongoDBAtlasVectorSearch): + return MongoDBAtlasTranslator() + + try: + from langchain_neo4j import Neo4jVector + except ImportError: + pass + else: + if isinstance(vectorstore, Neo4jVector): + return Neo4jTranslator() + + try: + # Trying langchain_chroma import if exists + from langchain_chroma import Chroma + except ImportError: + pass + else: + if isinstance(vectorstore, Chroma): + return ChromaTranslator() + + try: + from langchain_postgres import PGVector + from langchain_postgres import PGVectorTranslator as NewPGVectorTranslator + except ImportError: + pass + else: + if isinstance(vectorstore, PGVector): + return NewPGVectorTranslator() + + try: + from langchain_qdrant import QdrantVectorStore + except ImportError: + pass + else: + if isinstance(vectorstore, QdrantVectorStore): + return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key) + + try: + # Added in langchain-community==0.2.11 + from langchain_community.query_constructors.hanavector import HanaTranslator + from langchain_community.vectorstores import HanaDB + except ImportError: + pass + else: + if isinstance(vectorstore, HanaDB): + return HanaTranslator() + + try: + # Trying langchain_weaviate (weaviate v4) import if exists + from langchain_weaviate.vectorstores import WeaviateVectorStore + + except ImportError: + pass + else: + if isinstance(vectorstore, WeaviateVectorStore): + return WeaviateTranslator() + + raise ValueError( + f"Self query retriever with Vector Store type {vectorstore.__class__}" + f" not supported." + ) + + +class SelfQueryRetriever(BaseRetriever): + """Retriever that uses a vector store and an LLM to generate + the vector store queries.""" + + vectorstore: VectorStore + """The underlying vector store from which documents will be retrieved.""" + query_constructor: Runnable[dict, StructuredQuery] = Field(alias="llm_chain") + """The query constructor chain for generating the vector store queries. 
+ + llm_chain is legacy name kept for backwards compatibility.""" + search_type: str = "similarity" + """The search type to perform on the vector store.""" + search_kwargs: dict = Field(default_factory=dict) + """Keyword arguments to pass in to the vector store search.""" + structured_query_translator: Visitor + """Translator for turning internal query language into vectorstore search params.""" + verbose: bool = False + + use_original_query: bool = False + """Use original query instead of the revised new query from LLM""" + + model_config = ConfigDict( + populate_by_name=True, + arbitrary_types_allowed=True, + ) + + @model_validator(mode="before") + @classmethod + def validate_translator(cls, values: dict) -> Any: + """Validate translator.""" + if "structured_query_translator" not in values: + values["structured_query_translator"] = _get_builtin_translator( + values["vectorstore"] + ) + return values + + @property + def llm_chain(self) -> Runnable: + """llm_chain is legacy name kept for backwards compatibility.""" + return self.query_constructor + + def _prepare_query( + self, query: str, structured_query: StructuredQuery + ) -> tuple[str, dict[str, Any]]: + new_query, new_kwargs = self.structured_query_translator.visit_structured_query( + structured_query + ) + if structured_query.limit is not None: + new_kwargs["k"] = structured_query.limit + if self.use_original_query: + new_query = query + search_kwargs = {**self.search_kwargs, **new_kwargs} + return new_query, search_kwargs + + def _get_docs_with_query( + self, query: str, search_kwargs: dict[str, Any] + ) -> list[Document]: + docs = self.vectorstore.search(query, self.search_type, **search_kwargs) + return docs + + async def _aget_docs_with_query( + self, query: str, search_kwargs: dict[str, Any] + ) -> list[Document]: + docs = await self.vectorstore.asearch(query, self.search_type, **search_kwargs) + return docs + + def _get_relevant_documents( + self, query: str, *, run_manager: CallbackManagerForRetrieverRun + ) -> list[Document]: + """Get documents relevant for a query. + + Args: + query: string to find relevant documents for + + Returns: + List of relevant documents + """ + structured_query = self.query_constructor.invoke( + {"query": query}, config={"callbacks": run_manager.get_child()} + ) + if self.verbose: + logger.info(f"Generated Query: {structured_query}") + new_query, search_kwargs = self._prepare_query(query, structured_query) + docs = self._get_docs_with_query(new_query, search_kwargs) + return docs + + async def _aget_relevant_documents( + self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun + ) -> list[Document]: + """Get documents relevant for a query. 
+ + Args: + query: string to find relevant documents for + + Returns: + List of relevant documents + """ + structured_query = await self.query_constructor.ainvoke( + {"query": query}, config={"callbacks": run_manager.get_child()} + ) + if self.verbose: + logger.info(f"Generated Query: {structured_query}") + new_query, search_kwargs = self._prepare_query(query, structured_query) + docs = await self._aget_docs_with_query(new_query, search_kwargs) + return docs + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + vectorstore: VectorStore, + document_contents: str, + metadata_field_info: Sequence[Union[AttributeInfo, dict]], + structured_query_translator: Optional[Visitor] = None, + chain_kwargs: Optional[dict] = None, + enable_limit: bool = False, + use_original_query: bool = False, + **kwargs: Any, + ) -> "SelfQueryRetriever": + if structured_query_translator is None: + structured_query_translator = _get_builtin_translator(vectorstore) + chain_kwargs = chain_kwargs or {} + + if ( + "allowed_comparators" not in chain_kwargs + and structured_query_translator.allowed_comparators is not None + ): + chain_kwargs["allowed_comparators"] = ( + structured_query_translator.allowed_comparators + ) + if ( + "allowed_operators" not in chain_kwargs + and structured_query_translator.allowed_operators is not None + ): + chain_kwargs["allowed_operators"] = ( + structured_query_translator.allowed_operators + ) + query_constructor = load_query_constructor_runnable( + llm, + document_contents, + metadata_field_info, + enable_limit=enable_limit, + **chain_kwargs, + ) + query_constructor = query_constructor.with_config( + run_name=QUERY_CONSTRUCTOR_RUN_NAME + ) + return cls( # type: ignore[call-arg] + query_constructor=query_constructor, + vectorstore=vectorstore, + use_original_query=use_original_query, + structured_query_translator=structured_query_translator, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/chroma.py b/venv/Lib/site-packages/langchain/retrievers/self_query/chroma.py new file mode 100644 index 00000000..4e1d3217 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/chroma.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.chroma import ChromaTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ChromaTranslator": "langchain_community.query_constructors.chroma", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["ChromaTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/dashvector.py b/venv/Lib/site-packages/langchain/retrievers/self_query/dashvector.py new file mode 100644 index 00000000..f4067baa --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/dashvector.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.dashvector import DashvectorTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "DashvectorTranslator": "langchain_community.query_constructors.dashvector", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["DashvectorTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/databricks_vector_search.py b/venv/Lib/site-packages/langchain/retrievers/self_query/databricks_vector_search.py new file mode 100644 index 00000000..ece66269 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/databricks_vector_search.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.databricks_vector_search import ( + DatabricksVectorSearchTranslator, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DatabricksVectorSearchTranslator": ( + "langchain_community.query_constructors.databricks_vector_search" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["DatabricksVectorSearchTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/deeplake.py b/venv/Lib/site-packages/langchain/retrievers/self_query/deeplake.py new file mode 100644 index 00000000..71f31716 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/deeplake.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.deeplake import ( + DeepLakeTranslator, + can_cast_to_float, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DeepLakeTranslator": "langchain_community.query_constructors.deeplake", + "can_cast_to_float": "langchain_community.query_constructors.deeplake", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["DeepLakeTranslator", "can_cast_to_float"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/dingo.py b/venv/Lib/site-packages/langchain/retrievers/self_query/dingo.py new file mode 100644 index 00000000..2acfa95a --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/dingo.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.dingo import DingoDBTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "DingoDBTranslator": "langchain_community.query_constructors.dingo", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["DingoDBTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/elasticsearch.py b/venv/Lib/site-packages/langchain/retrievers/self_query/elasticsearch.py new file mode 100644 index 00000000..868524cb --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/elasticsearch.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.elasticsearch import ( + ElasticsearchTranslator, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ElasticsearchTranslator": "langchain_community.query_constructors.elasticsearch", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["ElasticsearchTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/milvus.py b/venv/Lib/site-packages/langchain/retrievers/self_query/milvus.py new file mode 100644 index 00000000..17b8934c --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/milvus.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.milvus import ( + MilvusTranslator, + process_value, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "MilvusTranslator": "langchain_community.query_constructors.milvus", + "process_value": "langchain_community.query_constructors.milvus", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["MilvusTranslator", "process_value"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/mongodb_atlas.py b/venv/Lib/site-packages/langchain/retrievers/self_query/mongodb_atlas.py new file mode 100644 index 00000000..81196772 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/mongodb_atlas.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.mongodb_atlas import ( + MongoDBAtlasTranslator, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "MongoDBAtlasTranslator": "langchain_community.query_constructors.mongodb_atlas", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["MongoDBAtlasTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/myscale.py b/venv/Lib/site-packages/langchain/retrievers/self_query/myscale.py new file mode 100644 index 00000000..a5bfcadc --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/myscale.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.myscale import MyScaleTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "MyScaleTranslator": "langchain_community.query_constructors.myscale", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["MyScaleTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/opensearch.py b/venv/Lib/site-packages/langchain/retrievers/self_query/opensearch.py new file mode 100644 index 00000000..519cf851 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/opensearch.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.opensearch import OpenSearchTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "OpenSearchTranslator": "langchain_community.query_constructors.opensearch", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["OpenSearchTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/pgvector.py b/venv/Lib/site-packages/langchain/retrievers/self_query/pgvector.py new file mode 100644 index 00000000..1355b6b8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/pgvector.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.pgvector import PGVectorTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "PGVectorTranslator": "langchain_community.query_constructors.pgvector", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["PGVectorTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/pinecone.py b/venv/Lib/site-packages/langchain/retrievers/self_query/pinecone.py new file mode 100644 index 00000000..43d299e3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/pinecone.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.pinecone import PineconeTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "PineconeTranslator": "langchain_community.query_constructors.pinecone", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["PineconeTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/qdrant.py b/venv/Lib/site-packages/langchain/retrievers/self_query/qdrant.py new file mode 100644 index 00000000..aa0f8197 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/qdrant.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.qdrant import QdrantTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "QdrantTranslator": "langchain_community.query_constructors.qdrant", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["QdrantTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/redis.py b/venv/Lib/site-packages/langchain/retrievers/self_query/redis.py new file mode 100644 index 00000000..69e7d5c9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/redis.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.redis import RedisTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "RedisTranslator": "langchain_community.query_constructors.redis", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["RedisTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/supabase.py b/venv/Lib/site-packages/langchain/retrievers/self_query/supabase.py new file mode 100644 index 00000000..4941fec0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/supabase.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.supabase import SupabaseVectorTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SupabaseVectorTranslator": "langchain_community.query_constructors.supabase", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["SupabaseVectorTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/tencentvectordb.py b/venv/Lib/site-packages/langchain/retrievers/self_query/tencentvectordb.py new file mode 100644 index 00000000..c971a1c1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/tencentvectordb.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.tencentvectordb import ( + TencentVectorDBTranslator, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "TencentVectorDBTranslator": ( + "langchain_community.query_constructors.tencentvectordb" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["TencentVectorDBTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/timescalevector.py b/venv/Lib/site-packages/langchain/retrievers/self_query/timescalevector.py new file mode 100644 index 00000000..623ca390 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/timescalevector.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.timescalevector import ( + TimescaleVectorTranslator, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "TimescaleVectorTranslator": ( + "langchain_community.query_constructors.timescalevector" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["TimescaleVectorTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/vectara.py b/venv/Lib/site-packages/langchain/retrievers/self_query/vectara.py new file mode 100644 index 00000000..0fa15959 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/vectara.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.vectara import ( + VectaraTranslator, + process_value, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "VectaraTranslator": "langchain_community.query_constructors.vectara", + "process_value": "langchain_community.query_constructors.vectara", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["VectaraTranslator", "process_value"] diff --git a/venv/Lib/site-packages/langchain/retrievers/self_query/weaviate.py b/venv/Lib/site-packages/langchain/retrievers/self_query/weaviate.py new file mode 100644 index 00000000..5385258d --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/self_query/weaviate.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.query_constructors.weaviate import WeaviateTranslator + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "WeaviateTranslator": "langchain_community.query_constructors.weaviate", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["WeaviateTranslator"] diff --git a/venv/Lib/site-packages/langchain/retrievers/svm.py b/venv/Lib/site-packages/langchain/retrievers/svm.py new file mode 100644 index 00000000..4b50d5d5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/svm.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import SVMRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SVMRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SVMRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/tavily_search_api.py b/venv/Lib/site-packages/langchain/retrievers/tavily_search_api.py new file mode 100644 index 00000000..fba6f611 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/tavily_search_api.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import TavilySearchAPIRetriever + from langchain_community.retrievers.tavily_search_api import SearchDepth + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SearchDepth": "langchain_community.retrievers.tavily_search_api", + "TavilySearchAPIRetriever": "langchain_community.retrievers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearchDepth", + "TavilySearchAPIRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/tfidf.py b/venv/Lib/site-packages/langchain/retrievers/tfidf.py new file mode 100644 index 00000000..d7c36a32 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/tfidf.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import TFIDFRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"TFIDFRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TFIDFRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/time_weighted_retriever.py b/venv/Lib/site-packages/langchain/retrievers/time_weighted_retriever.py new file mode 100644 index 00000000..4e3edd0a --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/time_weighted_retriever.py @@ -0,0 +1,182 @@ +import datetime +from copy import deepcopy +from typing import Any, Optional + +from langchain_core.callbacks import ( + AsyncCallbackManagerForRetrieverRun, + CallbackManagerForRetrieverRun, +) +from langchain_core.documents import Document +from langchain_core.retrievers import BaseRetriever +from langchain_core.vectorstores import VectorStore +from pydantic import ConfigDict, Field + + +def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float: + """Get the hours passed between two datetimes.""" + return (time - ref_time).total_seconds() / 3600 + + +class TimeWeightedVectorStoreRetriever(BaseRetriever): + """Retriever that combines embedding similarity with + recency in retrieving values.""" + + vectorstore: VectorStore + """The vectorstore to store documents and determine salience.""" + + search_kwargs: dict = Field(default_factory=lambda: dict(k=100)) + """Keyword arguments to pass to the vectorstore similarity search.""" + + # TODO: abstract as a queue + memory_stream: list[Document] = Field(default_factory=list) + """The memory_stream of documents to search through.""" + + decay_rate: float = Field(default=0.01) + """The exponential decay factor used as (1.0-decay_rate)**(hrs_passed).""" + + k: int = 4 + """The maximum number of documents to retrieve in a given call.""" + + other_score_keys: list[str] = [] + """Other keys in the metadata to factor into the score, e.g. 'importance'.""" + + default_salience: Optional[float] = None + """The salience to assign memories not retrieved from the vector store. + + None assigns no salience to documents not fetched from the vector store. 
+ """ + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def _document_get_date(self, field: str, document: Document) -> datetime.datetime: + """Return the value of the date field of a document.""" + if field in document.metadata: + if isinstance(document.metadata[field], float): + return datetime.datetime.fromtimestamp(document.metadata[field]) + return document.metadata[field] + return datetime.datetime.now() + + def _get_combined_score( + self, + document: Document, + vector_relevance: Optional[float], + current_time: datetime.datetime, + ) -> float: + """Return the combined score for a document.""" + hours_passed = _get_hours_passed( + current_time, + self._document_get_date("last_accessed_at", document), + ) + score = (1.0 - self.decay_rate) ** hours_passed + for key in self.other_score_keys: + if key in document.metadata: + score += document.metadata[key] + if vector_relevance is not None: + score += vector_relevance + return score + + def get_salient_docs(self, query: str) -> dict[int, tuple[Document, float]]: + """Return documents that are salient to the query.""" + docs_and_scores: list[tuple[Document, float]] + docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores( + query, **self.search_kwargs + ) + results = {} + for fetched_doc, relevance in docs_and_scores: + if "buffer_idx" in fetched_doc.metadata: + buffer_idx = fetched_doc.metadata["buffer_idx"] + doc = self.memory_stream[buffer_idx] + results[buffer_idx] = (doc, relevance) + return results + + async def aget_salient_docs(self, query: str) -> dict[int, tuple[Document, float]]: + """Return documents that are salient to the query.""" + docs_and_scores: list[tuple[Document, float]] + docs_and_scores = ( + await self.vectorstore.asimilarity_search_with_relevance_scores( + query, **self.search_kwargs + ) + ) + results = {} + for fetched_doc, relevance in docs_and_scores: + if "buffer_idx" in fetched_doc.metadata: + buffer_idx = fetched_doc.metadata["buffer_idx"] + doc = self.memory_stream[buffer_idx] + results[buffer_idx] = (doc, relevance) + return results + + def _get_rescored_docs( + self, docs_and_scores: dict[Any, tuple[Document, Optional[float]]] + ) -> list[Document]: + current_time = datetime.datetime.now() + rescored_docs = [ + (doc, self._get_combined_score(doc, relevance, current_time)) + for doc, relevance in docs_and_scores.values() + ] + rescored_docs.sort(key=lambda x: x[1], reverse=True) + result = [] + # Ensure frequently accessed memories aren't forgotten + for doc, _ in rescored_docs[: self.k]: + # TODO: Update vector store doc once `update` method is exposed. 
+ buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]] + buffered_doc.metadata["last_accessed_at"] = current_time + result.append(buffered_doc) + return result + + def _get_relevant_documents( + self, query: str, *, run_manager: CallbackManagerForRetrieverRun + ) -> list[Document]: + docs_and_scores = { + doc.metadata["buffer_idx"]: (doc, self.default_salience) + for doc in self.memory_stream[-self.k :] + } + # If a doc is considered salient, update the salience score + docs_and_scores.update(self.get_salient_docs(query)) + return self._get_rescored_docs(docs_and_scores) + + async def _aget_relevant_documents( + self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun + ) -> list[Document]: + docs_and_scores = { + doc.metadata["buffer_idx"]: (doc, self.default_salience) + for doc in self.memory_stream[-self.k :] + } + # If a doc is considered salient, update the salience score + docs_and_scores.update(await self.aget_salient_docs(query)) + return self._get_rescored_docs(docs_and_scores) + + def add_documents(self, documents: list[Document], **kwargs: Any) -> list[str]: + """Add documents to vectorstore.""" + current_time = kwargs.get("current_time") + if current_time is None: + current_time = datetime.datetime.now() + # Avoid mutating input documents + dup_docs = [deepcopy(d) for d in documents] + for i, doc in enumerate(dup_docs): + if "last_accessed_at" not in doc.metadata: + doc.metadata["last_accessed_at"] = current_time + if "created_at" not in doc.metadata: + doc.metadata["created_at"] = current_time + doc.metadata["buffer_idx"] = len(self.memory_stream) + i + self.memory_stream.extend(dup_docs) + return self.vectorstore.add_documents(dup_docs, **kwargs) + + async def aadd_documents( + self, documents: list[Document], **kwargs: Any + ) -> list[str]: + """Add documents to vectorstore.""" + current_time = kwargs.get("current_time") + if current_time is None: + current_time = datetime.datetime.now() + # Avoid mutating input documents + dup_docs = [deepcopy(d) for d in documents] + for i, doc in enumerate(dup_docs): + if "last_accessed_at" not in doc.metadata: + doc.metadata["last_accessed_at"] = current_time + if "created_at" not in doc.metadata: + doc.metadata["created_at"] = current_time + doc.metadata["buffer_idx"] = len(self.memory_stream) + i + self.memory_stream.extend(dup_docs) + return await self.vectorstore.aadd_documents(dup_docs, **kwargs) diff --git a/venv/Lib/site-packages/langchain/retrievers/vespa_retriever.py b/venv/Lib/site-packages/langchain/retrievers/vespa_retriever.py new file mode 100644 index 00000000..04065ba1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/vespa_retriever.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import VespaRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"VespaRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "VespaRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/weaviate_hybrid_search.py b/venv/Lib/site-packages/langchain/retrievers/weaviate_hybrid_search.py new file mode 100644 index 00000000..51e468cd --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/weaviate_hybrid_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import WeaviateHybridSearchRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"WeaviateHybridSearchRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WeaviateHybridSearchRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/web_research.py b/venv/Lib/site-packages/langchain/retrievers/web_research.py new file mode 100644 index 00000000..5d853752 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/web_research.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers.web_research import ( + QuestionListOutputParser, + SearchQueries, + WebResearchRetriever, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "QuestionListOutputParser": "langchain_community.retrievers.web_research", + "SearchQueries": "langchain_community.retrievers.web_research", + "WebResearchRetriever": "langchain_community.retrievers.web_research", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = ["QuestionListOutputParser", "SearchQueries", "WebResearchRetriever"] diff --git a/venv/Lib/site-packages/langchain/retrievers/wikipedia.py b/venv/Lib/site-packages/langchain/retrievers/wikipedia.py new file mode 100644 index 00000000..23ce0733 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/wikipedia.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import WikipediaRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"WikipediaRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WikipediaRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/you.py b/venv/Lib/site-packages/langchain/retrievers/you.py new file mode 100644 index 00000000..5267e940 --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/you.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import YouRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"YouRetriever": "langchain_community.retrievers"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "YouRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/zep.py b/venv/Lib/site-packages/langchain/retrievers/zep.py new file mode 100644 index 00000000..ade5e59e --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/zep.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import ZepRetriever + from langchain_community.retrievers.zep import SearchScope, SearchType + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SearchScope": "langchain_community.retrievers.zep", + "SearchType": "langchain_community.retrievers.zep", + "ZepRetriever": "langchain_community.retrievers", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearchScope", + "SearchType", + "ZepRetriever", +] diff --git a/venv/Lib/site-packages/langchain/retrievers/zilliz.py b/venv/Lib/site-packages/langchain/retrievers/zilliz.py new file mode 100644 index 00000000..3b07c57c --- /dev/null +++ b/venv/Lib/site-packages/langchain/retrievers/zilliz.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.retrievers import ZillizRetriever + from langchain_community.retrievers.zilliz import ZillizRetreiver + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "ZillizRetriever": "langchain_community.retrievers", + "ZillizRetreiver": "langchain_community.retrievers.zilliz", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ZillizRetriever", + "ZillizRetreiver", +] diff --git a/venv/Lib/site-packages/langchain/runnables/__init__.py b/venv/Lib/site-packages/langchain/runnables/__init__.py new file mode 100644 index 00000000..0a8b3c39 --- /dev/null +++ b/venv/Lib/site-packages/langchain/runnables/__init__.py @@ -0,0 +1,18 @@ +"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**. + +The LangChain Expression Language (LCEL) offers a declarative method to build +production-grade programs that harness the power of LLMs. + +Programs created using LCEL and LangChain Runnables inherently support +synchronous, asynchronous, batch, and streaming operations. + +Support for **async** allows servers hosting the LCEL based programs +to scale better for higher concurrent loads. + +**Batch** operations allow for processing multiple inputs in parallel. + +**Streaming** of intermediate outputs, as they're being generated, allows for +creating more responsive UX. + +This module contains non-core Runnable classes. +""" diff --git a/venv/Lib/site-packages/langchain/runnables/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/runnables/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..818f2248 Binary files /dev/null and b/venv/Lib/site-packages/langchain/runnables/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/runnables/__pycache__/hub.cpython-312.pyc b/venv/Lib/site-packages/langchain/runnables/__pycache__/hub.cpython-312.pyc new file mode 100644 index 00000000..ab2ec6da Binary files /dev/null and b/venv/Lib/site-packages/langchain/runnables/__pycache__/hub.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/runnables/__pycache__/openai_functions.cpython-312.pyc b/venv/Lib/site-packages/langchain/runnables/__pycache__/openai_functions.cpython-312.pyc new file mode 100644 index 00000000..c7ba1fec Binary files /dev/null and b/venv/Lib/site-packages/langchain/runnables/__pycache__/openai_functions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/runnables/hub.py b/venv/Lib/site-packages/langchain/runnables/hub.py new file mode 100644 index 00000000..71fad87b --- /dev/null +++ b/venv/Lib/site-packages/langchain/runnables/hub.py @@ -0,0 +1,31 @@ +from typing import Any, Optional + +from langchain_core.runnables.base import Input, Output, RunnableBindingBase + + +class HubRunnable(RunnableBindingBase[Input, Output]): + """ + An instance of a runnable stored in the LangChain Hub. 
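+
+    A minimal usage sketch (``rlm/rag-prompt`` is assumed to be a prompt
+    published on the LangChain Hub; any ``owner/repo[:commit]`` string is
+    handled the same way):
+
+    .. code-block:: python
+
+        from langchain.runnables.hub import HubRunnable
+
+        # Pulls the artifact once, at construction time, and binds to it.
+        rag_prompt = HubRunnable("rlm/rag-prompt")
+        prompt_value = rag_prompt.invoke(
+            {"context": "LangSmith is ...", "question": "What is LangSmith?"}
+        )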
+ """ + + owner_repo_commit: str + + def __init__( + self, + owner_repo_commit: str, + *, + api_url: Optional[str] = None, + api_key: Optional[str] = None, + **kwargs: Any, + ) -> None: + from langchain.hub import pull + + pulled = pull(owner_repo_commit, api_url=api_url, api_key=api_key) + super_kwargs = { + "kwargs": {}, + "config": {}, + **kwargs, + "bound": pulled, + "owner_repo_commit": owner_repo_commit, + } + super().__init__(**super_kwargs) diff --git a/venv/Lib/site-packages/langchain/runnables/openai_functions.py b/venv/Lib/site-packages/langchain/runnables/openai_functions.py new file mode 100644 index 00000000..c42d92d8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/runnables/openai_functions.py @@ -0,0 +1,47 @@ +from collections.abc import Mapping +from operator import itemgetter +from typing import Any, Callable, Optional, Union + +from langchain_core.messages import BaseMessage +from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser +from langchain_core.runnables import RouterRunnable, Runnable +from langchain_core.runnables.base import RunnableBindingBase +from typing_extensions import TypedDict + + +class OpenAIFunction(TypedDict): + """A function description for ChatOpenAI""" + + name: str + """The name of the function.""" + description: str + """The description of the function.""" + parameters: dict + """The parameters to the function.""" + + +class OpenAIFunctionsRouter(RunnableBindingBase[BaseMessage, Any]): + """A runnable that routes to the selected function.""" + + functions: Optional[list[OpenAIFunction]] + + def __init__( + self, + runnables: Mapping[ + str, + Union[ + Runnable[dict, Any], + Callable[[dict], Any], + ], + ], + functions: Optional[list[OpenAIFunction]] = None, + ): + if functions is not None: + assert len(functions) == len(runnables) + assert all(func["name"] in runnables for func in functions) + router = ( + JsonOutputFunctionsParser(args_only=False) + | {"key": itemgetter("name"), "input": itemgetter("arguments")} + | RouterRunnable(runnables) + ) + super().__init__(bound=router, kwargs={}, functions=functions) diff --git a/venv/Lib/site-packages/langchain/schema/__init__.py b/venv/Lib/site-packages/langchain/schema/__init__.py new file mode 100644 index 00000000..7853f3ec --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/__init__.py @@ -0,0 +1,82 @@ +"""**Schemas** are the LangChain Base Classes and Interfaces.""" + +from langchain_core.agents import AgentAction, AgentFinish +from langchain_core.caches import BaseCache +from langchain_core.chat_history import BaseChatMessageHistory +from langchain_core.documents import BaseDocumentTransformer, Document +from langchain_core.exceptions import LangChainException, OutputParserException +from langchain_core.memory import BaseMemory +from langchain_core.messages import ( + AIMessage, + BaseMessage, + ChatMessage, + FunctionMessage, + HumanMessage, + SystemMessage, + _message_from_dict, + get_buffer_string, + messages_from_dict, + messages_to_dict, +) +from langchain_core.messages.base import message_to_dict +from langchain_core.output_parsers import ( + BaseLLMOutputParser, + BaseOutputParser, + StrOutputParser, +) +from langchain_core.outputs import ( + ChatGeneration, + ChatResult, + Generation, + LLMResult, + RunInfo, +) +from langchain_core.prompt_values import PromptValue +from langchain_core.prompts import BasePromptTemplate, format_document +from langchain_core.retrievers import BaseRetriever +from langchain_core.stores import BaseStore + +RUN_KEY = 
"__run" + +# Backwards compatibility. +Memory = BaseMemory +_message_to_dict = message_to_dict + +__all__ = [ + "BaseCache", + "BaseMemory", + "BaseStore", + "AgentFinish", + "AgentAction", + "Document", + "BaseChatMessageHistory", + "BaseDocumentTransformer", + "BaseMessage", + "ChatMessage", + "FunctionMessage", + "HumanMessage", + "AIMessage", + "SystemMessage", + "messages_from_dict", + "messages_to_dict", + "message_to_dict", + "_message_to_dict", + "_message_from_dict", + "get_buffer_string", + "RunInfo", + "LLMResult", + "ChatResult", + "ChatGeneration", + "Generation", + "PromptValue", + "LangChainException", + "BaseRetriever", + "RUN_KEY", + "Memory", + "OutputParserException", + "StrOutputParser", + "BaseOutputParser", + "BaseLLMOutputParser", + "BasePromptTemplate", + "format_document", +] diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..5a0f7262 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/agent.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/agent.cpython-312.pyc new file mode 100644 index 00000000..39d57350 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/agent.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/cache.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/cache.cpython-312.pyc new file mode 100644 index 00000000..1ad476a8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/cache.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/chat.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/chat.cpython-312.pyc new file mode 100644 index 00000000..8d54cb5c Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/chat.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/chat_history.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/chat_history.cpython-312.pyc new file mode 100644 index 00000000..fef4adb0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/chat_history.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/document.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/document.cpython-312.pyc new file mode 100644 index 00000000..7c61aa83 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/document.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/embeddings.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/embeddings.cpython-312.pyc new file mode 100644 index 00000000..aa2da8a4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/embeddings.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/exceptions.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 00000000..5e0e36df Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/exceptions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/language_model.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/schema/__pycache__/language_model.cpython-312.pyc new file mode 100644 index 00000000..d8903c93 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/language_model.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/memory.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/memory.cpython-312.pyc new file mode 100644 index 00000000..bfa23924 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/messages.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/messages.cpython-312.pyc new file mode 100644 index 00000000..27179cff Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/messages.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/output.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/output.cpython-312.pyc new file mode 100644 index 00000000..7bb6f2a2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/output.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/output_parser.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/output_parser.cpython-312.pyc new file mode 100644 index 00000000..5806d073 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/output_parser.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..afb4a755 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/prompt_template.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/prompt_template.cpython-312.pyc new file mode 100644 index 00000000..75ce92ce Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/prompt_template.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/retriever.cpython-312.pyc new file mode 100644 index 00000000..3e2e09f3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/storage.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/storage.cpython-312.pyc new file mode 100644 index 00000000..4aea9b69 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/storage.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/__pycache__/vectorstore.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/__pycache__/vectorstore.cpython-312.pyc new file mode 100644 index 00000000..425bebc4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/__pycache__/vectorstore.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/agent.py b/venv/Lib/site-packages/langchain/schema/agent.py new file mode 100644 index 00000000..498a4aea --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/agent.py @@ -0,0 +1,3 @@ +from langchain_core.agents import AgentAction, AgentActionMessageLog, 
AgentFinish + +__all__ = ["AgentAction", "AgentActionMessageLog", "AgentFinish"] diff --git a/venv/Lib/site-packages/langchain/schema/cache.py b/venv/Lib/site-packages/langchain/schema/cache.py new file mode 100644 index 00000000..8407af18 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/cache.py @@ -0,0 +1,3 @@ +from langchain_core.caches import RETURN_VAL_TYPE, BaseCache + +__all__ = ["BaseCache", "RETURN_VAL_TYPE"] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/__init__.py b/venv/Lib/site-packages/langchain/schema/callbacks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..47994029 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..77038858 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/manager.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/manager.cpython-312.pyc new file mode 100644 index 00000000..446cdae2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/manager.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/stdout.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/stdout.cpython-312.pyc new file mode 100644 index 00000000..fd419a67 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/stdout.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/streaming_stdout.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/streaming_stdout.cpython-312.pyc new file mode 100644 index 00000000..099942cf Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/__pycache__/streaming_stdout.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/base.py b/venv/Lib/site-packages/langchain/schema/callbacks/base.py new file mode 100644 index 00000000..ab2053a0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/base.py @@ -0,0 +1,23 @@ +from langchain_core.callbacks.base import ( + AsyncCallbackHandler, + BaseCallbackHandler, + BaseCallbackManager, + CallbackManagerMixin, + ChainManagerMixin, + LLMManagerMixin, + RetrieverManagerMixin, + RunManagerMixin, + ToolManagerMixin, +) + +__all__ = [ + "RetrieverManagerMixin", + "LLMManagerMixin", + "ChainManagerMixin", + "ToolManagerMixin", + "CallbackManagerMixin", + "RunManagerMixin", + "BaseCallbackHandler", + "AsyncCallbackHandler", + "BaseCallbackManager", +] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/manager.py b/venv/Lib/site-packages/langchain/schema/callbacks/manager.py new file mode 100644 index 00000000..a459e1bb --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/manager.py @@ -0,0 +1,55 @@ +from langchain_core.callbacks.manager import ( + AsyncCallbackManager, + AsyncCallbackManagerForChainGroup, + 
AsyncCallbackManagerForChainRun, + AsyncCallbackManagerForLLMRun, + AsyncCallbackManagerForRetrieverRun, + AsyncCallbackManagerForToolRun, + AsyncParentRunManager, + AsyncRunManager, + BaseRunManager, + CallbackManager, + CallbackManagerForChainGroup, + CallbackManagerForChainRun, + CallbackManagerForLLMRun, + CallbackManagerForRetrieverRun, + CallbackManagerForToolRun, + ParentRunManager, + RunManager, + handle_event, + trace_as_chain_group, +) +from langchain_core.tracers.context import ( + collect_runs, + register_configure_hook, + tracing_enabled, + tracing_v2_enabled, +) +from langchain_core.utils.env import env_var_is_set + +__all__ = [ + "tracing_enabled", + "tracing_v2_enabled", + "collect_runs", + "trace_as_chain_group", + "handle_event", + "BaseRunManager", + "RunManager", + "ParentRunManager", + "AsyncRunManager", + "AsyncParentRunManager", + "CallbackManagerForLLMRun", + "AsyncCallbackManagerForLLMRun", + "CallbackManagerForChainRun", + "AsyncCallbackManagerForChainRun", + "CallbackManagerForToolRun", + "AsyncCallbackManagerForToolRun", + "CallbackManagerForRetrieverRun", + "AsyncCallbackManagerForRetrieverRun", + "CallbackManager", + "CallbackManagerForChainGroup", + "AsyncCallbackManager", + "AsyncCallbackManagerForChainGroup", + "register_configure_hook", + "env_var_is_set", +] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/stdout.py b/venv/Lib/site-packages/langchain/schema/callbacks/stdout.py new file mode 100644 index 00000000..754e5824 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/stdout.py @@ -0,0 +1,3 @@ +from langchain_core.callbacks.stdout import StdOutCallbackHandler + +__all__ = ["StdOutCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/streaming_stdout.py b/venv/Lib/site-packages/langchain/schema/callbacks/streaming_stdout.py new file mode 100644 index 00000000..35608689 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/streaming_stdout.py @@ -0,0 +1,3 @@ +from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler + +__all__ = ["StreamingStdOutCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__init__.py b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..b5bb348c Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..85b7fd2b Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/evaluation.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/evaluation.cpython-312.pyc new file mode 100644 index 00000000..6d4cd931 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/evaluation.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/langchain.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/langchain.cpython-312.pyc new file mode 100644 index 00000000..49b1eb7c Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/langchain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/langchain_v1.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/langchain_v1.cpython-312.pyc new file mode 100644 index 00000000..8d2cf184 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/langchain_v1.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/log_stream.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/log_stream.cpython-312.pyc new file mode 100644 index 00000000..3cc019ff Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/log_stream.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/root_listeners.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/root_listeners.cpython-312.pyc new file mode 100644 index 00000000..10d24ee8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/root_listeners.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/run_collector.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/run_collector.cpython-312.pyc new file mode 100644 index 00000000..72b5bc2b Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/run_collector.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/schemas.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/schemas.cpython-312.pyc new file mode 100644 index 00000000..d94cfdbe Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/schemas.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/stdout.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/stdout.cpython-312.pyc new file mode 100644 index 00000000..146971c6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/__pycache__/stdout.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/base.py b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/base.py new file mode 100644 index 00000000..ce38a8bc --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/base.py @@ -0,0 +1,3 @@ +from langchain_core.tracers.base import BaseTracer, TracerException + +__all__ = ["TracerException", "BaseTracer"] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/evaluation.py b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/evaluation.py new file mode 100644 index 00000000..363c74fc --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/evaluation.py @@ -0,0 +1,6 @@ +from langchain_core.tracers.evaluation import ( + EvaluatorCallbackHandler, + wait_for_all_evaluators, +) + +__all__ = 
["wait_for_all_evaluators", "EvaluatorCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/langchain.py b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/langchain.py new file mode 100644 index 00000000..8415dfab --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/langchain.py @@ -0,0 +1,8 @@ +from langchain_core.tracers.langchain import ( + LangChainTracer, + get_client, + log_error_once, + wait_for_all_tracers, +) + +__all__ = ["log_error_once", "wait_for_all_tracers", "get_client", "LangChainTracer"] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/langchain_v1.py b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/langchain_v1.py new file mode 100644 index 00000000..fca2d759 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/langchain_v1.py @@ -0,0 +1,3 @@ +from langchain_core.tracers.langchain_v1 import LangChainTracerV1, get_headers + +__all__ = ["get_headers", "LangChainTracerV1"] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/log_stream.py b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/log_stream.py new file mode 100644 index 00000000..ef3dd946 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/log_stream.py @@ -0,0 +1,9 @@ +from langchain_core.tracers.log_stream import ( + LogEntry, + LogStreamCallbackHandler, + RunLog, + RunLogPatch, + RunState, +) + +__all__ = ["LogEntry", "RunState", "RunLogPatch", "RunLog", "LogStreamCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/root_listeners.py b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/root_listeners.py new file mode 100644 index 00000000..0dee9bce --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/root_listeners.py @@ -0,0 +1,3 @@ +from langchain_core.tracers.root_listeners import RootListenersTracer + +__all__ = ["RootListenersTracer"] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/run_collector.py b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/run_collector.py new file mode 100644 index 00000000..1240026b --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/run_collector.py @@ -0,0 +1,3 @@ +from langchain_core.tracers.run_collector import RunCollectorCallbackHandler + +__all__ = ["RunCollectorCallbackHandler"] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/schemas.py b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/schemas.py new file mode 100644 index 00000000..6fb49dbf --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/schemas.py @@ -0,0 +1,27 @@ +from langchain_core.tracers.schemas import ( + BaseRun, + ChainRun, + LLMRun, + Run, + RunTypeEnum, + ToolRun, + TracerSession, + TracerSessionBase, + TracerSessionV1, + TracerSessionV1Base, + TracerSessionV1Create, +) + +__all__ = [ + "RunTypeEnum", + "TracerSessionV1Base", + "TracerSessionV1Create", + "TracerSessionV1", + "TracerSessionBase", + "TracerSession", + "BaseRun", + "LLMRun", + "ChainRun", + "ToolRun", + "Run", +] diff --git a/venv/Lib/site-packages/langchain/schema/callbacks/tracers/stdout.py b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/stdout.py new file mode 100644 index 00000000..b2ef6fdc --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/callbacks/tracers/stdout.py @@ -0,0 +1,13 @@ +from langchain_core.tracers.stdout import ( + ConsoleCallbackHandler, + 
FunctionCallbackHandler, + elapsed, + try_json_stringify, +) + +__all__ = [ + "try_json_stringify", + "elapsed", + "FunctionCallbackHandler", + "ConsoleCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain/schema/chat.py b/venv/Lib/site-packages/langchain/schema/chat.py new file mode 100644 index 00000000..1f9080f8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/chat.py @@ -0,0 +1,3 @@ +from langchain_core.chat_sessions import ChatSession + +__all__ = ["ChatSession"] diff --git a/venv/Lib/site-packages/langchain/schema/chat_history.py b/venv/Lib/site-packages/langchain/schema/chat_history.py new file mode 100644 index 00000000..08dcffe4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/chat_history.py @@ -0,0 +1,3 @@ +from langchain_core.chat_history import BaseChatMessageHistory + +__all__ = ["BaseChatMessageHistory"] diff --git a/venv/Lib/site-packages/langchain/schema/document.py b/venv/Lib/site-packages/langchain/schema/document.py new file mode 100644 index 00000000..8b5b254b --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/document.py @@ -0,0 +1,3 @@ +from langchain_core.documents import BaseDocumentTransformer, Document + +__all__ = ["Document", "BaseDocumentTransformer"] diff --git a/venv/Lib/site-packages/langchain/schema/embeddings.py b/venv/Lib/site-packages/langchain/schema/embeddings.py new file mode 100644 index 00000000..dd38b627 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/embeddings.py @@ -0,0 +1,3 @@ +from langchain_core.embeddings import Embeddings + +__all__ = ["Embeddings"] diff --git a/venv/Lib/site-packages/langchain/schema/exceptions.py b/venv/Lib/site-packages/langchain/schema/exceptions.py new file mode 100644 index 00000000..a26216c6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/exceptions.py @@ -0,0 +1,3 @@ +from langchain_core.exceptions import LangChainException + +__all__ = ["LangChainException"] diff --git a/venv/Lib/site-packages/langchain/schema/language_model.py b/venv/Lib/site-packages/langchain/schema/language_model.py new file mode 100644 index 00000000..63a0292b --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/language_model.py @@ -0,0 +1,15 @@ +from langchain_core.language_models import ( + BaseLanguageModel, + LanguageModelInput, + LanguageModelOutput, + get_tokenizer, +) +from langchain_core.language_models.base import _get_token_ids_default_method + +__all__ = [ + "get_tokenizer", + "BaseLanguageModel", + "_get_token_ids_default_method", + "LanguageModelInput", + "LanguageModelOutput", +] diff --git a/venv/Lib/site-packages/langchain/schema/memory.py b/venv/Lib/site-packages/langchain/schema/memory.py new file mode 100644 index 00000000..d2f3d73e --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/memory.py @@ -0,0 +1,3 @@ +from langchain_core.memory import BaseMemory + +__all__ = ["BaseMemory"] diff --git a/venv/Lib/site-packages/langchain/schema/messages.py b/venv/Lib/site-packages/langchain/schema/messages.py new file mode 100644 index 00000000..2c42e609 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/messages.py @@ -0,0 +1,51 @@ +from langchain_core.messages import ( + AIMessage, + AIMessageChunk, + AnyMessage, + BaseMessage, + BaseMessageChunk, + ChatMessage, + ChatMessageChunk, + FunctionMessage, + FunctionMessageChunk, + HumanMessage, + HumanMessageChunk, + SystemMessage, + SystemMessageChunk, + ToolMessage, + ToolMessageChunk, + _message_from_dict, + get_buffer_string, + merge_content, + message_to_dict, + messages_from_dict, + 
messages_to_dict, +) + +# Backwards compatibility. +_message_to_dict = message_to_dict + +__all__ = [ + "get_buffer_string", + "BaseMessage", + "merge_content", + "BaseMessageChunk", + "HumanMessage", + "HumanMessageChunk", + "AIMessage", + "AIMessageChunk", + "SystemMessage", + "SystemMessageChunk", + "FunctionMessage", + "FunctionMessageChunk", + "ToolMessage", + "ToolMessageChunk", + "ChatMessage", + "ChatMessageChunk", + "messages_to_dict", + "messages_from_dict", + "_message_to_dict", + "_message_from_dict", + "message_to_dict", + "AnyMessage", +] diff --git a/venv/Lib/site-packages/langchain/schema/output.py b/venv/Lib/site-packages/langchain/schema/output.py new file mode 100644 index 00000000..970986df --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/output.py @@ -0,0 +1,19 @@ +from langchain_core.outputs import ( + ChatGeneration, + ChatGenerationChunk, + ChatResult, + Generation, + GenerationChunk, + LLMResult, + RunInfo, +) + +__all__ = [ + "Generation", + "GenerationChunk", + "ChatGeneration", + "ChatGenerationChunk", + "RunInfo", + "ChatResult", + "LLMResult", +] diff --git a/venv/Lib/site-packages/langchain/schema/output_parser.py b/venv/Lib/site-packages/langchain/schema/output_parser.py new file mode 100644 index 00000000..b0d8e03a --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/output_parser.py @@ -0,0 +1,25 @@ +from langchain_core.exceptions import OutputParserException +from langchain_core.output_parsers import ( + BaseCumulativeTransformOutputParser, + BaseGenerationOutputParser, + BaseLLMOutputParser, + BaseOutputParser, + BaseTransformOutputParser, + StrOutputParser, +) +from langchain_core.output_parsers.base import T + +# Backwards compatibility. +NoOpOutputParser = StrOutputParser + +__all__ = [ + "BaseLLMOutputParser", + "BaseGenerationOutputParser", + "BaseOutputParser", + "BaseTransformOutputParser", + "BaseCumulativeTransformOutputParser", + "NoOpOutputParser", + "StrOutputParser", + "OutputParserException", + "T", +] diff --git a/venv/Lib/site-packages/langchain/schema/prompt.py b/venv/Lib/site-packages/langchain/schema/prompt.py new file mode 100644 index 00000000..8b3e3c44 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/prompt.py @@ -0,0 +1,3 @@ +from langchain_core.prompt_values import PromptValue + +__all__ = ["PromptValue"] diff --git a/venv/Lib/site-packages/langchain/schema/prompt_template.py b/venv/Lib/site-packages/langchain/schema/prompt_template.py new file mode 100644 index 00000000..49a3595b --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/prompt_template.py @@ -0,0 +1,3 @@ +from langchain_core.prompts import BasePromptTemplate, format_document + +__all__ = ["BasePromptTemplate", "format_document"] diff --git a/venv/Lib/site-packages/langchain/schema/retriever.py b/venv/Lib/site-packages/langchain/schema/retriever.py new file mode 100644 index 00000000..ca795d34 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/retriever.py @@ -0,0 +1,3 @@ +from langchain_core.retrievers import BaseRetriever + +__all__ = ["BaseRetriever"] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__init__.py b/venv/Lib/site-packages/langchain/schema/runnable/__init__.py new file mode 100644 index 00000000..c76c6ffb --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/__init__.py @@ -0,0 +1,58 @@ +"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**. 
+ +The LangChain Expression Language (LCEL) offers a declarative method to build +production-grade programs that harness the power of LLMs. + +Programs created using LCEL and LangChain Runnables inherently support +synchronous, asynchronous, batch, and streaming operations. + +Support for **async** allows servers hosting LCEL based programs to scale better +for higher concurrent loads. + +**Streaming** of intermediate outputs as they're being generated allows for +creating more responsive UX. + +This module contains schema and implementation of LangChain Runnables primitives. +""" + +from langchain_core.runnables.base import ( + Runnable, + RunnableBinding, + RunnableGenerator, + RunnableLambda, + RunnableMap, + RunnableParallel, + RunnableSequence, + RunnableSerializable, +) +from langchain_core.runnables.branch import RunnableBranch +from langchain_core.runnables.config import RunnableConfig, patch_config +from langchain_core.runnables.fallbacks import RunnableWithFallbacks +from langchain_core.runnables.passthrough import RunnablePassthrough +from langchain_core.runnables.router import RouterInput, RouterRunnable +from langchain_core.runnables.utils import ( + ConfigurableField, + ConfigurableFieldMultiOption, + ConfigurableFieldSingleOption, +) + +__all__ = [ + "ConfigurableField", + "ConfigurableFieldSingleOption", + "ConfigurableFieldMultiOption", + "patch_config", + "RouterInput", + "RouterRunnable", + "Runnable", + "RunnableSerializable", + "RunnableBinding", + "RunnableBranch", + "RunnableConfig", + "RunnableGenerator", + "RunnableLambda", + "RunnableMap", + "RunnableParallel", + "RunnablePassthrough", + "RunnableSequence", + "RunnableWithFallbacks", +] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9a51ce0c Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..f078cd88 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/branch.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/branch.cpython-312.pyc new file mode 100644 index 00000000..36b5859c Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/branch.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/config.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/config.cpython-312.pyc new file mode 100644 index 00000000..9aad4b20 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/config.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/configurable.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/configurable.cpython-312.pyc new file mode 100644 index 00000000..72d08e3c Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/configurable.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/fallbacks.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/fallbacks.cpython-312.pyc new file mode 100644 index 00000000..9f85199a Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/fallbacks.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/history.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/history.cpython-312.pyc new file mode 100644 index 00000000..3e357452 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/history.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/passthrough.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/passthrough.cpython-312.pyc new file mode 100644 index 00000000..8a66ce3d Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/passthrough.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/retry.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/retry.cpython-312.pyc new file mode 100644 index 00000000..9a657293 Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/retry.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/router.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/router.cpython-312.pyc new file mode 100644 index 00000000..de38a22f Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/router.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..f45700ae Binary files /dev/null and b/venv/Lib/site-packages/langchain/schema/runnable/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/schema/runnable/base.py b/venv/Lib/site-packages/langchain/schema/runnable/base.py new file mode 100644 index 00000000..c9bde221 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/base.py @@ -0,0 +1,38 @@ +from langchain_core.runnables.base import ( + Other, + Runnable, + RunnableBinding, + RunnableBindingBase, + RunnableEach, + RunnableEachBase, + RunnableGenerator, + RunnableLambda, + RunnableLike, + RunnableParallel, + RunnableSequence, + RunnableSerializable, + coerce_to_runnable, +) +from langchain_core.runnables.utils import Input, Output + +# Backwards compatibility. 
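+# "RunnableMap" is the pre-rename spelling of RunnableParallel; keeping the
+# alias lets code written against the old schema import keep working, e.g.
+# (hypothetical usage):
+#
+#     from langchain.schema.runnable.base import RunnableMap
+#
+#     mapped = RunnableMap({"doubled": lambda x: x * 2, "same": lambda x: x})
+#     mapped.invoke(3)  # -> {"doubled": 6, "same": 3}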
+RunnableMap = RunnableParallel + +__all__ = [ + "Input", + "Output", + "RunnableLike", + "Other", + "Runnable", + "RunnableSerializable", + "RunnableSequence", + "RunnableParallel", + "RunnableGenerator", + "RunnableLambda", + "RunnableEachBase", + "RunnableEach", + "RunnableBindingBase", + "RunnableBinding", + "RunnableMap", + "coerce_to_runnable", +] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/branch.py b/venv/Lib/site-packages/langchain/schema/runnable/branch.py new file mode 100644 index 00000000..ed83f197 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/branch.py @@ -0,0 +1,3 @@ +from langchain_core.runnables.branch import RunnableBranch + +__all__ = ["RunnableBranch"] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/config.py b/venv/Lib/site-packages/langchain/schema/runnable/config.py new file mode 100644 index 00000000..af0179f9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/config.py @@ -0,0 +1,27 @@ +from langchain_core.runnables.config import ( + EmptyDict, + RunnableConfig, + acall_func_with_variable_args, + call_func_with_variable_args, + ensure_config, + get_async_callback_manager_for_config, + get_callback_manager_for_config, + get_config_list, + get_executor_for_config, + merge_configs, + patch_config, +) + +__all__ = [ + "EmptyDict", + "RunnableConfig", + "ensure_config", + "get_config_list", + "patch_config", + "merge_configs", + "acall_func_with_variable_args", + "call_func_with_variable_args", + "get_callback_manager_for_config", + "get_async_callback_manager_for_config", + "get_executor_for_config", +] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/configurable.py b/venv/Lib/site-packages/langchain/schema/runnable/configurable.py new file mode 100644 index 00000000..a1463d57 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/configurable.py @@ -0,0 +1,15 @@ +from langchain_core.runnables.configurable import ( + DynamicRunnable, + RunnableConfigurableAlternatives, + RunnableConfigurableFields, + StrEnum, + make_options_spec, +) + +__all__ = [ + "DynamicRunnable", + "RunnableConfigurableFields", + "StrEnum", + "RunnableConfigurableAlternatives", + "make_options_spec", +] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/fallbacks.py b/venv/Lib/site-packages/langchain/schema/runnable/fallbacks.py new file mode 100644 index 00000000..7a54468d --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/fallbacks.py @@ -0,0 +1,3 @@ +from langchain_core.runnables.fallbacks import RunnableWithFallbacks + +__all__ = ["RunnableWithFallbacks"] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/history.py b/venv/Lib/site-packages/langchain/schema/runnable/history.py new file mode 100644 index 00000000..423b192b --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/history.py @@ -0,0 +1,11 @@ +from langchain_core.runnables.history import ( + GetSessionHistoryCallable, + MessagesOrDictWithMessages, + RunnableWithMessageHistory, +) + +__all__ = [ + "RunnableWithMessageHistory", + "GetSessionHistoryCallable", + "MessagesOrDictWithMessages", +] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/passthrough.py b/venv/Lib/site-packages/langchain/schema/runnable/passthrough.py new file mode 100644 index 00000000..aed683d3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/passthrough.py @@ -0,0 +1,8 @@ +from langchain_core.runnables.passthrough import ( + RunnableAssign, + RunnablePassthrough, + aidentity, + 
identity, +) + +__all__ = ["aidentity", "identity", "RunnablePassthrough", "RunnableAssign"] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/retry.py b/venv/Lib/site-packages/langchain/schema/runnable/retry.py new file mode 100644 index 00000000..d146c8ec --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/retry.py @@ -0,0 +1,3 @@ +from langchain_core.runnables.retry import RunnableRetry, U + +__all__ = ["RunnableRetry", "U"] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/router.py b/venv/Lib/site-packages/langchain/schema/runnable/router.py new file mode 100644 index 00000000..259dd677 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/router.py @@ -0,0 +1,3 @@ +from langchain_core.runnables.router import RouterInput, RouterRunnable + +__all__ = ["RouterInput", "RouterRunnable"] diff --git a/venv/Lib/site-packages/langchain/schema/runnable/utils.py b/venv/Lib/site-packages/langchain/schema/runnable/utils.py new file mode 100644 index 00000000..1a03a6af --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/runnable/utils.py @@ -0,0 +1,51 @@ +from langchain_core.runnables.utils import ( + Addable, + AddableDict, + AnyConfigurableField, + ConfigurableField, + ConfigurableFieldMultiOption, + ConfigurableFieldSingleOption, + ConfigurableFieldSpec, + GetLambdaSource, + Input, + IsFunctionArgDict, + IsLocalDict, + Output, + SupportsAdd, + aadd, + accepts_config, + accepts_run_manager, + add, + gated_coro, + gather_with_concurrency, + get_function_first_arg_dict_keys, + get_lambda_source, + get_unique_config_specs, + indent_lines_after_first, +) + +__all__ = [ + "accepts_run_manager", + "accepts_config", + "IsLocalDict", + "IsFunctionArgDict", + "GetLambdaSource", + "get_function_first_arg_dict_keys", + "get_lambda_source", + "indent_lines_after_first", + "AddableDict", + "SupportsAdd", + "add", + "ConfigurableField", + "ConfigurableFieldSingleOption", + "ConfigurableFieldMultiOption", + "ConfigurableFieldSpec", + "get_unique_config_specs", + "aadd", + "gated_coro", + "gather_with_concurrency", + "Input", + "Output", + "Addable", + "AnyConfigurableField", +] diff --git a/venv/Lib/site-packages/langchain/schema/storage.py b/venv/Lib/site-packages/langchain/schema/storage.py new file mode 100644 index 00000000..70d6925a --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/storage.py @@ -0,0 +1,3 @@ +from langchain_core.stores import BaseStore, K, V + +__all__ = ["BaseStore", "K", "V"] diff --git a/venv/Lib/site-packages/langchain/schema/vectorstore.py b/venv/Lib/site-packages/langchain/schema/vectorstore.py new file mode 100644 index 00000000..776dbce0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/schema/vectorstore.py @@ -0,0 +1,3 @@ +from langchain_core.vectorstores import VST, VectorStore, VectorStoreRetriever + +__all__ = ["VectorStore", "VectorStoreRetriever", "VST"] diff --git a/venv/Lib/site-packages/langchain/serpapi.py b/venv/Lib/site-packages/langchain/serpapi.py new file mode 100644 index 00000000..d1f2b0cb --- /dev/null +++ b/venv/Lib/site-packages/langchain/serpapi.py @@ -0,0 +1,25 @@ +"""For backwards compatibility.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import SerpAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SerpAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SerpAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/smith/__init__.py b/venv/Lib/site-packages/langchain/smith/__init__.py new file mode 100644 index 00000000..2c6c1af6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/smith/__init__.py @@ -0,0 +1,102 @@ +"""**LangSmith** utilities. + +This module provides utilities for connecting to `LangSmith `_. For more information on LangSmith, see the `LangSmith documentation `_. + +**Evaluation** + +LangSmith helps you evaluate Chains and other language model application components using a number of LangChain evaluators. +An example of this is shown below, assuming you've created a LangSmith dataset called ````: + +.. code-block:: python + + from langsmith import Client + from langchain_community.chat_models import ChatOpenAI + from langchain.chains import LLMChain + from langchain.smith import RunEvalConfig, run_on_dataset + + # Chains may have memory. Passing in a constructor function lets the + # evaluation framework avoid cross-contamination between runs. + def construct_chain(): + llm = ChatOpenAI(temperature=0) + chain = LLMChain.from_string( + llm, + "What's the answer to {your_input_key}" + ) + return chain + + # Load off-the-shelf evaluators via config or the EvaluatorType (string or enum) + evaluation_config = RunEvalConfig( + evaluators=[ + "qa", # "Correctness" against a reference answer + "embedding_distance", + RunEvalConfig.Criteria("helpfulness"), + RunEvalConfig.Criteria({ + "fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?" + }), + ] + ) + + client = Client() + run_on_dataset( + client, + "", + construct_chain, + evaluation=evaluation_config, + ) + +You can also create custom evaluators by subclassing the +:class:`StringEvaluator ` +or LangSmith's `RunEvaluator` classes. + +.. code-block:: python + + from typing import Optional + from langchain.evaluation import StringEvaluator + + class MyStringEvaluator(StringEvaluator): + + @property + def requires_input(self) -> bool: + return False + + @property + def requires_reference(self) -> bool: + return True + + @property + def evaluation_name(self) -> str: + return "exact_match" + + def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict: + return {"score": prediction == reference} + + + evaluation_config = RunEvalConfig( + custom_evaluators = [MyStringEvaluator()], + ) + + run_on_dataset( + client, + "", + construct_chain, + evaluation=evaluation_config, + ) + +**Primary Functions** + +- :func:`arun_on_dataset `: Asynchronous function to evaluate a chain, agent, or other LangChain component over a dataset. +- :func:`run_on_dataset `: Function to evaluate a chain, agent, or other LangChain component over a dataset. +- :class:`RunEvalConfig `: Class representing the configuration for running evaluation. 
+  You can select evaluators by :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>` or config, or you can pass in `custom_evaluators`
+"""  # noqa: E501

+from langchain.smith.evaluation import (
+    RunEvalConfig,
+    arun_on_dataset,
+    run_on_dataset,
+)
+
+__all__ = [
+    "arun_on_dataset",
+    "run_on_dataset",
+    "RunEvalConfig",
+]
diff --git a/venv/Lib/site-packages/langchain/smith/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/smith/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..0564a777
Binary files /dev/null and b/venv/Lib/site-packages/langchain/smith/__pycache__/__init__.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/__init__.py b/venv/Lib/site-packages/langchain/smith/evaluation/__init__.py
new file mode 100644
index 00000000..125dfdcf
--- /dev/null
+++ b/venv/Lib/site-packages/langchain/smith/evaluation/__init__.py
@@ -0,0 +1,68 @@
+"""LangSmith evaluation utilities.
+
+This module provides utilities for evaluating Chains and other language model
+applications using LangChain evaluators and LangSmith.
+
+For more information on the LangSmith API, see the `LangSmith API documentation <https://docs.smith.langchain.com/>`_.
+
+**Example**
+
+.. code-block:: python
+
+    from langsmith import Client
+    from langchain_community.chat_models import ChatOpenAI
+    from langchain.chains import LLMChain
+    from langchain.smith import EvaluatorType, RunEvalConfig, run_on_dataset
+
+    def construct_chain():
+        llm = ChatOpenAI(temperature=0)
+        chain = LLMChain.from_string(
+            llm,
+            "What's the answer to {your_input_key}"
+        )
+        return chain
+
+    evaluation_config = RunEvalConfig(
+        evaluators=[
+            EvaluatorType.QA,  # "Correctness" against a reference answer
+            EvaluatorType.EMBEDDING_DISTANCE,
+            RunEvalConfig.Criteria("helpfulness"),
+            RunEvalConfig.Criteria({
+                "fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?"
+            }),
+        ]
+    )
+
+    client = Client()
+    run_on_dataset(
+        client,
+        "<my_dataset_name>",
+        construct_chain,
+        evaluation=evaluation_config
+    )
+
+**Attributes**
+
+- ``arun_on_dataset``: Asynchronous function to evaluate a chain or other LangChain component over a dataset.
+- ``run_on_dataset``: Function to evaluate a chain or other LangChain component over a dataset.
+- ``RunEvalConfig``: Class representing the configuration for running evaluation.
+- ``StringRunEvaluatorChain``: Class representing a string run evaluator chain.
+- ``InputFormatError``: Exception raised when the input format is incorrect.
+ +""" # noqa: E501 + +from langchain.smith.evaluation.config import RunEvalConfig +from langchain.smith.evaluation.runner_utils import ( + InputFormatError, + arun_on_dataset, + run_on_dataset, +) +from langchain.smith.evaluation.string_run_evaluator import StringRunEvaluatorChain + +__all__ = [ + "InputFormatError", + "arun_on_dataset", + "run_on_dataset", + "StringRunEvaluatorChain", + "RunEvalConfig", +] diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..802baf46 Binary files /dev/null and b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/config.cpython-312.pyc b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/config.cpython-312.pyc new file mode 100644 index 00000000..1e7a4252 Binary files /dev/null and b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/config.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/name_generation.cpython-312.pyc b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/name_generation.cpython-312.pyc new file mode 100644 index 00000000..f77ab2cc Binary files /dev/null and b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/name_generation.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/progress.cpython-312.pyc b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/progress.cpython-312.pyc new file mode 100644 index 00000000..8d4b479c Binary files /dev/null and b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/progress.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/runner_utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/runner_utils.cpython-312.pyc new file mode 100644 index 00000000..94787ad1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/runner_utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/string_run_evaluator.cpython-312.pyc b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/string_run_evaluator.cpython-312.pyc new file mode 100644 index 00000000..9f726d1d Binary files /dev/null and b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/string_run_evaluator.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..1702daf6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/smith/evaluation/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/config.py b/venv/Lib/site-packages/langchain/smith/evaluation/config.py new file mode 100644 index 00000000..9a3d6e07 --- /dev/null +++ b/venv/Lib/site-packages/langchain/smith/evaluation/config.py @@ -0,0 +1,379 @@ +"""Configuration for run evaluators.""" + +from collections.abc import Sequence +from typing import Any, Callable, Optional, Union + +from langchain_core.embeddings import Embeddings +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate 
+from langsmith import RunEvaluator
+from langsmith.evaluation.evaluator import EvaluationResult, EvaluationResults
+from langsmith.schemas import Example, Run
+from pydantic import BaseModel, ConfigDict, Field
+
+from langchain.evaluation.criteria.eval_chain import CRITERIA_TYPE
+from langchain.evaluation.embedding_distance.base import (
+    EmbeddingDistance as EmbeddingDistanceEnum,
+)
+from langchain.evaluation.schema import EvaluatorType, StringEvaluator
+from langchain.evaluation.string_distance.base import (
+    StringDistance as StringDistanceEnum,
+)
+
+RUN_EVALUATOR_LIKE = Callable[
+    [Run, Optional[Example]], Union[EvaluationResult, EvaluationResults, dict]
+]
+BATCH_EVALUATOR_LIKE = Callable[
+    [Sequence[Run], Optional[Sequence[Example]]],
+    Union[EvaluationResult, EvaluationResults, dict],
+]
+
+
+class EvalConfig(BaseModel):
+    """Configuration for a given run evaluator.
+
+    Parameters
+    ----------
+    evaluator_type : EvaluatorType
+        The type of evaluator to use.
+
+    Methods
+    -------
+    get_kwargs()
+        Get the keyword arguments for the evaluator configuration.
+
+    """
+
+    evaluator_type: EvaluatorType
+
+    def get_kwargs(self) -> dict[str, Any]:
+        """Get the keyword arguments for the load_evaluator call.
+
+        Returns
+        -------
+        Dict[str, Any]
+            The keyword arguments for the load_evaluator call.
+
+        """
+        kwargs = {}
+        for field, val in self:
+            if field == "evaluator_type":
+                continue
+            elif val is None:
+                continue
+            kwargs[field] = val
+        return kwargs
+
+
+class SingleKeyEvalConfig(EvalConfig):
+    """Configuration for a run evaluator that only requires a single key."""
+
+    reference_key: Optional[str] = None
+    """The key in the dataset run to use as the reference string.
+    If not provided, we will attempt to infer automatically."""
+    prediction_key: Optional[str] = None
+    """The key from the traced run's outputs dictionary to use to
+    represent the prediction. If not provided, it will be inferred
+    automatically."""
+    input_key: Optional[str] = None
+    """The key from the traced run's inputs dictionary to use to represent the
+    input. If not provided, it will be inferred automatically."""
+
+    def get_kwargs(self) -> dict[str, Any]:
+        kwargs = super().get_kwargs()
+        # Filter out the keys that are not needed for the evaluator.
+        for key in ["reference_key", "prediction_key", "input_key"]:
+            kwargs.pop(key, None)
+        return kwargs
+
+
+CUSTOM_EVALUATOR_TYPE = Union[RUN_EVALUATOR_LIKE, RunEvaluator, StringEvaluator]
+SINGLE_EVAL_CONFIG_TYPE = Union[EvaluatorType, str, EvalConfig]
+
+
+class RunEvalConfig(BaseModel):
+    """Configuration for a run evaluation.
+
+    Parameters
+    ----------
+    evaluators : List[Union[EvaluatorType, EvalConfig, RunEvaluator, Callable]]
+        Configurations for which evaluators to apply to the dataset run.
+        Each can be the string of an
+        :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>`, such
+        as EvaluatorType.QA, the evaluator type string ("qa"), or a configuration for a
+        given evaluator (e.g.,
+        :class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`).
+
+    custom_evaluators : Optional[List[Union[RunEvaluator, StringEvaluator]]]
+        Custom evaluators to apply to the dataset run.
+
+    reference_key : Optional[str]
+        The key in the dataset run to use as the reference string.
+        If not provided, it will be inferred automatically.
+
+    prediction_key : Optional[str]
+        The key from the traced run's outputs dictionary to use to
+        represent the prediction. If not provided, it will be inferred
+        automatically.
+
+    input_key : Optional[str]
+        The key from the traced run's inputs dictionary to use to represent the
+        input. If not provided, it will be inferred automatically.
+
+    eval_llm : Optional[BaseLanguageModel]
+        The language model to pass to any evaluators that use a language model.
+    """  # noqa: E501
+
+    evaluators: list[
+        Union[
+            SINGLE_EVAL_CONFIG_TYPE,
+            CUSTOM_EVALUATOR_TYPE,
+        ]
+    ] = Field(default_factory=list)
+    """Configurations for which evaluators to apply to the dataset run.
+    Each can be the string of an
+    :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>`, such
+    as `EvaluatorType.QA`, the evaluator type string ("qa"), or a configuration for a
+    given evaluator
+    (e.g.,
+    :class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`)."""
+    custom_evaluators: Optional[list[CUSTOM_EVALUATOR_TYPE]] = None
+    """Custom evaluators to apply to the dataset run."""
+    batch_evaluators: Optional[list[BATCH_EVALUATOR_LIKE]] = None
+    """Evaluators that run on an aggregate/batch level.
+
+    These generate 1 or more metrics that are assigned to the full test run.
+    As a result, they are not associated with individual traces.
+    """
+
+    reference_key: Optional[str] = None
+    """The key in the dataset run to use as the reference string.
+    If not provided, we will attempt to infer automatically."""
+    prediction_key: Optional[str] = None
+    """The key from the traced run's outputs dictionary to use to
+    represent the prediction. If not provided, it will be inferred
+    automatically."""
+    input_key: Optional[str] = None
+    """The key from the traced run's inputs dictionary to use to represent the
+    input. If not provided, it will be inferred automatically."""
+    eval_llm: Optional[BaseLanguageModel] = None
+    """The language model to pass to any evaluators that require one."""
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )
+
+    class Criteria(SingleKeyEvalConfig):
+        """Configuration for a reference-free criteria evaluator.
+
+        Parameters
+        ----------
+        criteria : Optional[CRITERIA_TYPE]
+            The criteria to evaluate.
+        llm : Optional[BaseLanguageModel]
+            The language model to use for the evaluation chain.
+
+        """
+
+        criteria: Optional[CRITERIA_TYPE] = None
+        llm: Optional[BaseLanguageModel] = None
+        evaluator_type: EvaluatorType = EvaluatorType.CRITERIA
+
+        def __init__(
+            self, criteria: Optional[CRITERIA_TYPE] = None, **kwargs: Any
+        ) -> None:
+            super().__init__(criteria=criteria, **kwargs)  # type: ignore[call-arg]
+
+    class LabeledCriteria(SingleKeyEvalConfig):
+        """Configuration for a labeled (with references) criteria evaluator.
+
+        Parameters
+        ----------
+        criteria : Optional[CRITERIA_TYPE]
+            The criteria to evaluate.
+        llm : Optional[BaseLanguageModel]
+            The language model to use for the evaluation chain.
+        """
+
+        criteria: Optional[CRITERIA_TYPE] = None
+        llm: Optional[BaseLanguageModel] = None
+        evaluator_type: EvaluatorType = EvaluatorType.LABELED_CRITERIA
+
+        def __init__(
+            self, criteria: Optional[CRITERIA_TYPE] = None, **kwargs: Any
+        ) -> None:
+            super().__init__(criteria=criteria, **kwargs)  # type: ignore[call-arg]
+
+    class EmbeddingDistance(SingleKeyEvalConfig):
+        """Configuration for an embedding distance evaluator.
+
+        Parameters
+        ----------
+        embeddings : Optional[Embeddings]
+            The embeddings to use for computing the distance.
+
+        distance_metric : Optional[EmbeddingDistanceEnum]
+            The distance metric to use for computing the distance.
+ + """ + + evaluator_type: EvaluatorType = EvaluatorType.EMBEDDING_DISTANCE + embeddings: Optional[Embeddings] = None + distance_metric: Optional[EmbeddingDistanceEnum] = None + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + class StringDistance(SingleKeyEvalConfig): + """Configuration for a string distance evaluator. + + Parameters + ---------- + distance : Optional[StringDistanceEnum] + The string distance metric to use. + + """ + + evaluator_type: EvaluatorType = EvaluatorType.STRING_DISTANCE + distance: Optional[StringDistanceEnum] = None + """The string distance metric to use. + damerau_levenshtein: The Damerau-Levenshtein distance. + levenshtein: The Levenshtein distance. + jaro: The Jaro distance. + jaro_winkler: The Jaro-Winkler distance. + """ + normalize_score: bool = True + """Whether to normalize the distance to between 0 and 1. + Applies only to the Levenshtein and Damerau-Levenshtein distances.""" + + class QA(SingleKeyEvalConfig): + """Configuration for a QA evaluator. + + Parameters + ---------- + prompt : Optional[BasePromptTemplate] + The prompt template to use for generating the question. + llm : Optional[BaseLanguageModel] + The language model to use for the evaluation chain. + """ + + evaluator_type: EvaluatorType = EvaluatorType.QA + llm: Optional[BaseLanguageModel] = None + prompt: Optional[BasePromptTemplate] = None + + class ContextQA(SingleKeyEvalConfig): + """Configuration for a context-based QA evaluator. + + Parameters + ---------- + prompt : Optional[BasePromptTemplate] + The prompt template to use for generating the question. + llm : Optional[BaseLanguageModel] + The language model to use for the evaluation chain. + + """ + + evaluator_type: EvaluatorType = EvaluatorType.CONTEXT_QA + llm: Optional[BaseLanguageModel] = None + prompt: Optional[BasePromptTemplate] = None + + class CoTQA(SingleKeyEvalConfig): + """Configuration for a context-based QA evaluator. + + Parameters + ---------- + prompt : Optional[BasePromptTemplate] + The prompt template to use for generating the question. + llm : Optional[BaseLanguageModel] + The language model to use for the evaluation chain. + + """ + + evaluator_type: EvaluatorType = EvaluatorType.CONTEXT_QA + llm: Optional[BaseLanguageModel] = None + prompt: Optional[BasePromptTemplate] = None + + class JsonValidity(SingleKeyEvalConfig): + """Configuration for a json validity evaluator. + + Parameters + ---------- + """ + + evaluator_type: EvaluatorType = EvaluatorType.JSON_VALIDITY + + class JsonEqualityEvaluator(EvalConfig): + """Configuration for a json equality evaluator. + + Parameters + ---------- + """ + + evaluator_type: EvaluatorType = EvaluatorType.JSON_EQUALITY + + class ExactMatch(SingleKeyEvalConfig): + """Configuration for an exact match string evaluator. + + Parameters + ---------- + ignore_case : bool + Whether to ignore case when comparing strings. + ignore_punctuation : bool + Whether to ignore punctuation when comparing strings. + ignore_numbers : bool + Whether to ignore numbers when comparing strings. + """ + + evaluator_type: EvaluatorType = EvaluatorType.EXACT_MATCH + ignore_case: bool = False + ignore_punctuation: bool = False + ignore_numbers: bool = False + + class RegexMatch(SingleKeyEvalConfig): + """Configuration for a regex match string evaluator. + + Parameters + ---------- + flags : int + The flags to pass to the regex. Example: re.IGNORECASE. 
+ """ + + evaluator_type: EvaluatorType = EvaluatorType.REGEX_MATCH + flags: int = 0 + + class ScoreString(SingleKeyEvalConfig): + """Configuration for a score string evaluator. + This is like the criteria evaluator but it is configured by + default to return a score on the scale from 1-10. + + It is recommended to normalize these scores + by setting `normalize_by` to 10. + + Parameters + ---------- + criteria : Optional[CRITERIA_TYPE] + The criteria to evaluate. + llm : Optional[BaseLanguageModel] + The language model to use for the evaluation chain. + normalize_by: Optional[int] = None + If you want to normalize the score, the denominator to use. + If not provided, the score will be between 1 and 10 (by default). + prompt : Optional[BasePromptTemplate] + + """ + + evaluator_type: EvaluatorType = EvaluatorType.SCORE_STRING + criteria: Optional[CRITERIA_TYPE] = None + llm: Optional[BaseLanguageModel] = None + normalize_by: Optional[float] = None + prompt: Optional[BasePromptTemplate] = None + + def __init__( + self, + criteria: Optional[CRITERIA_TYPE] = None, + normalize_by: Optional[float] = None, + **kwargs: Any, + ) -> None: + super().__init__(criteria=criteria, normalize_by=normalize_by, **kwargs) # type: ignore[call-arg] + + class LabeledScoreString(ScoreString): + evaluator_type: EvaluatorType = EvaluatorType.LABELED_SCORE_STRING diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/name_generation.py b/venv/Lib/site-packages/langchain/smith/evaluation/name_generation.py new file mode 100644 index 00000000..191c7463 --- /dev/null +++ b/venv/Lib/site-packages/langchain/smith/evaluation/name_generation.py @@ -0,0 +1,727 @@ +import random + +adjectives = [ + "abandoned", + "aching", + "advanced", + "ample", + "artistic", + "back", + "best", + "bold", + "brief", + "clear", + "cold", + "complicated", + "cooked", + "crazy", + "crushing", + "damp", + "dear", + "definite", + "dependable", + "diligent", + "drab", + "earnest", + "elderly", + "enchanted", + "essential", + "excellent", + "extraneous", + "fixed", + "flowery", + "formal", + "fresh", + "frosty", + "giving", + "glossy", + "healthy", + "helpful", + "impressionable", + "kind", + "large", + "left", + "long", + "loyal", + "mealy", + "memorable", + "monthly", + "new", + "notable", + "only", + "ordinary", + "passionate", + "perfect", + "pertinent", + "proper", + "puzzled", + "reflecting", + "respectful", + "roasted", + "scholarly", + "shiny", + "slight", + "sparkling", + "spotless", + "stupendous", + "sunny", + "tart", + "terrific", + "timely", + "unique", + "upbeat", + "vacant", + "virtual", + "warm", + "weary", + "whispered", + "worthwhile", + "yellow", +] + +nouns = [ + "account", + "acknowledgment", + "address", + "advertising", + "airplane", + "animal", + "appointment", + "arrival", + "artist", + "attachment", + "attitude", + "availability", + "backpack", + "bag", + "balance", + "bass", + "bean", + "beauty", + "bibliography", + "bill", + "bite", + "blossom", + "boat", + "book", + "box", + "boy", + "bread", + "bridge", + "broccoli", + "building", + "butter", + "button", + "cabbage", + "cake", + "camera", + "camp", + "candle", + "candy", + "canvas", + "car", + "card", + "carrot", + "cart", + "case", + "cat", + "chain", + "chair", + "chalk", + "chance", + "change", + "channel", + "character", + "charge", + "charm", + "chart", + "check", + "cheek", + "cheese", + "chef", + "cherry", + "chicken", + "child", + "church", + "circle", + "class", + "clay", + "click", + "clock", + "cloth", + "cloud", + "clove", + "club", + "coach", + 
"coal", + "coast", + "coat", + "cod", + "coffee", + "collar", + "color", + "comb", + "comfort", + "comic", + "committee", + "community", + "company", + "comparison", + "competition", + "condition", + "connection", + "control", + "cook", + "copper", + "copy", + "corn", + "cough", + "country", + "cover", + "crate", + "crayon", + "cream", + "creator", + "crew", + "crown", + "current", + "curtain", + "curve", + "cushion", + "dad", + "daughter", + "day", + "death", + "debt", + "decision", + "deer", + "degree", + "design", + "desire", + "desk", + "detail", + "development", + "digestion", + "dime", + "dinner", + "direction", + "dirt", + "discovery", + "discussion", + "disease", + "disgust", + "distance", + "distribution", + "division", + "doctor", + "dog", + "door", + "drain", + "drawer", + "dress", + "drink", + "driving", + "dust", + "ear", + "earth", + "edge", + "education", + "effect", + "egg", + "end", + "energy", + "engine", + "error", + "event", + "example", + "exchange", + "existence", + "expansion", + "experience", + "expert", + "eye", + "face", + "fact", + "fall", + "family", + "farm", + "father", + "fear", + "feeling", + "field", + "finger", + "fire", + "fish", + "flag", + "flight", + "floor", + "flower", + "fold", + "food", + "football", + "force", + "form", + "frame", + "friend", + "frog", + "fruit", + "fuel", + "furniture", + "game", + "garden", + "gate", + "girl", + "glass", + "glove", + "goat", + "gold", + "government", + "grade", + "grain", + "grass", + "green", + "grip", + "group", + "growth", + "guide", + "guitar", + "hair", + "hall", + "hand", + "harbor", + "harmony", + "hat", + "head", + "health", + "heart", + "heat", + "hill", + "history", + "hobbies", + "hole", + "hope", + "horn", + "horse", + "hospital", + "hour", + "house", + "humor", + "idea", + "impulse", + "income", + "increase", + "industry", + "ink", + "insect", + "instrument", + "insurance", + "interest", + "invention", + "iron", + "island", + "jelly", + "jet", + "jewel", + "join", + "judge", + "juice", + "jump", + "kettle", + "key", + "kick", + "kiss", + "kitten", + "knee", + "knife", + "knowledge", + "land", + "language", + "laugh", + "law", + "lead", + "learning", + "leather", + "leg", + "lettuce", + "level", + "library", + "lift", + "light", + "limit", + "line", + "linen", + "lip", + "liquid", + "list", + "look", + "loss", + "love", + "lunch", + "machine", + "man", + "manager", + "map", + "marble", + "mark", + "market", + "mass", + "match", + "meal", + "measure", + "meat", + "meeting", + "memory", + "metal", + "middle", + "milk", + "mind", + "mine", + "minute", + "mist", + "mitten", + "mom", + "money", + "monkey", + "month", + "moon", + "morning", + "mother", + "motion", + "mountain", + "mouth", + "muscle", + "music", + "nail", + "name", + "nation", + "neck", + "need", + "news", + "night", + "noise", + "note", + "number", + "nut", + "observation", + "offer", + "oil", + "operation", + "opinion", + "orange", + "order", + "organization", + "ornament", + "oven", + "page", + "pail", + "pain", + "paint", + "pan", + "pancake", + "paper", + "parcel", + "parent", + "part", + "passenger", + "paste", + "payment", + "peace", + "pear", + "pen", + "pencil", + "person", + "pest", + "pet", + "picture", + "pie", + "pin", + "pipe", + "pizza", + "place", + "plane", + "plant", + "plastic", + "plate", + "play", + "pleasure", + "plot", + "plough", + "pocket", + "point", + "poison", + "police", + "pollution", + "popcorn", + "porter", + "position", + "pot", + "potato", + "powder", + "power", + "price", + "print", + "process", + 
"produce", + "product", + "profit", + "property", + "prose", + "protest", + "pull", + "pump", + "punishment", + "purpose", + "push", + "quarter", + "question", + "quiet", + "quill", + "quilt", + "quince", + "rabbit", + "rail", + "rain", + "range", + "rat", + "rate", + "ray", + "reaction", + "reading", + "reason", + "record", + "regret", + "relation", + "religion", + "representative", + "request", + "respect", + "rest", + "reward", + "rhythm", + "rice", + "river", + "road", + "roll", + "room", + "root", + "rose", + "route", + "rub", + "rule", + "run", + "sack", + "sail", + "salt", + "sand", + "scale", + "scarecrow", + "scarf", + "scene", + "scent", + "school", + "science", + "scissors", + "screw", + "sea", + "seat", + "secretary", + "seed", + "selection", + "self", + "sense", + "servant", + "shade", + "shake", + "shame", + "shape", + "sheep", + "sheet", + "shelf", + "ship", + "shirt", + "shock", + "shoe", + "shop", + "show", + "side", + "sign", + "silk", + "sink", + "sister", + "size", + "sky", + "sleep", + "smash", + "smell", + "smile", + "smoke", + "snail", + "snake", + "sneeze", + "snow", + "soap", + "society", + "sock", + "soda", + "sofa", + "son", + "song", + "sort", + "sound", + "soup", + "space", + "spark", + "speed", + "sponge", + "spoon", + "spray", + "spring", + "spy", + "square", + "stamp", + "star", + "start", + "statement", + "station", + "steam", + "steel", + "stem", + "step", + "stew", + "stick", + "stitch", + "stocking", + "stomach", + "stone", + "stop", + "store", + "story", + "stove", + "stranger", + "straw", + "stream", + "street", + "stretch", + "string", + "structure", + "substance", + "sugar", + "suggestion", + "suit", + "summer", + "sun", + "support", + "surprise", + "sweater", + "swim", + "system", + "table", + "tail", + "talk", + "tank", + "taste", + "tax", + "tea", + "teaching", + "team", + "tendency", + "test", + "texture", + "theory", + "thing", + "thought", + "thread", + "throat", + "thumb", + "thunder", + "ticket", + "time", + "tin", + "title", + "toad", + "toe", + "tooth", + "toothpaste", + "touch", + "town", + "toy", + "trade", + "train", + "transport", + "tray", + "treatment", + "tree", + "trick", + "trip", + "trouble", + "trousers", + "truck", + "tub", + "turkey", + "turn", + "twist", + "umbrella", + "uncle", + "underwear", + "unit", + "use", + "vacation", + "value", + "van", + "vase", + "vegetable", + "veil", + "vein", + "verse", + "vessel", + "view", + "visitor", + "voice", + "volcano", + "walk", + "wall", + "war", + "wash", + "waste", + "watch", + "water", + "wave", + "wax", + "way", + "wealth", + "weather", + "week", + "weight", + "wheel", + "whip", + "whistle", + "window", + "wine", + "wing", + "winter", + "wire", + "wish", + "woman", + "wood", + "wool", + "word", + "work", + "worm", + "wound", + "wrist", + "writer", + "yard", + "yoke", + "zebra", + "zinc", + "zipper", + "zone", +] + + +def random_name() -> str: + """Generate a random name.""" + adjective = random.choice(adjectives) + noun = random.choice(nouns) + number = random.randint(1, 100) + return f"{adjective}-{noun}-{number}" diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/progress.py b/venv/Lib/site-packages/langchain/smith/evaluation/progress.py new file mode 100644 index 00000000..af94ebb5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/smith/evaluation/progress.py @@ -0,0 +1,128 @@ +"""A simple progress bar for the console.""" + +import threading +from collections.abc import Sequence +from typing import Any, Optional +from uuid import UUID + +from 
langchain_core.callbacks import base as base_callbacks +from langchain_core.documents import Document +from langchain_core.outputs import LLMResult + + +class ProgressBarCallback(base_callbacks.BaseCallbackHandler): + """A simple progress bar for the console.""" + + def __init__(self, total: int, ncols: int = 50, **kwargs: Any): + """Initialize the progress bar. + + Args: + total: int, the total number of items to be processed. + ncols: int, the character width of the progress bar. + """ + self.total = total + self.ncols = ncols + self.counter = 0 + self.lock = threading.Lock() + self._print_bar() + + def increment(self) -> None: + """Increment the counter and update the progress bar.""" + with self.lock: + self.counter += 1 + self._print_bar() + + def _print_bar(self) -> None: + """Print the progress bar to the console.""" + progress = self.counter / self.total + arrow = "-" * int(round(progress * self.ncols) - 1) + ">" + spaces = " " * (self.ncols - len(arrow)) + print(f"\r[{arrow + spaces}] {self.counter}/{self.total}", end="") # noqa: T201 + + def on_chain_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + if parent_run_id is None: + self.increment() + + def on_chain_end( + self, + outputs: dict[str, Any], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + if parent_run_id is None: + self.increment() + + def on_retriever_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + if parent_run_id is None: + self.increment() + + def on_retriever_end( + self, + documents: Sequence[Document], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + if parent_run_id is None: + self.increment() + + def on_llm_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + if parent_run_id is None: + self.increment() + + def on_llm_end( + self, + response: LLMResult, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + if parent_run_id is None: + self.increment() + + def on_tool_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + if parent_run_id is None: + self.increment() + + def on_tool_end( + self, + output: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + if parent_run_id is None: + self.increment() diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/runner_utils.py b/venv/Lib/site-packages/langchain/smith/evaluation/runner_utils.py new file mode 100644 index 00000000..979e3e72 --- /dev/null +++ b/venv/Lib/site-packages/langchain/smith/evaluation/runner_utils.py @@ -0,0 +1,1520 @@ +"""Utilities for running language models or Chains over datasets.""" + +from __future__ import annotations + +import concurrent.futures +import dataclasses +import functools +import inspect +import logging +import uuid +from datetime import datetime, timezone +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Union, + cast, +) + +from langchain_core._api import warn_deprecated +from langchain_core.callbacks.manager import Callbacks +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage, messages_from_dict +from langchain_core.outputs import ChatResult, LLMResult +from 
langchain_core.runnables import Runnable, RunnableConfig, RunnableLambda +from langchain_core.runnables import config as runnable_config +from langchain_core.runnables import utils as runnable_utils +from langchain_core.tracers.evaluation import ( + EvaluatorCallbackHandler, + wait_for_all_evaluators, +) +from langchain_core.tracers.langchain import LangChainTracer +from langsmith.client import Client +from langsmith.env import get_git_info, get_langchain_env_var_metadata +from langsmith.evaluation import ( + EvaluationResult, + RunEvaluator, +) +from langsmith.evaluation import ( + run_evaluator as run_evaluator_dec, +) +from langsmith.run_helpers import as_runnable, is_traceable_function +from langsmith.schemas import Dataset, DataType, Example, Run, TracerSession +from langsmith.utils import LangSmithError +from requests import HTTPError +from typing_extensions import TypedDict + +from langchain.chains.base import Chain +from langchain.evaluation.loading import load_evaluator +from langchain.evaluation.schema import ( + EvaluatorType, + PairwiseStringEvaluator, + StringEvaluator, +) +from langchain.smith import evaluation as smith_eval +from langchain.smith.evaluation import config as smith_eval_config +from langchain.smith.evaluation import name_generation, progress + +if TYPE_CHECKING: + import pandas as pd + +logger = logging.getLogger(__name__) + +MODEL_OR_CHAIN_FACTORY = Union[ + Callable[[], Union[Chain, Runnable]], + BaseLanguageModel, + Callable[[dict], Any], + Runnable, + Chain, +] +MCF = Union[Callable[[], Union[Chain, Runnable]], BaseLanguageModel] + + +class InputFormatError(Exception): + """Raised when the input format is invalid.""" + + +## Shared Utilities + + +class TestResult(dict): + """A dictionary of the results of a single test run.""" + + def get_aggregate_feedback( + self, + ) -> pd.DataFrame: + """Return quantiles for the feedback scores. + + This method calculates and prints the quantiles for the feedback scores + across all feedback keys. + + Returns: + A DataFrame containing the quantiles for each feedback key. + """ + df = self.to_dataframe() + # Drop all things starting with inputs., outputs., and reference + to_drop = [ + col + for col in df.columns + if col.startswith("inputs.") + or col.startswith("outputs.") + or col in {"input", "output"} + or col.startswith("reference") + ] + return df.describe(include="all").drop(to_drop, axis=1) + + def to_dataframe(self) -> pd.DataFrame: + """Convert the results to a dataframe.""" + try: + import pandas as pd + except ImportError as e: + raise ImportError( + "Pandas is required to convert the results to a dataframe." + " to install pandas, run `pip install pandas`." 
+ ) from e + + indices = [] + records = [] + for example_id, result in self["results"].items(): + feedback = result["feedback"] + output_ = result.get("output") + if isinstance(output_, dict): + output = {f"outputs.{k}": v for k, v in output_.items()} + elif output_ is None: + output = {} + else: + output = {"output": output_} + + r = { + **{f"inputs.{k}": v for k, v in result["input"].items()}, + **output, + } + if "reference" in result: + if isinstance(result["reference"], dict): + r.update( + {f"reference.{k}": v for k, v in result["reference"].items()} + ) + else: + r["reference"] = result["reference"] + r.update( + { + **{f"feedback.{f.key}": f.score for f in feedback}, + "error": result.get("Error"), + "execution_time": result["execution_time"], + "run_id": result.get("run_id"), + } + ) + records.append(r) + indices.append(example_id) + + return pd.DataFrame(records, index=indices) + + +class EvalError(dict): + """Your architecture raised an error.""" + + def __init__(self, Error: BaseException, **kwargs: Any) -> None: + super().__init__(Error=Error, **kwargs) + + def __getattr__(self, name: str) -> Any: + try: + return self[name] + except KeyError: + raise AttributeError(f"'EvalError' object has no attribute '{name}'") + + +def _wrap_in_chain_factory( + llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, + dataset_name: str = "", +) -> MCF: + """Forgive the user if they pass in a chain without memory instead of a chain + factory. It's a common mistake. Raise a more helpful error message as well.""" + if isinstance(llm_or_chain_factory, Chain): + chain = llm_or_chain_factory + chain_class = chain.__class__.__name__ + if llm_or_chain_factory.memory is not None: + memory_class = chain.memory.__class__.__name__ + raise ValueError( + "Cannot directly evaluate a chain with stateful memory." + " To evaluate this chain, pass in a chain constructor" + " that initializes fresh memory each time it is called." + " This will safegaurd against information" + " leakage between dataset examples." + "\nFor example:\n\n" + "def chain_constructor():\n" + f" new_memory = {memory_class}(...)\n" + f" return {chain_class}" + "(memory=new_memory, ...)\n\n" + f'run_on_dataset("{dataset_name}", chain_constructor, ...)' + ) + return lambda: chain + elif isinstance(llm_or_chain_factory, BaseLanguageModel): + return llm_or_chain_factory + elif isinstance(llm_or_chain_factory, Runnable): + # Memory may exist here, but it's not elegant to check all those cases. + lcf = llm_or_chain_factory + return lambda: lcf + elif callable(llm_or_chain_factory): + if is_traceable_function(llm_or_chain_factory): + runnable_ = as_runnable(cast(Callable, llm_or_chain_factory)) + return lambda: runnable_ + try: + _model = llm_or_chain_factory() # type: ignore[call-arg] + except TypeError: + # It's an arbitrary function, wrap it in a RunnableLambda + user_func = cast(Callable, llm_or_chain_factory) + sig = inspect.signature(user_func) + logger.info(f"Wrapping function {sig} as RunnableLambda.") + wrapped = RunnableLambda(user_func) + return lambda: wrapped + constructor = cast(Callable, llm_or_chain_factory) + if isinstance(_model, BaseLanguageModel): + # It's not uncommon to do an LLM constructor instead of raw LLM, + # so we'll unpack it for the user. 
+ return _model + elif is_traceable_function(cast(Callable, _model)): + runnable_ = as_runnable(cast(Callable, _model)) + return lambda: runnable_ + elif not isinstance(_model, Runnable): + # This is unlikely to happen - a constructor for a model function + return lambda: RunnableLambda(constructor) + else: + # Typical correct case + return constructor + return llm_or_chain_factory + + +def _get_prompt(inputs: dict[str, Any]) -> str: + """Get prompt from inputs. + + Args: + inputs: The input dictionary. + + Returns: + A string prompt. + Raises: + InputFormatError: If the input format is invalid. + """ + if not inputs: + raise InputFormatError("Inputs should not be empty.") + + prompts = [] + if "prompt" in inputs: + if not isinstance(inputs["prompt"], str): + raise InputFormatError( + f"Expected string for 'prompt', got {type(inputs['prompt']).__name__}" + ) + prompts = [inputs["prompt"]] + elif "prompts" in inputs: + if not isinstance(inputs["prompts"], list) or not all( + isinstance(i, str) for i in inputs["prompts"] + ): + raise InputFormatError( + "Expected list of strings for 'prompts'," + f" got {type(inputs['prompts']).__name__}" + ) + prompts = inputs["prompts"] + elif len(inputs) == 1: + prompt_ = next(iter(inputs.values())) + if isinstance(prompt_, str): + prompts = [prompt_] + elif isinstance(prompt_, list) and all(isinstance(i, str) for i in prompt_): + prompts = prompt_ + else: + raise InputFormatError(f"LLM Run expects string prompt input. Got {inputs}") + else: + raise InputFormatError( + f"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}" + ) + if len(prompts) == 1: + return prompts[0] + else: + raise InputFormatError( + f"LLM Run expects single prompt input. Got {len(prompts)} prompts." + ) + + +class ChatModelInput(TypedDict): + """Input for a chat model. + + Parameters: + messages: List of chat messages. + """ + + messages: list[BaseMessage] + + +def _get_messages(inputs: dict[str, Any]) -> dict: + """Get Chat Messages from inputs. + + Args: + inputs: The input dictionary. + + Returns: + A list of chat messages. + Raises: + InputFormatError: If the input format is invalid. + """ + if not inputs: + raise InputFormatError("Inputs should not be empty.") + input_copy = inputs.copy() + if "messages" in inputs: + input_copy["input"] = input_copy.pop("messages") + elif len(inputs) == 1: + input_copy["input"] = next(iter(inputs.values())) + if "input" in input_copy: + raw_messages = input_copy["input"] + if isinstance(raw_messages, list) and all( + isinstance(i, dict) for i in raw_messages + ): + raw_messages = [raw_messages] + if len(raw_messages) == 1: + input_copy["input"] = messages_from_dict(raw_messages[0]) + else: + raise InputFormatError( + "Batch messages not supported. Please provide a" + " single list of messages." + ) + return input_copy + else: + raise InputFormatError( + f"Chat Run expects single List[dict] or List[List[dict]] 'messages'" + f" input. Got {inputs}" + ) + + +## Shared data validation utilities +def _validate_example_inputs_for_language_model( + first_example: Example, + input_mapper: Optional[Callable[[dict], Any]], +) -> None: + if input_mapper: + prompt_input = input_mapper(first_example.inputs or {}) + if not isinstance(prompt_input, str) and not ( + isinstance(prompt_input, list) + and all(isinstance(msg, BaseMessage) for msg in prompt_input) + ): + raise InputFormatError( + "When using an input_mapper to prepare dataset example inputs" + " for an LLM or chat model, the output must a single string or" + " a list of chat messages." 
+ f"\nGot: {prompt_input} of type {type(prompt_input)}." + ) + else: + try: + _get_prompt(first_example.inputs or {}) + except InputFormatError: + try: + _get_messages(first_example.inputs or {}) + except InputFormatError: + raise InputFormatError( + "Example inputs do not match language model input format. " + "Expected a dictionary with messages or a single prompt." + f" Got: {first_example.inputs}" + " Please update your dataset OR provide an input_mapper" + " to convert the example.inputs to a compatible format" + " for the llm or chat model you wish to evaluate." + ) + + +def _validate_example_inputs_for_chain( + first_example: Example, + chain: Chain, + input_mapper: Optional[Callable[[dict], Any]], +) -> None: + """Validate that the example inputs match the chain input keys.""" + if input_mapper: + first_inputs = input_mapper(first_example.inputs or {}) + missing_keys = set(chain.input_keys).difference(first_inputs) + if not isinstance(first_inputs, dict): + raise InputFormatError( + "When using an input_mapper to prepare dataset example" + " inputs for a chain, the mapped value must be a dictionary." + f"\nGot: {first_inputs} of type {type(first_inputs)}." + ) + if missing_keys: + raise InputFormatError( + "Missing keys after loading example using input_mapper." + f"\nExpected: {chain.input_keys}. Got: {first_inputs.keys()}" + ) + else: + first_inputs = first_example.inputs + missing_keys = set(chain.input_keys).difference(first_inputs) + if len(first_inputs) == 1 and len(chain.input_keys) == 1: + # We can pass this through the run method. + # Refrain from calling to validate. + pass + elif missing_keys: + raise InputFormatError( + "Example inputs missing expected chain input keys." + " Please provide an input_mapper to convert the example.inputs" + " to a compatible format for the chain you wish to evaluate." + f"Expected: {chain.input_keys}. 
" + f"Got: {first_inputs.keys()}" + ) + + +def _validate_example_inputs( + example: Example, + llm_or_chain_factory: MCF, + input_mapper: Optional[Callable[[dict], Any]], +) -> None: + """Validate that the example inputs are valid for the model.""" + if isinstance(llm_or_chain_factory, BaseLanguageModel): + _validate_example_inputs_for_language_model(example, input_mapper) + else: + chain = llm_or_chain_factory() + if isinstance(chain, Chain): + # Otherwise it's a runnable + _validate_example_inputs_for_chain(example, chain, input_mapper) + elif isinstance(chain, Runnable): + logger.debug(f"Skipping input validation for {chain}") + + +## Shared Evaluator Setup Utilities + + +def _setup_evaluation( + llm_or_chain_factory: MCF, + examples: list[Example], + evaluation: Optional[smith_eval.RunEvalConfig], + data_type: DataType, +) -> Optional[list[RunEvaluator]]: + """Configure the evaluators to run on the results of the chain.""" + if evaluation: + if isinstance(llm_or_chain_factory, BaseLanguageModel): + run_inputs, run_outputs = None, None + run_type = "llm" + else: + run_type = "chain" + chain = llm_or_chain_factory() + run_inputs = chain.input_keys if isinstance(chain, Chain) else None + run_outputs = chain.output_keys if isinstance(chain, Chain) else None + run_evaluators = _load_run_evaluators( + evaluation, + run_type, + data_type, + list(examples[0].outputs) if examples[0].outputs else None, + run_inputs, + run_outputs, + ) + else: + # TODO: Create a default helpfulness evaluator + run_evaluators = None + return run_evaluators + + +def _determine_input_key( + config: smith_eval.RunEvalConfig, + run_inputs: Optional[list[str]], +) -> Optional[str]: + input_key = None + if config.input_key: + input_key = config.input_key + if run_inputs and input_key not in run_inputs: + logger.warning( + f"Input key {input_key} not in chain's specified" + f" input keys {run_inputs}. Evaluation behavior may be undefined." + ) + elif run_inputs and len(run_inputs) == 1: + input_key = run_inputs[0] + elif run_inputs is not None and len(run_inputs) > 1: + logger.warning( + f"Chain expects multiple input keys: {run_inputs}," + f" Evaluator is likely to fail. Evaluation behavior may be undefined." + " Specify an input_key in the RunEvalConfig to avoid this warning." + ) + + return input_key + + +def _determine_prediction_key( + config: smith_eval.RunEvalConfig, + run_outputs: Optional[list[str]], +) -> Optional[str]: + prediction_key = None + if config.prediction_key: + prediction_key = config.prediction_key + if run_outputs and prediction_key not in run_outputs: + logger.warning( + f"Prediction key {prediction_key} not in chain's specified" + f" output keys {run_outputs}. Evaluation behavior may be undefined." + ) + elif run_outputs and len(run_outputs) == 1: + prediction_key = run_outputs[0] + elif run_outputs is not None and len(run_outputs) > 1: + logger.warning( + f"Chain expects multiple output keys: {run_outputs}," + f" Evaluation behavior may be undefined. Specify a prediction_key" + " in the RunEvalConfig to avoid this warning." 
+ ) + return prediction_key + + +def _determine_reference_key( + config: smith_eval.RunEvalConfig, + example_outputs: Optional[list[str]], +) -> Optional[str]: + if config.reference_key: + reference_key = config.reference_key + if example_outputs and reference_key not in example_outputs: + raise ValueError( + f"Reference key {reference_key} not in Dataset" + f" example outputs: {example_outputs}" + ) + elif example_outputs and len(example_outputs) == 1: + reference_key = list(example_outputs)[0] + else: + reference_key = None + return reference_key + + +def _construct_run_evaluator( + eval_config: Union[ + smith_eval_config.SINGLE_EVAL_CONFIG_TYPE, + smith_eval_config.CUSTOM_EVALUATOR_TYPE, + ], + eval_llm: Optional[BaseLanguageModel], + run_type: str, + data_type: DataType, + example_outputs: Optional[list[str]], + reference_key: Optional[str], + input_key: Optional[str], + prediction_key: Optional[str], +) -> RunEvaluator: + if isinstance(eval_config, RunEvaluator): + return eval_config + if isinstance(eval_config, (EvaluatorType, str)): + if not isinstance(eval_config, EvaluatorType): + eval_config = EvaluatorType(eval_config) + evaluator_ = load_evaluator(eval_config, llm=eval_llm) + eval_type_tag = eval_config.value + elif isinstance(eval_config, smith_eval_config.EvalConfig): + kwargs = {"llm": eval_llm, **eval_config.get_kwargs()} + evaluator_ = load_evaluator(eval_config.evaluator_type, **kwargs) + eval_type_tag = eval_config.evaluator_type.value + # Override keys if specified in the config + if isinstance(eval_config, smith_eval_config.SingleKeyEvalConfig): + input_key = eval_config.input_key or input_key + prediction_key = eval_config.prediction_key or prediction_key + reference_key = eval_config.reference_key or reference_key + elif callable(eval_config): + # Assume we can decorate + return run_evaluator_dec(eval_config) + else: + raise ValueError(f"Unknown evaluator type: {type(eval_config)}") + + if isinstance(evaluator_, StringEvaluator): + if evaluator_.requires_reference and reference_key is None: + raise ValueError( + f"Must specify reference_key in smith_eval.RunEvalConfig to use" + f" evaluator of type {eval_type_tag} with" + f" dataset with multiple output keys: {example_outputs}." + ) + run_evaluator = smith_eval.StringRunEvaluatorChain.from_run_and_data_type( + evaluator_, + run_type, + data_type, + input_key=input_key, + prediction_key=prediction_key, + reference_key=reference_key, + tags=[eval_type_tag], + ) + elif isinstance(evaluator_, PairwiseStringEvaluator): + raise NotImplementedError( + f"Run evaluator for {eval_type_tag} is not implemented." + " PairwiseStringEvaluators compare the outputs of two different models" + " rather than the output of a single model." + " Did you mean to use a StringEvaluator instead?" 
+ "\nSee: https://python.langchain.com/docs/guides/evaluation/string/" + ) + + else: + raise NotImplementedError( + f"Run evaluator for {eval_type_tag} is not implemented" + ) + return run_evaluator + + +def _get_keys( + config: smith_eval.RunEvalConfig, + run_inputs: Optional[list[str]], + run_outputs: Optional[list[str]], + example_outputs: Optional[list[str]], +) -> tuple[Optional[str], Optional[str], Optional[str]]: + input_key = _determine_input_key(config, run_inputs) + prediction_key = _determine_prediction_key(config, run_outputs) + reference_key = _determine_reference_key(config, example_outputs) + return input_key, prediction_key, reference_key + + +def _load_run_evaluators( + config: smith_eval.RunEvalConfig, + run_type: str, + data_type: DataType, + example_outputs: Optional[list[str]], + run_inputs: Optional[list[str]], + run_outputs: Optional[list[str]], +) -> list[RunEvaluator]: + """ + Load run evaluators from a configuration. + + Args: + config: Configuration for the run evaluators. + + Returns: + A list of run evaluators. + """ + run_evaluators = [] + input_key, prediction_key, reference_key = None, None, None + if config.evaluators or ( + config.custom_evaluators + and any([isinstance(e, StringEvaluator) for e in config.custom_evaluators]) + ): + input_key, prediction_key, reference_key = _get_keys( + config, run_inputs, run_outputs, example_outputs + ) + for eval_config in config.evaluators: + run_evaluator = _construct_run_evaluator( + eval_config, + config.eval_llm, + run_type, + data_type, + example_outputs, + reference_key, + input_key, + prediction_key, + ) + run_evaluators.append(run_evaluator) + custom_evaluators = config.custom_evaluators or [] + for custom_evaluator in custom_evaluators: + if isinstance(custom_evaluator, RunEvaluator): + run_evaluators.append(custom_evaluator) + elif isinstance(custom_evaluator, StringEvaluator): + run_evaluators.append( + smith_eval.StringRunEvaluatorChain.from_run_and_data_type( + custom_evaluator, + run_type, + data_type, + input_key=input_key, + prediction_key=prediction_key, + reference_key=reference_key, + ) + ) + elif callable(custom_evaluator): + run_evaluators.append(run_evaluator_dec(custom_evaluator)) + else: + raise ValueError( + f"Unsupported custom evaluator: {custom_evaluator}." + f" Expected RunEvaluator or StringEvaluator." + ) + + return run_evaluators + + +### Async Helpers + + +async def _arun_llm( + llm: BaseLanguageModel, + inputs: dict[str, Any], + *, + tags: Optional[list[str]] = None, + callbacks: Callbacks = None, + input_mapper: Optional[Callable[[dict], Any]] = None, + metadata: Optional[dict[str, Any]] = None, +) -> Union[str, BaseMessage]: + """Asynchronously run the language model. + + Args: + llm: The language model to run. + inputs: The input dictionary. + tags: Optional tags to add to the run. + callbacks: Optional callbacks to use during the run. + input_mapper: Optional function to map inputs to the expected format. + + Returns: + The LLMResult or ChatResult. + Raises: + ValueError: If the LLM type is unsupported. + InputFormatError: If the input format is invalid. 
+ """ + if input_mapper is not None: + prompt_or_messages = input_mapper(inputs) + if ( + isinstance(prompt_or_messages, str) + or isinstance(prompt_or_messages, list) + and all(isinstance(msg, BaseMessage) for msg in prompt_or_messages) + ): + return await llm.ainvoke( + prompt_or_messages, + config=RunnableConfig( + callbacks=callbacks, tags=tags or [], metadata=metadata or {} + ), + ) + else: + raise InputFormatError( + "Input mapper returned invalid format" + f" {prompt_or_messages}" + "\nExpected a single string or list of chat messages." + ) + + else: + try: + prompt = _get_prompt(inputs) + llm_output: Union[str, BaseMessage] = await llm.ainvoke( + prompt, + config=RunnableConfig( + callbacks=callbacks, tags=tags or [], metadata=metadata or {} + ), + ) + except InputFormatError: + llm_inputs = _get_messages(inputs) + llm_output = await llm.ainvoke( + **llm_inputs, + config=RunnableConfig( + callbacks=callbacks, tags=tags or [], metadata=metadata or {} + ), + ) + return llm_output + + +async def _arun_chain( + chain: Union[Chain, Runnable], + inputs: dict[str, Any], + callbacks: Callbacks, + *, + tags: Optional[list[str]] = None, + input_mapper: Optional[Callable[[dict], Any]] = None, + metadata: Optional[dict[str, Any]] = None, +) -> Union[dict, str]: + """Run a chain asynchronously on inputs.""" + inputs_ = inputs if input_mapper is None else input_mapper(inputs) + if ( + isinstance(chain, Chain) + and isinstance(inputs_, dict) + and len(inputs_) == 1 + and chain.input_keys + ): + val = next(iter(inputs_.values())) + output = await chain.ainvoke( + val, + config=RunnableConfig( + callbacks=callbacks, tags=tags or [], metadata=metadata or {} + ), + ) + else: + runnable_config = RunnableConfig( + tags=tags or [], callbacks=callbacks, metadata=metadata or {} + ) + output = await chain.ainvoke(inputs_, config=runnable_config) + return output + + +async def _arun_llm_or_chain( + example: Example, + config: RunnableConfig, + *, + llm_or_chain_factory: MCF, + input_mapper: Optional[Callable[[dict], Any]] = None, +) -> Union[dict, str, LLMResult, ChatResult]: + """Asynchronously run the Chain or language model. + + Args: + example: The example to run. + llm_or_chain_factory: The Chain or language model constructor to run. + tags: Optional tags to add to the run. + callbacks: Optional callbacks to use during the run. + input_mapper: Optional function to map the input to the expected format. + + Returns: + A list of outputs. 
+ """ + chain_or_llm = ( + "LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain" + ) + result = None + try: + if isinstance(llm_or_chain_factory, BaseLanguageModel): + output: Any = await _arun_llm( + llm_or_chain_factory, + example.inputs or {}, + tags=config["tags"], + callbacks=config["callbacks"], + input_mapper=input_mapper, + metadata=config.get("metadata"), + ) + else: + chain = llm_or_chain_factory() + output = await _arun_chain( + chain, + example.inputs or {}, + tags=config["tags"], + callbacks=config["callbacks"], + input_mapper=input_mapper, + metadata=config.get("metadata"), + ) + result = output + except Exception as e: + logger.warning( + f"{chain_or_llm} failed for example {example.id} " + f"with inputs {example.inputs}" + f"\n{repr(e)}" + ) + result = EvalError(Error=e) + return result + + +## Sync Utilities + + +def _run_llm( + llm: BaseLanguageModel, + inputs: dict[str, Any], + callbacks: Callbacks, + *, + tags: Optional[list[str]] = None, + input_mapper: Optional[Callable[[dict], Any]] = None, + metadata: Optional[dict[str, Any]] = None, +) -> Union[str, BaseMessage]: + """ + Run the language model on the example. + + Args: + llm: The language model to run. + inputs: The input dictionary. + callbacks: The callbacks to use during the run. + tags: Optional tags to add to the run. + input_mapper: function to map to the inputs dictionary from an Example + Returns: + The LLMResult or ChatResult. + Raises: + ValueError: If the LLM type is unsupported. + InputFormatError: If the input format is invalid. + """ + # Most of this is legacy code; we could probably remove a lot of it. + if input_mapper is not None: + prompt_or_messages = input_mapper(inputs) + if ( + isinstance(prompt_or_messages, str) + or isinstance(prompt_or_messages, list) + and all(isinstance(msg, BaseMessage) for msg in prompt_or_messages) + ): + llm_output: Union[str, BaseMessage] = llm.invoke( + prompt_or_messages, + config=RunnableConfig( + callbacks=callbacks, tags=tags or [], metadata=metadata or {} + ), + ) + else: + raise InputFormatError( + "Input mapper returned invalid format: " + f" {prompt_or_messages}" + "\nExpected a single string or list of chat messages." 
+ ) + else: + try: + llm_prompts = _get_prompt(inputs) + llm_output = llm.invoke( + llm_prompts, + config=RunnableConfig( + callbacks=callbacks, tags=tags or [], metadata=metadata or {} + ), + ) + except InputFormatError: + llm_inputs = _get_messages(inputs) + llm_output = llm.invoke( + **llm_inputs, + config=RunnableConfig(callbacks=callbacks, metadata=metadata or {}), + ) + return llm_output + + +def _run_chain( + chain: Union[Chain, Runnable], + inputs: dict[str, Any], + callbacks: Callbacks, + *, + tags: Optional[list[str]] = None, + input_mapper: Optional[Callable[[dict], Any]] = None, + metadata: Optional[dict[str, Any]] = None, +) -> Union[dict, str]: + """Run a chain on inputs.""" + inputs_ = inputs if input_mapper is None else input_mapper(inputs) + if ( + isinstance(chain, Chain) + and isinstance(inputs_, dict) + and len(inputs_) == 1 + and chain.input_keys + ): + val = next(iter(inputs_.values())) + output = chain.invoke( + val, + config=RunnableConfig( + callbacks=callbacks, tags=tags or [], metadata=metadata or {} + ), + ) + else: + runnable_config = RunnableConfig( + tags=tags or [], callbacks=callbacks, metadata=metadata or {} + ) + output = chain.invoke(inputs_, config=runnable_config) + return output + + +def _run_llm_or_chain( + example: Example, + config: RunnableConfig, + *, + llm_or_chain_factory: MCF, + input_mapper: Optional[Callable[[dict], Any]] = None, +) -> Union[dict, str, LLMResult, ChatResult]: + """ + Run the Chain or language model synchronously. + + Args: + example: The example to run. + llm_or_chain_factory: The Chain or language model constructor to run. + tags: Optional tags to add to the run. + callbacks: Optional callbacks to use during the run. + + Returns: + Union[List[dict], List[str], List[LLMResult], List[ChatResult]]: + The outputs of the model or chain. 
+ """ + chain_or_llm = ( + "LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain" + ) + result = None + try: + if isinstance(llm_or_chain_factory, BaseLanguageModel): + output: Any = _run_llm( + llm_or_chain_factory, + example.inputs or {}, + config["callbacks"], + tags=config["tags"], + input_mapper=input_mapper, + metadata=config.get("metadata"), + ) + else: + chain = llm_or_chain_factory() + output = _run_chain( + chain, + example.inputs or {}, + config["callbacks"], + tags=config["tags"], + input_mapper=input_mapper, + metadata=config.get("metadata"), + ) + result = output + except Exception as e: + error_type = type(e).__name__ + logger.warning( + f"{chain_or_llm} failed for example {example.id} " + f"with inputs {example.inputs}" + f"\nError Type: {error_type}, Message: {e}" + ) + result = EvalError(Error=e) + return result + + +def _prepare_eval_run( + client: Client, + dataset_name: str, + llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, + project_name: str, + project_metadata: Optional[dict[str, Any]] = None, + tags: Optional[list[str]] = None, + dataset_version: Optional[Union[str, datetime]] = None, +) -> tuple[MCF, TracerSession, Dataset, list[Example]]: + wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory, dataset_name) + dataset = client.read_dataset(dataset_name=dataset_name) + + examples = list(client.list_examples(dataset_id=dataset.id, as_of=dataset_version)) + if not examples: + raise ValueError(f"Dataset {dataset_name} has no example rows.") + modified_at = [ex.modified_at for ex in examples if ex.modified_at] + # Should always be defined in practice when fetched, + # but the typing permits None + max_modified_at = max(modified_at) if modified_at else None + inferred_version = max_modified_at.isoformat() if max_modified_at else None + + try: + project_metadata = project_metadata or {} + git_info = get_git_info() + if git_info: + project_metadata = { + **project_metadata, + "git": git_info, + } + + project_metadata["dataset_version"] = inferred_version + project = client.create_project( + project_name, + reference_dataset_id=dataset.id, + project_extra={"tags": tags} if tags else {}, + metadata=project_metadata, + ) + except (HTTPError, ValueError, LangSmithError) as e: + if "already exists " not in str(e): + raise e + uid = uuid.uuid4() + example_msg = f""" +run_on_dataset( + ... + project_name="{project_name} - {uid}", # Update since {project_name} already exists +) +""" + raise ValueError( + f"Test project {project_name} already exists. 
Please use a different name:" + f"\n\n{example_msg}" + ) + comparison_url = dataset.url + f"/compare?selectedSessions={project.id}" + print( # noqa: T201 + f"View the evaluation results for project '{project_name}'" + f" at:\n{comparison_url}\n\n" + f"View all tests for Dataset {dataset_name} at:\n{dataset.url}", + flush=True, + ) + return wrapped_model, project, dataset, examples + + +class _RowResult(TypedDict, total=False): + """A dictionary of the results for a single example row.""" + + feedback: Optional[list[EvaluationResult]] + execution_time: Optional[float] + run_id: Optional[str] + + +@dataclasses.dataclass +class _DatasetRunContainer: + """A container to help manage the state of a eval run.""" + + client: Client + project: TracerSession + wrapped_model: MCF + examples: list[Example] + configs: list[RunnableConfig] + batch_evaluators: Optional[list[smith_eval_config.BATCH_EVALUATOR_LIKE]] = None + + def _merge_test_outputs( + self, + batch_results: list, + all_eval_results: dict[str, _RowResult], + ) -> dict: + results: dict = {} + for example, output in zip(self.examples, batch_results): + row_result = cast(_RowResult, all_eval_results.get(str(example.id), {})) + results[str(example.id)] = { + "input": example.inputs, + "feedback": row_result.get("feedback", []), + "execution_time": row_result.get("execution_time"), + "run_id": row_result.get("run_id"), + } + if isinstance(output, EvalError): + results[str(example.id)]["Error"] = output.Error + else: + results[str(example.id)]["output"] = output + if example.outputs: + results[str(example.id)]["reference"] = example.outputs + return results + + def _run_batch_evaluators(self, runs: dict[str, Run]) -> list[dict]: + evaluators = self.batch_evaluators + if not evaluators: + return [] + runs_list = [runs[str(example.id)] for example in self.examples] + aggregate_feedback = [] + with concurrent.futures.ThreadPoolExecutor() as executor: + for evaluator in evaluators: + try: + result = evaluator(runs_list, self.examples) + if isinstance(result, EvaluationResult): + result = result.dict() + aggregate_feedback.append(cast(dict, result)) + executor.submit( + self.client.create_feedback, + **result, + run_id=None, + project_id=self.project.id, + ) + except Exception as e: + logger.error( + f"Error running batch evaluator {repr(evaluator)}: {e}" + ) + return aggregate_feedback + + def _collect_metrics(self) -> tuple[dict[str, _RowResult], dict[str, Run]]: + all_eval_results: dict = {} + all_runs: dict = {} + for c in self.configs: + for callback in cast(list, c["callbacks"]): + if isinstance(callback, EvaluatorCallbackHandler): + eval_results = callback.logged_eval_results + for (_, example_id), v in eval_results.items(): + all_eval_results.setdefault(str(example_id), {}).update( + {"feedback": v} + ) + elif isinstance(callback, LangChainTracer): + run = callback.latest_run + execution_time = ( + (run.end_time - run.start_time).total_seconds() + if run and run.end_time + else None + ) + run_id = str(run.id) if run else None + all_eval_results.setdefault(str(callback.example_id), {}).update( + { + "execution_time": execution_time, + "run_id": run_id, + "run": run, + } + ) + all_runs[str(callback.example_id)] = run + return cast(dict[str, _RowResult], all_eval_results), all_runs + + def _collect_test_results( + self, + batch_results: list[Union[dict, str, LLMResult, ChatResult]], + ) -> TestResult: + logger.info("Waiting for evaluators to complete.") + wait_for_all_evaluators() + all_eval_results, all_runs = self._collect_metrics() + 
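# A batch evaluator here is any BATCH_EVALUATOR_LIKE callable (the type
+        # accepted by RunEvalConfig.batch_evaluators in config.py): it receives
+        # the full list of runs plus examples and returns one aggregate metric
+        # for the whole test project, which _run_batch_evaluators forwards to
+        # client.create_feedback. A minimal sketch under that signature
+        # (editor's illustration; `mean_output_length` is a hypothetical name,
+        # not part of the upstream file):
+        #
+        #     def mean_output_length(runs, examples):
+        #         lengths = [len(str(r.outputs or "")) for r in runs]
+        #         return {"key": "mean_output_length",
+        #                 "score": sum(lengths) / max(len(lengths), 1)}
+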
aggregate_feedback = None + if self.batch_evaluators: + logger.info("Running session evaluators.") + aggregate_feedback = self._run_batch_evaluators(all_runs) + results = self._merge_test_outputs(batch_results, all_eval_results) + return TestResult( + project_name=self.project.name, + results=results, + aggregate_metrics=aggregate_feedback, + ) + + def finish(self, batch_results: list, verbose: bool = False) -> TestResult: + results = self._collect_test_results(batch_results) + if verbose: + try: + agg_feedback = results.get_aggregate_feedback() + _display_aggregate_results(agg_feedback) + except Exception as e: + logger.debug(f"Failed to print aggregate feedback: {repr(e)}") + try: + # Closing the project permits name changing and metric optimizations + self.client.update_project( + self.project.id, end_time=datetime.now(timezone.utc) + ) + except Exception as e: + logger.debug(f"Failed to close project: {repr(e)}") + return results + + @classmethod + def prepare( + cls, + client: Client, + dataset_name: str, + llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, + project_name: Optional[str], + evaluation: Optional[smith_eval.RunEvalConfig] = None, + tags: Optional[list[str]] = None, + input_mapper: Optional[Callable[[dict], Any]] = None, + concurrency_level: int = 5, + project_metadata: Optional[dict[str, Any]] = None, + revision_id: Optional[str] = None, + dataset_version: Optional[Union[datetime, str]] = None, + ) -> _DatasetRunContainer: + project_name = project_name or name_generation.random_name() + if revision_id: + if not project_metadata: + project_metadata = {} + project_metadata.update({"revision_id": revision_id}) + wrapped_model, project, dataset, examples = _prepare_eval_run( + client, + dataset_name, + llm_or_chain_factory, + project_name, + project_metadata=project_metadata, + tags=tags, + dataset_version=dataset_version, + ) + tags = tags or [] + for k, v in (project.metadata.get("git") or {}).items(): + tags.append(f"git:{k}={v}") + run_metadata = {"dataset_version": project.metadata["dataset_version"]} + if revision_id: + run_metadata["revision_id"] = revision_id + wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory) + run_evaluators = _setup_evaluation( + wrapped_model, examples, evaluation, dataset.data_type or DataType.kv + ) + _validate_example_inputs(examples[0], wrapped_model, input_mapper) + progress_bar = progress.ProgressBarCallback(len(examples)) + configs = [ + RunnableConfig( + callbacks=[ + LangChainTracer( + project_name=project.name, + client=client, + example_id=example.id, + ), + EvaluatorCallbackHandler( + evaluators=run_evaluators or [], + client=client, + example_id=example.id, + max_concurrency=0, + ), + progress_bar, + ], + tags=tags, + max_concurrency=concurrency_level, + metadata=run_metadata, + ) + for example in examples + ] + return cls( + client=client, + project=project, + wrapped_model=wrapped_model, + examples=examples, + configs=configs, + batch_evaluators=evaluation.batch_evaluators if evaluation else None, + ) + + +def _is_jupyter_environment() -> bool: + try: + from IPython import get_ipython + + res = get_ipython() + return get_ipython() is not None and "zmqshell" in str(type(res)) + except ImportError: + return False + + +def _display_aggregate_results(aggregate_results: pd.DataFrame) -> None: + if _is_jupyter_environment(): + from IPython.display import HTML, display + + display(HTML("
<h3>Experiment Results:</h3>
")) + display(aggregate_results) + else: + formatted_string = aggregate_results.to_string( + float_format=lambda x: f"{x:.2f}", justify="right" + ) + print("\n Experiment Results:") # noqa: T201 + print(formatted_string) # noqa: T201 + + +_INPUT_MAPPER_DEP_WARNING = ( + "The input_mapper argument is deprecated and " + "will be removed in a future release. Please add a " + " RunnableLambda to your chain to map inputs to the expected format" + " instead. Example:\n" + "def construct_chain():\n" + " my_chain = ...\n" + " input_mapper = {'other_key': 'MyOtherInput', 'my_input_key': x}\n" + " return input_mapper | my_chain\n" + "run_on_dataset(..., llm_or_chain_factory=construct_chain)\n" + "(See https://api.python.langchain.com/en/latest/schema/" + "langchain.schema.runnable.base.RunnableLambda.html)" +) + +## Public API + + +async def arun_on_dataset( + client: Optional[Client], + dataset_name: str, + llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, + *, + evaluation: Optional[smith_eval.RunEvalConfig] = None, + dataset_version: Optional[Union[datetime, str]] = None, + concurrency_level: int = 5, + project_name: Optional[str] = None, + project_metadata: Optional[dict[str, Any]] = None, + verbose: bool = False, + revision_id: Optional[str] = None, + **kwargs: Any, +) -> dict[str, Any]: + input_mapper = kwargs.pop("input_mapper", None) + if input_mapper: + warn_deprecated("0.0.305", message=_INPUT_MAPPER_DEP_WARNING, pending=True) + if revision_id is None: + revision_id = get_langchain_env_var_metadata().get("revision_id") + tags = kwargs.pop("tags", None) + if tags: + warn_deprecated( + "0.1.9", + message="The tags argument is deprecated and will be" + " removed in a future release. Please specify project_metadata instead.", + pending=True, + ) + + if kwargs: + warn_deprecated( + "0.0.305", + message="The following arguments are deprecated and " + "will be removed in a future release: " + f"{kwargs.keys()}.", + removal="0.0.305", + ) + client = client or Client() + container = _DatasetRunContainer.prepare( + client, + dataset_name, + llm_or_chain_factory, + project_name, + evaluation, + tags, + input_mapper, + concurrency_level, + project_metadata=project_metadata, + revision_id=revision_id, + dataset_version=dataset_version, + ) + batch_results = await runnable_utils.gather_with_concurrency( + container.configs[0].get("max_concurrency"), + *map( + functools.partial( + _arun_llm_or_chain, + llm_or_chain_factory=container.wrapped_model, + input_mapper=input_mapper, + ), + container.examples, + container.configs, + ), + ) + return container.finish(batch_results, verbose=verbose) + + +def run_on_dataset( + client: Optional[Client], + dataset_name: str, + llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, + *, + evaluation: Optional[smith_eval.RunEvalConfig] = None, + dataset_version: Optional[Union[datetime, str]] = None, + concurrency_level: int = 5, + project_name: Optional[str] = None, + project_metadata: Optional[dict[str, Any]] = None, + verbose: bool = False, + revision_id: Optional[str] = None, + **kwargs: Any, +) -> dict[str, Any]: + input_mapper = kwargs.pop("input_mapper", None) + if input_mapper: + warn_deprecated("0.0.305", message=_INPUT_MAPPER_DEP_WARNING, pending=True) + tags = kwargs.pop("tags", None) + if tags: + warn_deprecated( + "0.1.9", + message="The tags argument is deprecated and will be" + " removed in a future release. 
Please specify project_metadata instead.", + pending=True, + ) + if revision_id is None: + revision_id = get_langchain_env_var_metadata().get("revision_id") + + if kwargs: + warn_deprecated( + "0.0.305", + message="The following arguments are deprecated and " + "will be removed in a future release: " + f"{kwargs.keys()}.", + removal="0.0.305", + ) + client = client or Client() + container = _DatasetRunContainer.prepare( + client, + dataset_name, + llm_or_chain_factory, + project_name, + evaluation, + tags, + input_mapper, + concurrency_level, + project_metadata=project_metadata, + revision_id=revision_id, + dataset_version=dataset_version, + ) + if concurrency_level == 0: + batch_results = [ + _run_llm_or_chain( + example, + config, + llm_or_chain_factory=container.wrapped_model, + input_mapper=input_mapper, + ) + for example, config in zip(container.examples, container.configs) + ] + else: + with runnable_config.get_executor_for_config(container.configs[0]) as executor: + batch_results = list( + executor.map( + functools.partial( + _run_llm_or_chain, + llm_or_chain_factory=container.wrapped_model, + input_mapper=input_mapper, + ), + container.examples, + container.configs, + ) + ) + + return container.finish(batch_results, verbose=verbose) + + +_RUN_ON_DATASET_DOCSTRING = """ +Run the Chain or language model on a dataset and store traces +to the specified project name. + +Args: + dataset_name: Name of the dataset to run the chain on. + llm_or_chain_factory: Language model or Chain constructor to run + over the dataset. The Chain constructor is used to permit + independent calls on each example without carrying over state. + evaluation: Configuration for evaluators to run on the + results of the chain + concurrency_level: The number of async tasks to run concurrently. + project_name: Name of the project to store the traces in. + Defaults to {dataset_name}-{chain class name}-{datetime}. + project_metadata: Optional metadata to add to the project. + Useful for storing information the test variant. + (prompt version, model version, etc.) + client: LangSmith client to use to access the dataset and to + log feedback and run traces. + verbose: Whether to print progress. + tags: Tags to add to each run in the project. + revision_id: Optional revision identifier to assign this test run to + track the performance of different versions of your system. +Returns: + A dictionary containing the run's project name and the resulting model outputs. + + +For the (usually faster) async version of this function, see :func:`arun_on_dataset`. + +Examples +-------- + +.. code-block:: python + + from langsmith import Client + from langchain_openai import ChatOpenAI + from langchain.chains import LLMChain + from langchain.smith import smith_eval.RunEvalConfig, run_on_dataset + + # Chains may have memory. Passing in a constructor function lets the + # evaluation framework avoid cross-contamination between runs. + def construct_chain(): + llm = ChatOpenAI(temperature=0) + chain = LLMChain.from_string( + llm, + "What's the answer to {your_input_key}" + ) + return chain + + # Load off-the-shelf evaluators via config or the EvaluatorType (string or enum) + evaluation_config = smith_eval.RunEvalConfig( + evaluators=[ + "qa", # "Correctness" against a reference answer + "embedding_distance", + smith_eval.RunEvalConfig.Criteria("helpfulness"), + smith_eval.RunEvalConfig.Criteria({ + "fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?" 
+ }), + ] + ) + + client = Client() + run_on_dataset( + client, + dataset_name="", + llm_or_chain_factory=construct_chain, + evaluation=evaluation_config, + ) + +You can also create custom evaluators by subclassing the +:class:`StringEvaluator ` +or LangSmith's `RunEvaluator` classes. + +.. code-block:: python + + from typing import Optional + from langchain.evaluation import StringEvaluator + + class MyStringEvaluator(StringEvaluator): + + @property + def requires_input(self) -> bool: + return False + + @property + def requires_reference(self) -> bool: + return True + + @property + def evaluation_name(self) -> str: + return "exact_match" + + def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict: + return {"score": prediction == reference} + + + evaluation_config = smith_eval.RunEvalConfig( + custom_evaluators = [MyStringEvaluator()], + ) + + run_on_dataset( + client, + dataset_name="", + llm_or_chain_factory=construct_chain, + evaluation=evaluation_config, + ) +""" # noqa: E501 +run_on_dataset.__doc__ = _RUN_ON_DATASET_DOCSTRING +arun_on_dataset.__doc__ = _RUN_ON_DATASET_DOCSTRING.replace( + "run_on_dataset(", "await arun_on_dataset(" +) diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/string_run_evaluator.py b/venv/Lib/site-packages/langchain/smith/evaluation/string_run_evaluator.py new file mode 100644 index 00000000..c0e56ee9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/smith/evaluation/string_run_evaluator.py @@ -0,0 +1,433 @@ +"""Run evaluator wrapper for string evaluators.""" + +from __future__ import annotations + +import uuid +from abc import abstractmethod +from typing import Any, Optional + +from langchain_core.callbacks.manager import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, +) +from langchain_core.load.dump import dumpd +from langchain_core.load.load import load +from langchain_core.load.serializable import Serializable +from langchain_core.messages import BaseMessage, get_buffer_string, messages_from_dict +from langsmith import EvaluationResult, RunEvaluator +from langsmith.schemas import DataType, Example, Run + +from langchain.chains.base import Chain +from langchain.evaluation.schema import StringEvaluator +from langchain.schema import RUN_KEY + + +def _get_messages_from_run_dict(messages: list[dict]) -> list[BaseMessage]: + if not messages: + return [] + first_message = messages[0] + if "lc" in first_message: + return [load(dumpd(message)) for message in messages] + else: + return messages_from_dict(messages) + + +class StringRunMapper(Serializable): + """Extract items to evaluate from the run object.""" + + @property + def output_keys(self) -> list[str]: + """The keys to extract from the run.""" + return ["prediction", "input"] + + @abstractmethod + def map(self, run: Run) -> dict[str, str]: + """Maps the Run to a dictionary.""" + + def __call__(self, run: Run) -> dict[str, str]: + """Maps the Run to a dictionary.""" + if not run.outputs: + raise ValueError(f"Run {run.id} has no outputs to evaluate.") + return self.map(run) + + +class LLMStringRunMapper(StringRunMapper): + """Extract items to evaluate from the run object.""" + + def serialize_chat_messages(self, messages: list[dict]) -> str: + """Extract the input messages from the run.""" + if isinstance(messages, list) and messages: + if isinstance(messages[0], dict): + chat_messages = _get_messages_from_run_dict(messages) + elif isinstance(messages[0], list): + # Runs from Tracer have messages as a list of lists of dicts + chat_messages = 
_get_messages_from_run_dict(messages[0]) + else: + raise ValueError(f"Could not extract messages to evaluate {messages}") + return get_buffer_string(chat_messages) + raise ValueError(f"Could not extract messages to evaluate {messages}") + + def serialize_inputs(self, inputs: dict) -> str: + if "prompts" in inputs: # Should we even accept this? + input_ = "\n\n".join(inputs["prompts"]) + elif "prompt" in inputs: + input_ = inputs["prompt"] + elif "messages" in inputs: + input_ = self.serialize_chat_messages(inputs["messages"]) + else: + raise ValueError("LLM Run must have either messages or prompts as inputs.") + return input_ + + def serialize_outputs(self, outputs: dict) -> str: + if not outputs.get("generations"): + raise ValueError("Cannot evaluate LLM Run without generations.") + generations: list[dict] = outputs["generations"] + if not generations: + raise ValueError("Cannot evaluate LLM run with empty generations.") + first_generation: dict = generations[0] + if isinstance(first_generation, list): + # Runs from Tracer have generations as a list of lists of dicts + # Whereas Runs from the API have a list of dicts + first_generation = first_generation[0] + if "message" in first_generation: + output_ = self.serialize_chat_messages([first_generation["message"]]) + else: + output_ = first_generation["text"] + return output_ + + def map(self, run: Run) -> dict[str, str]: + """Maps the Run to a dictionary.""" + if run.run_type != "llm": + raise ValueError("LLM RunMapper only supports LLM runs.") + elif not run.outputs: + if run.error: + raise ValueError( + f"Cannot evaluate errored LLM run {run.id}: {run.error}" + ) + else: + raise ValueError( + f"Run {run.id} has no outputs. Cannot evaluate this run." + ) + else: + try: + inputs = self.serialize_inputs(run.inputs) + except Exception as e: + raise ValueError( + f"Could not parse LM input from run inputs {run.inputs}" + ) from e + try: + output_ = self.serialize_outputs(run.outputs) + except Exception as e: + raise ValueError( + f"Could not parse LM prediction from run outputs {run.outputs}" + ) from e + return {"input": inputs, "prediction": output_} + + +class ChainStringRunMapper(StringRunMapper): + """Extract items to evaluate from the run object from a chain.""" + + input_key: Optional[str] = None + """The key from the model Run's inputs to use as the eval input. + If not provided, will use the only input key or raise an + error if there are multiple.""" + prediction_key: Optional[str] = None + """The key from the model Run's outputs to use as the eval prediction. + If not provided, will use the only output key or raise an error + if there are multiple.""" + + def _get_key(self, source: dict, key: Optional[str], which: str) -> str: + if key is not None: + return source[key] + elif len(source) == 1: + return next(iter(source.values())) + else: + raise ValueError( + f"Could not map run {which} with multiple keys: " + f"{source}\nPlease manually specify a {which}_key" + ) + + def map(self, run: Run) -> dict[str, str]: + """Maps the Run to a dictionary.""" + if not run.outputs: + raise ValueError( + f"Run with ID {run.id} lacks outputs required for evaluation." + " Ensure the Run has valid outputs." 
+ ) + if self.input_key is not None and self.input_key not in run.inputs: + raise ValueError( + f"Run with ID {run.id} is missing the expected input key" + f" '{self.input_key}'.\nAvailable input keys in this Run" + f" are: {run.inputs.keys()}.\nAdjust the evaluator's" + f" input_key or ensure your input data includes key" + f" '{self.input_key}'." + ) + elif self.prediction_key is not None and self.prediction_key not in run.outputs: + available_keys = ", ".join(run.outputs.keys()) + raise ValueError( + f"Run with ID {run.id} doesn't have the expected prediction key" + f" '{self.prediction_key}'. Available prediction keys in this Run are:" + f" {available_keys}. Adjust the evaluator's prediction_key or" + " ensure the Run object's outputs the expected key." + ) + + else: + input_ = self._get_key(run.inputs, self.input_key, "input") + prediction = self._get_key(run.outputs, self.prediction_key, "prediction") + return { + "input": input_, + "prediction": prediction, + } + + +class ToolStringRunMapper(StringRunMapper): + """Map an input to the tool.""" + + def map(self, run: Run) -> dict[str, str]: + if not run.outputs: + raise ValueError(f"Run {run.id} has no outputs to evaluate.") + return {"input": run.inputs["input"], "prediction": run.outputs["output"]} + + +class StringExampleMapper(Serializable): + """Map an example, or row in the dataset, to the inputs of an evaluation.""" + + reference_key: Optional[str] = None + + @property + def output_keys(self) -> list[str]: + """The keys to extract from the run.""" + return ["reference"] + + def serialize_chat_messages(self, messages: list[dict]) -> str: + """Extract the input messages from the run.""" + chat_messages = _get_messages_from_run_dict(messages) + return get_buffer_string(chat_messages) + + def map(self, example: Example) -> dict[str, str]: + """Maps the Example, or dataset row to a dictionary.""" + if not example.outputs: + raise ValueError( + f"Example {example.id} has no outputs to use as a reference." + ) + if self.reference_key is None: + if len(example.outputs) > 1: + raise ValueError( + f"Example {example.id} has multiple outputs, so you must" + " specify a reference_key." + ) + else: + output = list(example.outputs.values())[0] + elif self.reference_key not in example.outputs: + raise ValueError( + f"Example {example.id} does not have reference key" + f" {self.reference_key}." + ) + else: + output = example.outputs[self.reference_key] + return { + "reference": self.serialize_chat_messages([output]) + if isinstance(output, dict) and output.get("type") and output.get("data") + else output + } + + def __call__(self, example: Example) -> dict[str, str]: + """Maps the Run and Example to a dictionary.""" + if not example.outputs: + raise ValueError( + f"Example {example.id} has no outputs to use as areference label." 
+ ) + return self.map(example) + + +class StringRunEvaluatorChain(Chain, RunEvaluator): + """Evaluate Run and optional examples.""" + + run_mapper: StringRunMapper + """Maps the Run to a dictionary with 'input' and 'prediction' strings.""" + example_mapper: Optional[StringExampleMapper] = None + """Maps the Example (dataset row) to a dictionary + with a 'reference' string.""" + name: str + """The name of the evaluation metric.""" + string_evaluator: StringEvaluator + """The evaluation chain.""" + + @property + def input_keys(self) -> list[str]: + return ["run", "example"] + + @property + def output_keys(self) -> list[str]: + return ["feedback"] + + def _prepare_input(self, inputs: dict[str, Any]) -> dict[str, str]: + run: Run = inputs["run"] + example: Optional[Example] = inputs.get("example") + evaluate_strings_inputs = self.run_mapper(run) + if not self.string_evaluator.requires_input: + # Hide warning about unused input + evaluate_strings_inputs.pop("input", None) + if example and self.example_mapper and self.string_evaluator.requires_reference: + evaluate_strings_inputs.update(self.example_mapper(example)) + elif self.string_evaluator.requires_reference: + raise ValueError( + f"Evaluator {self.name} requires an reference" + " example from the dataset," + f" but none was provided for run {run.id}." + ) + return evaluate_strings_inputs + + def _prepare_output(self, output: dict[str, Any]) -> dict[str, Any]: + evaluation_result = EvaluationResult( + key=self.name, comment=output.get("reasoning"), **output + ) + if RUN_KEY in output: + # TODO: Not currently surfaced. Update + evaluation_result.evaluator_info[RUN_KEY] = output[RUN_KEY] + return {"feedback": evaluation_result} + + def _call( + self, + inputs: dict[str, str], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Call the evaluation chain.""" + evaluate_strings_inputs = self._prepare_input(inputs) + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + callbacks = _run_manager.get_child() + chain_output = self.string_evaluator.evaluate_strings( + **evaluate_strings_inputs, + callbacks=callbacks, + include_run_info=True, + ) + return self._prepare_output(chain_output) + + async def _acall( + self, + inputs: dict[str, str], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> dict[str, Any]: + """Call the evaluation chain.""" + evaluate_strings_inputs = self._prepare_input(inputs) + _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() + callbacks = _run_manager.get_child() + chain_output = await self.string_evaluator.aevaluate_strings( + **evaluate_strings_inputs, + callbacks=callbacks, + include_run_info=True, + ) + return self._prepare_output(chain_output) + + def _prepare_evaluator_output(self, output: dict[str, Any]) -> EvaluationResult: + feedback: EvaluationResult = output["feedback"] + if RUN_KEY not in feedback.evaluator_info: + feedback.evaluator_info[RUN_KEY] = output[RUN_KEY] + return feedback + + def evaluate_run( + self, + run: Run, + example: Optional[Example] = None, + evaluator_run_id: Optional[uuid.UUID] = None, + ) -> EvaluationResult: + """Evaluate an example.""" + try: + result = self({"run": run, "example": example}, include_run_info=True) + return self._prepare_evaluator_output(result) + except Exception as e: + return EvaluationResult( + key=self.string_evaluator.evaluation_name, + comment=f"Error evaluating run {run.id}: {e}", + # TODO: Add run ID once we can declare it via callbacks + ) + + async def 
aevaluate_run( + self, + run: Run, + example: Optional[Example] = None, + evaluator_run_id: Optional[uuid.UUID] = None, + ) -> EvaluationResult: + """Evaluate an example.""" + try: + result = await self.acall( + {"run": run, "example": example}, include_run_info=True + ) + return self._prepare_evaluator_output(result) + except Exception as e: + return EvaluationResult( + key=self.string_evaluator.evaluation_name, + comment=f"Error evaluating run {run.id}: {e}", + ) + + @classmethod + def from_run_and_data_type( + cls, + evaluator: StringEvaluator, + run_type: str, + data_type: DataType, + input_key: Optional[str] = None, + prediction_key: Optional[str] = None, + reference_key: Optional[str] = None, + tags: Optional[list[str]] = None, + ) -> StringRunEvaluatorChain: + """ + Create a StringRunEvaluatorChain from an evaluator and the run and dataset types. + + This method provides an easy way to instantiate a StringRunEvaluatorChain, by + taking an evaluator and information about the type of run and the data. + The method supports LLM and chain runs. + + Args: + evaluator (StringEvaluator): The string evaluator to use. + run_type (str): The type of run being evaluated. + Supported types are LLM and Chain. + data_type (DataType): The type of dataset used in the run. + input_key (str, optional): The key used to map the input from the run. + prediction_key (str, optional): The key used to map the prediction from the run. + reference_key (str, optional): The key used to map the reference from the dataset. + tags (List[str], optional): List of tags to attach to the evaluation chain. + + Returns: + StringRunEvaluatorChain: The instantiated evaluation chain. + + Raises: + ValueError: If the run type is not supported, or if the evaluator requires a + reference from the dataset but the reference key is not provided. + + """ # noqa: E501 + + # Configure how run inputs/predictions are passed to the evaluator + if run_type == "llm": + run_mapper: StringRunMapper = LLMStringRunMapper() + elif run_type == "chain": + run_mapper = ChainStringRunMapper( + input_key=input_key, prediction_key=prediction_key + ) + else: + raise ValueError( + f"Unsupported run type {run_type}. Expected one of 'llm' or 'chain'." + ) + + # Configure how example rows are fed as a reference string to the evaluator + if ( + reference_key is not None + or data_type in (DataType.llm, DataType.chat) + or evaluator.requires_reference + ): + example_mapper = StringExampleMapper(reference_key=reference_key) + elif evaluator.requires_reference: + raise ValueError( + f"Evaluator {evaluator.evaluation_name} requires a reference" + " example from the dataset. Please specify the reference key from" + " amongst the dataset outputs keys." 
+ ) + else: + example_mapper = None + return cls( + name=evaluator.evaluation_name, + run_mapper=run_mapper, + example_mapper=example_mapper, + string_evaluator=evaluator, + tags=tags, + ) diff --git a/venv/Lib/site-packages/langchain/smith/evaluation/utils.py b/venv/Lib/site-packages/langchain/smith/evaluation/utils.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/sql_database.py b/venv/Lib/site-packages/langchain/sql_database.py new file mode 100644 index 00000000..70c927cd --- /dev/null +++ b/venv/Lib/site-packages/langchain/sql_database.py @@ -0,0 +1,25 @@ +"""Keep here for backwards compatibility.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import SQLDatabase + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SQLDatabase": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SQLDatabase", +] diff --git a/venv/Lib/site-packages/langchain/storage/__init__.py b/venv/Lib/site-packages/langchain/storage/__init__.py new file mode 100644 index 00000000..1ccdfab2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/storage/__init__.py @@ -0,0 +1,57 @@ +"""Implementations of key-value stores and storage helpers. + +Module provides implementations of various key-value stores that conform +to a simple key-value interface. + +The primary goal of these storages is to support implementation of caching. +""" + +from typing import TYPE_CHECKING, Any + +from langchain_core.stores import ( + InMemoryByteStore, + InMemoryStore, + InvalidKeyException, +) + +from langchain._api import create_importer +from langchain.storage._lc_store import create_kv_docstore, create_lc_store +from langchain.storage.encoder_backed import EncoderBackedStore +from langchain.storage.file_system import LocalFileStore + +if TYPE_CHECKING: + from langchain_community.storage import ( + RedisStore, + UpstashRedisByteStore, + UpstashRedisStore, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
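+# (Editor's illustrative note, not part of the original module:) the PEP 562
+# module-level `__getattr__` below makes these lookups lazy, e.g.
+#
+#     from langchain.storage import RedisStore
+#
+# emits a deprecation warning and resolves to
+# `langchain_community.storage.RedisStore`; langchain-community is only
+# imported when such a name is first accessed, not at module load time.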
+DEPRECATED_LOOKUP = { + "RedisStore": "langchain_community.storage", + "UpstashRedisByteStore": "langchain_community.storage", + "UpstashRedisStore": "langchain_community.storage", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "create_kv_docstore", + "create_lc_store", + "EncoderBackedStore", + "InMemoryByteStore", + "InMemoryStore", + "InvalidKeyException", + "LocalFileStore", + "RedisStore", + "UpstashRedisByteStore", + "UpstashRedisStore", +] diff --git a/venv/Lib/site-packages/langchain/storage/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/storage/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..da9e3d03 Binary files /dev/null and b/venv/Lib/site-packages/langchain/storage/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/storage/__pycache__/_lc_store.cpython-312.pyc b/venv/Lib/site-packages/langchain/storage/__pycache__/_lc_store.cpython-312.pyc new file mode 100644 index 00000000..c4b86267 Binary files /dev/null and b/venv/Lib/site-packages/langchain/storage/__pycache__/_lc_store.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/storage/__pycache__/encoder_backed.cpython-312.pyc b/venv/Lib/site-packages/langchain/storage/__pycache__/encoder_backed.cpython-312.pyc new file mode 100644 index 00000000..1056f882 Binary files /dev/null and b/venv/Lib/site-packages/langchain/storage/__pycache__/encoder_backed.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/storage/__pycache__/exceptions.cpython-312.pyc b/venv/Lib/site-packages/langchain/storage/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 00000000..e4fbd6cd Binary files /dev/null and b/venv/Lib/site-packages/langchain/storage/__pycache__/exceptions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/storage/__pycache__/file_system.cpython-312.pyc b/venv/Lib/site-packages/langchain/storage/__pycache__/file_system.cpython-312.pyc new file mode 100644 index 00000000..46d328d1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/storage/__pycache__/file_system.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/storage/__pycache__/in_memory.cpython-312.pyc b/venv/Lib/site-packages/langchain/storage/__pycache__/in_memory.cpython-312.pyc new file mode 100644 index 00000000..e245d3eb Binary files /dev/null and b/venv/Lib/site-packages/langchain/storage/__pycache__/in_memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/storage/__pycache__/redis.cpython-312.pyc b/venv/Lib/site-packages/langchain/storage/__pycache__/redis.cpython-312.pyc new file mode 100644 index 00000000..c3d32c9a Binary files /dev/null and b/venv/Lib/site-packages/langchain/storage/__pycache__/redis.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/storage/__pycache__/upstash_redis.cpython-312.pyc b/venv/Lib/site-packages/langchain/storage/__pycache__/upstash_redis.cpython-312.pyc new file mode 100644 index 00000000..b754aef6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/storage/__pycache__/upstash_redis.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/storage/_lc_store.py b/venv/Lib/site-packages/langchain/storage/_lc_store.py new file mode 100644 index 00000000..a2ebf691 --- /dev/null +++ 
b/venv/Lib/site-packages/langchain/storage/_lc_store.py @@ -0,0 +1,89 @@ +"""Create a key-value store for any langchain serializable object.""" + +from typing import Callable, Optional + +from langchain_core.documents import Document +from langchain_core.load import Serializable, dumps, loads +from langchain_core.stores import BaseStore, ByteStore + +from langchain.storage.encoder_backed import EncoderBackedStore + + +def _dump_as_bytes(obj: Serializable) -> bytes: + """Return a bytes representation of a document.""" + return dumps(obj).encode("utf-8") + + +def _dump_document_as_bytes(obj: Document) -> bytes: + """Return a bytes representation of a document.""" + if not isinstance(obj, Document): + raise TypeError("Expected a Document instance") + return dumps(obj).encode("utf-8") + + +def _load_document_from_bytes(serialized: bytes) -> Document: + """Return a document from a bytes representation.""" + obj = loads(serialized.decode("utf-8")) + if not isinstance(obj, Document): + raise TypeError(f"Expected a Document instance. Got {type(obj)}") + return obj + + +def _load_from_bytes(serialized: bytes) -> Serializable: + """Return a document from a bytes representation.""" + return loads(serialized.decode("utf-8")) + + +def _identity(x: str) -> str: + """Return the same object.""" + return x + + +# PUBLIC API + + +def create_lc_store( + store: ByteStore, + *, + key_encoder: Optional[Callable[[str], str]] = None, +) -> BaseStore[str, Serializable]: + """Create a store for langchain serializable objects from a bytes store. + + Args: + store: A bytes store to use as the underlying store. + key_encoder: A function to encode keys; if None uses identity function. + + Returns: + A key-value store for documents. + """ + return EncoderBackedStore( + store, + key_encoder or _identity, + _dump_as_bytes, + _load_from_bytes, + ) + + +def create_kv_docstore( + store: ByteStore, + *, + key_encoder: Optional[Callable[[str], str]] = None, +) -> BaseStore[str, Document]: + """Create a store for langchain Document objects from a bytes store. + + This store does run time type checking to ensure that the values are + Document objects. + + Args: + store: A bytes store to use as the underlying store. + key_encoder: A function to encode keys; if None uses identity function. + + Returns: + A key-value store for documents. + """ + return EncoderBackedStore( + store, + key_encoder or _identity, + _dump_document_as_bytes, + _load_document_from_bytes, + ) diff --git a/venv/Lib/site-packages/langchain/storage/encoder_backed.py b/venv/Lib/site-packages/langchain/storage/encoder_backed.py new file mode 100644 index 00000000..becc86dd --- /dev/null +++ b/venv/Lib/site-packages/langchain/storage/encoder_backed.py @@ -0,0 +1,123 @@ +from collections.abc import AsyncIterator, Iterator, Sequence +from typing import ( + Any, + Callable, + Optional, + TypeVar, + Union, +) + +from langchain_core.stores import BaseStore + +K = TypeVar("K") +V = TypeVar("V") + + +class EncoderBackedStore(BaseStore[K, V]): + """Wraps a store with key and value encoders/decoders. + + Examples that uses JSON for encoding/decoding: + + .. 
code-block:: python + + import json + + def key_encoder(key: int) -> str: + return json.dumps(key) + + def value_serializer(value: float) -> str: + return json.dumps(value) + + def value_deserializer(serialized_value: str) -> float: + return json.loads(serialized_value) + + # Create an instance of the abstract store + abstract_store = MyCustomStore() + + # Create an instance of the encoder-backed store + store = EncoderBackedStore( + store=abstract_store, + key_encoder=key_encoder, + value_serializer=value_serializer, + value_deserializer=value_deserializer + ) + + # Use the encoder-backed store methods + store.mset([(1, 3.14), (2, 2.718)]) + values = store.mget([1, 2]) # Retrieves [3.14, 2.718] + store.mdelete([1, 2]) # Deletes the keys 1 and 2 + """ + + def __init__( + self, + store: BaseStore[str, Any], + key_encoder: Callable[[K], str], + value_serializer: Callable[[V], bytes], + value_deserializer: Callable[[Any], V], + ) -> None: + """Initialize an EncodedStore.""" + self.store = store + self.key_encoder = key_encoder + self.value_serializer = value_serializer + self.value_deserializer = value_deserializer + + def mget(self, keys: Sequence[K]) -> list[Optional[V]]: + """Get the values associated with the given keys.""" + encoded_keys: list[str] = [self.key_encoder(key) for key in keys] + values = self.store.mget(encoded_keys) + return [ + self.value_deserializer(value) if value is not None else value + for value in values + ] + + async def amget(self, keys: Sequence[K]) -> list[Optional[V]]: + """Get the values associated with the given keys.""" + encoded_keys: list[str] = [self.key_encoder(key) for key in keys] + values = await self.store.amget(encoded_keys) + return [ + self.value_deserializer(value) if value is not None else value + for value in values + ] + + def mset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None: + """Set the values for the given keys.""" + encoded_pairs = [ + (self.key_encoder(key), self.value_serializer(value)) + for key, value in key_value_pairs + ] + self.store.mset(encoded_pairs) + + async def amset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None: + """Set the values for the given keys.""" + encoded_pairs = [ + (self.key_encoder(key), self.value_serializer(value)) + for key, value in key_value_pairs + ] + await self.store.amset(encoded_pairs) + + def mdelete(self, keys: Sequence[K]) -> None: + """Delete the given keys and their associated values.""" + encoded_keys = [self.key_encoder(key) for key in keys] + self.store.mdelete(encoded_keys) + + async def amdelete(self, keys: Sequence[K]) -> None: + """Delete the given keys and their associated values.""" + encoded_keys = [self.key_encoder(key) for key in keys] + await self.store.amdelete(encoded_keys) + + def yield_keys( + self, *, prefix: Optional[str] = None + ) -> Union[Iterator[K], Iterator[str]]: + """Get an iterator over keys that match the given prefix.""" + # For the time being this does not return K, but str + # it's for debugging purposes. Should fix this. + yield from self.store.yield_keys(prefix=prefix) + + async def ayield_keys( + self, *, prefix: Optional[str] = None + ) -> Union[AsyncIterator[K], AsyncIterator[str]]: + """Get an iterator over keys that match the given prefix.""" + # For the time being this does not return K, but str + # it's for debugging purposes. Should fix this. 
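+        # (Editor's illustrative note, not part of the original file:) the
+        # wrapped store is a BaseStore[str, Any], so the keys yielded here are
+        # the *encoded* strings produced by `key_encoder`, not decoded K values.
+        # With the JSON key_encoder from the class docstring, mset([(1, 3.14)])
+        # stores under "1" and yield_keys() yields the string "1".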
+ async for key in self.store.ayield_keys(prefix=prefix): + yield key diff --git a/venv/Lib/site-packages/langchain/storage/exceptions.py b/venv/Lib/site-packages/langchain/storage/exceptions.py new file mode 100644 index 00000000..82d7c8a2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/storage/exceptions.py @@ -0,0 +1,3 @@ +from langchain_core.stores import InvalidKeyException + +__all__ = ["InvalidKeyException"] diff --git a/venv/Lib/site-packages/langchain/storage/file_system.py b/venv/Lib/site-packages/langchain/storage/file_system.py new file mode 100644 index 00000000..ef07f81c --- /dev/null +++ b/venv/Lib/site-packages/langchain/storage/file_system.py @@ -0,0 +1,173 @@ +import os +import re +import time +from collections.abc import Iterator, Sequence +from pathlib import Path +from typing import Optional, Union + +from langchain_core.stores import ByteStore + +from langchain.storage.exceptions import InvalidKeyException + + +class LocalFileStore(ByteStore): + """BaseStore interface that works on the local file system. + + Examples: + Create a LocalFileStore instance and perform operations on it: + + .. code-block:: python + + from langchain.storage import LocalFileStore + + # Instantiate the LocalFileStore with the root path + file_store = LocalFileStore("/path/to/root") + + # Set values for keys + file_store.mset([("key1", b"value1"), ("key2", b"value2")]) + + # Get values for keys + values = file_store.mget(["key1", "key2"]) # Returns [b"value1", b"value2"] + + # Delete keys + file_store.mdelete(["key1"]) + + # Iterate over keys + for key in file_store.yield_keys(): + print(key) # noqa: T201 + + """ + + def __init__( + self, + root_path: Union[str, Path], + *, + chmod_file: Optional[int] = None, + chmod_dir: Optional[int] = None, + update_atime: bool = False, + ) -> None: + """Implement the BaseStore interface for the local file system. + + Args: + root_path (Union[str, Path]): The root path of the file store. All keys are + interpreted as paths relative to this root. + chmod_file: (optional, defaults to `None`) If specified, sets permissions + for newly created files, overriding the current `umask` if needed. + chmod_dir: (optional, defaults to `None`) If specified, sets permissions + for newly created dirs, overriding the current `umask` if needed. + update_atime: (optional, defaults to `False`) If `True`, updates the + filesystem access time (but not the modified time) when a file is read. + This allows MRU/LRU cache policies to be implemented for filesystems + where access time updates are disabled. + """ + self.root_path = Path(root_path).absolute() + self.chmod_file = chmod_file + self.chmod_dir = chmod_dir + self.update_atime = update_atime + + def _get_full_path(self, key: str) -> Path: + """Get the full path for a given key relative to the root path. + + Args: + key (str): The key relative to the root path. + + Returns: + Path: The full path for the given key. + """ + if not re.match(r"^[a-zA-Z0-9_.\-/]+$", key): + raise InvalidKeyException(f"Invalid characters in key: {key}") + full_path = os.path.abspath(self.root_path / key) + common_path = os.path.commonpath([str(self.root_path), full_path]) + if common_path != str(self.root_path): + raise InvalidKeyException( + f"Invalid key: {key}. Key should be relative to the full path." + f"{self.root_path} vs. 
{common_path} and full path of {full_path}" + ) + + return Path(full_path) + + def _mkdir_for_store(self, dir: Path) -> None: + """Makes a store directory path (including parents) with specified permissions + + This is needed because `Path.mkdir()` is restricted by the current `umask`, + whereas the explicit `os.chmod()` used here is not. + + Args: + dir: (Path) The store directory to make + + Returns: + None + """ + if not dir.exists(): + self._mkdir_for_store(dir.parent) + dir.mkdir(exist_ok=True) + if self.chmod_dir is not None: + os.chmod(dir, self.chmod_dir) + + def mget(self, keys: Sequence[str]) -> list[Optional[bytes]]: + """Get the values associated with the given keys. + + Args: + keys: A sequence of keys. + + Returns: + A sequence of optional values associated with the keys. + If a key is not found, the corresponding value will be None. + """ + values: list[Optional[bytes]] = [] + for key in keys: + full_path = self._get_full_path(key) + if full_path.exists(): + value = full_path.read_bytes() + values.append(value) + if self.update_atime: + # update access time only; preserve modified time + os.utime(full_path, (time.time(), os.stat(full_path).st_mtime)) + else: + values.append(None) + return values + + def mset(self, key_value_pairs: Sequence[tuple[str, bytes]]) -> None: + """Set the values for the given keys. + + Args: + key_value_pairs: A sequence of key-value pairs. + + Returns: + None + """ + for key, value in key_value_pairs: + full_path = self._get_full_path(key) + self._mkdir_for_store(full_path.parent) + full_path.write_bytes(value) + if self.chmod_file is not None: + os.chmod(full_path, self.chmod_file) + + def mdelete(self, keys: Sequence[str]) -> None: + """Delete the given keys and their associated values. + + Args: + keys (Sequence[str]): A sequence of keys to delete. + + Returns: + None + """ + for key in keys: + full_path = self._get_full_path(key) + if full_path.exists(): + full_path.unlink() + + def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]: + """Get an iterator over keys that match the given prefix. + + Args: + prefix (Optional[str]): The prefix to match. + + Returns: + Iterator[str]: An iterator over keys that match the given prefix. + """ + prefix_path = self._get_full_path(prefix) if prefix else self.root_path + for file in prefix_path.rglob("*"): + if file.is_file(): + relative_path = file.relative_to(self.root_path) + yield str(relative_path) diff --git a/venv/Lib/site-packages/langchain/storage/in_memory.py b/venv/Lib/site-packages/langchain/storage/in_memory.py new file mode 100644 index 00000000..24c4df6f --- /dev/null +++ b/venv/Lib/site-packages/langchain/storage/in_memory.py @@ -0,0 +1,13 @@ +"""In memory store that is not thread safe and has no eviction policy. + +This is a simple implementation of the BaseStore using a dictionary that is useful +primarily for unit testing purposes. +""" + +from langchain_core.stores import InMemoryBaseStore, InMemoryByteStore, InMemoryStore + +__all__ = [ + "InMemoryStore", + "InMemoryBaseStore", + "InMemoryByteStore", +] diff --git a/venv/Lib/site-packages/langchain/storage/redis.py b/venv/Lib/site-packages/langchain/storage/redis.py new file mode 100644 index 00000000..80a6234c --- /dev/null +++ b/venv/Lib/site-packages/langchain/storage/redis.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.storage import RedisStore + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"RedisStore": "langchain_community.storage"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RedisStore", +] diff --git a/venv/Lib/site-packages/langchain/storage/upstash_redis.py b/venv/Lib/site-packages/langchain/storage/upstash_redis.py new file mode 100644 index 00000000..59f3b106 --- /dev/null +++ b/venv/Lib/site-packages/langchain/storage/upstash_redis.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.storage import UpstashRedisByteStore, UpstashRedisStore + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "UpstashRedisStore": "langchain_community.storage", + "UpstashRedisByteStore": "langchain_community.storage", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UpstashRedisStore", + "UpstashRedisByteStore", +] diff --git a/venv/Lib/site-packages/langchain/text_splitter.py b/venv/Lib/site-packages/langchain/text_splitter.py new file mode 100644 index 00000000..9303f0c6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/text_splitter.py @@ -0,0 +1,50 @@ +"""Kept for backwards compatibility.""" + +from langchain_text_splitters import ( + Language, + RecursiveCharacterTextSplitter, + TextSplitter, + Tokenizer, + TokenTextSplitter, +) +from langchain_text_splitters.base import split_text_on_tokens +from langchain_text_splitters.character import CharacterTextSplitter +from langchain_text_splitters.html import ElementType, HTMLHeaderTextSplitter +from langchain_text_splitters.json import RecursiveJsonSplitter +from langchain_text_splitters.konlpy import KonlpyTextSplitter +from langchain_text_splitters.latex import LatexTextSplitter +from langchain_text_splitters.markdown import ( + HeaderType, + LineType, + MarkdownHeaderTextSplitter, + MarkdownTextSplitter, +) +from langchain_text_splitters.nltk import NLTKTextSplitter +from langchain_text_splitters.python import PythonCodeTextSplitter +from langchain_text_splitters.sentence_transformers import ( + SentenceTransformersTokenTextSplitter, +) +from langchain_text_splitters.spacy import SpacyTextSplitter + +__all__ = [ + "TokenTextSplitter", + "TextSplitter", + "Tokenizer", + "Language", + "RecursiveCharacterTextSplitter", + "RecursiveJsonSplitter", + "LatexTextSplitter", + "PythonCodeTextSplitter", + "KonlpyTextSplitter", + "SpacyTextSplitter", + "NLTKTextSplitter", + "split_text_on_tokens", + "SentenceTransformersTokenTextSplitter", + "ElementType", + "HeaderType", + "LineType", + "HTMLHeaderTextSplitter", + "MarkdownHeaderTextSplitter", + "MarkdownTextSplitter", + "CharacterTextSplitter", +] diff --git a/venv/Lib/site-packages/langchain/tools/__init__.py b/venv/Lib/site-packages/langchain/tools/__init__.py new file mode 100644 index 00000000..b6b2b0ae --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/__init__.py @@ -0,0 +1,206 @@ +"""**Tools** are classes that an Agent uses to interact with the world. + +Each tool has a **description**. 
Agent uses the description to choose the right +tool for the job. + +**Class hierarchy:** + +.. code-block:: + + ToolMetaclass --> BaseTool --> Tool # Examples: AIPluginTool, BaseGraphQLTool + # Examples: BraveSearch, HumanInputRun + +**Main helpers:** + +.. code-block:: + + CallbackManagerForToolRun, AsyncCallbackManagerForToolRun +""" + +import warnings +from typing import Any + +from langchain_core._api import LangChainDeprecationWarning +from langchain_core.tools import ( + BaseTool as BaseTool, +) +from langchain_core.tools import ( + StructuredTool as StructuredTool, +) +from langchain_core.tools import ( + Tool as Tool, +) +from langchain_core.tools.convert import tool as tool + +from langchain._api.interactive_env import is_interactive_env + +# Used for internal purposes +_DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"} + + +def _import_python_tool_PythonAstREPLTool() -> Any: + raise ImportError( + "This tool has been moved to langchain experiment. " + "This tool has access to a python REPL. " + "For best practices make sure to sandbox this tool. " + "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " + "To keep using this code as is, install langchain experimental and " + "update relevant imports replacing 'langchain' with 'langchain_experimental'" + ) + + +def _import_python_tool_PythonREPLTool() -> Any: + raise ImportError( + "This tool has been moved to langchain experiment. " + "This tool has access to a python REPL. " + "For best practices make sure to sandbox this tool. " + "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " + "To keep using this code as is, install langchain experimental and " + "update relevant imports replacing 'langchain' with 'langchain_experimental'" + ) + + +def __getattr__(name: str) -> Any: + if name == "PythonAstREPLTool": + return _import_python_tool_PythonAstREPLTool() + elif name == "PythonREPLTool": + return _import_python_tool_PythonREPLTool() + else: + from langchain_community import tools + + # If not in interactive env, raise warning. + if not is_interactive_env(): + warnings.warn( + "Importing tools from langchain is deprecated. Importing from " + "langchain will no longer be supported as of langchain==0.2.0. 
" + "Please import from langchain-community instead:\n\n" + f"`from langchain_community.tools import {name}`.\n\n" + "To install langchain-community run " + "`pip install -U langchain-community`.", + category=LangChainDeprecationWarning, + ) + + return getattr(tools, name) + + +__all__ = [ + "StructuredTool", + "BaseTool", + "tool", + "Tool", + "AINAppOps", + "AINOwnerOps", + "AINRuleOps", + "AINTransfer", + "AINValueOps", + "AIPluginTool", + "APIOperation", + "ArxivQueryRun", + "AzureCogsFormRecognizerTool", + "AzureCogsImageAnalysisTool", + "AzureCogsSpeech2TextTool", + "AzureCogsText2SpeechTool", + "AzureCogsTextAnalyticsHealthTool", + "BaseGraphQLTool", + "BaseRequestsTool", + "BaseSQLDatabaseTool", + "BaseSparkSQLTool", + "BearlyInterpreterTool", + "BingSearchResults", + "BingSearchRun", + "BraveSearch", + "ClickTool", + "CopyFileTool", + "CurrentWebPageTool", + "DeleteFileTool", + "DuckDuckGoSearchResults", + "DuckDuckGoSearchRun", + "E2BDataAnalysisTool", + "EdenAiExplicitImageTool", + "EdenAiObjectDetectionTool", + "EdenAiParsingIDTool", + "EdenAiParsingInvoiceTool", + "EdenAiSpeechToTextTool", + "EdenAiTextModerationTool", + "EdenAiTextToSpeechTool", + "EdenaiTool", + "ElevenLabsText2SpeechTool", + "ExtractHyperlinksTool", + "ExtractTextTool", + "FileSearchTool", + "GetElementsTool", + "GmailCreateDraft", + "GmailGetMessage", + "GmailGetThread", + "GmailSearch", + "GmailSendMessage", + "GoogleCloudTextToSpeechTool", + "GooglePlacesTool", + "GoogleSearchResults", + "GoogleSearchRun", + "GoogleSerperResults", + "GoogleSerperRun", + "SearchAPIResults", + "SearchAPIRun", + "HumanInputRun", + "IFTTTWebhook", + "InfoPowerBITool", + "InfoSQLDatabaseTool", + "InfoSparkSQLTool", + "JiraAction", + "JsonGetValueTool", + "JsonListKeysTool", + "ListDirectoryTool", + "ListPowerBITool", + "ListSQLDatabaseTool", + "ListSparkSQLTool", + "MerriamWebsterQueryRun", + "MetaphorSearchResults", + "MoveFileTool", + "NasaAction", + "NavigateBackTool", + "NavigateTool", + "O365CreateDraftMessage", + "O365SearchEmails", + "O365SearchEvents", + "O365SendEvent", + "O365SendMessage", + "OpenAPISpec", + "OpenWeatherMapQueryRun", + "PubmedQueryRun", + "RedditSearchRun", + "QueryCheckerTool", + "QueryPowerBITool", + "QuerySQLCheckerTool", + "QuerySQLDataBaseTool", + "QuerySparkSQLTool", + "ReadFileTool", + "RequestsDeleteTool", + "RequestsGetTool", + "RequestsPatchTool", + "RequestsPostTool", + "RequestsPutTool", + "SteamWebAPIQueryRun", + "SceneXplainTool", + "SearxSearchResults", + "SearxSearchRun", + "ShellTool", + "SlackGetChannel", + "SlackGetMessage", + "SlackScheduleMessage", + "SlackSendMessage", + "SleepTool", + "StdInInquireTool", + "StackExchangeTool", + "SteamshipImageGenerationTool", + "VectorStoreQATool", + "VectorStoreQAWithSourcesTool", + "WikipediaQueryRun", + "WolframAlphaQueryRun", + "WriteFileTool", + "YahooFinanceNewsTool", + "YouTubeSearchTool", + "ZapierNLAListActions", + "ZapierNLARunAction", + "format_tool_to_openai_function", +] diff --git a/venv/Lib/site-packages/langchain/tools/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0f31c93a Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..448afe85 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/tools/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/__pycache__/convert_to_openai.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/__pycache__/convert_to_openai.cpython-312.pyc new file mode 100644 index 00000000..f7c23f2e Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/__pycache__/convert_to_openai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/__pycache__/ifttt.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/__pycache__/ifttt.cpython-312.pyc new file mode 100644 index 00000000..b5a88186 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/__pycache__/ifttt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/__pycache__/plugin.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/__pycache__/plugin.cpython-312.pyc new file mode 100644 index 00000000..2565f668 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/__pycache__/plugin.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/__pycache__/render.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/__pycache__/render.cpython-312.pyc new file mode 100644 index 00000000..89360313 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/__pycache__/render.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/__pycache__/retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/__pycache__/retriever.cpython-312.pyc new file mode 100644 index 00000000..1adb862b Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/__pycache__/retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/__pycache__/yahoo_finance_news.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/__pycache__/yahoo_finance_news.cpython-312.pyc new file mode 100644 index 00000000..d77b1908 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/__pycache__/yahoo_finance_news.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/__init__.py b/venv/Lib/site-packages/langchain/tools/ainetwork/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..97a1991c Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/app.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/app.cpython-312.pyc new file mode 100644 index 00000000..281c4b6a Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/app.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..9820162f Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/owner.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/owner.cpython-312.pyc new file mode 100644 index 00000000..5d411019 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/owner.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/rule.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/rule.cpython-312.pyc new file mode 100644 index 00000000..93d18997 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/rule.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/transfer.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/transfer.cpython-312.pyc new file mode 100644 index 00000000..35280cc3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/transfer.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/value.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/value.cpython-312.pyc new file mode 100644 index 00000000..a9be97be Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/ainetwork/__pycache__/value.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/app.py b/venv/Lib/site-packages/langchain/tools/ainetwork/app.py new file mode 100644 index 00000000..a28e8300 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/ainetwork/app.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AINAppOps + from langchain_community.tools.ainetwork.app import AppOperationType, AppSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AppOperationType": "langchain_community.tools.ainetwork.app", + "AppSchema": "langchain_community.tools.ainetwork.app", + "AINAppOps": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AppOperationType", + "AppSchema", + "AINAppOps", +] diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/base.py b/venv/Lib/site-packages/langchain/tools/ainetwork/base.py new file mode 100644 index 00000000..608c882f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/ainetwork/base.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
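The two-line comment above names the pattern every one of these vendored shim modules repeats: a `DEPRECATED_LOOKUP` table plus a module-level `__getattr__` (PEP 562). A minimal self-contained sketch of how such an importer factory can work — an illustration only, not langchain's actual `create_importer`, whose internals do not appear in this diff:

```python
# Minimal sketch of the importer factory these shims share (illustration only;
# the real implementation is langchain._api.create_importer, not shown here).
import importlib
import warnings
from typing import Any, Callable


def create_importer_sketch(
    package: str, deprecated_lookups: dict[str, str]
) -> Callable[[str], Any]:
    """Build a lookup function suitable for a module-level __getattr__ (PEP 562)."""

    def import_attribute(name: str) -> Any:
        if name not in deprecated_lookups:
            raise AttributeError(f"module {package!r} has no attribute {name!r}")
        new_module = deprecated_lookups[name]
        warnings.warn(
            f"Importing {name} from {package} is deprecated; "
            f"use `from {new_module} import {name}` instead.",
            DeprecationWarning,
            stacklevel=3,  # point past __getattr__ to the caller's import site
        )
        # Import lazily, so the target package is only required on first access.
        return getattr(importlib.import_module(new_module), name)

    return import_attribute
```

Wiring it up mirrors the vendored modules: `_import_attribute = create_importer_sketch(__package__, DEPRECATED_LOOKUP)` plus a one-line `__getattr__` that delegates to it.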
+DEPRECATED_LOOKUP = { + "OperationType": "langchain_community.tools.ainetwork.base", + "AINBaseTool": "langchain_community.tools.ainetwork.base", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OperationType", + "AINBaseTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/owner.py b/venv/Lib/site-packages/langchain/tools/ainetwork/owner.py new file mode 100644 index 00000000..1cf8259d --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/ainetwork/owner.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AINOwnerOps + from langchain_community.tools.ainetwork.owner import RuleSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "RuleSchema": "langchain_community.tools.ainetwork.owner", + "AINOwnerOps": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RuleSchema", + "AINOwnerOps", +] diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/rule.py b/venv/Lib/site-packages/langchain/tools/ainetwork/rule.py new file mode 100644 index 00000000..56496048 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/ainetwork/rule.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AINRuleOps + from langchain_community.tools.ainetwork.rule import RuleSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "RuleSchema": "langchain_community.tools.ainetwork.rule", + "AINRuleOps": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RuleSchema", + "AINRuleOps", +] diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/transfer.py b/venv/Lib/site-packages/langchain/tools/ainetwork/transfer.py new file mode 100644 index 00000000..2aa04dd9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/ainetwork/transfer.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AINTransfer + from langchain_community.tools.ainetwork.transfer import TransferSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "TransferSchema": "langchain_community.tools.ainetwork.transfer", + "AINTransfer": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TransferSchema", + "AINTransfer", +] diff --git a/venv/Lib/site-packages/langchain/tools/ainetwork/value.py b/venv/Lib/site-packages/langchain/tools/ainetwork/value.py new file mode 100644 index 00000000..f2a5876f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/ainetwork/value.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AINValueOps + from langchain_community.tools.ainetwork.value import ValueSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ValueSchema": "langchain_community.tools.ainetwork.value", + "AINValueOps": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ValueSchema", + "AINValueOps", +] diff --git a/venv/Lib/site-packages/langchain/tools/amadeus/__init__.py b/venv/Lib/site-packages/langchain/tools/amadeus/__init__.py new file mode 100644 index 00000000..74c25f39 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/amadeus/__init__.py @@ -0,0 +1,30 @@ +"""Amadeus tools.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.amadeus.closest_airport import AmadeusClosestAirport + from langchain_community.tools.amadeus.flight_search import AmadeusFlightSearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "AmadeusClosestAirport": "langchain_community.tools.amadeus.closest_airport", + "AmadeusFlightSearch": "langchain_community.tools.amadeus.flight_search", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AmadeusClosestAirport", + "AmadeusFlightSearch", +] diff --git a/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..63a53557 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..8866d1b1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/closest_airport.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/closest_airport.cpython-312.pyc new file mode 100644 index 00000000..86327ca3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/closest_airport.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/flight_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/flight_search.cpython-312.pyc new file mode 100644 index 00000000..082daba5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/amadeus/__pycache__/flight_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/amadeus/base.py b/venv/Lib/site-packages/langchain/tools/amadeus/base.py new file mode 100644 index 00000000..b5c36e0b --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/amadeus/base.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.amadeus.base import AmadeusBaseTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AmadeusBaseTool": "langchain_community.tools.amadeus.base"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AmadeusBaseTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/amadeus/closest_airport.py b/venv/Lib/site-packages/langchain/tools/amadeus/closest_airport.py new file mode 100644 index 00000000..e51baaa3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/amadeus/closest_airport.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.amadeus.closest_airport import ( + AmadeusClosestAirport, + ClosestAirportSchema, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
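The `if TYPE_CHECKING:` guard seen in each of these files is what keeps type checkers happy without paying the import cost at runtime: mypy/pyright evaluate the guarded imports, the interpreter never does. A standalone illustration of the same split, with `decimal` standing in for the deferred dependency (hypothetical, not from this diff):

```python
# Standalone illustration of the TYPE_CHECKING split used by these shims.
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    # Seen by mypy/pyright so annotations resolve; never executed at runtime.
    from decimal import Decimal


def __getattr__(name: str) -> Any:
    if name == "Decimal":
        from decimal import Decimal  # deferred to the first real attribute access

        return Decimal
    raise AttributeError(name)
```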
+DEPRECATED_LOOKUP = { + "ClosestAirportSchema": "langchain_community.tools.amadeus.closest_airport", + "AmadeusClosestAirport": "langchain_community.tools.amadeus.closest_airport", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ClosestAirportSchema", + "AmadeusClosestAirport", +] diff --git a/venv/Lib/site-packages/langchain/tools/amadeus/flight_search.py b/venv/Lib/site-packages/langchain/tools/amadeus/flight_search.py new file mode 100644 index 00000000..b83247b2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/amadeus/flight_search.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.amadeus.flight_search import ( + AmadeusFlightSearch, + FlightSearchSchema, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "FlightSearchSchema": "langchain_community.tools.amadeus.flight_search", + "AmadeusFlightSearch": "langchain_community.tools.amadeus.flight_search", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FlightSearchSchema", + "AmadeusFlightSearch", +] diff --git a/venv/Lib/site-packages/langchain/tools/arxiv/__init__.py b/venv/Lib/site-packages/langchain/tools/arxiv/__init__.py new file mode 100644 index 00000000..2607cb19 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/arxiv/__init__.py @@ -0,0 +1 @@ +"""Arxiv API toolkit.""" diff --git a/venv/Lib/site-packages/langchain/tools/arxiv/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/arxiv/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..bc8737c3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/arxiv/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/arxiv/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/arxiv/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..f4955fb6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/arxiv/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/arxiv/tool.py b/venv/Lib/site-packages/langchain/tools/arxiv/tool.py new file mode 100644 index 00000000..a5d4c7b9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/arxiv/tool.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ArxivQueryRun + from langchain_community.tools.arxiv.tool import ArxivInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
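Note the two kinds of targets in the lookup that follows: schema classes such as `ArxivInput` map to the deep module that defines them, while tools such as `ArxivQueryRun` map to the `langchain_community.tools` package root, where they are re-exported. Both routes should hand back the same object — a quick hypothetical check (requires langchain-community to be installed):

```python
# Both the package-root re-export and the defining module resolve to the
# identical class object (hypothetical check, langchain-community required).
from langchain_community.tools import ArxivQueryRun as from_root
from langchain_community.tools.arxiv.tool import ArxivQueryRun as from_module

assert from_root is from_module
```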
+DEPRECATED_LOOKUP = { + "ArxivInput": "langchain_community.tools.arxiv.tool", + "ArxivQueryRun": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArxivInput", + "ArxivQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__init__.py b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__init__.py new file mode 100644 index 00000000..7922cd4f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__init__.py @@ -0,0 +1,41 @@ +"""Azure Cognitive Services Tools.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + AzureCogsFormRecognizerTool, + AzureCogsImageAnalysisTool, + AzureCogsSpeech2TextTool, + AzureCogsText2SpeechTool, + AzureCogsTextAnalyticsHealthTool, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AzureCogsImageAnalysisTool": "langchain_community.tools", + "AzureCogsFormRecognizerTool": "langchain_community.tools", + "AzureCogsSpeech2TextTool": "langchain_community.tools", + "AzureCogsText2SpeechTool": "langchain_community.tools", + "AzureCogsTextAnalyticsHealthTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureCogsImageAnalysisTool", + "AzureCogsFormRecognizerTool", + "AzureCogsSpeech2TextTool", + "AzureCogsText2SpeechTool", + "AzureCogsTextAnalyticsHealthTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..8510ae2f Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/form_recognizer.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/form_recognizer.cpython-312.pyc new file mode 100644 index 00000000..3d253d3c Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/form_recognizer.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/image_analysis.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/image_analysis.cpython-312.pyc new file mode 100644 index 00000000..517e026b Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/image_analysis.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/speech2text.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/speech2text.cpython-312.pyc new file mode 100644 index 00000000..b4d33219 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/speech2text.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/text2speech.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/text2speech.cpython-312.pyc new file mode 100644 index 00000000..0adf4290 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/text2speech.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/text_analytics_health.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/text_analytics_health.cpython-312.pyc new file mode 100644 index 00000000..a66e88cd Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/__pycache__/text_analytics_health.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/form_recognizer.py b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/form_recognizer.py new file mode 100644 index 00000000..d54ed85f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/form_recognizer.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AzureCogsFormRecognizerTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AzureCogsFormRecognizerTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureCogsFormRecognizerTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/image_analysis.py b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/image_analysis.py new file mode 100644 index 00000000..8d2b1324 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/image_analysis.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AzureCogsImageAnalysisTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AzureCogsImageAnalysisTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureCogsImageAnalysisTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/speech2text.py b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/speech2text.py new file mode 100644 index 00000000..62306fe4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/speech2text.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AzureCogsSpeech2TextTool + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AzureCogsSpeech2TextTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureCogsSpeech2TextTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/text2speech.py b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/text2speech.py new file mode 100644 index 00000000..9dadb443 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/text2speech.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AzureCogsText2SpeechTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AzureCogsText2SpeechTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureCogsText2SpeechTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/text_analytics_health.py b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/text_analytics_health.py new file mode 100644 index 00000000..3fb411c0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/azure_cognitive_services/text_analytics_health.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AzureCogsTextAnalyticsHealthTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
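One subtlety worth spelling out: each shim declares `__all__` even though none of the listed names exist in the module body at import time. That still works because `from module import *` resolves every `__all__` entry with `getattr()`, which falls back to the module-level `__getattr__`. A self-contained demonstration of that fallback, with no langchain imports involved:

```python
# Self-contained demo: star-import resolves __all__ entries via getattr(),
# which falls back to the module-level __getattr__ (PEP 562).
import sys
import types
import warnings

mod = types.ModuleType("lazy_demo")
mod.__all__ = ["ANSWER"]


def _module_getattr(name: str):
    if name == "ANSWER":
        warnings.warn("ANSWER moved elsewhere", DeprecationWarning, stacklevel=2)
        return 42
    raise AttributeError(name)


mod.__getattr__ = _module_getattr
sys.modules["lazy_demo"] = mod

from lazy_demo import *  # noqa: E402,F403

print(ANSWER)  # 42, with a DeprecationWarning if the active filters show it
```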
+DEPRECATED_LOOKUP = {"AzureCogsTextAnalyticsHealthTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureCogsTextAnalyticsHealthTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/base.py b/venv/Lib/site-packages/langchain/tools/base.py new file mode 100644 index 00000000..ff81eaa8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/base.py @@ -0,0 +1,19 @@ +from langchain_core.tools import ( + BaseTool, + SchemaAnnotationError, + StructuredTool, + Tool, + ToolException, + create_schema_from_function, + tool, +) + +__all__ = [ + "SchemaAnnotationError", + "create_schema_from_function", + "ToolException", + "BaseTool", + "Tool", + "StructuredTool", + "tool", +] diff --git a/venv/Lib/site-packages/langchain/tools/bearly/__init__.py b/venv/Lib/site-packages/langchain/tools/bearly/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/bearly/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/bearly/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..50e47362 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/bearly/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/bearly/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/bearly/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..bce2d24d Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/bearly/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/bearly/tool.py b/venv/Lib/site-packages/langchain/tools/bearly/tool.py new file mode 100644 index 00000000..64af4a9c --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/bearly/tool.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import BearlyInterpreterTool + from langchain_community.tools.bearly.tool import ( + BearlyInterpreterToolArguments, + FileInfo, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "BearlyInterpreterToolArguments": "langchain_community.tools.bearly.tool", + "FileInfo": "langchain_community.tools.bearly.tool", + "BearlyInterpreterTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BearlyInterpreterToolArguments", + "FileInfo", + "BearlyInterpreterTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/bing_search/__init__.py b/venv/Lib/site-packages/langchain/tools/bing_search/__init__.py new file mode 100644 index 00000000..517695ad --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/bing_search/__init__.py @@ -0,0 +1,29 @@ +"""Bing Search API toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import BingSearchResults, BingSearchRun + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "BingSearchRun": "langchain_community.tools", + "BingSearchResults": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BingSearchRun", + "BingSearchResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/bing_search/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/bing_search/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..f6239272 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/bing_search/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/bing_search/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/bing_search/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..bb65bf0a Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/bing_search/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/bing_search/tool.py b/venv/Lib/site-packages/langchain/tools/bing_search/tool.py new file mode 100644 index 00000000..20f88714 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/bing_search/tool.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import BingSearchResults, BingSearchRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BingSearchRun": "langchain_community.tools", + "BingSearchResults": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BingSearchRun", + "BingSearchResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/brave_search/__init__.py b/venv/Lib/site-packages/langchain/tools/brave_search/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/brave_search/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/brave_search/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a734d5ea Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/brave_search/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/brave_search/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/brave_search/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..06f4c826 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/brave_search/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/brave_search/tool.py b/venv/Lib/site-packages/langchain/tools/brave_search/tool.py new file mode 100644 index 00000000..b02ed3b2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/brave_search/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import BraveSearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
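From the consumer side, the effect of these shims is that the pre-split import path keeps working but announces its replacement. A hypothetical session (assumes both langchain and langchain-community are installed; the warning class and wording come from the package's own deprecation machinery, quoted earlier in this diff):

```python
# Hypothetical consumer session showing what a shim does at import time.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from langchain.tools import BraveSearch  # noqa: F401  (old path, still works)

for w in caught:
    print(w.category.__name__, "-", w.message)
# Expected: a LangChainDeprecationWarning recommending
# `from langchain_community.tools import BraveSearch`.
```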
+DEPRECATED_LOOKUP = {"BraveSearch": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BraveSearch", +] diff --git a/venv/Lib/site-packages/langchain/tools/clickup/__init__.py b/venv/Lib/site-packages/langchain/tools/clickup/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/clickup/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/clickup/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d56bba22 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/clickup/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/clickup/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/clickup/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..4c0ee5de Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/clickup/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/clickup/tool.py b/venv/Lib/site-packages/langchain/tools/clickup/tool.py new file mode 100644 index 00000000..5c38df83 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/clickup/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.clickup.tool import ClickupAction + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ClickupAction": "langchain_community.tools.clickup.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ClickupAction", +] diff --git a/venv/Lib/site-packages/langchain/tools/convert_to_openai.py b/venv/Lib/site-packages/langchain/tools/convert_to_openai.py new file mode 100644 index 00000000..d9f639a3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/convert_to_openai.py @@ -0,0 +1,4 @@ +from langchain_core.utils.function_calling import format_tool_to_openai_function + +# For backwards compatibility +__all__ = ["format_tool_to_openai_function"] diff --git a/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/__init__.py b/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/__init__.py new file mode 100644 index 00000000..2fdf6e4f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/__init__.py @@ -0,0 +1,34 @@ +"""DataForSeo API Toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.dataforseo_api_search.tool import ( + DataForSeoAPISearchResults, + DataForSeoAPISearchRun, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "DataForSeoAPISearchRun": "langchain_community.tools.dataforseo_api_search.tool", + "DataForSeoAPISearchResults": ( + "langchain_community.tools.dataforseo_api_search.tool" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DataForSeoAPISearchRun", + "DataForSeoAPISearchResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9837fd78 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..2f500518 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/tool.py b/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/tool.py new file mode 100644 index 00000000..8d1b0285 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/dataforseo_api_search/tool.py @@ -0,0 +1,32 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.dataforseo_api_search.tool import ( + DataForSeoAPISearchResults, + DataForSeoAPISearchRun, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DataForSeoAPISearchRun": "langchain_community.tools.dataforseo_api_search.tool", + "DataForSeoAPISearchResults": ( + "langchain_community.tools.dataforseo_api_search.tool" + ), +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DataForSeoAPISearchRun", + "DataForSeoAPISearchResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/ddg_search/__init__.py b/venv/Lib/site-packages/langchain/tools/ddg_search/__init__.py new file mode 100644 index 00000000..5cbe4079 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/ddg_search/__init__.py @@ -0,0 +1,25 @@ +"""DuckDuckGo Search API toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import DuckDuckGoSearchRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"DuckDuckGoSearchRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DuckDuckGoSearchRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/ddg_search/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/ddg_search/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..ff45a0a9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/ddg_search/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/ddg_search/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/ddg_search/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..268dbe1c Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/ddg_search/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/ddg_search/tool.py b/venv/Lib/site-packages/langchain/tools/ddg_search/tool.py new file mode 100644 index 00000000..e386ac35 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/ddg_search/tool.py @@ -0,0 +1,32 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import DuckDuckGoSearchResults, DuckDuckGoSearchRun + from langchain_community.tools.ddg_search.tool import DDGInput, DuckDuckGoSearchTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DDGInput": "langchain_community.tools.ddg_search.tool", + "DuckDuckGoSearchRun": "langchain_community.tools", + "DuckDuckGoSearchResults": "langchain_community.tools", + "DuckDuckGoSearchTool": "langchain_community.tools.ddg_search.tool", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DDGInput", + "DuckDuckGoSearchRun", + "DuckDuckGoSearchResults", + "DuckDuckGoSearchTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/__init__.py b/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..16ead754 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..f57d8f16 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/tool.py b/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/tool.py new file mode 100644 index 00000000..479c5eae --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/e2b_data_analysis/tool.py @@ -0,0 +1,33 
@@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import E2BDataAnalysisTool + from langchain_community.tools.e2b_data_analysis.tool import ( + E2BDataAnalysisToolArguments, + UploadedFile, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "UploadedFile": "langchain_community.tools.e2b_data_analysis.tool", + "E2BDataAnalysisToolArguments": "langchain_community.tools.e2b_data_analysis.tool", + "E2BDataAnalysisTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UploadedFile", + "E2BDataAnalysisToolArguments", + "E2BDataAnalysisTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/edenai/__init__.py b/venv/Lib/site-packages/langchain/tools/edenai/__init__.py new file mode 100644 index 00000000..3c33655d --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/edenai/__init__.py @@ -0,0 +1,50 @@ +"""Edenai Tools.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + EdenAiExplicitImageTool, + EdenAiObjectDetectionTool, + EdenAiParsingIDTool, + EdenAiParsingInvoiceTool, + EdenAiSpeechToTextTool, + EdenAiTextModerationTool, + EdenAiTextToSpeechTool, + EdenaiTool, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
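The other half of what the recurring comment calls "handling optional imports": langchain-community is an optional dependency, so when it is absent the importer should fail with the actionable message seen in this diff's `langchain/tools/__init__.py` ("To install langchain-community run `pip install -U langchain-community`") rather than a bare `ModuleNotFoundError`. A sketch of that behavior, assuming nothing about the real implementation beyond that message:

```python
# Sketch of the optional-import fallback: if the target package is missing,
# fail with an actionable message instead of a bare ModuleNotFoundError.
import importlib
from typing import Any


def load_from_community(module: str, name: str) -> Any:
    try:
        return getattr(importlib.import_module(module), name)
    except ModuleNotFoundError as exc:
        raise ImportError(
            f"{name} now lives in the langchain-community package. "
            "To install langchain-community run `pip install -U langchain-community`."
        ) from exc
```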
+DEPRECATED_LOOKUP = { + "EdenAiExplicitImageTool": "langchain_community.tools", + "EdenAiObjectDetectionTool": "langchain_community.tools", + "EdenAiParsingIDTool": "langchain_community.tools", + "EdenAiParsingInvoiceTool": "langchain_community.tools", + "EdenAiTextToSpeechTool": "langchain_community.tools", + "EdenAiSpeechToTextTool": "langchain_community.tools", + "EdenAiTextModerationTool": "langchain_community.tools", + "EdenaiTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenAiExplicitImageTool", + "EdenAiObjectDetectionTool", + "EdenAiParsingIDTool", + "EdenAiParsingInvoiceTool", + "EdenAiTextToSpeechTool", + "EdenAiSpeechToTextTool", + "EdenAiTextModerationTool", + "EdenaiTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..59e62819 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/audio_speech_to_text.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/audio_speech_to_text.cpython-312.pyc new file mode 100644 index 00000000..e455f122 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/audio_speech_to_text.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/audio_text_to_speech.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/audio_text_to_speech.cpython-312.pyc new file mode 100644 index 00000000..86d014f2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/audio_text_to_speech.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/edenai_base_tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/edenai_base_tool.cpython-312.pyc new file mode 100644 index 00000000..21593b4e Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/edenai_base_tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/image_explicitcontent.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/image_explicitcontent.cpython-312.pyc new file mode 100644 index 00000000..30bc8756 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/image_explicitcontent.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/image_objectdetection.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/image_objectdetection.cpython-312.pyc new file mode 100644 index 00000000..ee643477 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/image_objectdetection.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/ocr_identityparser.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/ocr_identityparser.cpython-312.pyc new file mode 100644 index 00000000..a8b4acf6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/ocr_identityparser.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/ocr_invoiceparser.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/ocr_invoiceparser.cpython-312.pyc new file mode 100644 index 00000000..f762da89 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/ocr_invoiceparser.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/text_moderation.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/text_moderation.cpython-312.pyc new file mode 100644 index 00000000..b8c31a6f Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/edenai/__pycache__/text_moderation.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/edenai/audio_speech_to_text.py b/venv/Lib/site-packages/langchain/tools/edenai/audio_speech_to_text.py new file mode 100644 index 00000000..a5fc771a --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/edenai/audio_speech_to_text.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import EdenAiSpeechToTextTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"EdenAiSpeechToTextTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenAiSpeechToTextTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/edenai/audio_text_to_speech.py b/venv/Lib/site-packages/langchain/tools/edenai/audio_text_to_speech.py new file mode 100644 index 00000000..84c2bc23 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/edenai/audio_text_to_speech.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import EdenAiTextToSpeechTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"EdenAiTextToSpeechTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenAiTextToSpeechTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/edenai/edenai_base_tool.py b/venv/Lib/site-packages/langchain/tools/edenai/edenai_base_tool.py new file mode 100644 index 00000000..f7903887 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/edenai/edenai_base_tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import EdenaiTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"EdenaiTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenaiTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/edenai/image_explicitcontent.py b/venv/Lib/site-packages/langchain/tools/edenai/image_explicitcontent.py new file mode 100644 index 00000000..e6064b34 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/edenai/image_explicitcontent.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import EdenAiExplicitImageTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"EdenAiExplicitImageTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenAiExplicitImageTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/edenai/image_objectdetection.py b/venv/Lib/site-packages/langchain/tools/edenai/image_objectdetection.py new file mode 100644 index 00000000..16c0f5db --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/edenai/image_objectdetection.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import EdenAiObjectDetectionTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"EdenAiObjectDetectionTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenAiObjectDetectionTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/edenai/ocr_identityparser.py b/venv/Lib/site-packages/langchain/tools/edenai/ocr_identityparser.py new file mode 100644 index 00000000..aa88f812 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/edenai/ocr_identityparser.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import EdenAiParsingIDTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"EdenAiParsingIDTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenAiParsingIDTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/edenai/ocr_invoiceparser.py b/venv/Lib/site-packages/langchain/tools/edenai/ocr_invoiceparser.py new file mode 100644 index 00000000..b71097fa --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/edenai/ocr_invoiceparser.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import EdenAiParsingInvoiceTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"EdenAiParsingInvoiceTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenAiParsingInvoiceTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/edenai/text_moderation.py b/venv/Lib/site-packages/langchain/tools/edenai/text_moderation.py new file mode 100644 index 00000000..6556fae2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/edenai/text_moderation.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import EdenAiTextModerationTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"EdenAiTextModerationTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "EdenAiTextModerationTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/eleven_labs/__init__.py b/venv/Lib/site-packages/langchain/tools/eleven_labs/__init__.py new file mode 100644 index 00000000..802b45d3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/eleven_labs/__init__.py @@ -0,0 +1,25 @@ +"""Eleven Labs Services Tools.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ElevenLabsText2SpeechTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ElevenLabsText2SpeechTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ElevenLabsText2SpeechTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/eleven_labs/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/eleven_labs/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..98f13997 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/eleven_labs/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/eleven_labs/__pycache__/models.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/eleven_labs/__pycache__/models.cpython-312.pyc new file mode 100644 index 00000000..c5a14d56 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/eleven_labs/__pycache__/models.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/eleven_labs/__pycache__/text2speech.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/eleven_labs/__pycache__/text2speech.cpython-312.pyc new file mode 100644 index 00000000..24e98988 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/eleven_labs/__pycache__/text2speech.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/eleven_labs/models.py b/venv/Lib/site-packages/langchain/tools/eleven_labs/models.py new file mode 100644 index 00000000..91a01b05 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/eleven_labs/models.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.eleven_labs.models import ElevenLabsModel + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ElevenLabsModel": "langchain_community.tools.eleven_labs.models"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ElevenLabsModel", +] diff --git a/venv/Lib/site-packages/langchain/tools/eleven_labs/text2speech.py b/venv/Lib/site-packages/langchain/tools/eleven_labs/text2speech.py new file mode 100644 index 00000000..96f32bdb --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/eleven_labs/text2speech.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ElevenLabsText2SpeechTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ElevenLabsText2SpeechTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ElevenLabsText2SpeechTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/file_management/__init__.py b/venv/Lib/site-packages/langchain/tools/file_management/__init__.py new file mode 100644 index 00000000..bb0c3bec --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/file_management/__init__.py @@ -0,0 +1,47 @@ +"""File Management Tools.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + CopyFileTool, + DeleteFileTool, + FileSearchTool, + ListDirectoryTool, + MoveFileTool, + ReadFileTool, + WriteFileTool, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CopyFileTool": "langchain_community.tools", + "DeleteFileTool": "langchain_community.tools", + "FileSearchTool": "langchain_community.tools", + "MoveFileTool": "langchain_community.tools", + "ReadFileTool": "langchain_community.tools", + "WriteFileTool": "langchain_community.tools", + "ListDirectoryTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CopyFileTool", + "DeleteFileTool", + "FileSearchTool", + "MoveFileTool", + "ReadFileTool", + "WriteFileTool", + "ListDirectoryTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..4a70c849 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/copy.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/copy.cpython-312.pyc new file mode 100644 index 00000000..1953a234 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/copy.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/delete.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/delete.cpython-312.pyc new file mode 100644 index 00000000..b35b484b Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/delete.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/file_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/file_search.cpython-312.pyc new file mode 100644 index 00000000..0b5c47dd Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/file_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/list_dir.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/list_dir.cpython-312.pyc new file mode 100644 index 00000000..6e5773dd Binary 
files /dev/null and b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/list_dir.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/move.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/move.cpython-312.pyc new file mode 100644 index 00000000..7c73bfc3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/move.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/read.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/read.cpython-312.pyc new file mode 100644 index 00000000..f51f9240 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/read.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/write.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/write.cpython-312.pyc new file mode 100644 index 00000000..ba38ce22 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/file_management/__pycache__/write.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/file_management/copy.py b/venv/Lib/site-packages/langchain/tools/file_management/copy.py new file mode 100644 index 00000000..308d882e --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/file_management/copy.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import CopyFileTool + from langchain_community.tools.file_management.copy import FileCopyInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "FileCopyInput": "langchain_community.tools.file_management.copy", + "CopyFileTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FileCopyInput", + "CopyFileTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/file_management/delete.py b/venv/Lib/site-packages/langchain/tools/file_management/delete.py new file mode 100644 index 00000000..01943a7b --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/file_management/delete.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import DeleteFileTool + from langchain_community.tools.file_management.delete import FileDeleteInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "FileDeleteInput": "langchain_community.tools.file_management.delete", + "DeleteFileTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FileDeleteInput", + "DeleteFileTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/file_management/file_search.py b/venv/Lib/site-packages/langchain/tools/file_management/file_search.py new file mode 100644 index 00000000..4a812fb0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/file_management/file_search.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import FileSearchTool + from langchain_community.tools.file_management.file_search import FileSearchInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "FileSearchInput": "langchain_community.tools.file_management.file_search", + "FileSearchTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FileSearchInput", + "FileSearchTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/file_management/list_dir.py b/venv/Lib/site-packages/langchain/tools/file_management/list_dir.py new file mode 100644 index 00000000..4fd32bea --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/file_management/list_dir.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ListDirectoryTool + from langchain_community.tools.file_management.list_dir import DirectoryListingInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DirectoryListingInput": "langchain_community.tools.file_management.list_dir", + "ListDirectoryTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DirectoryListingInput", + "ListDirectoryTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/file_management/move.py b/venv/Lib/site-packages/langchain/tools/file_management/move.py new file mode 100644 index 00000000..e65194de --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/file_management/move.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import MoveFileTool + from langchain_community.tools.file_management.move import FileMoveInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "FileMoveInput": "langchain_community.tools.file_management.move", + "MoveFileTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FileMoveInput", + "MoveFileTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/file_management/read.py b/venv/Lib/site-packages/langchain/tools/file_management/read.py new file mode 100644 index 00000000..4f2c9360 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/file_management/read.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ReadFileTool + from langchain_community.tools.file_management.read import ReadFileInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ReadFileInput": "langchain_community.tools.file_management.read", + "ReadFileTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ReadFileInput", + "ReadFileTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/file_management/write.py b/venv/Lib/site-packages/langchain/tools/file_management/write.py new file mode 100644 index 00000000..99e6873f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/file_management/write.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import WriteFileTool + from langchain_community.tools.file_management.write import WriteFileInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "WriteFileInput": "langchain_community.tools.file_management.write", + "WriteFileTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WriteFileInput", + "WriteFileTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/github/__init__.py b/venv/Lib/site-packages/langchain/tools/github/__init__.py new file mode 100644 index 00000000..11c741aa --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/github/__init__.py @@ -0,0 +1 @@ +"""GitHub Tool""" diff --git a/venv/Lib/site-packages/langchain/tools/github/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/github/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a21428f3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/github/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/github/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/github/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..047eb167 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/github/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/github/tool.py b/venv/Lib/site-packages/langchain/tools/github/tool.py new file mode 100644 index 00000000..73e4bb20 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/github/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.github.tool import GitHubAction + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GitHubAction": "langchain_community.tools.github.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GitHubAction", +] diff --git a/venv/Lib/site-packages/langchain/tools/gitlab/__init__.py b/venv/Lib/site-packages/langchain/tools/gitlab/__init__.py new file mode 100644 index 00000000..75ad8d01 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/gitlab/__init__.py @@ -0,0 +1 @@ +"""GitLab Tool""" diff --git a/venv/Lib/site-packages/langchain/tools/gitlab/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/gitlab/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9b90149e Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/gitlab/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/gitlab/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/gitlab/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..8b3084af Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/gitlab/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/gitlab/tool.py b/venv/Lib/site-packages/langchain/tools/gitlab/tool.py new file mode 100644 index 00000000..1e8f60d7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/gitlab/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.gitlab.tool import GitLabAction + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GitLabAction": "langchain_community.tools.gitlab.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GitLabAction", +] diff --git a/venv/Lib/site-packages/langchain/tools/gmail/__init__.py b/venv/Lib/site-packages/langchain/tools/gmail/__init__.py new file mode 100644 index 00000000..59e81e9f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/gmail/__init__.py @@ -0,0 +1,41 @@ +"""Gmail tools.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + GmailCreateDraft, + GmailGetMessage, + GmailGetThread, + GmailSearch, + GmailSendMessage, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "GmailCreateDraft": "langchain_community.tools", + "GmailSendMessage": "langchain_community.tools", + "GmailSearch": "langchain_community.tools", + "GmailGetMessage": "langchain_community.tools", + "GmailGetThread": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GmailCreateDraft", + "GmailSendMessage", + "GmailSearch", + "GmailGetMessage", + "GmailGetThread", +] diff --git a/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a7a7d17e Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..7e7db159 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/create_draft.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/create_draft.cpython-312.pyc new file mode 100644 index 00000000..58ec3b2f Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/create_draft.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/get_message.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/get_message.cpython-312.pyc new file mode 100644 index 00000000..33410d26 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/get_message.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/get_thread.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/get_thread.cpython-312.pyc new file mode 100644 index 00000000..57853c8d Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/get_thread.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/search.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/search.cpython-312.pyc new file mode 100644 index 00000000..f667284e Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/send_message.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/send_message.cpython-312.pyc new file mode 100644 index 00000000..da437ded Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/gmail/__pycache__/send_message.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/gmail/base.py b/venv/Lib/site-packages/langchain/tools/gmail/base.py new file mode 100644 index 00000000..51fd6345 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/gmail/base.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.gmail.base import GmailBaseTool + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GmailBaseTool": "langchain_community.tools.gmail.base"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GmailBaseTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/gmail/create_draft.py b/venv/Lib/site-packages/langchain/tools/gmail/create_draft.py new file mode 100644 index 00000000..010da4bd --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/gmail/create_draft.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GmailCreateDraft + from langchain_community.tools.gmail.create_draft import CreateDraftSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CreateDraftSchema": "langchain_community.tools.gmail.create_draft", + "GmailCreateDraft": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CreateDraftSchema", + "GmailCreateDraft", +] diff --git a/venv/Lib/site-packages/langchain/tools/gmail/get_message.py b/venv/Lib/site-packages/langchain/tools/gmail/get_message.py new file mode 100644 index 00000000..fea9ab4f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/gmail/get_message.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GmailGetMessage + from langchain_community.tools.gmail.get_message import SearchArgsSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SearchArgsSchema": "langchain_community.tools.gmail.get_message", + "GmailGetMessage": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearchArgsSchema", + "GmailGetMessage", +] diff --git a/venv/Lib/site-packages/langchain/tools/gmail/get_thread.py b/venv/Lib/site-packages/langchain/tools/gmail/get_thread.py new file mode 100644 index 00000000..13774541 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/gmail/get_thread.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GmailGetThread + from langchain_community.tools.gmail.get_thread import GetThreadSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "GetThreadSchema": "langchain_community.tools.gmail.get_thread", + "GmailGetThread": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GetThreadSchema", + "GmailGetThread", +] diff --git a/venv/Lib/site-packages/langchain/tools/gmail/search.py b/venv/Lib/site-packages/langchain/tools/gmail/search.py new file mode 100644 index 00000000..07278cff --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/gmail/search.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GmailSearch + from langchain_community.tools.gmail.search import Resource, SearchArgsSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "Resource": "langchain_community.tools.gmail.search", + "SearchArgsSchema": "langchain_community.tools.gmail.search", + "GmailSearch": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Resource", + "SearchArgsSchema", + "GmailSearch", +] diff --git a/venv/Lib/site-packages/langchain/tools/gmail/send_message.py b/venv/Lib/site-packages/langchain/tools/gmail/send_message.py new file mode 100644 index 00000000..94b8fdd3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/gmail/send_message.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GmailSendMessage + from langchain_community.tools.gmail.send_message import SendMessageSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SendMessageSchema": "langchain_community.tools.gmail.send_message", + "GmailSendMessage": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SendMessageSchema", + "GmailSendMessage", +] diff --git a/venv/Lib/site-packages/langchain/tools/golden_query/__init__.py b/venv/Lib/site-packages/langchain/tools/golden_query/__init__.py new file mode 100644 index 00000000..7ade4333 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/golden_query/__init__.py @@ -0,0 +1,25 @@ +"""Golden API toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.golden_query.tool import GoldenQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GoldenQueryRun": "langchain_community.tools.golden_query.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoldenQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/golden_query/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/golden_query/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d5b1d4a7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/golden_query/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/golden_query/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/golden_query/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..c5cef0c1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/golden_query/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/golden_query/tool.py b/venv/Lib/site-packages/langchain/tools/golden_query/tool.py new file mode 100644 index 00000000..0b59bbe2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/golden_query/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.golden_query.tool import GoldenQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoldenQueryRun": "langchain_community.tools.golden_query.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoldenQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_cloud/__init__.py b/venv/Lib/site-packages/langchain/tools/google_cloud/__init__.py new file mode 100644 index 00000000..c9420e27 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_cloud/__init__.py @@ -0,0 +1,25 @@ +"""Google Cloud Tools.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GoogleCloudTextToSpeechTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GoogleCloudTextToSpeechTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleCloudTextToSpeechTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_cloud/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_cloud/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..99653295 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_cloud/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_cloud/__pycache__/texttospeech.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_cloud/__pycache__/texttospeech.cpython-312.pyc new file mode 100644 index 00000000..9cd95b43 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_cloud/__pycache__/texttospeech.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_cloud/texttospeech.py b/venv/Lib/site-packages/langchain/tools/google_cloud/texttospeech.py new file mode 100644 index 00000000..395e618e --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_cloud/texttospeech.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GoogleCloudTextToSpeechTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoogleCloudTextToSpeechTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleCloudTextToSpeechTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_finance/__init__.py b/venv/Lib/site-packages/langchain/tools/google_finance/__init__.py new file mode 100644 index 00000000..109a6cf5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_finance/__init__.py @@ -0,0 +1,27 @@ +"""Google Finance API Toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "GoogleFinanceQueryRun": "langchain_community.tools.google_finance.tool" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleFinanceQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_finance/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_finance/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..3edd43c0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_finance/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_finance/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_finance/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..a2f11ff4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_finance/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_finance/tool.py b/venv/Lib/site-packages/langchain/tools/google_finance/tool.py new file mode 100644 index 00000000..807d3927 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_finance/tool.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GoogleFinanceQueryRun": "langchain_community.tools.google_finance.tool" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleFinanceQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_jobs/__init__.py b/venv/Lib/site-packages/langchain/tools/google_jobs/__init__.py new file mode 100644 index 00000000..100fefee --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_jobs/__init__.py @@ -0,0 +1,25 @@ +"""Google Jobs API Toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.google_jobs.tool import GoogleJobsQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GoogleJobsQueryRun": "langchain_community.tools.google_jobs.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleJobsQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_jobs/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_jobs/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..bff3da77 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_jobs/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_jobs/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_jobs/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..b1fd73b2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_jobs/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_jobs/tool.py b/venv/Lib/site-packages/langchain/tools/google_jobs/tool.py new file mode 100644 index 00000000..314d3eeb --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_jobs/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.google_jobs.tool import GoogleJobsQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoogleJobsQueryRun": "langchain_community.tools.google_jobs.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleJobsQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_lens/__init__.py b/venv/Lib/site-packages/langchain/tools/google_lens/__init__.py new file mode 100644 index 00000000..9d44f0a1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_lens/__init__.py @@ -0,0 +1,25 @@ +"""Google Lens API Toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.google_lens.tool import GoogleLensQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GoogleLensQueryRun": "langchain_community.tools.google_lens.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleLensQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_lens/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_lens/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..e5991324 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_lens/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_lens/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_lens/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..2df993df Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_lens/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_lens/tool.py b/venv/Lib/site-packages/langchain/tools/google_lens/tool.py new file mode 100644 index 00000000..3a24a6a4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_lens/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.google_lens.tool import GoogleLensQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoogleLensQueryRun": "langchain_community.tools.google_lens.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleLensQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_places/__init__.py b/venv/Lib/site-packages/langchain/tools/google_places/__init__.py new file mode 100644 index 00000000..d1917d11 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_places/__init__.py @@ -0,0 +1,25 @@ +"""Google Places API Toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GooglePlacesTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GooglePlacesTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GooglePlacesTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_places/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_places/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..1da16941 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_places/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_places/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_places/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..e8d1652a Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_places/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_places/tool.py b/venv/Lib/site-packages/langchain/tools/google_places/tool.py new file mode 100644 index 00000000..c1c7dfeb --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_places/tool.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GooglePlacesTool + from langchain_community.tools.google_places.tool import GooglePlacesSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GooglePlacesSchema": "langchain_community.tools.google_places.tool", + "GooglePlacesTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GooglePlacesSchema", + "GooglePlacesTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_scholar/__init__.py b/venv/Lib/site-packages/langchain/tools/google_scholar/__init__.py new file mode 100644 index 00000000..922b4ee0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_scholar/__init__.py @@ -0,0 +1,27 @@ +"""Google Scholar API Toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.google_scholar.tool import GoogleScholarQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "GoogleScholarQueryRun": "langchain_community.tools.google_scholar.tool" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleScholarQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_scholar/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_scholar/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9ce72c10 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_scholar/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_scholar/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_scholar/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..6866f45b Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_scholar/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_scholar/tool.py b/venv/Lib/site-packages/langchain/tools/google_scholar/tool.py new file mode 100644 index 00000000..b9401e57 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_scholar/tool.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.google_scholar.tool import GoogleScholarQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GoogleScholarQueryRun": "langchain_community.tools.google_scholar.tool" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleScholarQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_search/__init__.py b/venv/Lib/site-packages/langchain/tools/google_search/__init__.py new file mode 100644 index 00000000..0d1da942 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_search/__init__.py @@ -0,0 +1,29 @@ +"""Google Search API Toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GoogleSearchResults, GoogleSearchRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "GoogleSearchRun": "langchain_community.tools", + "GoogleSearchResults": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleSearchRun", + "GoogleSearchResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_search/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_search/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..873648f4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_search/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_search/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_search/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..2d794358 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_search/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_search/tool.py b/venv/Lib/site-packages/langchain/tools/google_search/tool.py new file mode 100644 index 00000000..8484216e --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_search/tool.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GoogleSearchResults, GoogleSearchRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GoogleSearchRun": "langchain_community.tools", + "GoogleSearchResults": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleSearchRun", + "GoogleSearchResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_serper/__init__.py b/venv/Lib/site-packages/langchain/tools/google_serper/__init__.py new file mode 100644 index 00000000..9fe3a567 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_serper/__init__.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GoogleSerperResults, GoogleSerperRun + +"""Google Serper API Toolkit.""" +"""Tool for the Serper.dev Google Search API.""" + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports.
+DEPRECATED_LOOKUP = { + "GoogleSerperRun": "langchain_community.tools", + "GoogleSerperResults": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleSerperRun", + "GoogleSerperResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_serper/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_serper/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..85f4496e Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_serper/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_serper/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_serper/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..23b72a92 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_serper/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_serper/tool.py b/venv/Lib/site-packages/langchain/tools/google_serper/tool.py new file mode 100644 index 00000000..9ccbf937 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_serper/tool.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GoogleSerperResults, GoogleSerperRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GoogleSerperRun": "langchain_community.tools", + "GoogleSerperResults": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleSerperRun", + "GoogleSerperResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_trends/__init__.py b/venv/Lib/site-packages/langchain/tools/google_trends/__init__.py new file mode 100644 index 00000000..ecbbc296 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_trends/__init__.py @@ -0,0 +1,27 @@ +"""Google Trends API Toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "GoogleTrendsQueryRun": "langchain_community.tools.google_trends.tool" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleTrendsQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/google_trends/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_trends/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0c9457b9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_trends/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_trends/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/google_trends/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..235a94f8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/google_trends/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/google_trends/tool.py b/venv/Lib/site-packages/langchain/tools/google_trends/tool.py new file mode 100644 index 00000000..8ff47ba2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/google_trends/tool.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GoogleTrendsQueryRun": "langchain_community.tools.google_trends.tool" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleTrendsQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/graphql/__init__.py b/venv/Lib/site-packages/langchain/tools/graphql/__init__.py new file mode 100644 index 00000000..7e9a84c3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/graphql/__init__.py @@ -0,0 +1 @@ +"""Tools for interacting with a GraphQL API""" diff --git a/venv/Lib/site-packages/langchain/tools/graphql/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/graphql/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..c728e4a3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/graphql/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/graphql/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/graphql/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..94b944b5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/graphql/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/graphql/tool.py b/venv/Lib/site-packages/langchain/tools/graphql/tool.py new file mode 100644 index 00000000..51966c00 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/graphql/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import BaseGraphQLTool + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"BaseGraphQLTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseGraphQLTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/human/__init__.py b/venv/Lib/site-packages/langchain/tools/human/__init__.py new file mode 100644 index 00000000..73d71280 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/human/__init__.py @@ -0,0 +1,25 @@ +"""Tool for asking for human input.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import HumanInputRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"HumanInputRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HumanInputRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/human/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/human/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..32efbf13 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/human/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/human/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/human/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..77fd235c Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/human/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/human/tool.py b/venv/Lib/site-packages/langchain/tools/human/tool.py new file mode 100644 index 00000000..1ea079d5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/human/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import HumanInputRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"HumanInputRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HumanInputRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/ifttt.py b/venv/Lib/site-packages/langchain/tools/ifttt.py new file mode 100644 index 00000000..a016bf7d --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/ifttt.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import IFTTTWebhook + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"IFTTTWebhook": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "IFTTTWebhook", +] diff --git a/venv/Lib/site-packages/langchain/tools/interaction/__init__.py b/venv/Lib/site-packages/langchain/tools/interaction/__init__.py new file mode 100644 index 00000000..be339336 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/interaction/__init__.py @@ -0,0 +1 @@ +"""Tools for interacting with the user.""" diff --git a/venv/Lib/site-packages/langchain/tools/interaction/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/interaction/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..30576423 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/interaction/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/interaction/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/interaction/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..f89a2876 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/interaction/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/interaction/tool.py b/venv/Lib/site-packages/langchain/tools/interaction/tool.py new file mode 100644 index 00000000..7c93d33b --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/interaction/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import StdInInquireTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"StdInInquireTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "StdInInquireTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/jira/__init__.py b/venv/Lib/site-packages/langchain/tools/jira/__init__.py new file mode 100644 index 00000000..06cd8cbc --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/jira/__init__.py @@ -0,0 +1 @@ +"""Jira Tool.""" diff --git a/venv/Lib/site-packages/langchain/tools/jira/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/jira/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0dfa1cc8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/jira/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/jira/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/jira/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..74a4c554 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/jira/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/jira/tool.py b/venv/Lib/site-packages/langchain/tools/jira/tool.py new file mode 100644 index 00000000..9f8b313d --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/jira/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import JiraAction + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"JiraAction": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JiraAction", +] diff --git a/venv/Lib/site-packages/langchain/tools/json/__init__.py b/venv/Lib/site-packages/langchain/tools/json/__init__.py new file mode 100644 index 00000000..d13302f0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/json/__init__.py @@ -0,0 +1 @@ +"""Tools for interacting with a JSON file.""" diff --git a/venv/Lib/site-packages/langchain/tools/json/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/json/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..ed2fd54e Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/json/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/json/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/json/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..b022458d Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/json/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/json/tool.py b/venv/Lib/site-packages/langchain/tools/json/tool.py new file mode 100644 index 00000000..f4812cf5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/json/tool.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import JsonGetValueTool, JsonListKeysTool + from langchain_community.tools.json.tool import JsonSpec + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "JsonSpec": "langchain_community.tools.json.tool", + "JsonListKeysTool": "langchain_community.tools", + "JsonGetValueTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JsonSpec", + "JsonListKeysTool", + "JsonGetValueTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/memorize/__init__.py b/venv/Lib/site-packages/langchain/tools/memorize/__init__.py new file mode 100644 index 00000000..805388c0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/memorize/__init__.py @@ -0,0 +1,25 @@ +"""Unsupervised learning based memorization.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.memorize.tool import Memorize + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Memorize": "langchain_community.tools.memorize.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Memorize", +] diff --git a/venv/Lib/site-packages/langchain/tools/memorize/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/memorize/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..eb3ec659 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/memorize/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/memorize/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/memorize/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..fed09c56 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/memorize/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/memorize/tool.py b/venv/Lib/site-packages/langchain/tools/memorize/tool.py new file mode 100644 index 00000000..d0e72223 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/memorize/tool.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.memorize.tool import Memorize, TrainableLLM + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "TrainableLLM": "langchain_community.tools.memorize.tool", + "Memorize": "langchain_community.tools.memorize.tool", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TrainableLLM", + "Memorize", +] diff --git a/venv/Lib/site-packages/langchain/tools/merriam_webster/__init__.py b/venv/Lib/site-packages/langchain/tools/merriam_webster/__init__.py new file mode 100644 index 00000000..73390d54 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/merriam_webster/__init__.py @@ -0,0 +1 @@ +"""Merriam-Webster API toolkit.""" diff --git a/venv/Lib/site-packages/langchain/tools/merriam_webster/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/merriam_webster/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..6afb7c49 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/merriam_webster/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/merriam_webster/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/merriam_webster/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..cef36a39 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/merriam_webster/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/merriam_webster/tool.py b/venv/Lib/site-packages/langchain/tools/merriam_webster/tool.py new file mode 100644 index 00000000..b0fa9b81 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/merriam_webster/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import MerriamWebsterQueryRun + +# Create a way to 
dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MerriamWebsterQueryRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MerriamWebsterQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/metaphor_search/__init__.py b/venv/Lib/site-packages/langchain/tools/metaphor_search/__init__.py new file mode 100644 index 00000000..960356ae --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/metaphor_search/__init__.py @@ -0,0 +1,25 @@ +"""Metaphor Search API toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import MetaphorSearchResults + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MetaphorSearchResults": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MetaphorSearchResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/metaphor_search/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/metaphor_search/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..3509c863 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/metaphor_search/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/metaphor_search/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/metaphor_search/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..6c181570 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/metaphor_search/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/metaphor_search/tool.py b/venv/Lib/site-packages/langchain/tools/metaphor_search/tool.py new file mode 100644 index 00000000..1bd97be6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/metaphor_search/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import MetaphorSearchResults + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"MetaphorSearchResults": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MetaphorSearchResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/multion/__init__.py b/venv/Lib/site-packages/langchain/tools/multion/__init__.py new file mode 100644 index 00000000..c3030878 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/multion/__init__.py @@ -0,0 +1,33 @@ +"""MutliOn Client API tools.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.multion.close_session import MultionCloseSession + from langchain_community.tools.multion.create_session import MultionCreateSession + from langchain_community.tools.multion.update_session import MultionUpdateSession + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "MultionCreateSession": "langchain_community.tools.multion.create_session", + "MultionUpdateSession": "langchain_community.tools.multion.update_session", + "MultionCloseSession": "langchain_community.tools.multion.close_session", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MultionCreateSession", + "MultionUpdateSession", + "MultionCloseSession", +] diff --git a/venv/Lib/site-packages/langchain/tools/multion/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/multion/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..b589df00 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/multion/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/multion/__pycache__/close_session.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/multion/__pycache__/close_session.cpython-312.pyc new file mode 100644 index 00000000..672a9ce0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/multion/__pycache__/close_session.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/multion/__pycache__/create_session.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/multion/__pycache__/create_session.cpython-312.pyc new file mode 100644 index 00000000..68952b87 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/multion/__pycache__/create_session.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/multion/__pycache__/update_session.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/multion/__pycache__/update_session.cpython-312.pyc new file mode 100644 index 00000000..f96a0087 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/multion/__pycache__/update_session.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/multion/close_session.py b/venv/Lib/site-packages/langchain/tools/multion/close_session.py new file mode 100644 index 00000000..d95a3e38 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/multion/close_session.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + 
from langchain_community.tools.multion.close_session import ( + CloseSessionSchema, + MultionCloseSession, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CloseSessionSchema": "langchain_community.tools.multion.close_session", + "MultionCloseSession": "langchain_community.tools.multion.close_session", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CloseSessionSchema", + "MultionCloseSession", +] diff --git a/venv/Lib/site-packages/langchain/tools/multion/create_session.py b/venv/Lib/site-packages/langchain/tools/multion/create_session.py new file mode 100644 index 00000000..bc14b22c --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/multion/create_session.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.multion.create_session import ( + CreateSessionSchema, + MultionCreateSession, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CreateSessionSchema": "langchain_community.tools.multion.create_session", + "MultionCreateSession": "langchain_community.tools.multion.create_session", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CreateSessionSchema", + "MultionCreateSession", +] diff --git a/venv/Lib/site-packages/langchain/tools/multion/update_session.py b/venv/Lib/site-packages/langchain/tools/multion/update_session.py new file mode 100644 index 00000000..e49da10e --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/multion/update_session.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.multion.update_session import ( + MultionUpdateSession, + UpdateSessionSchema, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
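+# The tool class and its args schema moved together, so both names are
+# mapped here and either legacy import path keeps resolving.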
+DEPRECATED_LOOKUP = { + "UpdateSessionSchema": "langchain_community.tools.multion.update_session", + "MultionUpdateSession": "langchain_community.tools.multion.update_session", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "UpdateSessionSchema", + "MultionUpdateSession", +] diff --git a/venv/Lib/site-packages/langchain/tools/nasa/__init__.py b/venv/Lib/site-packages/langchain/tools/nasa/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/nasa/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/nasa/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9de60efa Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/nasa/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/nasa/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/nasa/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..ca95eae0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/nasa/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/nasa/tool.py b/venv/Lib/site-packages/langchain/tools/nasa/tool.py new file mode 100644 index 00000000..e242d847 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/nasa/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import NasaAction + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NasaAction": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NasaAction", +] diff --git a/venv/Lib/site-packages/langchain/tools/nuclia/__init__.py b/venv/Lib/site-packages/langchain/tools/nuclia/__init__.py new file mode 100644 index 00000000..7d3b687a --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/nuclia/__init__.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.nuclia.tool import NucliaUnderstandingAPI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"NucliaUnderstandingAPI": "langchain_community.tools.nuclia.tool"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NucliaUnderstandingAPI", +] diff --git a/venv/Lib/site-packages/langchain/tools/nuclia/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/nuclia/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9364065d Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/nuclia/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/nuclia/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/nuclia/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..15478db5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/nuclia/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/nuclia/tool.py b/venv/Lib/site-packages/langchain/tools/nuclia/tool.py new file mode 100644 index 00000000..d5f483f1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/nuclia/tool.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.nuclia.tool import NUASchema, NucliaUnderstandingAPI + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "NUASchema": "langchain_community.tools.nuclia.tool", + "NucliaUnderstandingAPI": "langchain_community.tools.nuclia.tool", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NUASchema", + "NucliaUnderstandingAPI", +] diff --git a/venv/Lib/site-packages/langchain/tools/office365/__init__.py b/venv/Lib/site-packages/langchain/tools/office365/__init__.py new file mode 100644 index 00000000..d2108510 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/office365/__init__.py @@ -0,0 +1,41 @@ +"""O365 tools.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + O365CreateDraftMessage, + O365SearchEmails, + O365SearchEvents, + O365SendEvent, + O365SendMessage, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "O365SearchEmails": "langchain_community.tools", + "O365SearchEvents": "langchain_community.tools", + "O365CreateDraftMessage": "langchain_community.tools", + "O365SendMessage": "langchain_community.tools", + "O365SendEvent": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "O365SearchEmails", + "O365SearchEvents", + "O365CreateDraftMessage", + "O365SendMessage", + "O365SendEvent", +] diff --git a/venv/Lib/site-packages/langchain/tools/office365/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..5ea03971 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/office365/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..53a55d33 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/office365/__pycache__/create_draft_message.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/create_draft_message.cpython-312.pyc new file mode 100644 index 00000000..160a50dc Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/create_draft_message.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/office365/__pycache__/events_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/events_search.cpython-312.pyc new file mode 100644 index 00000000..ffce9e91 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/events_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/office365/__pycache__/messages_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/messages_search.cpython-312.pyc new file mode 100644 index 00000000..e086592e Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/messages_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/office365/__pycache__/send_event.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/send_event.cpython-312.pyc new file mode 100644 index 00000000..fba6bcd4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/send_event.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/office365/__pycache__/send_message.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/send_message.cpython-312.pyc new file mode 100644 index 00000000..271da232 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/office365/__pycache__/send_message.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/office365/base.py b/venv/Lib/site-packages/langchain/tools/office365/base.py new file mode 100644 index 00000000..192eef5b --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/office365/base.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if 
TYPE_CHECKING: + from langchain_community.tools.office365.base import O365BaseTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"O365BaseTool": "langchain_community.tools.office365.base"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "O365BaseTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/office365/create_draft_message.py b/venv/Lib/site-packages/langchain/tools/office365/create_draft_message.py new file mode 100644 index 00000000..04c6a97e --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/office365/create_draft_message.py @@ -0,0 +1,32 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import O365CreateDraftMessage + from langchain_community.tools.office365.create_draft_message import ( + CreateDraftMessageSchema, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CreateDraftMessageSchema": ( + "langchain_community.tools.office365.create_draft_message" + ), + "O365CreateDraftMessage": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CreateDraftMessageSchema", + "O365CreateDraftMessage", +] diff --git a/venv/Lib/site-packages/langchain/tools/office365/events_search.py b/venv/Lib/site-packages/langchain/tools/office365/events_search.py new file mode 100644 index 00000000..1a4a82e0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/office365/events_search.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import O365SearchEvents + from langchain_community.tools.office365.events_search import SearchEventsInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SearchEventsInput": "langchain_community.tools.office365.events_search", + "O365SearchEvents": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearchEventsInput", + "O365SearchEvents", +] diff --git a/venv/Lib/site-packages/langchain/tools/office365/messages_search.py b/venv/Lib/site-packages/langchain/tools/office365/messages_search.py new file mode 100644 index 00000000..5ae016bf --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/office365/messages_search.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import O365SearchEmails + from langchain_community.tools.office365.messages_search import SearchEmailsInput + +# Create a way to dynamically look up deprecated imports. 
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SearchEmailsInput": "langchain_community.tools.office365.messages_search", + "O365SearchEmails": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearchEmailsInput", + "O365SearchEmails", +] diff --git a/venv/Lib/site-packages/langchain/tools/office365/send_event.py b/venv/Lib/site-packages/langchain/tools/office365/send_event.py new file mode 100644 index 00000000..4b7d11a3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/office365/send_event.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import O365SendEvent + from langchain_community.tools.office365.send_event import SendEventSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SendEventSchema": "langchain_community.tools.office365.send_event", + "O365SendEvent": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SendEventSchema", + "O365SendEvent", +] diff --git a/venv/Lib/site-packages/langchain/tools/office365/send_message.py b/venv/Lib/site-packages/langchain/tools/office365/send_message.py new file mode 100644 index 00000000..09949f67 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/office365/send_message.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import O365SendMessage + from langchain_community.tools.office365.send_message import SendMessageSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "SendMessageSchema": "langchain_community.tools.office365.send_message", + "O365SendMessage": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SendMessageSchema", + "O365SendMessage", +] diff --git a/venv/Lib/site-packages/langchain/tools/openapi/__init__.py b/venv/Lib/site-packages/langchain/tools/openapi/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/openapi/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/openapi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9f19a91f Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/openapi/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/openapi/utils/__init__.py b/venv/Lib/site-packages/langchain/tools/openapi/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/openapi/utils/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/openapi/utils/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..912c7bba Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/openapi/utils/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/openapi/utils/__pycache__/api_models.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/openapi/utils/__pycache__/api_models.cpython-312.pyc new file mode 100644 index 00000000..f0215584 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/openapi/utils/__pycache__/api_models.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/openapi/utils/__pycache__/openapi_utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/openapi/utils/__pycache__/openapi_utils.cpython-312.pyc new file mode 100644 index 00000000..c79263b5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/openapi/utils/__pycache__/openapi_utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/openapi/utils/api_models.py b/venv/Lib/site-packages/langchain/tools/openapi/utils/api_models.py new file mode 100644 index 00000000..a587d635 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/openapi/utils/api_models.py @@ -0,0 +1,54 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import APIOperation + from langchain_community.tools.openapi.utils.api_models import ( + INVALID_LOCATION_TEMPL, + PRIMITIVE_TYPES, + SCHEMA_TYPE, + SUPPORTED_LOCATIONS, + APIProperty, + APIPropertyBase, + APIPropertyLocation, + APIRequestBody, + APIRequestBodyProperty, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "APIPropertyLocation": "langchain_community.tools.openapi.utils.api_models", + "APIPropertyBase": "langchain_community.tools.openapi.utils.api_models", + "APIProperty": "langchain_community.tools.openapi.utils.api_models", + "APIRequestBodyProperty": "langchain_community.tools.openapi.utils.api_models", + "APIRequestBody": "langchain_community.tools.openapi.utils.api_models", + "APIOperation": "langchain_community.tools", + "INVALID_LOCATION_TEMPL": "langchain_community.tools.openapi.utils.api_models", + "SCHEMA_TYPE": "langchain_community.tools.openapi.utils.api_models", + "PRIMITIVE_TYPES": "langchain_community.tools.openapi.utils.api_models", + "SUPPORTED_LOCATIONS": "langchain_community.tools.openapi.utils.api_models", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PRIMITIVE_TYPES", + "APIPropertyLocation", + "SUPPORTED_LOCATIONS", + "INVALID_LOCATION_TEMPL", + "SCHEMA_TYPE", + "APIPropertyBase", + "APIProperty", + "APIRequestBodyProperty", + "APIRequestBody", + "APIOperation", +] diff --git a/venv/Lib/site-packages/langchain/tools/openapi/utils/openapi_utils.py b/venv/Lib/site-packages/langchain/tools/openapi/utils/openapi_utils.py new file mode 100644 index 00000000..208b1a2f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/openapi/utils/openapi_utils.py @@ -0,0 +1,30 @@ +"""Utility functions for parsing an OpenAPI spec. Kept for backwards compat.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import OpenAPISpec + from langchain_community.utilities.openapi import HTTPVerb + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "HTTPVerb": "langchain_community.utilities.openapi", + "OpenAPISpec": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HTTPVerb", + "OpenAPISpec", +] diff --git a/venv/Lib/site-packages/langchain/tools/openweathermap/__init__.py b/venv/Lib/site-packages/langchain/tools/openweathermap/__init__.py new file mode 100644 index 00000000..28c8fed0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/openweathermap/__init__.py @@ -0,0 +1,25 @@ +"""OpenWeatherMap API toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import OpenWeatherMapQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"OpenWeatherMapQueryRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenWeatherMapQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/openweathermap/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/openweathermap/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0c2c2baf Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/openweathermap/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/openweathermap/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/openweathermap/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..a85951eb Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/openweathermap/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/openweathermap/tool.py b/venv/Lib/site-packages/langchain/tools/openweathermap/tool.py new file mode 100644 index 00000000..5a80337d --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/openweathermap/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import OpenWeatherMapQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OpenWeatherMapQueryRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenWeatherMapQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/playwright/__init__.py b/venv/Lib/site-packages/langchain/tools/playwright/__init__.py new file mode 100644 index 00000000..352088c2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/playwright/__init__.py @@ -0,0 +1,47 @@ +"""Browser tools and toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + ClickTool, + CurrentWebPageTool, + ExtractHyperlinksTool, + ExtractTextTool, + GetElementsTool, + NavigateBackTool, + NavigateTool, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "NavigateTool": "langchain_community.tools", + "NavigateBackTool": "langchain_community.tools", + "ExtractTextTool": "langchain_community.tools", + "ExtractHyperlinksTool": "langchain_community.tools", + "GetElementsTool": "langchain_community.tools", + "ClickTool": "langchain_community.tools", + "CurrentWebPageTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NavigateTool", + "NavigateBackTool", + "ExtractTextTool", + "ExtractHyperlinksTool", + "GetElementsTool", + "ClickTool", + "CurrentWebPageTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d4e5fecd Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..6a22041d Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/click.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/click.cpython-312.pyc new file mode 100644 index 00000000..4c90c412 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/click.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/current_page.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/current_page.cpython-312.pyc new file mode 100644 index 00000000..f2f78139 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/current_page.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/extract_hyperlinks.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/extract_hyperlinks.cpython-312.pyc new file mode 100644 index 00000000..cd15ab99 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/extract_hyperlinks.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/extract_text.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/extract_text.cpython-312.pyc new file mode 100644 index 00000000..bca16d6f Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/extract_text.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/get_elements.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/get_elements.cpython-312.pyc new file mode 100644 index 00000000..063f59fd Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/get_elements.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/navigate.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/navigate.cpython-312.pyc new file mode 100644 index 00000000..0a5b592e Binary files 
/dev/null and b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/navigate.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/navigate_back.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/navigate_back.cpython-312.pyc new file mode 100644 index 00000000..e3d9b545 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/playwright/__pycache__/navigate_back.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/playwright/base.py b/venv/Lib/site-packages/langchain/tools/playwright/base.py new file mode 100644 index 00000000..65a7053c --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/playwright/base.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.playwright.base import BaseBrowserTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"BaseBrowserTool": "langchain_community.tools.playwright.base"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseBrowserTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/playwright/click.py b/venv/Lib/site-packages/langchain/tools/playwright/click.py new file mode 100644 index 00000000..75e601d1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/playwright/click.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ClickTool + from langchain_community.tools.playwright.click import ClickToolInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ClickToolInput": "langchain_community.tools.playwright.click", + "ClickTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ClickToolInput", + "ClickTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/playwright/current_page.py b/venv/Lib/site-packages/langchain/tools/playwright/current_page.py new file mode 100644 index 00000000..f57bed9c --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/playwright/current_page.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import CurrentWebPageTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"CurrentWebPageTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CurrentWebPageTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/playwright/extract_hyperlinks.py b/venv/Lib/site-packages/langchain/tools/playwright/extract_hyperlinks.py new file mode 100644 index 00000000..e8320e6d --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/playwright/extract_hyperlinks.py @@ -0,0 +1,32 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ExtractHyperlinksTool + from langchain_community.tools.playwright.extract_hyperlinks import ( + ExtractHyperlinksToolInput, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ExtractHyperlinksToolInput": ( + "langchain_community.tools.playwright.extract_hyperlinks" + ), + "ExtractHyperlinksTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ExtractHyperlinksToolInput", + "ExtractHyperlinksTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/playwright/extract_text.py b/venv/Lib/site-packages/langchain/tools/playwright/extract_text.py new file mode 100644 index 00000000..af2e85d1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/playwright/extract_text.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ExtractTextTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ExtractTextTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ExtractTextTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/playwright/get_elements.py b/venv/Lib/site-packages/langchain/tools/playwright/get_elements.py new file mode 100644 index 00000000..75d6d776 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/playwright/get_elements.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import GetElementsTool + from langchain_community.tools.playwright.get_elements import GetElementsToolInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "GetElementsToolInput": "langchain_community.tools.playwright.get_elements", + "GetElementsTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GetElementsToolInput", + "GetElementsTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/playwright/navigate.py b/venv/Lib/site-packages/langchain/tools/playwright/navigate.py new file mode 100644 index 00000000..734ef229 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/playwright/navigate.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import NavigateTool + from langchain_community.tools.playwright.navigate import NavigateToolInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "NavigateToolInput": "langchain_community.tools.playwright.navigate", + "NavigateTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NavigateToolInput", + "NavigateTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/playwright/navigate_back.py b/venv/Lib/site-packages/langchain/tools/playwright/navigate_back.py new file mode 100644 index 00000000..ac2f924e --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/playwright/navigate_back.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import NavigateBackTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NavigateBackTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NavigateBackTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/plugin.py b/venv/Lib/site-packages/langchain/tools/plugin.py new file mode 100644 index 00000000..47c71e8e --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/plugin.py @@ -0,0 +1,32 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import AIPluginTool + from langchain_community.tools.plugin import AIPlugin, AIPluginToolSchema, ApiConfig + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "ApiConfig": "langchain_community.tools.plugin", + "AIPlugin": "langchain_community.tools.plugin", + "AIPluginToolSchema": "langchain_community.tools.plugin", + "AIPluginTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ApiConfig", + "AIPlugin", + "AIPluginToolSchema", + "AIPluginTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/powerbi/__init__.py b/venv/Lib/site-packages/langchain/tools/powerbi/__init__.py new file mode 100644 index 00000000..3ecc25a1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/powerbi/__init__.py @@ -0,0 +1 @@ +"""Tools for interacting with a PowerBI dataset.""" diff --git a/venv/Lib/site-packages/langchain/tools/powerbi/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/powerbi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..1b115ab5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/powerbi/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/powerbi/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/powerbi/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..a0d6ee49 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/powerbi/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/powerbi/tool.py b/venv/Lib/site-packages/langchain/tools/powerbi/tool.py new file mode 100644 index 00000000..3cd41667 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/powerbi/tool.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + InfoPowerBITool, + ListPowerBITool, + QueryPowerBITool, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "QueryPowerBITool": "langchain_community.tools", + "InfoPowerBITool": "langchain_community.tools", + "ListPowerBITool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "QueryPowerBITool", + "InfoPowerBITool", + "ListPowerBITool", +] diff --git a/venv/Lib/site-packages/langchain/tools/pubmed/__init__.py b/venv/Lib/site-packages/langchain/tools/pubmed/__init__.py new file mode 100644 index 00000000..687e908e --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/pubmed/__init__.py @@ -0,0 +1 @@ +"""PubMed API toolkit.""" diff --git a/venv/Lib/site-packages/langchain/tools/pubmed/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/pubmed/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..899ff9ed Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/pubmed/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/pubmed/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/pubmed/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..a468df22 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/pubmed/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/pubmed/tool.py b/venv/Lib/site-packages/langchain/tools/pubmed/tool.py new file mode 100644 index 00000000..dcbbd44f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/pubmed/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import PubmedQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PubmedQueryRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PubmedQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/python/__init__.py b/venv/Lib/site-packages/langchain/tools/python/__init__.py new file mode 100644 index 00000000..9c60cf9c --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/python/__init__.py @@ -0,0 +1,12 @@ +from typing import Any + + +def __getattr__(name: str = "") -> Any: + raise AttributeError( + "This tool has been moved to langchain experimental. " + "This tool has access to a Python REPL. " + "For best practices, make sure to sandbox this tool. 
" + "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " + "To keep using this code as is, install langchain experimental and " + "update relevant imports replacing 'langchain' with 'langchain_experimental'" + ) diff --git a/venv/Lib/site-packages/langchain/tools/python/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/python/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0099be6b Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/python/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/reddit_search/__init__.py b/venv/Lib/site-packages/langchain/tools/reddit_search/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/reddit_search/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/reddit_search/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..7baae0df Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/reddit_search/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/reddit_search/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/reddit_search/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..c04c2e27 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/reddit_search/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/reddit_search/tool.py b/venv/Lib/site-packages/langchain/tools/reddit_search/tool.py new file mode 100644 index 00000000..29c9bed9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/reddit_search/tool.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import RedditSearchRun, RedditSearchSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "RedditSearchSchema": "langchain_community.tools", + "RedditSearchRun": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RedditSearchSchema", + "RedditSearchRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/render.py b/venv/Lib/site-packages/langchain/tools/render.py new file mode 100644 index 00000000..c9754601 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/render.py @@ -0,0 +1,23 @@ +"""Different methods for rendering Tools to be passed to LLMs. + +Depending on the LLM you are using and the prompting strategy you are using, +you may want Tools to be rendered in a different way. +This module contains various ways to render tools. 
+""" + +# For backwards compatibility +from langchain_core.tools import ( + render_text_description, + render_text_description_and_args, +) +from langchain_core.utils.function_calling import ( + format_tool_to_openai_function, + format_tool_to_openai_tool, +) + +__all__ = [ + "render_text_description", + "render_text_description_and_args", + "format_tool_to_openai_tool", + "format_tool_to_openai_function", +] diff --git a/venv/Lib/site-packages/langchain/tools/requests/__init__.py b/venv/Lib/site-packages/langchain/tools/requests/__init__.py new file mode 100644 index 00000000..ec421f18 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/requests/__init__.py @@ -0,0 +1 @@ +"""Tools for making requests to an API endpoint.""" diff --git a/venv/Lib/site-packages/langchain/tools/requests/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/requests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..1e9d0017 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/requests/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/requests/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/requests/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..37f4f792 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/requests/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/requests/tool.py b/venv/Lib/site-packages/langchain/tools/requests/tool.py new file mode 100644 index 00000000..9a7efd6f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/requests/tool.py @@ -0,0 +1,42 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + BaseRequestsTool, + RequestsDeleteTool, + RequestsGetTool, + RequestsPatchTool, + RequestsPostTool, + RequestsPutTool, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BaseRequestsTool": "langchain_community.tools", + "RequestsGetTool": "langchain_community.tools", + "RequestsPostTool": "langchain_community.tools", + "RequestsPatchTool": "langchain_community.tools", + "RequestsPutTool": "langchain_community.tools", + "RequestsDeleteTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseRequestsTool", + "RequestsGetTool", + "RequestsPostTool", + "RequestsPatchTool", + "RequestsPutTool", + "RequestsDeleteTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/retriever.py b/venv/Lib/site-packages/langchain/tools/retriever.py new file mode 100644 index 00000000..39d2a52e --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/retriever.py @@ -0,0 +1,11 @@ +from langchain_core.tools import ( + create_retriever_tool, + render_text_description, + render_text_description_and_args, +) + +__all__ = [ + "create_retriever_tool", + "render_text_description", + "render_text_description_and_args", +] diff --git a/venv/Lib/site-packages/langchain/tools/scenexplain/__init__.py b/venv/Lib/site-packages/langchain/tools/scenexplain/__init__.py new file mode 100644 index 00000000..2e6553b7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/scenexplain/__init__.py @@ -0,0 +1 @@ +"""SceneXplain API toolkit.""" diff --git a/venv/Lib/site-packages/langchain/tools/scenexplain/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/scenexplain/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..4bed4b95 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/scenexplain/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/scenexplain/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/scenexplain/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..e1bc4895 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/scenexplain/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/scenexplain/tool.py b/venv/Lib/site-packages/langchain/tools/scenexplain/tool.py new file mode 100644 index 00000000..a0fa9865 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/scenexplain/tool.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SceneXplainTool + from langchain_community.tools.scenexplain.tool import SceneXplainInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "SceneXplainInput": "langchain_community.tools.scenexplain.tool", + "SceneXplainTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SceneXplainInput", + "SceneXplainTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/searchapi/__init__.py b/venv/Lib/site-packages/langchain/tools/searchapi/__init__.py new file mode 100644 index 00000000..eafee894 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/searchapi/__init__.py @@ -0,0 +1,30 @@ +"""SearchApi.io API Toolkit. + +Tool for the SearchApi.io Google SERP API. +""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SearchAPIResults, SearchAPIRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SearchAPIResults": "langchain_community.tools", + "SearchAPIRun": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearchAPIResults", + "SearchAPIRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/searchapi/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/searchapi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..6b3f55d9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/searchapi/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/searchapi/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/searchapi/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..c97e25b4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/searchapi/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/searchapi/tool.py b/venv/Lib/site-packages/langchain/tools/searchapi/tool.py new file mode 100644 index 00000000..18c69cb6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/searchapi/tool.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SearchAPIResults, SearchAPIRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "SearchAPIRun": "langchain_community.tools", + "SearchAPIResults": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearchAPIRun", + "SearchAPIResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/searx_search/__init__.py b/venv/Lib/site-packages/langchain/tools/searx_search/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/searx_search/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/searx_search/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..446d998d Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/searx_search/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/searx_search/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/searx_search/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..6d100e6d Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/searx_search/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/searx_search/tool.py b/venv/Lib/site-packages/langchain/tools/searx_search/tool.py new file mode 100644 index 00000000..94b4e560 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/searx_search/tool.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SearxSearchResults, SearxSearchRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SearxSearchRun": "langchain_community.tools", + "SearxSearchResults": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearxSearchRun", + "SearxSearchResults", +] diff --git a/venv/Lib/site-packages/langchain/tools/shell/__init__.py b/venv/Lib/site-packages/langchain/tools/shell/__init__.py new file mode 100644 index 00000000..4defaaa6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/shell/__init__.py @@ -0,0 +1,25 @@ +"""Shell tool.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ShellTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ShellTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ShellTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/shell/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/shell/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..98739d85 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/shell/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/shell/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/shell/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..81373f32 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/shell/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/shell/tool.py b/venv/Lib/site-packages/langchain/tools/shell/tool.py new file mode 100644 index 00000000..26b3a3a9 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/shell/tool.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ShellTool + from langchain_community.tools.shell.tool import ShellInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ShellInput": "langchain_community.tools.shell.tool", + "ShellTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ShellInput", + "ShellTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/slack/__init__.py b/venv/Lib/site-packages/langchain/tools/slack/__init__.py new file mode 100644 index 00000000..c30c589e --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/slack/__init__.py @@ -0,0 +1,38 @@ +"""Slack tools.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + SlackGetChannel, + SlackGetMessage, + SlackScheduleMessage, + SlackSendMessage, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "SlackGetChannel": "langchain_community.tools", + "SlackGetMessage": "langchain_community.tools", + "SlackScheduleMessage": "langchain_community.tools", + "SlackSendMessage": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SlackGetChannel", + "SlackGetMessage", + "SlackScheduleMessage", + "SlackSendMessage", +] diff --git a/venv/Lib/site-packages/langchain/tools/slack/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..4724d7c6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/slack/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..d6f52618 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/slack/__pycache__/get_channel.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/get_channel.cpython-312.pyc new file mode 100644 index 00000000..adf1a4a8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/get_channel.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/slack/__pycache__/get_message.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/get_message.cpython-312.pyc new file mode 100644 index 00000000..c3c1d6d2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/get_message.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/slack/__pycache__/schedule_message.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/schedule_message.cpython-312.pyc new file mode 100644 index 00000000..306123be Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/schedule_message.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/slack/__pycache__/send_message.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/send_message.cpython-312.pyc new file mode 100644 index 00000000..2d523673 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/slack/__pycache__/send_message.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/slack/base.py b/venv/Lib/site-packages/langchain/tools/slack/base.py new file mode 100644 index 00000000..93394b91 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/slack/base.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.slack.base import SlackBaseTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SlackBaseTool": "langchain_community.tools.slack.base"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SlackBaseTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/slack/get_channel.py b/venv/Lib/site-packages/langchain/tools/slack/get_channel.py new file mode 100644 index 00000000..a0dc98e0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/slack/get_channel.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SlackGetChannel + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SlackGetChannel": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SlackGetChannel", +] diff --git a/venv/Lib/site-packages/langchain/tools/slack/get_message.py b/venv/Lib/site-packages/langchain/tools/slack/get_message.py new file mode 100644 index 00000000..8969bae3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/slack/get_message.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SlackGetMessage + from langchain_community.tools.slack.get_message import SlackGetMessageSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SlackGetMessageSchema": "langchain_community.tools.slack.get_message", + "SlackGetMessage": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SlackGetMessageSchema", + "SlackGetMessage", +] diff --git a/venv/Lib/site-packages/langchain/tools/slack/schedule_message.py b/venv/Lib/site-packages/langchain/tools/slack/schedule_message.py new file mode 100644 index 00000000..89408227 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/slack/schedule_message.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SlackScheduleMessage + from langchain_community.tools.slack.schedule_message import ScheduleMessageSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "ScheduleMessageSchema": "langchain_community.tools.slack.schedule_message", + "SlackScheduleMessage": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ScheduleMessageSchema", + "SlackScheduleMessage", +] diff --git a/venv/Lib/site-packages/langchain/tools/slack/send_message.py b/venv/Lib/site-packages/langchain/tools/slack/send_message.py new file mode 100644 index 00000000..8adc70a3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/slack/send_message.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SlackSendMessage + from langchain_community.tools.slack.send_message import SendMessageSchema + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SendMessageSchema": "langchain_community.tools.slack.send_message", + "SlackSendMessage": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SendMessageSchema", + "SlackSendMessage", +] diff --git a/venv/Lib/site-packages/langchain/tools/sleep/__init__.py b/venv/Lib/site-packages/langchain/tools/sleep/__init__.py new file mode 100644 index 00000000..4d6319e2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/sleep/__init__.py @@ -0,0 +1 @@ +"""Sleep tool.""" diff --git a/venv/Lib/site-packages/langchain/tools/sleep/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/sleep/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..75110060 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/sleep/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/sleep/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/sleep/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..737b7fec Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/sleep/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/sleep/tool.py b/venv/Lib/site-packages/langchain/tools/sleep/tool.py new file mode 100644 index 00000000..ba113a3d --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/sleep/tool.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SleepTool + from langchain_community.tools.sleep.tool import SleepInput + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "SleepInput": "langchain_community.tools.sleep.tool", + "SleepTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SleepInput", + "SleepTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/spark_sql/__init__.py b/venv/Lib/site-packages/langchain/tools/spark_sql/__init__.py new file mode 100644 index 00000000..01039b77 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/spark_sql/__init__.py @@ -0,0 +1 @@ +"""Tools for interacting with Spark SQL.""" diff --git a/venv/Lib/site-packages/langchain/tools/spark_sql/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/spark_sql/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..abf77496 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/spark_sql/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/spark_sql/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/spark_sql/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..449406e7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/spark_sql/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/spark_sql/tool.py b/venv/Lib/site-packages/langchain/tools/spark_sql/tool.py new file mode 100644 index 00000000..afec32b0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/spark_sql/tool.py @@ -0,0 +1,39 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + BaseSparkSQLTool, + InfoSparkSQLTool, + ListSparkSQLTool, + QueryCheckerTool, + QuerySparkSQLTool, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
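+# Per PEP 562, Python only calls a module-level `__getattr__` when normal +# attribute lookup fails, so ordinary attributes of this module are unaffected +# and only the deprecated names listed below pay the proxying cost.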
+DEPRECATED_LOOKUP = { + "BaseSparkSQLTool": "langchain_community.tools", + "QuerySparkSQLTool": "langchain_community.tools", + "InfoSparkSQLTool": "langchain_community.tools", + "ListSparkSQLTool": "langchain_community.tools", + "QueryCheckerTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseSparkSQLTool", + "QuerySparkSQLTool", + "InfoSparkSQLTool", + "ListSparkSQLTool", + "QueryCheckerTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/sql_database/__init__.py b/venv/Lib/site-packages/langchain/tools/sql_database/__init__.py new file mode 100644 index 00000000..90fb3be1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/sql_database/__init__.py @@ -0,0 +1 @@ +"""Tools for interacting with a SQL database.""" diff --git a/venv/Lib/site-packages/langchain/tools/sql_database/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/sql_database/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d3cd7e21 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/sql_database/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/sql_database/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/sql_database/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..793973e1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/sql_database/__pycache__/prompt.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/sql_database/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/sql_database/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..cfd7b76d Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/sql_database/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/sql_database/prompt.py b/venv/Lib/site-packages/langchain/tools/sql_database/prompt.py new file mode 100644 index 00000000..c583ff6d --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/sql_database/prompt.py @@ -0,0 +1,24 @@ +"""For backwards compatibility.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.sql_database.prompt import QUERY_CHECKER + + +_importer = create_importer( + __package__, + deprecated_lookups={ + "QUERY_CHECKER": "langchain_community.tools.sql_database.prompt", + }, +) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _importer(name) + + +__all__ = ["QUERY_CHECKER"] diff --git a/venv/Lib/site-packages/langchain/tools/sql_database/tool.py b/venv/Lib/site-packages/langchain/tools/sql_database/tool.py new file mode 100644 index 00000000..1df1d645 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/sql_database/tool.py @@ -0,0 +1,39 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + BaseSQLDatabaseTool, + InfoSQLDatabaseTool, + ListSQLDatabaseTool, + QuerySQLCheckerTool, + QuerySQLDataBaseTool, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "BaseSQLDatabaseTool": "langchain_community.tools", + "QuerySQLDataBaseTool": "langchain_community.tools", + "InfoSQLDatabaseTool": "langchain_community.tools", + "ListSQLDatabaseTool": "langchain_community.tools", + "QuerySQLCheckerTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseSQLDatabaseTool", + "QuerySQLDataBaseTool", + "InfoSQLDatabaseTool", + "ListSQLDatabaseTool", + "QuerySQLCheckerTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/stackexchange/__init__.py b/venv/Lib/site-packages/langchain/tools/stackexchange/__init__.py new file mode 100644 index 00000000..1fa9a483 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/stackexchange/__init__.py @@ -0,0 +1 @@ +"""StackExchange API toolkit.""" diff --git a/venv/Lib/site-packages/langchain/tools/stackexchange/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/stackexchange/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..f9cb284f Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/stackexchange/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/stackexchange/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/stackexchange/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..6f057020 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/stackexchange/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/stackexchange/tool.py b/venv/Lib/site-packages/langchain/tools/stackexchange/tool.py new file mode 100644 index 00000000..0bb00c98 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/stackexchange/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import StackExchangeTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
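+# Illustrative: `create_importer` is given `__package__` so that the +# deprecation message can point from this old location to the new one when a +# lookup is resolved.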
+DEPRECATED_LOOKUP = {"StackExchangeTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "StackExchangeTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/steam/__init__.py b/venv/Lib/site-packages/langchain/tools/steam/__init__.py new file mode 100644 index 00000000..9367fd95 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/steam/__init__.py @@ -0,0 +1 @@ +"""Steam API toolkit""" diff --git a/venv/Lib/site-packages/langchain/tools/steam/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/steam/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..c61cdbcf Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/steam/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/steam/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/steam/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..31c9bc20 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/steam/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/steam/tool.py b/venv/Lib/site-packages/langchain/tools/steam/tool.py new file mode 100644 index 00000000..1fa1f99b --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/steam/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SteamWebAPIQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SteamWebAPIQueryRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SteamWebAPIQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/steamship_image_generation/__init__.py b/venv/Lib/site-packages/langchain/tools/steamship_image_generation/__init__.py new file mode 100644 index 00000000..fab0691f --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/steamship_image_generation/__init__.py @@ -0,0 +1,25 @@ +"""Tool to generate an image.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SteamshipImageGenerationTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SteamshipImageGenerationTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SteamshipImageGenerationTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/steamship_image_generation/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/steamship_image_generation/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..6452d7af Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/steamship_image_generation/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/steamship_image_generation/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/steamship_image_generation/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..a85cc701 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/steamship_image_generation/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/steamship_image_generation/tool.py b/venv/Lib/site-packages/langchain/tools/steamship_image_generation/tool.py new file mode 100644 index 00000000..9f086733 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/steamship_image_generation/tool.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import SteamshipImageGenerationTool + from langchain_community.tools.steamship_image_generation.tool import ModelName + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ModelName": "langchain_community.tools.steamship_image_generation.tool", + "SteamshipImageGenerationTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ModelName", + "SteamshipImageGenerationTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/tavily_search/__init__.py b/venv/Lib/site-packages/langchain/tools/tavily_search/__init__.py new file mode 100644 index 00000000..17a9bc59 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/tavily_search/__init__.py @@ -0,0 +1,32 @@ +"""Tavily Search API toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.tavily_search.tool import ( + TavilyAnswer, + TavilySearchResults, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "TavilySearchResults": "langchain_community.tools.tavily_search.tool", + "TavilyAnswer": "langchain_community.tools.tavily_search.tool", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TavilySearchResults", + "TavilyAnswer", +] diff --git a/venv/Lib/site-packages/langchain/tools/tavily_search/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/tavily_search/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..b0d9d690 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/tavily_search/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/tavily_search/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/tavily_search/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..cdd99d90 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/tavily_search/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/tavily_search/tool.py b/venv/Lib/site-packages/langchain/tools/tavily_search/tool.py new file mode 100644 index 00000000..fabf4efa --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/tavily_search/tool.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.tavily_search.tool import ( + TavilyAnswer, + TavilyInput, + TavilySearchResults, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "TavilyInput": "langchain_community.tools.tavily_search.tool", + "TavilySearchResults": "langchain_community.tools.tavily_search.tool", + "TavilyAnswer": "langchain_community.tools.tavily_search.tool", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TavilyInput", + "TavilySearchResults", + "TavilyAnswer", +] diff --git a/venv/Lib/site-packages/langchain/tools/vectorstore/__init__.py b/venv/Lib/site-packages/langchain/tools/vectorstore/__init__.py new file mode 100644 index 00000000..2bb63810 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/vectorstore/__init__.py @@ -0,0 +1 @@ +"""Simple tool wrapper around VectorDBQA chain.""" diff --git a/venv/Lib/site-packages/langchain/tools/vectorstore/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/vectorstore/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..082a2ab8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/vectorstore/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/vectorstore/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/vectorstore/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..fd3a2c41 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/vectorstore/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/vectorstore/tool.py b/venv/Lib/site-packages/langchain/tools/vectorstore/tool.py new file mode 100644 index 00000000..4487a2d6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/vectorstore/tool.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ( + VectorStoreQATool, + VectorStoreQAWithSourcesTool, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "VectorStoreQATool": "langchain_community.tools", + "VectorStoreQAWithSourcesTool": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "VectorStoreQATool", + "VectorStoreQAWithSourcesTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/wikipedia/__init__.py b/venv/Lib/site-packages/langchain/tools/wikipedia/__init__.py new file mode 100644 index 00000000..0b3edd08 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/wikipedia/__init__.py @@ -0,0 +1 @@ +"""Wikipedia API toolkit.""" diff --git a/venv/Lib/site-packages/langchain/tools/wikipedia/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/wikipedia/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..ede56795 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/wikipedia/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/wikipedia/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/wikipedia/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..a3dfe250 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/wikipedia/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/wikipedia/tool.py b/venv/Lib/site-packages/langchain/tools/wikipedia/tool.py new file mode 100644 index 00000000..04ba7a57 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/wikipedia/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import WikipediaQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"WikipediaQueryRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WikipediaQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/wolfram_alpha/__init__.py b/venv/Lib/site-packages/langchain/tools/wolfram_alpha/__init__.py new file mode 100644 index 00000000..1450e29c --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/wolfram_alpha/__init__.py @@ -0,0 +1,25 @@ +"""Wolfram Alpha API toolkit.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import WolframAlphaQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"WolframAlphaQueryRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WolframAlphaQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/wolfram_alpha/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/wolfram_alpha/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..dabda5d2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/wolfram_alpha/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/wolfram_alpha/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/wolfram_alpha/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..c08ff95c Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/wolfram_alpha/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/wolfram_alpha/tool.py b/venv/Lib/site-packages/langchain/tools/wolfram_alpha/tool.py new file mode 100644 index 00000000..58909cfe --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/wolfram_alpha/tool.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import WolframAlphaQueryRun + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"WolframAlphaQueryRun": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WolframAlphaQueryRun", +] diff --git a/venv/Lib/site-packages/langchain/tools/yahoo_finance_news.py b/venv/Lib/site-packages/langchain/tools/yahoo_finance_news.py new file mode 100644 index 00000000..239abb16 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/yahoo_finance_news.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import YahooFinanceNewsTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"YahooFinanceNewsTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "YahooFinanceNewsTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/youtube/__init__.py b/venv/Lib/site-packages/langchain/tools/youtube/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain/tools/youtube/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/youtube/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..9293f2fd Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/youtube/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/youtube/__pycache__/search.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/youtube/__pycache__/search.cpython-312.pyc new file mode 100644 index 00000000..d4ddd9da Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/youtube/__pycache__/search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/youtube/search.py b/venv/Lib/site-packages/langchain/tools/youtube/search.py new file mode 100644 index 00000000..14fc9ca4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/youtube/search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import YouTubeSearchTool + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"YouTubeSearchTool": "langchain_community.tools"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "YouTubeSearchTool", +] diff --git a/venv/Lib/site-packages/langchain/tools/zapier/__init__.py b/venv/Lib/site-packages/langchain/tools/zapier/__init__.py new file mode 100644 index 00000000..573590b0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/zapier/__init__.py @@ -0,0 +1,29 @@ +"""Zapier Tool.""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "ZapierNLARunAction": "langchain_community.tools", + "ZapierNLAListActions": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ZapierNLARunAction", + "ZapierNLAListActions", +] diff --git a/venv/Lib/site-packages/langchain/tools/zapier/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/zapier/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..b181706d Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/zapier/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/zapier/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain/tools/zapier/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..a573d1e7 Binary files /dev/null and b/venv/Lib/site-packages/langchain/tools/zapier/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/tools/zapier/tool.py b/venv/Lib/site-packages/langchain/tools/zapier/tool.py new file mode 100644 index 00000000..b69ab3f3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/tools/zapier/tool.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ZapierNLARunAction": "langchain_community.tools", + "ZapierNLAListActions": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ZapierNLARunAction", + "ZapierNLAListActions", +] diff --git a/venv/Lib/site-packages/langchain/utilities/__init__.py b/venv/Lib/site-packages/langchain/utilities/__init__.py new file mode 100644 index 00000000..1ee41550 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/__init__.py @@ -0,0 +1,168 @@ +"""**Utilities** are the integrations with third-party systems and packages. + +Other LangChain classes use **Utilities** to interact with third-party systems +and packages. 
+""" + +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import ( + AlphaVantageAPIWrapper, + ApifyWrapper, + ArceeWrapper, + ArxivAPIWrapper, + BibtexparserWrapper, + BingSearchAPIWrapper, + BraveSearchWrapper, + DuckDuckGoSearchAPIWrapper, + GoldenQueryAPIWrapper, + GoogleFinanceAPIWrapper, + GoogleJobsAPIWrapper, + GoogleLensAPIWrapper, + GooglePlacesAPIWrapper, + GoogleScholarAPIWrapper, + GoogleSearchAPIWrapper, + GoogleSerperAPIWrapper, + GoogleTrendsAPIWrapper, + GraphQLAPIWrapper, + JiraAPIWrapper, + LambdaWrapper, + MaxComputeAPIWrapper, + MerriamWebsterAPIWrapper, + MetaphorSearchAPIWrapper, + NasaAPIWrapper, + OpenWeatherMapAPIWrapper, + OutlineAPIWrapper, + Portkey, + PowerBIDataset, + PubMedAPIWrapper, + Requests, + RequestsWrapper, + SceneXplainAPIWrapper, + SearchApiAPIWrapper, + SearxSearchWrapper, + SerpAPIWrapper, + SparkSQL, + SQLDatabase, + StackExchangeAPIWrapper, + SteamWebAPIWrapper, + TensorflowDatasets, + TextRequestsWrapper, + TwilioAPIWrapper, + WikipediaAPIWrapper, + WolframAlphaAPIWrapper, + ZapierNLAWrapper, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AlphaVantageAPIWrapper": "langchain_community.utilities", + "ApifyWrapper": "langchain_community.utilities", + "ArceeWrapper": "langchain_community.utilities", + "ArxivAPIWrapper": "langchain_community.utilities", + "BibtexparserWrapper": "langchain_community.utilities", + "BingSearchAPIWrapper": "langchain_community.utilities", + "BraveSearchWrapper": "langchain_community.utilities", + "DuckDuckGoSearchAPIWrapper": "langchain_community.utilities", + "GoldenQueryAPIWrapper": "langchain_community.utilities", + "GoogleFinanceAPIWrapper": "langchain_community.utilities", + "GoogleLensAPIWrapper": "langchain_community.utilities", + "GoogleJobsAPIWrapper": "langchain_community.utilities", + "GooglePlacesAPIWrapper": "langchain_community.utilities", + "GoogleScholarAPIWrapper": "langchain_community.utilities", + "GoogleTrendsAPIWrapper": "langchain_community.utilities", + "GoogleSearchAPIWrapper": "langchain_community.utilities", + "GoogleSerperAPIWrapper": "langchain_community.utilities", + "GraphQLAPIWrapper": "langchain_community.utilities", + "JiraAPIWrapper": "langchain_community.utilities", + "LambdaWrapper": "langchain_community.utilities", + "MaxComputeAPIWrapper": "langchain_community.utilities", + "MerriamWebsterAPIWrapper": "langchain_community.utilities", + "MetaphorSearchAPIWrapper": "langchain_community.utilities", + "NasaAPIWrapper": "langchain_community.utilities", + "OpenWeatherMapAPIWrapper": "langchain_community.utilities", + "OutlineAPIWrapper": "langchain_community.utilities", + "Portkey": "langchain_community.utilities", + "PowerBIDataset": "langchain_community.utilities", + "PubMedAPIWrapper": "langchain_community.utilities", + # We will not list PythonREPL in __all__ since it has been removed from community + # it'll proxy to community package, which will raise an appropriate exception. 
+ "PythonREPL": "langchain_community.utilities", + "Requests": "langchain_community.utilities", + "SteamWebAPIWrapper": "langchain_community.utilities", + "SQLDatabase": "langchain_community.utilities", + "SceneXplainAPIWrapper": "langchain_community.utilities", + "SearchApiAPIWrapper": "langchain_community.utilities", + "SearxSearchWrapper": "langchain_community.utilities", + "SerpAPIWrapper": "langchain_community.utilities", + "SparkSQL": "langchain_community.utilities", + "StackExchangeAPIWrapper": "langchain_community.utilities", + "TensorflowDatasets": "langchain_community.utilities", + "RequestsWrapper": "langchain_community.utilities", + "TextRequestsWrapper": "langchain_community.utilities", + "TwilioAPIWrapper": "langchain_community.utilities", + "WikipediaAPIWrapper": "langchain_community.utilities", + "WolframAlphaAPIWrapper": "langchain_community.utilities", + "ZapierNLAWrapper": "langchain_community.utilities", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AlphaVantageAPIWrapper", + "ApifyWrapper", + "ArceeWrapper", + "ArxivAPIWrapper", + "BibtexparserWrapper", + "BingSearchAPIWrapper", + "BraveSearchWrapper", + "DuckDuckGoSearchAPIWrapper", + "GoldenQueryAPIWrapper", + "GoogleFinanceAPIWrapper", + "GoogleLensAPIWrapper", + "GoogleJobsAPIWrapper", + "GooglePlacesAPIWrapper", + "GoogleScholarAPIWrapper", + "GoogleTrendsAPIWrapper", + "GoogleSearchAPIWrapper", + "GoogleSerperAPIWrapper", + "GraphQLAPIWrapper", + "JiraAPIWrapper", + "LambdaWrapper", + "MaxComputeAPIWrapper", + "MerriamWebsterAPIWrapper", + "MetaphorSearchAPIWrapper", + "NasaAPIWrapper", + "OpenWeatherMapAPIWrapper", + "OutlineAPIWrapper", + "Portkey", + "PowerBIDataset", + "PubMedAPIWrapper", + "Requests", + "SteamWebAPIWrapper", + "SQLDatabase", + "SceneXplainAPIWrapper", + "SearchApiAPIWrapper", + "SearxSearchWrapper", + "SerpAPIWrapper", + "SparkSQL", + "StackExchangeAPIWrapper", + "TensorflowDatasets", + "RequestsWrapper", + "TextRequestsWrapper", + "TwilioAPIWrapper", + "WikipediaAPIWrapper", + "WolframAlphaAPIWrapper", + "ZapierNLAWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a97ed69f Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/alpha_vantage.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/alpha_vantage.cpython-312.pyc new file mode 100644 index 00000000..1970d50d Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/alpha_vantage.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/anthropic.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/anthropic.cpython-312.pyc new file mode 100644 index 00000000..b795ee29 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/anthropic.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/apify.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/apify.cpython-312.pyc new file mode 100644 index 00000000..ec1ef66d Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/utilities/__pycache__/apify.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/arcee.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/arcee.cpython-312.pyc new file mode 100644 index 00000000..8cc700c2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/arcee.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/arxiv.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/arxiv.cpython-312.pyc new file mode 100644 index 00000000..18794b3d Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/arxiv.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/asyncio.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/asyncio.cpython-312.pyc new file mode 100644 index 00000000..94596da8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/asyncio.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/awslambda.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/awslambda.cpython-312.pyc new file mode 100644 index 00000000..6c3c2a0b Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/awslambda.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/bibtex.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/bibtex.cpython-312.pyc new file mode 100644 index 00000000..d0b4e5ae Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/bibtex.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/bing_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/bing_search.cpython-312.pyc new file mode 100644 index 00000000..0dd99b72 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/bing_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/brave_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/brave_search.cpython-312.pyc new file mode 100644 index 00000000..e09878a0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/brave_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/clickup.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/clickup.cpython-312.pyc new file mode 100644 index 00000000..6061fe15 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/clickup.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/dalle_image_generator.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/dalle_image_generator.cpython-312.pyc new file mode 100644 index 00000000..29fea0ac Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/dalle_image_generator.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/dataforseo_api_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/dataforseo_api_search.cpython-312.pyc new file mode 100644 index 00000000..ff0fe4f1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/dataforseo_api_search.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/utilities/__pycache__/duckduckgo_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/duckduckgo_search.cpython-312.pyc new file mode 100644 index 00000000..c870baa1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/duckduckgo_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/github.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/github.cpython-312.pyc new file mode 100644 index 00000000..0f44c1ff Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/github.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/gitlab.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/gitlab.cpython-312.pyc new file mode 100644 index 00000000..5173158e Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/gitlab.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/golden_query.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/golden_query.cpython-312.pyc new file mode 100644 index 00000000..6684d85d Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/golden_query.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/google_finance.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_finance.cpython-312.pyc new file mode 100644 index 00000000..b3757961 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_finance.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/google_jobs.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_jobs.cpython-312.pyc new file mode 100644 index 00000000..de107cfb Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_jobs.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/google_lens.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_lens.cpython-312.pyc new file mode 100644 index 00000000..53c610d1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_lens.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/google_places_api.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_places_api.cpython-312.pyc new file mode 100644 index 00000000..47bcf76f Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_places_api.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/google_scholar.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_scholar.cpython-312.pyc new file mode 100644 index 00000000..5678cfc3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_scholar.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/google_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_search.cpython-312.pyc new file mode 100644 index 00000000..8945c6d6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_search.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/utilities/__pycache__/google_serper.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_serper.cpython-312.pyc new file mode 100644 index 00000000..ca5b27ee Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_serper.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/google_trends.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_trends.cpython-312.pyc new file mode 100644 index 00000000..f249231e Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/google_trends.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/graphql.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/graphql.cpython-312.pyc new file mode 100644 index 00000000..1ab03227 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/graphql.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/jira.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/jira.cpython-312.pyc new file mode 100644 index 00000000..c6d5ab81 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/jira.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/loading.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/loading.cpython-312.pyc new file mode 100644 index 00000000..9d31c452 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/loading.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/max_compute.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/max_compute.cpython-312.pyc new file mode 100644 index 00000000..1adf9a6d Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/max_compute.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/merriam_webster.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/merriam_webster.cpython-312.pyc new file mode 100644 index 00000000..176b95f8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/merriam_webster.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/metaphor_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/metaphor_search.cpython-312.pyc new file mode 100644 index 00000000..5874d096 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/metaphor_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/nasa.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/nasa.cpython-312.pyc new file mode 100644 index 00000000..d412f026 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/nasa.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/opaqueprompts.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/opaqueprompts.cpython-312.pyc new file mode 100644 index 00000000..ae960de9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/opaqueprompts.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/openapi.cpython-312.pyc 
b/venv/Lib/site-packages/langchain/utilities/__pycache__/openapi.cpython-312.pyc new file mode 100644 index 00000000..0f3b97ef Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/openapi.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/openweathermap.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/openweathermap.cpython-312.pyc new file mode 100644 index 00000000..99f329af Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/openweathermap.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/outline.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/outline.cpython-312.pyc new file mode 100644 index 00000000..0e48520f Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/outline.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/portkey.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/portkey.cpython-312.pyc new file mode 100644 index 00000000..5c4a035c Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/portkey.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/powerbi.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/powerbi.cpython-312.pyc new file mode 100644 index 00000000..4305292c Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/powerbi.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/pubmed.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/pubmed.cpython-312.pyc new file mode 100644 index 00000000..2d9e4af0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/pubmed.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/python.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/python.cpython-312.pyc new file mode 100644 index 00000000..ff58e3ce Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/python.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/reddit_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/reddit_search.cpython-312.pyc new file mode 100644 index 00000000..b02a12ed Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/reddit_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/redis.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/redis.cpython-312.pyc new file mode 100644 index 00000000..93a3e133 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/redis.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/requests.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/requests.cpython-312.pyc new file mode 100644 index 00000000..fddbad70 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/requests.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/scenexplain.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/scenexplain.cpython-312.pyc new file mode 100644 index 00000000..452531f8 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/utilities/__pycache__/scenexplain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/searchapi.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/searchapi.cpython-312.pyc new file mode 100644 index 00000000..e439268a Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/searchapi.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/searx_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/searx_search.cpython-312.pyc new file mode 100644 index 00000000..b0a8e8c8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/searx_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/serpapi.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/serpapi.cpython-312.pyc new file mode 100644 index 00000000..22b18fda Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/serpapi.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/spark_sql.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/spark_sql.cpython-312.pyc new file mode 100644 index 00000000..245d92f5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/spark_sql.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/sql_database.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/sql_database.cpython-312.pyc new file mode 100644 index 00000000..d1aae316 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/sql_database.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/stackexchange.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/stackexchange.cpython-312.pyc new file mode 100644 index 00000000..310e3a4c Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/stackexchange.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/steam.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/steam.cpython-312.pyc new file mode 100644 index 00000000..9d3730f9 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/steam.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/tavily_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/tavily_search.cpython-312.pyc new file mode 100644 index 00000000..6793f0a3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/tavily_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/tensorflow_datasets.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/tensorflow_datasets.cpython-312.pyc new file mode 100644 index 00000000..03433058 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/tensorflow_datasets.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/twilio.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/twilio.cpython-312.pyc new file mode 100644 index 00000000..03bdf714 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/twilio.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain/utilities/__pycache__/vertexai.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/vertexai.cpython-312.pyc new file mode 100644 index 00000000..84b69cd4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/vertexai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/wikipedia.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/wikipedia.cpython-312.pyc new file mode 100644 index 00000000..98a4f6c2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/wikipedia.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/wolfram_alpha.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/wolfram_alpha.cpython-312.pyc new file mode 100644 index 00000000..f021f7c8 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/wolfram_alpha.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/__pycache__/zapier.cpython-312.pyc b/venv/Lib/site-packages/langchain/utilities/__pycache__/zapier.cpython-312.pyc new file mode 100644 index 00000000..dd7c246c Binary files /dev/null and b/venv/Lib/site-packages/langchain/utilities/__pycache__/zapier.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utilities/alpha_vantage.py b/venv/Lib/site-packages/langchain/utilities/alpha_vantage.py new file mode 100644 index 00000000..e9560ea5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/alpha_vantage.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import AlphaVantageAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AlphaVantageAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AlphaVantageAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/anthropic.py b/venv/Lib/site-packages/langchain/utilities/anthropic.py new file mode 100644 index 00000000..994bf22f --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/anthropic.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.anthropic import ( + get_num_tokens_anthropic, + get_token_ids_anthropic, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
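+# Note: the langchain_community import above is guarded by TYPE_CHECKING, so
+# type checkers can resolve both helpers while langchain_community is only
+# imported at runtime when one of them is actually accessed.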
+DEPRECATED_LOOKUP = { + "get_num_tokens_anthropic": "langchain_community.utilities.anthropic", + "get_token_ids_anthropic": "langchain_community.utilities.anthropic", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "get_num_tokens_anthropic", + "get_token_ids_anthropic", +] diff --git a/venv/Lib/site-packages/langchain/utilities/apify.py b/venv/Lib/site-packages/langchain/utilities/apify.py new file mode 100644 index 00000000..d906ffad --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/apify.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import ApifyWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ApifyWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ApifyWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/arcee.py b/venv/Lib/site-packages/langchain/utilities/arcee.py new file mode 100644 index 00000000..db6adef3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/arcee.py @@ -0,0 +1,45 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import ArceeWrapper + from langchain_community.utilities.arcee import ( + ArceeDocument, + ArceeDocumentAdapter, + ArceeDocumentSource, + ArceeRoute, + DALMFilter, + DALMFilterType, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ArceeRoute": "langchain_community.utilities.arcee", + "DALMFilterType": "langchain_community.utilities.arcee", + "DALMFilter": "langchain_community.utilities.arcee", + "ArceeDocumentSource": "langchain_community.utilities.arcee", + "ArceeDocument": "langchain_community.utilities.arcee", + "ArceeDocumentAdapter": "langchain_community.utilities.arcee", + "ArceeWrapper": "langchain_community.utilities", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArceeRoute", + "DALMFilterType", + "DALMFilter", + "ArceeDocumentSource", + "ArceeDocument", + "ArceeDocumentAdapter", + "ArceeWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/arxiv.py b/venv/Lib/site-packages/langchain/utilities/arxiv.py new file mode 100644 index 00000000..d3f3faae --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/arxiv.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import ArxivAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"ArxivAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ArxivAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/asyncio.py b/venv/Lib/site-packages/langchain/utilities/asyncio.py new file mode 100644 index 00000000..d7db052e --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/asyncio.py @@ -0,0 +1,11 @@ +"""Shims for asyncio features that may be missing from older python versions""" + +import sys + +if sys.version_info[:2] < (3, 11): + from async_timeout import timeout as asyncio_timeout +else: + from asyncio import timeout as asyncio_timeout + + +__all__ = ["asyncio_timeout"] diff --git a/venv/Lib/site-packages/langchain/utilities/awslambda.py b/venv/Lib/site-packages/langchain/utilities/awslambda.py new file mode 100644 index 00000000..6d952017 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/awslambda.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import LambdaWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"LambdaWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LambdaWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/bibtex.py b/venv/Lib/site-packages/langchain/utilities/bibtex.py new file mode 100644 index 00000000..0f435ec1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/bibtex.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import BibtexparserWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"BibtexparserWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BibtexparserWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/bing_search.py b/venv/Lib/site-packages/langchain/utilities/bing_search.py new file mode 100644 index 00000000..895a5f4c --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/bing_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import BingSearchAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"BingSearchAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BingSearchAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/brave_search.py b/venv/Lib/site-packages/langchain/utilities/brave_search.py new file mode 100644 index 00000000..73eef15f --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/brave_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import BraveSearchWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"BraveSearchWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BraveSearchWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/clickup.py b/venv/Lib/site-packages/langchain/utilities/clickup.py new file mode 100644 index 00000000..d7a6d126 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/clickup.py @@ -0,0 +1,45 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.clickup import ( + ClickupAPIWrapper, + Component, + CUList, + Member, + Space, + Task, + Team, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "Component": "langchain_community.utilities.clickup", + "Task": "langchain_community.utilities.clickup", + "CUList": "langchain_community.utilities.clickup", + "Member": "langchain_community.utilities.clickup", + "Team": "langchain_community.utilities.clickup", + "Space": "langchain_community.utilities.clickup", + "ClickupAPIWrapper": "langchain_community.utilities.clickup", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Component", + "Task", + "CUList", + "Member", + "Team", + "Space", + "ClickupAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/dalle_image_generator.py b/venv/Lib/site-packages/langchain/utilities/dalle_image_generator.py new file mode 100644 index 00000000..b62c1034 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/dalle_image_generator.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "DallEAPIWrapper": "langchain_community.utilities.dalle_image_generator" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DallEAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/dataforseo_api_search.py b/venv/Lib/site-packages/langchain/utilities/dataforseo_api_search.py new file mode 100644 index 00000000..14345a56 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/dataforseo_api_search.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DataForSeoAPIWrapper": "langchain_community.utilities.dataforseo_api_search" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DataForSeoAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/duckduckgo_search.py b/venv/Lib/site-packages/langchain/utilities/duckduckgo_search.py new file mode 100644 index 00000000..d7741e9a --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/duckduckgo_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import DuckDuckGoSearchAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DuckDuckGoSearchAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DuckDuckGoSearchAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/github.py b/venv/Lib/site-packages/langchain/utilities/github.py new file mode 100644 index 00000000..1f4e18f1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/github.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.github import GitHubAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
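+# Migration sketch (the old path keeps working but is expected to warn):
+#   from langchain.utilities.github import GitHubAPIWrapper            # deprecated
+#   from langchain_community.utilities.github import GitHubAPIWrapper  # preferred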
+DEPRECATED_LOOKUP = {"GitHubAPIWrapper": "langchain_community.utilities.github"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GitHubAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/gitlab.py b/venv/Lib/site-packages/langchain/utilities/gitlab.py new file mode 100644 index 00000000..eb1723ac --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/gitlab.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.gitlab import GitLabAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GitLabAPIWrapper": "langchain_community.utilities.gitlab"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GitLabAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/golden_query.py b/venv/Lib/site-packages/langchain/utilities/golden_query.py new file mode 100644 index 00000000..85f04b80 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/golden_query.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import GoldenQueryAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoldenQueryAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoldenQueryAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/google_finance.py b/venv/Lib/site-packages/langchain/utilities/google_finance.py new file mode 100644 index 00000000..a29a6bce --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/google_finance.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import GoogleFinanceAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GoogleFinanceAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleFinanceAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/google_jobs.py b/venv/Lib/site-packages/langchain/utilities/google_jobs.py new file mode 100644 index 00000000..11ce337d --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/google_jobs.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import GoogleJobsAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoogleJobsAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleJobsAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/google_lens.py b/venv/Lib/site-packages/langchain/utilities/google_lens.py new file mode 100644 index 00000000..0e7dc4b4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/google_lens.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import GoogleLensAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoogleLensAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleLensAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/google_places_api.py b/venv/Lib/site-packages/langchain/utilities/google_places_api.py new file mode 100644 index 00000000..92059699 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/google_places_api.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import GooglePlacesAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GooglePlacesAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GooglePlacesAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/google_scholar.py b/venv/Lib/site-packages/langchain/utilities/google_scholar.py new file mode 100644 index 00000000..bc70bfc1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/google_scholar.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import GoogleScholarAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoogleScholarAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleScholarAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/google_search.py b/venv/Lib/site-packages/langchain/utilities/google_search.py new file mode 100644 index 00000000..970d5d28 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/google_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import GoogleSearchAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoogleSearchAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleSearchAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/google_serper.py b/venv/Lib/site-packages/langchain/utilities/google_serper.py new file mode 100644 index 00000000..d751672a --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/google_serper.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import GoogleSerperAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"GoogleSerperAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleSerperAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/google_trends.py b/venv/Lib/site-packages/langchain/utilities/google_trends.py new file mode 100644 index 00000000..86a5bd78 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/google_trends.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import GoogleTrendsAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GoogleTrendsAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GoogleTrendsAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/graphql.py b/venv/Lib/site-packages/langchain/utilities/graphql.py new file mode 100644 index 00000000..d75ddb63 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/graphql.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import GraphQLAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"GraphQLAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "GraphQLAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/jira.py b/venv/Lib/site-packages/langchain/utilities/jira.py new file mode 100644 index 00000000..65858806 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/jira.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import JiraAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"JiraAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "JiraAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/loading.py b/venv/Lib/site-packages/langchain/utilities/loading.py new file mode 100644 index 00000000..a2337b6d --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/loading.py @@ -0,0 +1,4 @@ +from langchain_core.utils.loading import try_load_from_hub + +# For backwards compatibility +__all__ = ["try_load_from_hub"] diff --git a/venv/Lib/site-packages/langchain/utilities/max_compute.py b/venv/Lib/site-packages/langchain/utilities/max_compute.py new file mode 100644 index 00000000..7797a22f --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/max_compute.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import MaxComputeAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MaxComputeAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MaxComputeAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/merriam_webster.py b/venv/Lib/site-packages/langchain/utilities/merriam_webster.py new file mode 100644 index 00000000..e8a35960 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/merriam_webster.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import MerriamWebsterAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MerriamWebsterAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MerriamWebsterAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/metaphor_search.py b/venv/Lib/site-packages/langchain/utilities/metaphor_search.py new file mode 100644 index 00000000..9534267c --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/metaphor_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import MetaphorSearchAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"MetaphorSearchAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MetaphorSearchAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/nasa.py b/venv/Lib/site-packages/langchain/utilities/nasa.py new file mode 100644 index 00000000..cf7c48c0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/nasa.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import NasaAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NasaAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NasaAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/opaqueprompts.py b/venv/Lib/site-packages/langchain/utilities/opaqueprompts.py new file mode 100644 index 00000000..07780a9d --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/opaqueprompts.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.opaqueprompts import desanitize, sanitize + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "sanitize": "langchain_community.utilities.opaqueprompts", + "desanitize": "langchain_community.utilities.opaqueprompts", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "sanitize", + "desanitize", +] diff --git a/venv/Lib/site-packages/langchain/utilities/openapi.py b/venv/Lib/site-packages/langchain/utilities/openapi.py new file mode 100644 index 00000000..90e5abea --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/openapi.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools import OpenAPISpec + from langchain_community.utilities.openapi import HTTPVerb + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "HTTPVerb": "langchain_community.utilities.openapi", + "OpenAPISpec": "langchain_community.tools", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HTTPVerb", + "OpenAPISpec", +] diff --git a/venv/Lib/site-packages/langchain/utilities/openweathermap.py b/venv/Lib/site-packages/langchain/utilities/openweathermap.py new file mode 100644 index 00000000..c0c42110 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/openweathermap.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import OpenWeatherMapAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OpenWeatherMapAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenWeatherMapAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/outline.py b/venv/Lib/site-packages/langchain/utilities/outline.py new file mode 100644 index 00000000..0a86724e --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/outline.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import OutlineAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OutlineAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OutlineAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/portkey.py b/venv/Lib/site-packages/langchain/utilities/portkey.py new file mode 100644 index 00000000..62a786dc --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/portkey.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import Portkey + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Portkey": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Portkey", +] diff --git a/venv/Lib/site-packages/langchain/utilities/powerbi.py b/venv/Lib/site-packages/langchain/utilities/powerbi.py new file mode 100644 index 00000000..5220d8ee --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/powerbi.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import PowerBIDataset + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PowerBIDataset": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PowerBIDataset", +] diff --git a/venv/Lib/site-packages/langchain/utilities/pubmed.py b/venv/Lib/site-packages/langchain/utilities/pubmed.py new file mode 100644 index 00000000..f97fe468 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/pubmed.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import PubMedAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PubMedAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PubMedAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/python.py b/venv/Lib/site-packages/langchain/utilities/python.py new file mode 100644 index 00000000..2a077fcf --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/python.py @@ -0,0 +1,19 @@ +"""For backwards compatibility.""" + +from typing import Any + +from langchain._api import create_importer + +# Code has been removed from the community package as well. +# We'll proxy to community package, which will raise an appropriate exception, +# but we'll not include this in __all__, so it won't be listed as importable. + +_importer = create_importer( + __package__, + deprecated_lookups={"PythonREPL": "langchain_community.utilities.python"}, +) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _importer(name) diff --git a/venv/Lib/site-packages/langchain/utilities/reddit_search.py b/venv/Lib/site-packages/langchain/utilities/reddit_search.py new file mode 100644 index 00000000..fa8288ad --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/reddit_search.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "RedditSearchAPIWrapper": "langchain_community.utilities.reddit_search" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RedditSearchAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/redis.py b/venv/Lib/site-packages/langchain/utilities/redis.py new file mode 100644 index 00000000..db58dae1 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/redis.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.redis import ( + TokenEscaper, + check_redis_module_exist, + get_client, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "TokenEscaper": "langchain_community.utilities.redis", + "check_redis_module_exist": "langchain_community.utilities.redis", + "get_client": "langchain_community.utilities.redis", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TokenEscaper", + "check_redis_module_exist", + "get_client", +] diff --git a/venv/Lib/site-packages/langchain/utilities/requests.py b/venv/Lib/site-packages/langchain/utilities/requests.py new file mode 100644 index 00000000..4acff814 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/requests.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import Requests, RequestsWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "Requests": "langchain_community.utilities", + "RequestsWrapper": "langchain_community.utilities", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Requests", + "RequestsWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/scenexplain.py b/venv/Lib/site-packages/langchain/utilities/scenexplain.py new file mode 100644 index 00000000..9c1c52c0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/scenexplain.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import SceneXplainAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SceneXplainAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SceneXplainAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/searchapi.py b/venv/Lib/site-packages/langchain/utilities/searchapi.py new file mode 100644 index 00000000..77366ca0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/searchapi.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import SearchApiAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SearchApiAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearchApiAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/searx_search.py b/venv/Lib/site-packages/langchain/utilities/searx_search.py new file mode 100644 index 00000000..72131c3e --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/searx_search.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import SearxSearchWrapper + from langchain_community.utilities.searx_search import SearxResults + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "SearxResults": "langchain_community.utilities.searx_search", + "SearxSearchWrapper": "langchain_community.utilities", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearxResults", + "SearxSearchWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/serpapi.py b/venv/Lib/site-packages/langchain/utilities/serpapi.py new file mode 100644 index 00000000..eb3e44b8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/serpapi.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import SerpAPIWrapper + from langchain_community.utilities.serpapi import HiddenPrints + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "HiddenPrints": "langchain_community.utilities.serpapi", + "SerpAPIWrapper": "langchain_community.utilities", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "HiddenPrints", + "SerpAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/spark_sql.py b/venv/Lib/site-packages/langchain/utilities/spark_sql.py new file mode 100644 index 00000000..2c747025 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/spark_sql.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import SparkSQL + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SparkSQL": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SparkSQL", +] diff --git a/venv/Lib/site-packages/langchain/utilities/sql_database.py b/venv/Lib/site-packages/langchain/utilities/sql_database.py new file mode 100644 index 00000000..c5648057 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/sql_database.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import SQLDatabase + from langchain_community.utilities.sql_database import truncate_word + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "truncate_word": "langchain_community.utilities.sql_database", + "SQLDatabase": "langchain_community.utilities", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "truncate_word", + "SQLDatabase", +] diff --git a/venv/Lib/site-packages/langchain/utilities/stackexchange.py b/venv/Lib/site-packages/langchain/utilities/stackexchange.py new file mode 100644 index 00000000..bee2a8c5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/stackexchange.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import StackExchangeAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"StackExchangeAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "StackExchangeAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/steam.py b/venv/Lib/site-packages/langchain/utilities/steam.py new file mode 100644 index 00000000..55906f92 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/steam.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import SteamWebAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SteamWebAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SteamWebAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/tavily_search.py b/venv/Lib/site-packages/langchain/utilities/tavily_search.py new file mode 100644 index 00000000..e3bae586 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/tavily_search.py @@ -0,0 +1,25 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "TavilySearchAPIWrapper": "langchain_community.utilities.tavily_search" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TavilySearchAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/tensorflow_datasets.py b/venv/Lib/site-packages/langchain/utilities/tensorflow_datasets.py new file mode 100644 index 00000000..a94fc1f6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/tensorflow_datasets.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import TensorflowDatasets + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"TensorflowDatasets": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TensorflowDatasets", +] diff --git a/venv/Lib/site-packages/langchain/utilities/twilio.py b/venv/Lib/site-packages/langchain/utilities/twilio.py new file mode 100644 index 00000000..9c91b51d --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/twilio.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import TwilioAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"TwilioAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TwilioAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/vertexai.py b/venv/Lib/site-packages/langchain/utilities/vertexai.py new file mode 100644 index 00000000..6b9fca5c --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/vertexai.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.vertexai import ( + create_retry_decorator, + get_client_info, + init_vertexai, + raise_vertex_import_error, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "create_retry_decorator": "langchain_community.utilities.vertexai", + "raise_vertex_import_error": "langchain_community.utilities.vertexai", + "init_vertexai": "langchain_community.utilities.vertexai", + "get_client_info": "langchain_community.utilities.vertexai", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "create_retry_decorator", + "raise_vertex_import_error", + "init_vertexai", + "get_client_info", +] diff --git a/venv/Lib/site-packages/langchain/utilities/wikipedia.py b/venv/Lib/site-packages/langchain/utilities/wikipedia.py new file mode 100644 index 00000000..5160839a --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/wikipedia.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import WikipediaAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"WikipediaAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WikipediaAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/wolfram_alpha.py b/venv/Lib/site-packages/langchain/utilities/wolfram_alpha.py new file mode 100644 index 00000000..827075ee --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/wolfram_alpha.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import WolframAlphaAPIWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"WolframAlphaAPIWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "WolframAlphaAPIWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utilities/zapier.py b/venv/Lib/site-packages/langchain/utilities/zapier.py new file mode 100644 index 00000000..71c9afde --- /dev/null +++ b/venv/Lib/site-packages/langchain/utilities/zapier.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities import ZapierNLAWrapper + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ZapierNLAWrapper": "langchain_community.utilities"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ZapierNLAWrapper", +] diff --git a/venv/Lib/site-packages/langchain/utils/__init__.py b/venv/Lib/site-packages/langchain/utils/__init__.py new file mode 100644 index 00000000..edd3840a --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/__init__.py @@ -0,0 +1,77 @@ +""" +**Utility functions** for LangChain. + +These functions do not depend on any other LangChain module. +""" + +from typing import TYPE_CHECKING, Any + +from langchain_core.utils import ( + comma_list, + get_from_dict_or_env, + get_from_env, + stringify_dict, + stringify_value, +) +from langchain_core.utils.formatting import StrictFormatter, formatter +from langchain_core.utils.input import ( + get_bolded_text, + get_color_mapping, + get_colored_text, + print_text, +) +from langchain_core.utils.utils import ( + check_package_version, + convert_to_secret_str, + get_pydantic_field_names, + guard_import, + mock_now, + raise_for_status_with_text, + xor_args, +) + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utils.math import ( + cosine_similarity, + cosine_similarity_top_k, + ) + +# Not deprecated right now because we will likely need to move these functions +# back into langchain (as long as we're OK with the dependency on numpy). 
+_MODULE_LOOKUP = { + "cosine_similarity": "langchain_community.utils.math", + "cosine_similarity_top_k": "langchain_community.utils.math", +} + +_import_attribute = create_importer(__package__, module_lookup=_MODULE_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "StrictFormatter", + "check_package_version", + "comma_list", + "convert_to_secret_str", + "cosine_similarity", + "cosine_similarity_top_k", + "get_bolded_text", + "get_color_mapping", + "get_colored_text", + "get_from_dict_or_env", + "get_from_env", + "formatter", + "get_pydantic_field_names", + "guard_import", + "mock_now", + "print_text", + "raise_for_status_with_text", + "stringify_dict", + "stringify_value", + "xor_args", +] diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..28bbc4e3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/aiter.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/aiter.cpython-312.pyc new file mode 100644 index 00000000..59097643 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/aiter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/env.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/env.cpython-312.pyc new file mode 100644 index 00000000..13f921a1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/env.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/ernie_functions.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/ernie_functions.cpython-312.pyc new file mode 100644 index 00000000..63c079c0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/ernie_functions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/formatting.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/formatting.cpython-312.pyc new file mode 100644 index 00000000..9d38a5d6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/formatting.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/html.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/html.cpython-312.pyc new file mode 100644 index 00000000..2619a3cd Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/html.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/input.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/input.cpython-312.pyc new file mode 100644 index 00000000..27149650 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/input.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/iter.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/iter.cpython-312.pyc new file mode 100644 index 00000000..c03d2140 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/iter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/json_schema.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/json_schema.cpython-312.pyc new file mode 100644 
index 00000000..d0426c00 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/json_schema.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/loading.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/loading.cpython-312.pyc new file mode 100644 index 00000000..40286284 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/loading.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/math.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/math.cpython-312.pyc new file mode 100644 index 00000000..fafd8ecf Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/math.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/openai.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/openai.cpython-312.pyc new file mode 100644 index 00000000..c0ea3b15 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/openai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/openai_functions.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/openai_functions.cpython-312.pyc new file mode 100644 index 00000000..1cb75b79 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/openai_functions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/pydantic.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/pydantic.cpython-312.pyc new file mode 100644 index 00000000..02102719 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/pydantic.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/strings.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/strings.cpython-312.pyc new file mode 100644 index 00000000..1a548f66 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/strings.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain/utils/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..169070b1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/utils/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/utils/aiter.py b/venv/Lib/site-packages/langchain/utils/aiter.py new file mode 100644 index 00000000..cab956b5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/aiter.py @@ -0,0 +1,3 @@ +from langchain_core.utils.aiter import NoLock, Tee, py_anext + +__all__ = ["py_anext", "NoLock", "Tee"] diff --git a/venv/Lib/site-packages/langchain/utils/env.py b/venv/Lib/site-packages/langchain/utils/env.py new file mode 100644 index 00000000..b1e212d2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/env.py @@ -0,0 +1,3 @@ +from langchain_core.utils.env import get_from_dict_or_env, get_from_env + +__all__ = ["get_from_dict_or_env", "get_from_env"] diff --git a/venv/Lib/site-packages/langchain/utils/ernie_functions.py b/venv/Lib/site-packages/langchain/utils/ernie_functions.py new file mode 100644 index 00000000..251d5fb2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/ernie_functions.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utils.ernie_functions import ( + 
FunctionDescription, + ToolDescription, + convert_pydantic_to_ernie_function, + convert_pydantic_to_ernie_tool, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "FunctionDescription": "langchain_community.utils.ernie_functions", + "ToolDescription": "langchain_community.utils.ernie_functions", + "convert_pydantic_to_ernie_function": "langchain_community.utils.ernie_functions", + "convert_pydantic_to_ernie_tool": "langchain_community.utils.ernie_functions", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FunctionDescription", + "ToolDescription", + "convert_pydantic_to_ernie_function", + "convert_pydantic_to_ernie_tool", +] diff --git a/venv/Lib/site-packages/langchain/utils/formatting.py b/venv/Lib/site-packages/langchain/utils/formatting.py new file mode 100644 index 00000000..212bff83 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/formatting.py @@ -0,0 +1,3 @@ +from langchain_core.utils.formatting import StrictFormatter + +__all__ = ["StrictFormatter"] diff --git a/venv/Lib/site-packages/langchain/utils/html.py b/venv/Lib/site-packages/langchain/utils/html.py new file mode 100644 index 00000000..a5684b2e --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/html.py @@ -0,0 +1,19 @@ +from langchain_core.utils.html import ( + DEFAULT_LINK_REGEX, + PREFIXES_TO_IGNORE, + PREFIXES_TO_IGNORE_REGEX, + SUFFIXES_TO_IGNORE, + SUFFIXES_TO_IGNORE_REGEX, + extract_sub_links, + find_all_links, +) + +__all__ = [ + "PREFIXES_TO_IGNORE", + "SUFFIXES_TO_IGNORE", + "SUFFIXES_TO_IGNORE_REGEX", + "PREFIXES_TO_IGNORE_REGEX", + "DEFAULT_LINK_REGEX", + "find_all_links", + "extract_sub_links", +] diff --git a/venv/Lib/site-packages/langchain/utils/input.py b/venv/Lib/site-packages/langchain/utils/input.py new file mode 100644 index 00000000..563cc506 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/input.py @@ -0,0 +1,8 @@ +from langchain_core.utils.input import ( + get_bolded_text, + get_color_mapping, + get_colored_text, + print_text, +) + +__all__ = ["get_color_mapping", "get_colored_text", "get_bolded_text", "print_text"] diff --git a/venv/Lib/site-packages/langchain/utils/iter.py b/venv/Lib/site-packages/langchain/utils/iter.py new file mode 100644 index 00000000..a4059721 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/iter.py @@ -0,0 +1,3 @@ +from langchain_core.utils.iter import NoLock, Tee, batch_iterate, tee_peer + +__all__ = ["NoLock", "tee_peer", "Tee", "batch_iterate"] diff --git a/venv/Lib/site-packages/langchain/utils/json_schema.py b/venv/Lib/site-packages/langchain/utils/json_schema.py new file mode 100644 index 00000000..0e61b7be --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/json_schema.py @@ -0,0 +1,13 @@ +from langchain_core.utils.json_schema import ( + _dereference_refs_helper, + _infer_skip_keys, + _retrieve_ref, + dereference_refs, +) + +__all__ = [ + "_retrieve_ref", + "_dereference_refs_helper", + "_infer_skip_keys", + "dereference_refs", +] diff --git a/venv/Lib/site-packages/langchain/utils/loading.py b/venv/Lib/site-packages/langchain/utils/loading.py new file mode 100644 index 00000000..b048d383 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/loading.py @@ -0,0 +1,3 @@ +from langchain_core.utils.loading import 
try_load_from_hub + +__all__ = ["try_load_from_hub"] diff --git a/venv/Lib/site-packages/langchain/utils/math.py b/venv/Lib/site-packages/langchain/utils/math.py new file mode 100644 index 00000000..da1e6ce3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/math.py @@ -0,0 +1,32 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utils.math import ( + cosine_similarity, + cosine_similarity_top_k, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +# Not marked as deprecated since we may want to move the functionality +# into langchain as long as we're OK with numpy as the dependency. +_MODULE_LOOKUP = { + "cosine_similarity": "langchain_community.utils.math", + "cosine_similarity_top_k": "langchain_community.utils.math", +} + +_import_attribute = create_importer(__package__, module_lookup=_MODULE_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "cosine_similarity", + "cosine_similarity_top_k", +] diff --git a/venv/Lib/site-packages/langchain/utils/openai.py b/venv/Lib/site-packages/langchain/utils/openai.py new file mode 100644 index 00000000..2b159aec --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/openai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utils.openai import is_openai_v1 + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"is_openai_v1": "langchain_community.utils.openai"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "is_openai_v1", +] diff --git a/venv/Lib/site-packages/langchain/utils/openai_functions.py b/venv/Lib/site-packages/langchain/utils/openai_functions.py new file mode 100644 index 00000000..6e093c35 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/openai_functions.py @@ -0,0 +1,13 @@ +from langchain_core.utils.function_calling import ( + FunctionDescription, + ToolDescription, + convert_pydantic_to_openai_function, + convert_pydantic_to_openai_tool, +) + +__all__ = [ + "FunctionDescription", + "ToolDescription", + "convert_pydantic_to_openai_function", + "convert_pydantic_to_openai_tool", +] diff --git a/venv/Lib/site-packages/langchain/utils/pydantic.py b/venv/Lib/site-packages/langchain/utils/pydantic.py new file mode 100644 index 00000000..a5000f23 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/pydantic.py @@ -0,0 +1,3 @@ +from langchain_core.utils.pydantic import get_pydantic_major_version + +__all__ = ["get_pydantic_major_version"] diff --git a/venv/Lib/site-packages/langchain/utils/strings.py b/venv/Lib/site-packages/langchain/utils/strings.py new file mode 100644 index 00000000..f8e88564 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/strings.py @@ -0,0 +1,3 @@ +from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value + +__all__ = ["stringify_value", "stringify_dict", "comma_list"] diff --git a/venv/Lib/site-packages/langchain/utils/utils.py b/venv/Lib/site-packages/langchain/utils/utils.py new 
file mode 100644 index 00000000..57629433 --- /dev/null +++ b/venv/Lib/site-packages/langchain/utils/utils.py @@ -0,0 +1,21 @@ +from langchain_core.utils.utils import ( + build_extra_kwargs, + check_package_version, + convert_to_secret_str, + get_pydantic_field_names, + guard_import, + mock_now, + raise_for_status_with_text, + xor_args, +) + +__all__ = [ + "xor_args", + "raise_for_status_with_text", + "mock_now", + "guard_import", + "check_package_version", + "get_pydantic_field_names", + "build_extra_kwargs", + "convert_to_secret_str", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/__init__.py b/venv/Lib/site-packages/langchain/vectorstores/__init__.py new file mode 100644 index 00000000..603421aa --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/__init__.py @@ -0,0 +1,262 @@ +"""**Vector store** stores embedded data and performs vector search. + +One of the most common ways to store and search over unstructured data is to +embed it and store the resulting embedding vectors, and then query the store +and retrieve the data that are 'most similar' to the embedded query. + +**Class hierarchy:** + +.. code-block:: + + VectorStore --> <name> # Examples: Annoy, FAISS, Milvus + + BaseRetriever --> VectorStoreRetriever --> <name>Retriever # Example: VespaRetriever + +**Main helpers:** + +.. code-block:: + + Embeddings, Document +""" # noqa: E501 + +from typing import TYPE_CHECKING, Any + +from langchain_core.vectorstores import VectorStore + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import ( + FAISS, + AlibabaCloudOpenSearch, + AlibabaCloudOpenSearchSettings, + AnalyticDB, + Annoy, + AstraDB, + AtlasDB, + AwaDB, + AzureCosmosDBVectorSearch, + AzureSearch, + Bagel, + Cassandra, + Chroma, + Clarifai, + Clickhouse, + ClickhouseSettings, + DashVector, + DatabricksVectorSearch, + DeepLake, + Dingo, + DocArrayHnswSearch, + DocArrayInMemorySearch, + DuckDB, + EcloudESVectorStore, + ElasticKnnSearch, + ElasticsearchStore, + ElasticVectorSearch, + Epsilla, + Hologres, + LanceDB, + LLMRails, + Marqo, + MatchingEngine, + Meilisearch, + Milvus, + MomentoVectorIndex, + MongoDBAtlasVectorSearch, + MyScale, + MyScaleSettings, + Neo4jVector, + NeuralDBClientVectorStore, + NeuralDBVectorStore, + OpenSearchVectorSearch, + PGEmbedding, + PGVector, + Pinecone, + Qdrant, + Redis, + Rockset, + ScaNN, + SemaDB, + SingleStoreDB, + SKLearnVectorStore, + SQLiteVSS, + StarRocks, + SupabaseVectorStore, + Tair, + TencentVectorDB, + Tigris, + TileDB, + TimescaleVector, + Typesense, + USearch, + Vald, + Vearch, + Vectara, + VespaStore, + Weaviate, + Yellowbrick, + ZepVectorStore, + Zilliz, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports.
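+# Editorial note (not part of the upstream file): every entry below targets
+# langchain_community.vectorstores; only the VectorStore base class is
+# imported eagerly (from langchain_core above) rather than proxied. So, for
+# example,
+#     from langchain.vectorstores import FAISS  # works, with a warning
+# resolves to langchain_community.vectorstores.FAISS at runtime.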
+DEPRECATED_LOOKUP = { + "AlibabaCloudOpenSearch": "langchain_community.vectorstores", + "AlibabaCloudOpenSearchSettings": "langchain_community.vectorstores", + "AnalyticDB": "langchain_community.vectorstores", + "Annoy": "langchain_community.vectorstores", + "AstraDB": "langchain_community.vectorstores", + "AtlasDB": "langchain_community.vectorstores", + "AwaDB": "langchain_community.vectorstores", + "AzureCosmosDBVectorSearch": "langchain_community.vectorstores", + "AzureSearch": "langchain_community.vectorstores", + "Bagel": "langchain_community.vectorstores", + "Cassandra": "langchain_community.vectorstores", + "Chroma": "langchain_community.vectorstores", + "Clarifai": "langchain_community.vectorstores", + "Clickhouse": "langchain_community.vectorstores", + "ClickhouseSettings": "langchain_community.vectorstores", + "DashVector": "langchain_community.vectorstores", + "DatabricksVectorSearch": "langchain_community.vectorstores", + "DeepLake": "langchain_community.vectorstores", + "Dingo": "langchain_community.vectorstores", + "DocArrayHnswSearch": "langchain_community.vectorstores", + "DocArrayInMemorySearch": "langchain_community.vectorstores", + "DuckDB": "langchain_community.vectorstores", + "EcloudESVectorStore": "langchain_community.vectorstores", + "ElasticKnnSearch": "langchain_community.vectorstores", + "ElasticsearchStore": "langchain_community.vectorstores", + "ElasticVectorSearch": "langchain_community.vectorstores", + "Epsilla": "langchain_community.vectorstores", + "FAISS": "langchain_community.vectorstores", + "Hologres": "langchain_community.vectorstores", + "LanceDB": "langchain_community.vectorstores", + "LLMRails": "langchain_community.vectorstores", + "Marqo": "langchain_community.vectorstores", + "MatchingEngine": "langchain_community.vectorstores", + "Meilisearch": "langchain_community.vectorstores", + "Milvus": "langchain_community.vectorstores", + "MomentoVectorIndex": "langchain_community.vectorstores", + "MongoDBAtlasVectorSearch": "langchain_community.vectorstores", + "MyScale": "langchain_community.vectorstores", + "MyScaleSettings": "langchain_community.vectorstores", + "Neo4jVector": "langchain_community.vectorstores", + "NeuralDBClientVectorStore": "langchain_community.vectorstores", + "NeuralDBVectorStore": "langchain_community.vectorstores", + "NEuralDBVectorStore": "langchain_community.vectorstores", + "OpenSearchVectorSearch": "langchain_community.vectorstores", + "PGEmbedding": "langchain_community.vectorstores", + "PGVector": "langchain_community.vectorstores", + "Pinecone": "langchain_community.vectorstores", + "Qdrant": "langchain_community.vectorstores", + "Redis": "langchain_community.vectorstores", + "Rockset": "langchain_community.vectorstores", + "ScaNN": "langchain_community.vectorstores", + "SemaDB": "langchain_community.vectorstores", + "SingleStoreDB": "langchain_community.vectorstores", + "SKLearnVectorStore": "langchain_community.vectorstores", + "SQLiteVSS": "langchain_community.vectorstores", + "StarRocks": "langchain_community.vectorstores", + "SupabaseVectorStore": "langchain_community.vectorstores", + "Tair": "langchain_community.vectorstores", + "TencentVectorDB": "langchain_community.vectorstores", + "Tigris": "langchain_community.vectorstores", + "TileDB": "langchain_community.vectorstores", + "TimescaleVector": "langchain_community.vectorstores", + "Typesense": "langchain_community.vectorstores", + "USearch": "langchain_community.vectorstores", + "Vald": "langchain_community.vectorstores", + "Vearch": 
"langchain_community.vectorstores", + "Vectara": "langchain_community.vectorstores", + "VespaStore": "langchain_community.vectorstores", + "Weaviate": "langchain_community.vectorstores", + "Yellowbrick": "langchain_community.vectorstores", + "ZepVectorStore": "langchain_community.vectorstores", + "Zilliz": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AlibabaCloudOpenSearch", + "AlibabaCloudOpenSearchSettings", + "AnalyticDB", + "Annoy", + "AstraDB", + "AtlasDB", + "AwaDB", + "AzureCosmosDBVectorSearch", + "AzureSearch", + "Bagel", + "Cassandra", + "Chroma", + "Clarifai", + "Clickhouse", + "ClickhouseSettings", + "DashVector", + "DatabricksVectorSearch", + "DeepLake", + "Dingo", + "DocArrayHnswSearch", + "DocArrayInMemorySearch", + "DuckDB", + "EcloudESVectorStore", + "ElasticKnnSearch", + "ElasticsearchStore", + "ElasticVectorSearch", + "Epsilla", + "FAISS", + "Hologres", + "LanceDB", + "LLMRails", + "Marqo", + "MatchingEngine", + "Meilisearch", + "Milvus", + "MomentoVectorIndex", + "MongoDBAtlasVectorSearch", + "MyScale", + "MyScaleSettings", + "Neo4jVector", + "NeuralDBClientVectorStore", + "NeuralDBVectorStore", + "OpenSearchVectorSearch", + "PGEmbedding", + "PGVector", + "Pinecone", + "Qdrant", + "Redis", + "Rockset", + "ScaNN", + "SemaDB", + "SingleStoreDB", + "SKLearnVectorStore", + "SQLiteVSS", + "StarRocks", + "SupabaseVectorStore", + "Tair", + "TencentVectorDB", + "Tigris", + "TileDB", + "TimescaleVector", + "Typesense", + "USearch", + "Vald", + "Vearch", + "Vectara", + "VectorStore", + "VespaStore", + "Weaviate", + "Yellowbrick", + "ZepVectorStore", + "Zilliz", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..0137ac50 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/alibabacloud_opensearch.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/alibabacloud_opensearch.cpython-312.pyc new file mode 100644 index 00000000..64f047e6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/alibabacloud_opensearch.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/analyticdb.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/analyticdb.cpython-312.pyc new file mode 100644 index 00000000..8e4787b3 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/analyticdb.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/annoy.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/annoy.cpython-312.pyc new file mode 100644 index 00000000..197dc599 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/annoy.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/astradb.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/astradb.cpython-312.pyc new file mode 100644 index 00000000..8f0619fe Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/astradb.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/atlas.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/atlas.cpython-312.pyc new file mode 100644 index 00000000..55996acc Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/atlas.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/awadb.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/awadb.cpython-312.pyc new file mode 100644 index 00000000..3c751f50 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/awadb.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/azure_cosmos_db.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/azure_cosmos_db.cpython-312.pyc new file mode 100644 index 00000000..3291080c Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/azure_cosmos_db.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/azuresearch.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/azuresearch.cpython-312.pyc new file mode 100644 index 00000000..904aa5a5 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/azuresearch.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/bageldb.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/bageldb.cpython-312.pyc new file mode 100644 index 00000000..1914dfb2 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/bageldb.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/baiducloud_vector_search.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/baiducloud_vector_search.cpython-312.pyc new file mode 100644 index 00000000..78e69a7b Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/baiducloud_vector_search.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..c5af68f0 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/cassandra.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/cassandra.cpython-312.pyc new file mode 100644 index 00000000..4ea12c51 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/cassandra.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/chroma.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/chroma.cpython-312.pyc new file mode 100644 index 00000000..18435c44 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/chroma.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/__pycache__/clarifai.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/clarifai.cpython-312.pyc new file mode 100644 index 00000000..ac4dbbf4 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain/vectorstores/__pycache__/clarifai.cpython-312.pyc differ
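Each of the `venv/Lib/site-packages/langchain/vectorstores/*.py` files added below is the same thin deprecation shim: a `DEPRECATED_LOOKUP` table mapping an old attribute name to the `langchain_community` module that now owns it, plus a module-level `__getattr__` (PEP 562) that defers the lookup to langchain's `create_importer`. A minimal sketch of that pattern, re-implemented here without `create_importer` (whose exact warning and optional-import handling is langchain-internal, so this stand-in is illustrative only; the `FAISS` mapping is borrowed from the `faiss.py` shim below):

```python
# Illustrative re-implementation of the deprecation-shim pattern used by
# the langchain.vectorstores modules in this diff; langchain's own
# create_importer adds extra handling (e.g. for optional dependencies).
import importlib
import warnings
from typing import Any

# Old attribute name -> module that now provides it.
DEPRECATED_LOOKUP = {"FAISS": "langchain_community.vectorstores"}


def __getattr__(name: str) -> Any:
    """PEP 562 hook: runs only when `name` isn't found in the module."""
    if name in DEPRECATED_LOOKUP:
        new_module = DEPRECATED_LOOKUP[name]
        warnings.warn(
            f"Importing {name} from here is deprecated; "
            f"import it from {new_module} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        # Import lazily, so the new package is only needed once a
        # deprecated name is actually requested.
        return getattr(importlib.import_module(new_module), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

With such a shim in place, `from langchain.vectorstores import FAISS` keeps working but emits a `DeprecationWarning`. The `if TYPE_CHECKING:` imports in each shim exist only so static type checkers can still resolve the re-exported names; at runtime nothing is imported until an attribute is requested.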
diff --git a/venv/Lib/site-packages/langchain/vectorstores/alibabacloud_opensearch.py b/venv/Lib/site-packages/langchain/vectorstores/alibabacloud_opensearch.py new file mode 100644 index 00000000..77773507 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/alibabacloud_opensearch.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import ( + AlibabaCloudOpenSearch, + AlibabaCloudOpenSearchSettings, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AlibabaCloudOpenSearchSettings": "langchain_community.vectorstores", + "AlibabaCloudOpenSearch": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AlibabaCloudOpenSearchSettings", + "AlibabaCloudOpenSearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/analyticdb.py b/venv/Lib/site-packages/langchain/vectorstores/analyticdb.py new file mode 100644 index 00000000..aed482a2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/analyticdb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import AnalyticDB + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AnalyticDB": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AnalyticDB", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/annoy.py b/venv/Lib/site-packages/langchain/vectorstores/annoy.py new file mode 100644 index 00000000..0f7fdbe7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/annoy.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Annoy + +# Create a way to dynamically look up deprecated imports.
+# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Annoy": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Annoy", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/astradb.py b/venv/Lib/site-packages/langchain/vectorstores/astradb.py new file mode 100644 index 00000000..659a7011 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/astradb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import AstraDB + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AstraDB": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AstraDB", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/atlas.py b/venv/Lib/site-packages/langchain/vectorstores/atlas.py new file mode 100644 index 00000000..f7aa5396 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/atlas.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import AtlasDB + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"AtlasDB": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AtlasDB", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/awadb.py b/venv/Lib/site-packages/langchain/vectorstores/awadb.py new file mode 100644 index 00000000..9966d341 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/awadb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import AwaDB + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"AwaDB": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AwaDB", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/azure_cosmos_db.py b/venv/Lib/site-packages/langchain/vectorstores/azure_cosmos_db.py new file mode 100644 index 00000000..3b6a8df5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/azure_cosmos_db.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import AzureCosmosDBVectorSearch + from langchain_community.vectorstores.azure_cosmos_db import CosmosDBSimilarityType + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "CosmosDBSimilarityType": "langchain_community.vectorstores.azure_cosmos_db", + "AzureCosmosDBVectorSearch": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CosmosDBSimilarityType", + "AzureCosmosDBVectorSearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/azuresearch.py b/venv/Lib/site-packages/langchain/vectorstores/azuresearch.py new file mode 100644 index 00000000..f4483170 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/azuresearch.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import AzureSearch + from langchain_community.vectorstores.azuresearch import ( + AzureSearchVectorStoreRetriever, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "AzureSearch": "langchain_community.vectorstores", + "AzureSearchVectorStoreRetriever": "langchain_community.vectorstores.azuresearch", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "AzureSearch", + "AzureSearchVectorStoreRetriever", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/bageldb.py b/venv/Lib/site-packages/langchain/vectorstores/bageldb.py new file mode 100644 index 00000000..aa6b299b --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/bageldb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Bagel + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Bagel": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Bagel", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/baiducloud_vector_search.py b/venv/Lib/site-packages/langchain/vectorstores/baiducloud_vector_search.py new file mode 100644 index 00000000..d181a3d7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/baiducloud_vector_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import BESVectorStore + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"BESVectorStore": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BESVectorStore", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/base.py b/venv/Lib/site-packages/langchain/vectorstores/base.py new file mode 100644 index 00000000..59a719b5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/base.py @@ -0,0 +1,3 @@ +from langchain_core.vectorstores import VectorStore, VectorStoreRetriever + +__all__ = ["VectorStore", "VectorStoreRetriever"] diff --git a/venv/Lib/site-packages/langchain/vectorstores/cassandra.py b/venv/Lib/site-packages/langchain/vectorstores/cassandra.py new file mode 100644 index 00000000..08e60fa6 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/cassandra.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Cassandra + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Cassandra": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Cassandra", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/chroma.py b/venv/Lib/site-packages/langchain/vectorstores/chroma.py new file mode 100644 index 00000000..3b4816e8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/chroma.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Chroma + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Chroma": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Chroma", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/clarifai.py b/venv/Lib/site-packages/langchain/vectorstores/clarifai.py new file mode 100644 index 00000000..4e42317b --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/clarifai.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Clarifai + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Clarifai": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Clarifai", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/clickhouse.py b/venv/Lib/site-packages/langchain/vectorstores/clickhouse.py new file mode 100644 index 00000000..09795a14 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/clickhouse.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Clickhouse, ClickhouseSettings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ClickhouseSettings": "langchain_community.vectorstores", + "Clickhouse": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ClickhouseSettings", + "Clickhouse", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/dashvector.py b/venv/Lib/site-packages/langchain/vectorstores/dashvector.py new file mode 100644 index 00000000..df90dbed --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/dashvector.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import DashVector + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"DashVector": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DashVector", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/databricks_vector_search.py b/venv/Lib/site-packages/langchain/vectorstores/databricks_vector_search.py new file mode 100644 index 00000000..d416c14a --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/databricks_vector_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import DatabricksVectorSearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DatabricksVectorSearch": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DatabricksVectorSearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/deeplake.py b/venv/Lib/site-packages/langchain/vectorstores/deeplake.py new file mode 100644 index 00000000..24239246 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/deeplake.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import DeepLake + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DeepLake": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DeepLake", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/dingo.py b/venv/Lib/site-packages/langchain/vectorstores/dingo.py new file mode 100644 index 00000000..d737cc3f --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/dingo.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Dingo + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Dingo": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Dingo", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/docarray/__init__.py b/venv/Lib/site-packages/langchain/vectorstores/docarray/__init__.py new file mode 100644 index 00000000..1b42310b --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/docarray/__init__.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import ( + DocArrayHnswSearch, + DocArrayInMemorySearch, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DocArrayHnswSearch": "langchain_community.vectorstores", + "DocArrayInMemorySearch": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DocArrayHnswSearch", + "DocArrayInMemorySearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..48bb4657 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..e98ba9e4 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/hnsw.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/hnsw.cpython-312.pyc new file mode 100644 index 00000000..8e8e6ebf Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/hnsw.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/in_memory.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/in_memory.cpython-312.pyc new file mode 100644 index 00000000..a6d4775d Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/docarray/__pycache__/in_memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/docarray/base.py b/venv/Lib/site-packages/langchain/vectorstores/docarray/base.py new file mode 100644 index 00000000..6114854d --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/docarray/base.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores.docarray.base import DocArrayIndex + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"DocArrayIndex": "langchain_community.vectorstores.docarray.base"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DocArrayIndex", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/docarray/hnsw.py b/venv/Lib/site-packages/langchain/vectorstores/docarray/hnsw.py new file mode 100644 index 00000000..bd846cf5 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/docarray/hnsw.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import DocArrayHnswSearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DocArrayHnswSearch": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DocArrayHnswSearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/docarray/in_memory.py b/venv/Lib/site-packages/langchain/vectorstores/docarray/in_memory.py new file mode 100644 index 00000000..467493ec --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/docarray/in_memory.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import DocArrayInMemorySearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"DocArrayInMemorySearch": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DocArrayInMemorySearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/elastic_vector_search.py b/venv/Lib/site-packages/langchain/vectorstores/elastic_vector_search.py new file mode 100644 index 00000000..cb75713b --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/elastic_vector_search.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import ElasticKnnSearch, ElasticVectorSearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "ElasticVectorSearch": "langchain_community.vectorstores", + "ElasticKnnSearch": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ElasticVectorSearch", + "ElasticKnnSearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/elasticsearch.py b/venv/Lib/site-packages/langchain/vectorstores/elasticsearch.py new file mode 100644 index 00000000..616a8df3 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/elasticsearch.py @@ -0,0 +1,39 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import ElasticsearchStore + from langchain_community.vectorstores.elasticsearch import ( + ApproxRetrievalStrategy, + BaseRetrievalStrategy, + ExactRetrievalStrategy, + SparseRetrievalStrategy, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "BaseRetrievalStrategy": "langchain_community.vectorstores.elasticsearch", + "ApproxRetrievalStrategy": "langchain_community.vectorstores.elasticsearch", + "ExactRetrievalStrategy": "langchain_community.vectorstores.elasticsearch", + "SparseRetrievalStrategy": "langchain_community.vectorstores.elasticsearch", + "ElasticsearchStore": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseRetrievalStrategy", + "ApproxRetrievalStrategy", + "ExactRetrievalStrategy", + "SparseRetrievalStrategy", + "ElasticsearchStore", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/epsilla.py b/venv/Lib/site-packages/langchain/vectorstores/epsilla.py new file mode 100644 index 00000000..c2bf16e8 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/epsilla.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Epsilla + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Epsilla": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Epsilla", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/faiss.py b/venv/Lib/site-packages/langchain/vectorstores/faiss.py new file mode 100644 index 00000000..4bd19111 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/faiss.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import FAISS + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"FAISS": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "FAISS", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/hippo.py b/venv/Lib/site-packages/langchain/vectorstores/hippo.py new file mode 100644 index 00000000..bd08c1a4 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/hippo.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores.hippo import Hippo + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Hippo": "langchain_community.vectorstores.hippo"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Hippo", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/hologres.py b/venv/Lib/site-packages/langchain/vectorstores/hologres.py new file mode 100644 index 00000000..fb3ef87a --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/hologres.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Hologres + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Hologres": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Hologres", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/lancedb.py b/venv/Lib/site-packages/langchain/vectorstores/lancedb.py new file mode 100644 index 00000000..2d1c2f0d --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/lancedb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import LanceDB + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"LanceDB": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LanceDB", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/llm_rails.py b/venv/Lib/site-packages/langchain/vectorstores/llm_rails.py new file mode 100644 index 00000000..8b3b6597 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/llm_rails.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import LLMRails + from langchain_community.vectorstores.llm_rails import LLMRailsRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "LLMRails": "langchain_community.vectorstores", + "LLMRailsRetriever": "langchain_community.vectorstores.llm_rails", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "LLMRails", + "LLMRailsRetriever", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/marqo.py b/venv/Lib/site-packages/langchain/vectorstores/marqo.py new file mode 100644 index 00000000..7db956ce --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/marqo.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Marqo + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Marqo": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Marqo", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/matching_engine.py b/venv/Lib/site-packages/langchain/vectorstores/matching_engine.py new file mode 100644 index 00000000..0e15eb4c --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/matching_engine.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import MatchingEngine + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"MatchingEngine": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MatchingEngine", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/meilisearch.py b/venv/Lib/site-packages/langchain/vectorstores/meilisearch.py new file mode 100644 index 00000000..640241bf --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/meilisearch.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Meilisearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Meilisearch": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Meilisearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/milvus.py b/venv/Lib/site-packages/langchain/vectorstores/milvus.py new file mode 100644 index 00000000..cc7bbfa7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/milvus.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Milvus + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Milvus": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Milvus", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/momento_vector_index.py b/venv/Lib/site-packages/langchain/vectorstores/momento_vector_index.py new file mode 100644 index 00000000..8fef15d0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/momento_vector_index.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import MomentoVectorIndex + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"MomentoVectorIndex": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MomentoVectorIndex", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/mongodb_atlas.py b/venv/Lib/site-packages/langchain/vectorstores/mongodb_atlas.py new file mode 100644 index 00000000..39325c57 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/mongodb_atlas.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import MongoDBAtlasVectorSearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"MongoDBAtlasVectorSearch": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MongoDBAtlasVectorSearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/myscale.py b/venv/Lib/site-packages/langchain/vectorstores/myscale.py new file mode 100644 index 00000000..69271ef0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/myscale.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import MyScale, MyScaleSettings + from langchain_community.vectorstores.myscale import MyScaleWithoutJSON + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "MyScaleSettings": "langchain_community.vectorstores", + "MyScale": "langchain_community.vectorstores", + "MyScaleWithoutJSON": "langchain_community.vectorstores.myscale", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "MyScaleSettings", + "MyScale", + "MyScaleWithoutJSON", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/neo4j_vector.py b/venv/Lib/site-packages/langchain/vectorstores/neo4j_vector.py new file mode 100644 index 00000000..65f0f8f2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/neo4j_vector.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Neo4jVector + from langchain_community.vectorstores.neo4j_vector import SearchType + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "SearchType": "langchain_community.vectorstores.neo4j_vector", + "Neo4jVector": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SearchType", + "Neo4jVector", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/nucliadb.py b/venv/Lib/site-packages/langchain/vectorstores/nucliadb.py new file mode 100644 index 00000000..b097ce57 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/nucliadb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores.nucliadb import NucliaDB + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"NucliaDB": "langchain_community.vectorstores.nucliadb"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "NucliaDB", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/opensearch_vector_search.py b/venv/Lib/site-packages/langchain/vectorstores/opensearch_vector_search.py new file mode 100644 index 00000000..6afc9864 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/opensearch_vector_search.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import OpenSearchVectorSearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"OpenSearchVectorSearch": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "OpenSearchVectorSearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/pgembedding.py b/venv/Lib/site-packages/langchain/vectorstores/pgembedding.py new file mode 100644 index 00000000..ff1ab865 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/pgembedding.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import PGEmbedding + from langchain_community.vectorstores.pgembedding import ( + CollectionStore, + EmbeddingStore, + QueryResult, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "CollectionStore": "langchain_community.vectorstores.pgembedding", + "EmbeddingStore": "langchain_community.vectorstores.pgembedding", + "QueryResult": "langchain_community.vectorstores.pgembedding", + "PGEmbedding": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CollectionStore", + "EmbeddingStore", + "QueryResult", + "PGEmbedding", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/pgvecto_rs.py b/venv/Lib/site-packages/langchain/vectorstores/pgvecto_rs.py new file mode 100644 index 00000000..b717fd97 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/pgvecto_rs.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores.pgvecto_rs import PGVecto_rs + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PGVecto_rs": "langchain_community.vectorstores.pgvecto_rs"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PGVecto_rs", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/pgvector.py b/venv/Lib/site-packages/langchain/vectorstores/pgvector.py new file mode 100644 index 00000000..a45ef5ab --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/pgvector.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import PGVector + from langchain_community.vectorstores.pgvector import DistanceStrategy + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DistanceStrategy": "langchain_community.vectorstores.pgvector", + "PGVector": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DistanceStrategy", + "PGVector", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/pinecone.py b/venv/Lib/site-packages/langchain/vectorstores/pinecone.py new file mode 100644 index 00000000..da1bc8af --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/pinecone.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Pinecone + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Pinecone": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Pinecone", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/qdrant.py b/venv/Lib/site-packages/langchain/vectorstores/qdrant.py new file mode 100644 index 00000000..474424fd --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/qdrant.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Qdrant + from langchain_community.vectorstores.qdrant import QdrantException + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "QdrantException": "langchain_community.vectorstores.qdrant", + "Qdrant": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "QdrantException", + "Qdrant", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/redis/__init__.py b/venv/Lib/site-packages/langchain/vectorstores/redis/__init__.py new file mode 100644 index 00000000..35484442 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/redis/__init__.py @@ -0,0 +1,42 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Redis + from langchain_community.vectorstores.redis.base import RedisVectorStoreRetriever + from langchain_community.vectorstores.redis.filters import ( + RedisFilter, + RedisNum, + RedisTag, + RedisText, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "Redis": "langchain_community.vectorstores", + "RedisFilter": "langchain_community.vectorstores.redis.filters", + "RedisTag": "langchain_community.vectorstores.redis.filters", + "RedisText": "langchain_community.vectorstores.redis.filters", + "RedisNum": "langchain_community.vectorstores.redis.filters", + "RedisVectorStoreRetriever": "langchain_community.vectorstores.redis.base", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Redis", + "RedisFilter", + "RedisTag", + "RedisText", + "RedisNum", + "RedisVectorStoreRetriever", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..6e94d309 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..aec6dcf1 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/filters.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/filters.cpython-312.pyc new file mode 100644 index 00000000..bd75bfc6 Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/filters.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/schema.cpython-312.pyc b/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/schema.cpython-312.pyc new file mode 100644 index 00000000..84d7983d Binary files /dev/null and b/venv/Lib/site-packages/langchain/vectorstores/redis/__pycache__/schema.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain/vectorstores/redis/base.py b/venv/Lib/site-packages/langchain/vectorstores/redis/base.py new file mode 100644 index 00000000..3360858b --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/redis/base.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Redis + from langchain_community.vectorstores.redis.base import ( + RedisVectorStoreRetriever, + check_index_exists, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "check_index_exists": "langchain_community.vectorstores.redis.base", + "Redis": "langchain_community.vectorstores", + "RedisVectorStoreRetriever": "langchain_community.vectorstores.redis.base", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "check_index_exists", + "Redis", + "RedisVectorStoreRetriever", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/redis/filters.py b/venv/Lib/site-packages/langchain/vectorstores/redis/filters.py new file mode 100644 index 00000000..628f32c7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/redis/filters.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores.redis.filters import ( + RedisFilter, + RedisFilterExpression, + RedisFilterField, + RedisFilterOperator, + RedisNum, + RedisTag, + RedisText, + check_operator_misuse, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "RedisFilterOperator": "langchain_community.vectorstores.redis.filters", + "RedisFilter": "langchain_community.vectorstores.redis.filters", + "RedisFilterField": "langchain_community.vectorstores.redis.filters", + "check_operator_misuse": "langchain_community.vectorstores.redis.filters", + "RedisTag": "langchain_community.vectorstores.redis.filters", + "RedisNum": "langchain_community.vectorstores.redis.filters", + "RedisText": "langchain_community.vectorstores.redis.filters", + "RedisFilterExpression": "langchain_community.vectorstores.redis.filters", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RedisFilterOperator", + "RedisFilter", + "RedisFilterField", + "check_operator_misuse", + "RedisTag", + "RedisNum", + "RedisText", + "RedisFilterExpression", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/redis/schema.py b/venv/Lib/site-packages/langchain/vectorstores/redis/schema.py new file mode 100644 index 00000000..7b796cab --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/redis/schema.py @@ -0,0 +1,54 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores.redis.schema import ( + FlatVectorField, + HNSWVectorField, + NumericFieldSchema, + RedisDistanceMetric, + RedisField, + RedisModel, + RedisVectorField, + TagFieldSchema, + TextFieldSchema, + read_schema, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "RedisDistanceMetric": "langchain_community.vectorstores.redis.schema", + "RedisField": "langchain_community.vectorstores.redis.schema", + "TextFieldSchema": "langchain_community.vectorstores.redis.schema", + "TagFieldSchema": "langchain_community.vectorstores.redis.schema", + "NumericFieldSchema": "langchain_community.vectorstores.redis.schema", + "RedisVectorField": "langchain_community.vectorstores.redis.schema", + "FlatVectorField": "langchain_community.vectorstores.redis.schema", + "HNSWVectorField": "langchain_community.vectorstores.redis.schema", + "RedisModel": "langchain_community.vectorstores.redis.schema", + "read_schema": "langchain_community.vectorstores.redis.schema", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "RedisDistanceMetric", + "RedisField", + "TextFieldSchema", + "TagFieldSchema", + "NumericFieldSchema", + "RedisVectorField", + "FlatVectorField", + "HNSWVectorField", + "RedisModel", + "read_schema", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/rocksetdb.py b/venv/Lib/site-packages/langchain/vectorstores/rocksetdb.py new file mode 100644 index 00000000..108ef9de --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/rocksetdb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Rockset + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Rockset": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Rockset", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/scann.py b/venv/Lib/site-packages/langchain/vectorstores/scann.py new file mode 100644 index 00000000..faf70c36 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/scann.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import ScaNN + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"ScaNN": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ScaNN", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/semadb.py b/venv/Lib/site-packages/langchain/vectorstores/semadb.py new file mode 100644 index 00000000..53863ae2 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/semadb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import SemaDB + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SemaDB": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SemaDB", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/singlestoredb.py b/venv/Lib/site-packages/langchain/vectorstores/singlestoredb.py new file mode 100644 index 00000000..b363fb39 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/singlestoredb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import SingleStoreDB + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SingleStoreDB": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SingleStoreDB", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/sklearn.py b/venv/Lib/site-packages/langchain/vectorstores/sklearn.py new file mode 100644 index 00000000..f3646985 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/sklearn.py @@ -0,0 +1,42 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import SKLearnVectorStore + from langchain_community.vectorstores.sklearn import ( + BaseSerializer, + BsonSerializer, + JsonSerializer, + ParquetSerializer, + SKLearnVectorStoreException, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "BaseSerializer": "langchain_community.vectorstores.sklearn", + "JsonSerializer": "langchain_community.vectorstores.sklearn", + "BsonSerializer": "langchain_community.vectorstores.sklearn", + "ParquetSerializer": "langchain_community.vectorstores.sklearn", + "SKLearnVectorStoreException": "langchain_community.vectorstores.sklearn", + "SKLearnVectorStore": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "BaseSerializer", + "JsonSerializer", + "BsonSerializer", + "ParquetSerializer", + "SKLearnVectorStoreException", + "SKLearnVectorStore", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/sqlitevss.py b/venv/Lib/site-packages/langchain/vectorstores/sqlitevss.py new file mode 100644 index 00000000..99a072e0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/sqlitevss.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import SQLiteVSS + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SQLiteVSS": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SQLiteVSS", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/starrocks.py b/venv/Lib/site-packages/langchain/vectorstores/starrocks.py new file mode 100644 index 00000000..bbc3523f --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/starrocks.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import StarRocks + from langchain_community.vectorstores.starrocks import StarRocksSettings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "StarRocksSettings": "langchain_community.vectorstores.starrocks", + "StarRocks": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "StarRocksSettings", + "StarRocks", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/supabase.py b/venv/Lib/site-packages/langchain/vectorstores/supabase.py new file mode 100644 index 00000000..50d15842 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/supabase.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import SupabaseVectorStore + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"SupabaseVectorStore": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "SupabaseVectorStore", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/tair.py b/venv/Lib/site-packages/langchain/vectorstores/tair.py new file mode 100644 index 00000000..90e00b9c --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/tair.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Tair + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Tair": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Tair", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/tencentvectordb.py b/venv/Lib/site-packages/langchain/vectorstores/tencentvectordb.py new file mode 100644 index 00000000..85dab65f --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/tencentvectordb.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import TencentVectorDB + from langchain_community.vectorstores.tencentvectordb import ( + ConnectionParams, + IndexParams, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "ConnectionParams": "langchain_community.vectorstores.tencentvectordb", + "IndexParams": "langchain_community.vectorstores.tencentvectordb", + "TencentVectorDB": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "ConnectionParams", + "IndexParams", + "TencentVectorDB", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/tigris.py b/venv/Lib/site-packages/langchain/vectorstores/tigris.py new file mode 100644 index 00000000..00f2a90b --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/tigris.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Tigris + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Tigris": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Tigris", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/tiledb.py b/venv/Lib/site-packages/langchain/vectorstores/tiledb.py new file mode 100644 index 00000000..67c83cb7 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/tiledb.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import TileDB + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"TileDB": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TileDB", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/timescalevector.py b/venv/Lib/site-packages/langchain/vectorstores/timescalevector.py new file mode 100644 index 00000000..b242d50f --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/timescalevector.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import TimescaleVector + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"TimescaleVector": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TimescaleVector", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/typesense.py b/venv/Lib/site-packages/langchain/vectorstores/typesense.py new file mode 100644 index 00000000..b7d41ebe --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/typesense.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Typesense + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Typesense": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Typesense", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/usearch.py b/venv/Lib/site-packages/langchain/vectorstores/usearch.py new file mode 100644 index 00000000..69af9352 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/usearch.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import USearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"USearch": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "USearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/utils.py b/venv/Lib/site-packages/langchain/vectorstores/utils.py new file mode 100644 index 00000000..e657035a --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/utils.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores.utils import ( + DistanceStrategy, + filter_complex_metadata, + maximal_marginal_relevance, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "DistanceStrategy": "langchain_community.vectorstores.utils", + "maximal_marginal_relevance": "langchain_community.vectorstores.utils", + "filter_complex_metadata": "langchain_community.vectorstores.utils", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "DistanceStrategy", + "maximal_marginal_relevance", + "filter_complex_metadata", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/vald.py b/venv/Lib/site-packages/langchain/vectorstores/vald.py new file mode 100644 index 00000000..7c357022 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/vald.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Vald + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Vald": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Vald", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/vearch.py b/venv/Lib/site-packages/langchain/vectorstores/vearch.py new file mode 100644 index 00000000..37985a41 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/vearch.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Vearch + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Vearch": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Vearch", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/vectara.py b/venv/Lib/site-packages/langchain/vectorstores/vectara.py new file mode 100644 index 00000000..c4fc08ca --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/vectara.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Vectara + from langchain_community.vectorstores.vectara import VectaraRetriever + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "Vectara": "langchain_community.vectorstores", + "VectaraRetriever": "langchain_community.vectorstores.vectara", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Vectara", + "VectaraRetriever", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/vespa.py b/venv/Lib/site-packages/langchain/vectorstores/vespa.py new file mode 100644 index 00000000..f0c95476 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/vespa.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import VespaStore + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"VespaStore": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "VespaStore", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/weaviate.py b/venv/Lib/site-packages/langchain/vectorstores/weaviate.py new file mode 100644 index 00000000..35fcb6ab --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/weaviate.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Weaviate + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"Weaviate": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Weaviate", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/xata.py b/venv/Lib/site-packages/langchain/vectorstores/xata.py new file mode 100644 index 00000000..59e38855 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/xata.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores.xata import XataVectorStore + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"XataVectorStore": "langchain_community.vectorstores.xata"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "XataVectorStore", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/yellowbrick.py b/venv/Lib/site-packages/langchain/vectorstores/yellowbrick.py new file mode 100644 index 00000000..c7e30ca0 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/yellowbrick.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Yellowbrick + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Yellowbrick": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Yellowbrick", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/zep.py b/venv/Lib/site-packages/langchain/vectorstores/zep.py new file mode 100644 index 00000000..6bd0b721 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/zep.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import ZepVectorStore + from langchain_community.vectorstores.zep import CollectionConfig + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "CollectionConfig": "langchain_community.vectorstores.zep", + "ZepVectorStore": "langchain_community.vectorstores", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "CollectionConfig", + "ZepVectorStore", +] diff --git a/venv/Lib/site-packages/langchain/vectorstores/zilliz.py b/venv/Lib/site-packages/langchain/vectorstores/zilliz.py new file mode 100644 index 00000000..411fae53 --- /dev/null +++ b/venv/Lib/site-packages/langchain/vectorstores/zilliz.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.vectorstores import Zilliz + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"Zilliz": "langchain_community.vectorstores"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "Zilliz", +] diff --git a/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/INSTALLER b/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/METADATA b/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/METADATA new file mode 100644 index 00000000..63f0154a --- /dev/null +++ b/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/METADATA @@ -0,0 +1,109 @@ +Metadata-Version: 2.1 +Name: langchain-core +Version: 0.3.59 +Summary: Building applications with LLMs through composability +License: MIT +Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/core +Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-core%3D%3D0%22&expanded=true +Project-URL: repository, https://github.com/langchain-ai/langchain +Requires-Python: >=3.9 +Requires-Dist: langsmith<0.4,>=0.1.125 +Requires-Dist: tenacity!=8.4.0,<10.0.0,>=8.1.0 +Requires-Dist: jsonpatch<2.0,>=1.33 +Requires-Dist: PyYAML>=5.3 +Requires-Dist: packaging<25,>=23.2 +Requires-Dist: typing-extensions>=4.7 +Requires-Dist: pydantic<3.0.0,>=2.5.2; python_full_version < "3.12.4" +Requires-Dist: pydantic<3.0.0,>=2.7.4; python_full_version >= "3.12.4" +Description-Content-Type: text/markdown + +# 🦜🍎️ LangChain Core + +[![Downloads](https://static.pepy.tech/badge/langchain_core/month)](https://pepy.tech/project/langchain_core) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) + +## Quick Install + +```bash +pip install langchain-core +``` + +## What is it? + +LangChain Core contains the base abstractions that power the rest of the LangChain ecosystem. + +These abstractions are designed to be as modular and simple as possible. Examples of these abstractions include those for language models, document loaders, embedding models, vectorstores, retrievers, and more. + +The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem. 
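+
+For example, a provider plugs in by implementing one of these interfaces. Below is a minimal sketch of a custom embedding model built on the `Embeddings` abstraction; the `CharCountEmbeddings` class and its length-based vectors are toy stand-ins for a real provider's API calls:
+
+```python
+from langchain_core.embeddings import Embeddings
+
+
+class CharCountEmbeddings(Embeddings):
+    """Toy provider: embeds each text as a single length-based feature."""
+
+    def embed_query(self, text: str) -> list[float]:
+        # A real provider would call its embedding API or local model here.
+        return [float(len(text))]
+
+    def embed_documents(self, texts: list[str]) -> list[list[float]]:
+        # One vector per input document.
+        return [self.embed_query(text) for text in texts]
+```
+
+Anything in the ecosystem that accepts an `Embeddings` instance (e.g. a vectorstore) can use this class unchanged.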
+
+For full documentation see the [API reference](https://python.langchain.com/api_reference/core/index.html).
+
+## 1️⃣ Core Interface: Runnables
+
+The concept of a Runnable is central to LangChain Core – it is the interface that most LangChain Core components implement, giving them
+
+- a common invocation interface (invoke, batch, stream, etc.)
+- built-in utilities for retries, fallbacks, schemas and runtime configurability
+- easy deployment with [LangServe](https://github.com/langchain-ai/langserve)
+
+For more, check out the [runnable docs](https://python.langchain.com/docs/expression_language/interface). Examples of components that implement the interface include: LLMs, Chat Models, Prompts, Retrievers, Tools, Output Parsers.
+
+You can use LangChain Core objects in two ways:
+
+1. **imperative**, i.e. call them directly, e.g. `model.invoke(...)`
+
+2. **declarative**, with LangChain Expression Language (LCEL)
+
+You can also mix the two: for example, one of the steps in your LCEL sequence can be a custom function.
+
+| Feature   | Imperative                      | Declarative    |
+| --------- | ------------------------------- | -------------- |
+| Syntax    | All of Python                   | LCEL           |
+| Tracing   | ✅ – Automatic                  | ✅ – Automatic |
+| Parallel  | ✅ – with threads or coroutines | ✅ – Automatic |
+| Streaming | ✅ – by yielding                | ✅ – Automatic |
+| Async     | ✅ – by writing async functions | ✅ – Automatic |
+
+## ⚡️ What is LangChain Expression Language?
+
+LangChain Expression Language (LCEL) is a _declarative language_ for composing LangChain Core runnables into sequences (or DAGs), covering the most common patterns when building with LLMs.
+
+LangChain Core compiles LCEL sequences to an _optimized execution plan_, with automatic parallelization, streaming, tracing, and async support.
+
+For more, check out the [LCEL docs](https://python.langchain.com/docs/expression_language/); a worked sketch also appears near the end of this README.
+
+![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/static/svg/langchain_stack_112024.svg "LangChain Framework Overview")
+
+For more advanced use cases, also check out [LangGraph](https://github.com/langchain-ai/langgraph), which is a graph-based runner for cyclic and recursive LLM workflows.
+
+## 📕 Releases & Versioning
+
+`langchain-core` is currently on version `0.3.x`.
+
+As `langchain-core` contains the base abstractions and runtime for the whole LangChain ecosystem, we will communicate any breaking changes with advance notice and version bumps. The exception to this is anything in `langchain_core.beta`. The reason for `langchain_core.beta` is that, given the rate of change of the field, being able to move quickly is still a priority, and this module is our attempt to do so.
+
+Minor version increases will occur for:
+
+- Breaking changes for any public interfaces NOT in `langchain_core.beta`
+
+Patch version increases will occur for:
+
+- Bug fixes
+- New features
+- Any changes to private interfaces
+- Any changes to `langchain_core.beta`
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://python.langchain.com/docs/contributing/).
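+
+## 🔁 Putting it together
+
+As a worked sketch of the Runnable interface and LCEL composition described above: the example below uses `FakeListChatModel`, a canned-response test model bundled with `langchain-core`, so it runs without any provider package or API key. The joke strings are placeholders; any real chat model drops in the same way.
+
+```python
+from langchain_core.language_models import FakeListChatModel
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import ChatPromptTemplate
+
+prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
+model = FakeListChatModel(responses=["Why did the vector cross the hyperplane?"])
+
+# Declarative: compose Runnables into a sequence with the | operator (LCEL).
+chain = prompt | model | StrOutputParser()
+
+# Imperative: every Runnable exposes invoke, batch, stream, and async variants.
+print(chain.invoke({"topic": "vectors"}))
+print(chain.batch([{"topic": "vectors"}, {"topic": "matrices"}]))
+```
+
+Because the chain is itself a Runnable, the same composition also streams (`chain.stream(...)`) and runs asynchronously (`await chain.ainvoke(...)`) with no extra code.
+
+## ⛰️ Why build on top of LangChain Core?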
+ +The whole LangChain ecosystem is built on top of LangChain Core, so you're in good company when building on top of it. Some of the benefits: + +- **Modularity**: LangChain Core is designed around abstractions that are independent of each other, and not tied to any specific model provider. +- **Stability**: We are committed to a stable versioning scheme, and will communicate any breaking changes with advance notice and version bumps. +- **Battle-tested**: LangChain Core components have the largest install base in the LLM ecosystem, and are used in production by many companies. +- **Community**: LangChain Core is developed in the open, and we welcome contributions from the community. diff --git a/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/RECORD b/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/RECORD new file mode 100644 index 00000000..ca34de8f --- /dev/null +++ b/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/RECORD @@ -0,0 +1,344 @@ +langchain_core-0.3.59.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +langchain_core-0.3.59.dist-info/METADATA,sha256=RGiVEILqRu2N-P8Z7GUm4XgjWasIaMkZ0Sp1YdnOQao,5887 +langchain_core-0.3.59.dist-info/RECORD,, +langchain_core-0.3.59.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90 +langchain_core-0.3.59.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34 +langchain_core/__init__.py,sha256=AN-KPu2IuqeQGc-m9dcDfoTIvBno5-ZdUNEVwIIoZM0,709 +langchain_core/__pycache__/__init__.cpython-312.pyc,, +langchain_core/__pycache__/_import_utils.cpython-312.pyc,, +langchain_core/__pycache__/agents.cpython-312.pyc,, +langchain_core/__pycache__/caches.cpython-312.pyc,, +langchain_core/__pycache__/chat_history.cpython-312.pyc,, +langchain_core/__pycache__/chat_loaders.cpython-312.pyc,, +langchain_core/__pycache__/chat_sessions.cpython-312.pyc,, +langchain_core/__pycache__/env.cpython-312.pyc,, +langchain_core/__pycache__/exceptions.cpython-312.pyc,, +langchain_core/__pycache__/globals.cpython-312.pyc,, +langchain_core/__pycache__/memory.cpython-312.pyc,, +langchain_core/__pycache__/prompt_values.cpython-312.pyc,, +langchain_core/__pycache__/rate_limiters.cpython-312.pyc,, +langchain_core/__pycache__/retrievers.cpython-312.pyc,, +langchain_core/__pycache__/stores.cpython-312.pyc,, +langchain_core/__pycache__/structured_query.cpython-312.pyc,, +langchain_core/__pycache__/sys_info.cpython-312.pyc,, +langchain_core/__pycache__/version.cpython-312.pyc,, +langchain_core/_api/__init__.py,sha256=9NCR6MojAHzznfX98hza3fVn3PQ4JdTNyPUOkovwB1Q,1976 +langchain_core/_api/__pycache__/__init__.cpython-312.pyc,, +langchain_core/_api/__pycache__/beta_decorator.cpython-312.pyc,, +langchain_core/_api/__pycache__/deprecation.cpython-312.pyc,, +langchain_core/_api/__pycache__/internal.cpython-312.pyc,, +langchain_core/_api/__pycache__/path.cpython-312.pyc,, +langchain_core/_api/beta_decorator.py,sha256=osyHHMFFC4jT59CSlauU8HnVxReBfEaA-USTkvh7yAI,9942 +langchain_core/_api/deprecation.py,sha256=KNiZvPIccaffbkhN7D0o9K0n4tydOMpSwX5lfKFhCmk,20552 +langchain_core/_api/internal.py,sha256=aOZkYANu747LyWzyAk-0KE4RjdTYj18Wtlh7F9_qyPM,683 +langchain_core/_api/path.py,sha256=M93Jo_1CUpShRyqB6m___Qjczm1RU1D7yb4LSGaiysk,984 +langchain_core/_import_utils.py,sha256=hzGmPpoLFeDGg6o96J39RPtMl_I6GUxW-_2JxGTJcIk,1250 +langchain_core/agents.py,sha256=r2GDNZeHrGR83URVMBn_-q18enwg1o-1aZlTlke3ep0,8466 +langchain_core/beta/__init__.py,sha256=8phOlCdTByvzqN1DR4CU_rvaO4SDRebKATmFKj0B5Nw,68 
+langchain_core/beta/__pycache__/__init__.cpython-312.pyc,, +langchain_core/beta/runnables/__init__.py,sha256=KPVZTs2phF46kEB7mn0M75UeSw8nylbTZ4HYpLT0ywE,17 +langchain_core/beta/runnables/__pycache__/__init__.cpython-312.pyc,, +langchain_core/beta/runnables/__pycache__/context.cpython-312.pyc,, +langchain_core/beta/runnables/context.py,sha256=GiZ01qfR5t670hIGMAqhzHKdVwAnaR18tD2N8FsRyjU,13453 +langchain_core/caches.py,sha256=jzH1jy1VcOWZT08viIAyl-9AJ3_KI6ge2jBEqxBvTwM,9667 +langchain_core/callbacks/__init__.py,sha256=71DA4wJI3AudCYQvAIy2ZOauw-hKu-VHRl9XrZWMOg4,4347 +langchain_core/callbacks/__pycache__/__init__.cpython-312.pyc,, +langchain_core/callbacks/__pycache__/base.cpython-312.pyc,, +langchain_core/callbacks/__pycache__/file.cpython-312.pyc,, +langchain_core/callbacks/__pycache__/manager.cpython-312.pyc,, +langchain_core/callbacks/__pycache__/stdout.cpython-312.pyc,, +langchain_core/callbacks/__pycache__/streaming_stdout.cpython-312.pyc,, +langchain_core/callbacks/__pycache__/usage.cpython-312.pyc,, +langchain_core/callbacks/base.py,sha256=6MXzjic-DrGaLYjkplA857Eg3-YoosKcVh45QQejI2g,36875 +langchain_core/callbacks/file.py,sha256=VKcc6xYBLqyfpCNJUJ7lFT3vnDOIx1jJcaWGrYDcGWI,4919 +langchain_core/callbacks/manager.py,sha256=VyLY-N_kNJpB_K8BUBzI-78BYYX4mCHQfR0GwEDx4YY,89185 +langchain_core/callbacks/stdout.py,sha256=hQ1gjpshNHGdbCS8cH6_oTc4nM8tCWzGNXrbm9dJeaY,4113 +langchain_core/callbacks/streaming_stdout.py,sha256=92UQWxL9HBzdCpn47AF-ZE_jGkkebMn2Z_l24ndMBMI,4646 +langchain_core/callbacks/usage.py,sha256=ba9-YS0ulugJJz_yoFL2A1RL5tXvrRAQ0MoFXxZqx6E,5060 +langchain_core/chat_history.py,sha256=un-5bq7rWqMXmQh68BZNyKJPY7QuOy-9ebahq4SAZ8s,8379 +langchain_core/chat_loaders.py,sha256=b57Gl3KGPxq9gYJjetsHfJm1I6kSqi7bDE91fJJOR84,601 +langchain_core/chat_sessions.py,sha256=YEO3ck5_wRGd3a2EnGD7M_wTvNC_4T1IVjQWekagwaM,564 +langchain_core/document_loaders/__init__.py,sha256=ZwpBY2fX4-8S_iYArqebUi6Ou75D42LCNHNbGCaq-Eo,975 +langchain_core/document_loaders/__pycache__/__init__.cpython-312.pyc,, +langchain_core/document_loaders/__pycache__/base.cpython-312.pyc,, +langchain_core/document_loaders/__pycache__/blob_loaders.cpython-312.pyc,, +langchain_core/document_loaders/__pycache__/langsmith.cpython-312.pyc,, +langchain_core/document_loaders/base.py,sha256=dIkxB9EjJPHDCOanfG4L_BQzPpYo-J-pui0vryoWtCE,4256 +langchain_core/document_loaders/blob_loaders.py,sha256=4m1k8boiwXw3z4yMYT8bnYUA-eGTPtEZyUxZvI3GbTs,1077 +langchain_core/document_loaders/langsmith.py,sha256=6Y7SAexwt1G3tYtPjvk3g-hBJARiQWYAY5tixDGclLU,5402 +langchain_core/documents/__init__.py,sha256=M_c7G5OODxHtWbMRinbfsBwtNg0TdMSrgMUFr-6fRt8,850 +langchain_core/documents/__pycache__/__init__.cpython-312.pyc,, +langchain_core/documents/__pycache__/base.cpython-312.pyc,, +langchain_core/documents/__pycache__/compressor.cpython-312.pyc,, +langchain_core/documents/__pycache__/transformers.cpython-312.pyc,, +langchain_core/documents/base.py,sha256=O-bg0CI-_nol4ChObjwEUsttexPs2q50tQIX1fcT8dc,10387 +langchain_core/documents/compressor.py,sha256=pbabH4kKqBplmdtMzNLlEaP7JATwQW22W0Y8AGmU5kA,1992 +langchain_core/documents/transformers.py,sha256=Nym6dVdg6S3ktfNsTzdg5iuk9-dbutPoK7zEjY5Zo-I,2549 +langchain_core/embeddings/__init__.py,sha256=0SfcdkVSSXmTFXznUyeZq_b1ajpwIGDueGAAfwyMpUY,774 +langchain_core/embeddings/__pycache__/__init__.cpython-312.pyc,, +langchain_core/embeddings/__pycache__/embeddings.cpython-312.pyc,, +langchain_core/embeddings/__pycache__/fake.cpython-312.pyc,, 
+langchain_core/embeddings/embeddings.py,sha256=u50T2VxLLyfGBCKcVtWfSiZrtKua8sOSHwSSHRKtcno,2405 +langchain_core/embeddings/fake.py,sha256=xsKT0bvaf0wX12Ry62XKLZMn-r14iHyaWAjG7l1q-Io,3913 +langchain_core/env.py,sha256=lBACwu8P4BgftWYCgKJAy1m--wMp_KIbPcDPN2iDB8o,646 +langchain_core/example_selectors/__init__.py,sha256=k8y0chtEhaHf8Y1_nZVDsb9CWDdRIWFb9U806mnbGvo,1394 +langchain_core/example_selectors/__pycache__/__init__.cpython-312.pyc,, +langchain_core/example_selectors/__pycache__/base.cpython-312.pyc,, +langchain_core/example_selectors/__pycache__/length_based.cpython-312.pyc,, +langchain_core/example_selectors/__pycache__/semantic_similarity.cpython-312.pyc,, +langchain_core/example_selectors/base.py,sha256=cOk3gehxDQoqpLBJ5UxejjdnFIFbuktrkAMtZ4_2DlU,1520 +langchain_core/example_selectors/length_based.py,sha256=VlWoGhppKrKYKRyi0qBdhq4TbD-6pDHobx3fMGWoqfY,3375 +langchain_core/example_selectors/semantic_similarity.py,sha256=flhao1yNBnaDkM2MlwFd2m4m2dBc_IlEMnmSWV61IVE,13739 +langchain_core/exceptions.py,sha256=nGD_r_MAZSbraqzWUTzreALmPBSg4XA3zyTWd3kmMWE,3114 +langchain_core/globals.py,sha256=Y6uVfEmgAw5_TGb9T3ODOZokfEkExDgWdN-ptUkj8do,8937 +langchain_core/indexing/__init__.py,sha256=lwm2Awn0OTTT-tFHQa2bDyKM3rsl97a4HbDtYnJt22c,1274 +langchain_core/indexing/__pycache__/__init__.cpython-312.pyc,, +langchain_core/indexing/__pycache__/api.cpython-312.pyc,, +langchain_core/indexing/__pycache__/base.cpython-312.pyc,, +langchain_core/indexing/__pycache__/in_memory.cpython-312.pyc,, +langchain_core/indexing/api.py,sha256=HZJdH84UNeTG7snZ2kgLYiXzITQZL80OmP0yRwMeJZ8,32607 +langchain_core/indexing/base.py,sha256=OoS3omb9lFqNtL5FYXIrs8yzjD7Mr8an5cb6ZBcFMbI,23298 +langchain_core/indexing/in_memory.py,sha256=-qyKjAWJFWxtH_MbUu3JJct0x3R_pbHyHuxA4Cra1nA,2709 +langchain_core/language_models/__init__.py,sha256=IdNhb6-kivpPTPTykMuYDLfzBP44Pl82eMhVw-1XF58,3778 +langchain_core/language_models/__pycache__/__init__.cpython-312.pyc,, +langchain_core/language_models/__pycache__/_utils.cpython-312.pyc,, +langchain_core/language_models/__pycache__/base.cpython-312.pyc,, +langchain_core/language_models/__pycache__/chat_models.cpython-312.pyc,, +langchain_core/language_models/__pycache__/fake.cpython-312.pyc,, +langchain_core/language_models/__pycache__/fake_chat_models.cpython-312.pyc,, +langchain_core/language_models/__pycache__/llms.cpython-312.pyc,, +langchain_core/language_models/_utils.py,sha256=hR7_UhOpg9BtE4NsN3DAa4GBXqkcVXV-b2ggZEGD0ac,4783 +langchain_core/language_models/base.py,sha256=hQXammG0peipizMIdI3XXcLr9cWi8ottE-od9tsRad0,14432 +langchain_core/language_models/chat_models.py,sha256=4TzhTdDisRBmERwqnfAuJIH5Dh2PNEqq0J_mcYDVHR4,67059 +langchain_core/language_models/fake.py,sha256=h9LhVTkmYLXkJ1_VvsKhqYVpkQsM7eAr9geXF_IVkPs,3772 +langchain_core/language_models/fake_chat_models.py,sha256=vt0N35tlETJrStWcr2cZrknjDUMKzZjikb7Ftndzgik,12832 +langchain_core/language_models/llms.py,sha256=KgdqX-HXS3XNz-wTy4c4Y-51dGqPzBjZP27ECgR3em0,56569 +langchain_core/load/__init__.py,sha256=remW-gt_pfJh0mEEqhNUeeSvMR3tTiQJQfdH1rUShcs,1150 +langchain_core/load/__pycache__/__init__.cpython-312.pyc,, +langchain_core/load/__pycache__/dump.cpython-312.pyc,, +langchain_core/load/__pycache__/load.cpython-312.pyc,, +langchain_core/load/__pycache__/mapping.cpython-312.pyc,, +langchain_core/load/__pycache__/serializable.cpython-312.pyc,, +langchain_core/load/dump.py,sha256=xQMuWsbCpgt8ce_muZuHUOOY9Ju-_voQyHc_fkv18mo,2667 +langchain_core/load/load.py,sha256=ZC8JJViIY9rJnV1c9ttTAuQqS7v2ssL2FahsOZs67-Y,8424 
+langchain_core/load/mapping.py,sha256=nnFXiTdQkfdv41_wP38aWGtpp9svxW6fwVyC3LmRkok,29633 +langchain_core/load/serializable.py,sha256=m6cjURpY_Xh5wvjWCT6GkQydU71qn6nzi16QzYMJoOU,11684 +langchain_core/memory.py,sha256=OpFqP91EHORg8ddleQLPFqZLvC1YWT42w1Z_9JW54Uk,3644 +langchain_core/messages/__init__.py,sha256=bwL5MQwSxRtUvz4k-85n30BP3ngzKx5muXbdDxbcjZg,4253 +langchain_core/messages/__pycache__/__init__.cpython-312.pyc,, +langchain_core/messages/__pycache__/ai.cpython-312.pyc,, +langchain_core/messages/__pycache__/base.cpython-312.pyc,, +langchain_core/messages/__pycache__/chat.cpython-312.pyc,, +langchain_core/messages/__pycache__/content_blocks.cpython-312.pyc,, +langchain_core/messages/__pycache__/function.cpython-312.pyc,, +langchain_core/messages/__pycache__/human.cpython-312.pyc,, +langchain_core/messages/__pycache__/modifier.cpython-312.pyc,, +langchain_core/messages/__pycache__/system.cpython-312.pyc,, +langchain_core/messages/__pycache__/tool.cpython-312.pyc,, +langchain_core/messages/__pycache__/utils.cpython-312.pyc,, +langchain_core/messages/ai.py,sha256=rsnde8Wlfc8X6T5urzAL69IWynvjxHmXWPGVttQAh8Y,17772 +langchain_core/messages/base.py,sha256=cWvq7qfi0FZ4bSUHOzH-SfVwFU076F9tZhFVLnE8nNk,9638 +langchain_core/messages/chat.py,sha256=Vgk3y03F9NP-wKkXAjBDLOtrH43NpEMN2xaWRp6qhRA,2260 +langchain_core/messages/content_blocks.py,sha256=-eH-p_bDhp9o71EGcA0XJ-lhGf4VMFiPLRN45fjxmSE,5014 +langchain_core/messages/function.py,sha256=QO2WgKmJ5nm7QL-xXG11Fmz3qFkHm1lL0k41WjDeEZE,2157 +langchain_core/messages/human.py,sha256=oUKkV5H9j-z6KIWtKwDRwHfQudPkLQOcWQSVpNYJeWQ,1928 +langchain_core/messages/modifier.py,sha256=1PDFQ6393-nKQqQnhhEkmS8UyT8uCY412gchWPQ73Zw,863 +langchain_core/messages/system.py,sha256=Zbb8zeezWs8SN6nOP-MjeBed5OtNetAsdGzf3lcl2Yc,1741 +langchain_core/messages/tool.py,sha256=c7Np4zkWIF-Yn_5Fvh-OlLA73llvYcDT9p34QXea5is,12201 +langchain_core/messages/utils.py,sha256=cInmigvIYBYq6euAKCijnPZsnAqAGbMfa-kctWKrt8Y,67469 +langchain_core/output_parsers/__init__.py,sha256=c1n8spEg4rHTSIpMsNRxGMs-vRCEhP_Ae_oLti2CW5k,2873 +langchain_core/output_parsers/__pycache__/__init__.cpython-312.pyc,, +langchain_core/output_parsers/__pycache__/base.cpython-312.pyc,, +langchain_core/output_parsers/__pycache__/format_instructions.cpython-312.pyc,, +langchain_core/output_parsers/__pycache__/json.cpython-312.pyc,, +langchain_core/output_parsers/__pycache__/list.cpython-312.pyc,, +langchain_core/output_parsers/__pycache__/openai_functions.cpython-312.pyc,, +langchain_core/output_parsers/__pycache__/openai_tools.cpython-312.pyc,, +langchain_core/output_parsers/__pycache__/pydantic.cpython-312.pyc,, +langchain_core/output_parsers/__pycache__/string.cpython-312.pyc,, +langchain_core/output_parsers/__pycache__/transform.cpython-312.pyc,, +langchain_core/output_parsers/__pycache__/xml.cpython-312.pyc,, +langchain_core/output_parsers/base.py,sha256=1ZKnoN6a1euFs3_DBYdi4eJyhx_nXArC6OZJjX8fyaU,11161 +langchain_core/output_parsers/format_instructions.py,sha256=8oUbeysnVGvXWyNd5gqXlEL850D31gMTy74GflsuvRU,553 +langchain_core/output_parsers/json.py,sha256=KFaWBScOwNRXTFZZFf6_TWamt_XP675dPAc_oNPc9IU,4769 +langchain_core/output_parsers/list.py,sha256=y2t5_1eG1n7EARskge2MQ58uBaT1NksQXhVLidSXf1A,7624 +langchain_core/output_parsers/openai_functions.py,sha256=h2vL0oIWRwpPLtnNchkPaJiCrXE0lTN3_pLvmkh9oaw,10494 +langchain_core/output_parsers/openai_tools.py,sha256=GLSQMJ4TD05TZOtLVnhwI9ZfMVNmRm3FNE3QCWDioOM,11059 +langchain_core/output_parsers/pydantic.py,sha256=gx1CZOTDuugNACz5zt4sNSpO3YFf5Qpx0jjP7dPEIYU,4649 
+langchain_core/output_parsers/string.py,sha256=F82gzziR6Ovea8kfkZD0gIgYBb3g7DWxuE_V523J3X8,898 +langchain_core/output_parsers/transform.py,sha256=Ff9PJqoIhPfgSjAL-nOBh2W7Dc44e15Ow36pyd7M8VA,5806 +langchain_core/output_parsers/xml.py,sha256=hDCV4HhPIrO_nWAV5IQcz4RJ6HoJC9btipZDhD4yZsM,10927 +langchain_core/outputs/__init__.py,sha256=AtGW1qQJOX3B-n8S8BlZdCDHUyAyTYK0dfs9ywcLrEo,2133 +langchain_core/outputs/__pycache__/__init__.cpython-312.pyc,, +langchain_core/outputs/__pycache__/chat_generation.cpython-312.pyc,, +langchain_core/outputs/__pycache__/chat_result.cpython-312.pyc,, +langchain_core/outputs/__pycache__/generation.cpython-312.pyc,, +langchain_core/outputs/__pycache__/llm_result.cpython-312.pyc,, +langchain_core/outputs/__pycache__/run_info.cpython-312.pyc,, +langchain_core/outputs/chat_generation.py,sha256=hJ8RcKWAQE8y9HZ_OZ_iKfR3jTUJ2eGLNrduel18QCM,4293 +langchain_core/outputs/chat_result.py,sha256=us15wVh00AYkIVNmf0VETEI9aoEQy-cT-SIXMX-98Zc,1356 +langchain_core/outputs/generation.py,sha256=hYl5K90Eul8ldn6UEFwt1fqnMHRG5tI96SY74vm_O50,2312 +langchain_core/outputs/llm_result.py,sha256=dGOds21b1__h8WxI3c_-9_8rzy_gd0MUmH_Gfwz_CzI,3598 +langchain_core/outputs/run_info.py,sha256=xCMWdsHfgnnodaf4OCMvZaWUfS836X7mV15JPkqvZjo,594 +langchain_core/prompt_values.py,sha256=HuG3X7gIYRXfFwpdOYnwksJM-OmcdAFchjoln1nXSg0,4002 +langchain_core/prompts/__init__.py,sha256=uPeciJFnfvTVpxbAlBoo1BIwo8SvLd1YwI-TEBQ8SmU,4123 +langchain_core/prompts/__pycache__/__init__.cpython-312.pyc,, +langchain_core/prompts/__pycache__/base.cpython-312.pyc,, +langchain_core/prompts/__pycache__/chat.cpython-312.pyc,, +langchain_core/prompts/__pycache__/dict.cpython-312.pyc,, +langchain_core/prompts/__pycache__/few_shot.cpython-312.pyc,, +langchain_core/prompts/__pycache__/few_shot_with_templates.cpython-312.pyc,, +langchain_core/prompts/__pycache__/image.cpython-312.pyc,, +langchain_core/prompts/__pycache__/loading.cpython-312.pyc,, +langchain_core/prompts/__pycache__/message.cpython-312.pyc,, +langchain_core/prompts/__pycache__/pipeline.cpython-312.pyc,, +langchain_core/prompts/__pycache__/prompt.cpython-312.pyc,, +langchain_core/prompts/__pycache__/string.cpython-312.pyc,, +langchain_core/prompts/__pycache__/structured.cpython-312.pyc,, +langchain_core/prompts/base.py,sha256=NMpygbGTa8P_yyQ7syf1yBtMtWeydHhbPAnSbIa05Dw,16088 +langchain_core/prompts/chat.py,sha256=MjqBniMP65tKfYMPVZkikP73HWT1lNA7T9P4vLt4KG4,51856 +langchain_core/prompts/dict.py,sha256=mTUZ1YcOUpw6uzYavLokiPwXfwfxi4Dvx5uhQA4BbEo,4568 +langchain_core/prompts/few_shot.py,sha256=RukjrZKkCIYjoZ1zNp8GtFx1nruKIBAu1Cyd77rtq3E,16190 +langchain_core/prompts/few_shot_with_templates.py,sha256=7uD9OZ2y0gxMLMLizV4Ww5cwo-h6bT3urwibwvYK_TE,7743 +langchain_core/prompts/image.py,sha256=-TH3IanHifgA_p_dO92Wqd9vpMTCc8AQOc3uEGc0RFk,4571 +langchain_core/prompts/loading.py,sha256=QR5EcIqKMFkBG0Dh7NOftG1yBXX9oeV9qLzhYH1b9Hs,7050 +langchain_core/prompts/message.py,sha256=L6QbRIv03pr0fJtayhnoynmIEFKIRTTAuWgrx5wLSv0,2592 +langchain_core/prompts/pipeline.py,sha256=8deq1Jdw3MYNLLVCJIybThS68x-so6jDf610HM3DiEY,4732 +langchain_core/prompts/prompt.py,sha256=Pyw8Yrux8XohmTQLa-P4qQ2nRaP-WU4jggVdm_tdLFo,11271 +langchain_core/prompts/string.py,sha256=5vbp610xtuRuz8q2PI1vCAsBLEDeDDq79G-YAcWOyRI,10246 +langchain_core/prompts/structured.py,sha256=wpg8QLVK0CqMw6f_VH0bkfqOju2K4SSwXOH5vu7pWTM,5756 +langchain_core/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +langchain_core/pydantic_v1/__init__.py,sha256=Gr-ePaQcEswjsMfS93vUXqfbOc0bLnhlbTJd7h61zxw,1781 
+langchain_core/pydantic_v1/__pycache__/__init__.cpython-312.pyc,, +langchain_core/pydantic_v1/__pycache__/dataclasses.cpython-312.pyc,, +langchain_core/pydantic_v1/__pycache__/main.cpython-312.pyc,, +langchain_core/pydantic_v1/dataclasses.py,sha256=lG_6HThJk4-P9mSOWwO-FeA9lyB_r1WjS1Q1eP9xzq8,996 +langchain_core/pydantic_v1/main.py,sha256=F_4_8f0uP3i8liMpNekmkNKuasrn3DEODDFSuBfjQJY,993 +langchain_core/rate_limiters.py,sha256=pUoyVUDGhSclWOWESrk-_upEKqp61EmyIz-SDfF3UHo,9588 +langchain_core/retrievers.py,sha256=BJs-D613Y9Olg88EenMZHY5tde7TJ0xxpvRAZEAIOx4,16707 +langchain_core/runnables/__init__.py,sha256=HlmOBrdSLMmd5xsK73uVwQI0TVwX0Sj3cxG6hq38MZ4,3858 +langchain_core/runnables/__pycache__/__init__.cpython-312.pyc,, +langchain_core/runnables/__pycache__/base.cpython-312.pyc,, +langchain_core/runnables/__pycache__/branch.cpython-312.pyc,, +langchain_core/runnables/__pycache__/config.cpython-312.pyc,, +langchain_core/runnables/__pycache__/configurable.cpython-312.pyc,, +langchain_core/runnables/__pycache__/fallbacks.cpython-312.pyc,, +langchain_core/runnables/__pycache__/graph.cpython-312.pyc,, +langchain_core/runnables/__pycache__/graph_ascii.cpython-312.pyc,, +langchain_core/runnables/__pycache__/graph_mermaid.cpython-312.pyc,, +langchain_core/runnables/__pycache__/graph_png.cpython-312.pyc,, +langchain_core/runnables/__pycache__/history.cpython-312.pyc,, +langchain_core/runnables/__pycache__/passthrough.cpython-312.pyc,, +langchain_core/runnables/__pycache__/retry.cpython-312.pyc,, +langchain_core/runnables/__pycache__/router.cpython-312.pyc,, +langchain_core/runnables/__pycache__/schema.cpython-312.pyc,, +langchain_core/runnables/__pycache__/utils.cpython-312.pyc,, +langchain_core/runnables/base.py,sha256=mYabX1I-wh-Ftmk8AjxrmLiN_dhHaKyhYfQ6wCwwePU,220855 +langchain_core/runnables/branch.py,sha256=suvBrdEzH6sb4uDIOwHZodEpDYhyt3FdPJlQ1J1n8AE,16501 +langchain_core/runnables/config.py,sha256=77AImgeg7IWdcXNQ1bLv_oKOBMwNVmvw9mcWTy0ciIM,20395 +langchain_core/runnables/configurable.py,sha256=9cICu36sZAp40oN5gRlJn1Sg9GuTG0j-74VShfNaSYk,24344 +langchain_core/runnables/fallbacks.py,sha256=L1uJC5cLfMnWPXJVcXo8ZgCRo-BFmG8AXH6J9_afhjM,24316 +langchain_core/runnables/graph.py,sha256=IfV4HHEJyU2dTuE03RLyQORBxdGsK3MTOADgR4OV6kQ,23285 +langchain_core/runnables/graph_ascii.py,sha256=VgPhRsjnHDfd4fUGKKBKQG08cTuf0oGEZERzd9zx_g4,9910 +langchain_core/runnables/graph_mermaid.py,sha256=r70-8PxUpQBzQGfoqOnF0f5CHFUEat0mUPRjr3MNXEY,16584 +langchain_core/runnables/graph_png.py,sha256=A12vGi7oujuD22NhxEpbZc07a18O1HLxuvW8Gf0etXk,5901 +langchain_core/runnables/history.py,sha256=6l8QGRhvQq0jJcQGkS7D0bfWF8Vj6CpOL8uNiFcl55o,24959 +langchain_core/runnables/passthrough.py,sha256=WZaJd6sj1LwA8N4JyoR3OH6CGrLZ1rHgmWZA5pvSHPU,25966 +langchain_core/runnables/retry.py,sha256=IzCtwr51qCXIGHCWyzA7TVtEtu_lQgvDJLPVhGm5-NA,12764 +langchain_core/runnables/router.py,sha256=hFrVAdMtVMqufzu1U5SrNsliLG7Wf1yRthXQ1-eiSU0,7237 +langchain_core/runnables/schema.py,sha256=3wNfFjjNtNwfYt7tArA46cuzxCVQABjgXz6ZdECkBI4,5542 +langchain_core/runnables/utils.py,sha256=0-IhEwmzb2sXtCKJ9NrFhOSc7eZhyGrX7nJecsCboDk,23409 +langchain_core/stores.py,sha256=fiKOpG-Zh3KX5CizyxeC0ltQLbKH03X1K-f7xH4JKnA,10819 +langchain_core/structured_query.py,sha256=T-dvM9P5hyVbH0Nx08TzdleZgXCYBsc_1iRPgyYrqCo,5272 +langchain_core/sys_info.py,sha256=85XDNlyXHVlxdNnPP1DHLDN3f6-MDJH5zAUZh9xNP1M,4087 +langchain_core/tools/__init__.py,sha256=JggWN2KLO_xiomksrOSiHJDs-I0g5E76KERtylxfo0M,2860 +langchain_core/tools/__pycache__/__init__.cpython-312.pyc,, 
+langchain_core/tools/__pycache__/base.cpython-312.pyc,, +langchain_core/tools/__pycache__/convert.cpython-312.pyc,, +langchain_core/tools/__pycache__/render.cpython-312.pyc,, +langchain_core/tools/__pycache__/retriever.cpython-312.pyc,, +langchain_core/tools/__pycache__/simple.cpython-312.pyc,, +langchain_core/tools/__pycache__/structured.cpython-312.pyc,, +langchain_core/tools/base.py,sha256=p6kDHV9BedeBQiesvOpEubw34ryBAAc4YieNCaYyrGU,42459 +langchain_core/tools/convert.py,sha256=PV13P_ik1Y2KWnbqORu5M3rkVTCIApLxRPmpgrsFWxk,15682 +langchain_core/tools/render.py,sha256=BosvIWrSvOJgRg_gaSDBS58j99gwQHsLhprOXeJP53I,1842 +langchain_core/tools/retriever.py,sha256=zlSV3HnWhhmtZtkNGbNQW9wxv8GptJKmDhzqZj8e36o,3873 +langchain_core/tools/simple.py,sha256=GwawH2sfn05W18g8H4NKOza-X5Rrw-pdPwUmVBitO3Y,6048 +langchain_core/tools/structured.py,sha256=z1h9Pqb-inl5uvMykLmQbeqPZ6xBxxiyuh9P7gxBYDM,8723 +langchain_core/tracers/__init__.py,sha256=lMMiPSLXT9zLA1yCJ8eB1Vf639Mpnz1OC9ZaPgEbgoY,1611 +langchain_core/tracers/__pycache__/__init__.cpython-312.pyc,, +langchain_core/tracers/__pycache__/_streaming.cpython-312.pyc,, +langchain_core/tracers/__pycache__/base.cpython-312.pyc,, +langchain_core/tracers/__pycache__/context.cpython-312.pyc,, +langchain_core/tracers/__pycache__/core.cpython-312.pyc,, +langchain_core/tracers/__pycache__/evaluation.cpython-312.pyc,, +langchain_core/tracers/__pycache__/event_stream.cpython-312.pyc,, +langchain_core/tracers/__pycache__/langchain.cpython-312.pyc,, +langchain_core/tracers/__pycache__/langchain_v1.cpython-312.pyc,, +langchain_core/tracers/__pycache__/log_stream.cpython-312.pyc,, +langchain_core/tracers/__pycache__/memory_stream.cpython-312.pyc,, +langchain_core/tracers/__pycache__/root_listeners.cpython-312.pyc,, +langchain_core/tracers/__pycache__/run_collector.cpython-312.pyc,, +langchain_core/tracers/__pycache__/schemas.cpython-312.pyc,, +langchain_core/tracers/__pycache__/stdout.cpython-312.pyc,, +langchain_core/tracers/_streaming.py,sha256=TT2N_dzOQIqEM9dH7v3d_-eZKEfkcQxMJqItsMofMpY,960 +langchain_core/tracers/base.py,sha256=6TWPk6fL4Ep4ywh3q-aGzy-PdiaH6hDZhLs5Z4bL45Q,26025 +langchain_core/tracers/context.py,sha256=7TTqCOMTV3F-SX513lmXtaq0R3PNuAIKrYTpIFjxWRs,7106 +langchain_core/tracers/core.py,sha256=zI6gVD69qFNiPqsroL1Qvj6df9L3bekbpdP4Zgkjqbk,22740 +langchain_core/tracers/evaluation.py,sha256=_8WDpkqpIVtCcnm7IiHFTU2RU2BaOxqrEj-MwVYlmYU,8393 +langchain_core/tracers/event_stream.py,sha256=3zXVJ9TrGseKdsJnrIeKN2T6_qsFvap2fvfR_fp1pTo,33581 +langchain_core/tracers/langchain.py,sha256=l6xFdBU-Pfr8uKlqp2AMEdVVSS_tg4K8k_2GeBlT8VU,11204 +langchain_core/tracers/langchain_v1.py,sha256=Fra8JU3HPs_PLeTMbLcM1NLqEqPnKB6xcX4myjFfbnY,727 +langchain_core/tracers/log_stream.py,sha256=-Ej7OrlZhTysvSTIu6Reg6FQdGrBpLFKXnBSp7CpVc4,24015 +langchain_core/tracers/memory_stream.py,sha256=3A-cwA3-lq5YFbCZWYM8kglVv1bPT4kwM2L_q8axkhU,5032 +langchain_core/tracers/root_listeners.py,sha256=VRr3jnSSLYsIqYEmw9OjbjGgj4897c4fnNqhMhKDfys,4672 +langchain_core/tracers/run_collector.py,sha256=Tnnz5sfKkUI6Rapj8mGjScYGkyEKRyicWOhvEXHV3qE,1622 +langchain_core/tracers/schemas.py,sha256=gDLeMQ_uJuqOIt9MIWrHmtWqAqLTVWSu5MpMPt8y7jM,3741 +langchain_core/tracers/stdout.py,sha256=T-Eu_3jmQXAEQSYrk3Di3S9IOftmuM6P8NcMTSFkfIw,6821 +langchain_core/utils/__init__.py,sha256=JfqwEHkZKKQ5eChe_Cz242mRDZ7u9qUTJbklljjVxwY,3036 +langchain_core/utils/__pycache__/__init__.cpython-312.pyc,, +langchain_core/utils/__pycache__/_merge.cpython-312.pyc,, +langchain_core/utils/__pycache__/aiter.cpython-312.pyc,, 
+langchain_core/utils/__pycache__/env.cpython-312.pyc,, +langchain_core/utils/__pycache__/formatting.cpython-312.pyc,, +langchain_core/utils/__pycache__/function_calling.cpython-312.pyc,, +langchain_core/utils/__pycache__/html.cpython-312.pyc,, +langchain_core/utils/__pycache__/image.cpython-312.pyc,, +langchain_core/utils/__pycache__/input.cpython-312.pyc,, +langchain_core/utils/__pycache__/interactive_env.cpython-312.pyc,, +langchain_core/utils/__pycache__/iter.cpython-312.pyc,, +langchain_core/utils/__pycache__/json.cpython-312.pyc,, +langchain_core/utils/__pycache__/json_schema.cpython-312.pyc,, +langchain_core/utils/__pycache__/loading.cpython-312.pyc,, +langchain_core/utils/__pycache__/mustache.cpython-312.pyc,, +langchain_core/utils/__pycache__/pydantic.cpython-312.pyc,, +langchain_core/utils/__pycache__/strings.cpython-312.pyc,, +langchain_core/utils/__pycache__/usage.cpython-312.pyc,, +langchain_core/utils/__pycache__/utils.cpython-312.pyc,, +langchain_core/utils/_merge.py,sha256=FUcKO2paHCi6fYKqt4G95wFtw_D2PutyA5pi8XkfkqQ,5637 +langchain_core/utils/aiter.py,sha256=gzJj2OG03lZxi1wCD9htqDb56qRwUBDBSXf-yREb1Ks,10712 +langchain_core/utils/env.py,sha256=W3WEuWfBxsMF_9m9XGGKDM-TzfXhxps39SQ0bK_KOSA,2491 +langchain_core/utils/formatting.py,sha256=fkieArzKXxSsLcEa3B-MX60O4ZLeeLjiPtVtxCJPcOU,1480 +langchain_core/utils/function_calling.py,sha256=3O7IH6qNpFYc91X1fr95HL3Brh0xqpS7UHOXXpp6wmQ,27737 +langchain_core/utils/html.py,sha256=fUogMGhd-VoUbsGnMyY6v_gv9nbxJy-vmC4yfICcflM,3780 +langchain_core/utils/image.py,sha256=99sCtCnA8uMtjp4vzq-l6dUrxnspEjoDO4QJTrrFyKM,532 +langchain_core/utils/input.py,sha256=z3tubdUtsoHqfTyiBGfELLr1xemSe-pGvhfAeGE6O2g,1958 +langchain_core/utils/interactive_env.py,sha256=Apx6gRncLvidU75maFoI-Gfx-FhDqO2vyiZnR32QAaE,200 +langchain_core/utils/iter.py,sha256=GILpf9XK8l51RydScNcuWeBvhgCuUYlalJLsjeP9PV0,7411 +langchain_core/utils/json.py,sha256=7K3dV2aOfT-1cLl5ZQrfmw9sVnLrn7batTsByzjlPdg,6197 +langchain_core/utils/json_schema.py,sha256=qHkMkEwytAKuBF8bVFaLNILagoSBGZVBeDyfgFHXTkg,3534 +langchain_core/utils/loading.py,sha256=7B9nuzOutgknzj5-8W6eorC9EUsNuO-1w4jh-aVf8ms,931 +langchain_core/utils/mustache.py,sha256=9EnNIR917BhLxKOlc5bcPWJso4a1iRLlHmHDYLx1Aws,21101 +langchain_core/utils/pydantic.py,sha256=2ZgJkzMiPxENhWkNoGtj1FJT6yslkHqCM5bQwLzSN6o,21132 +langchain_core/utils/strings.py,sha256=LIh8uZcGlEKI_SnbOA_PsZxcU6QI5GQKTj0hxOraIv0,1016 +langchain_core/utils/usage.py,sha256=EYv0poDqA7VejEsPyoA19lEt9M4L24Tppf4OPtOjGwI,1202 +langchain_core/utils/utils.py,sha256=RK9JRNsdb4mXu1XYuJFuvDqyglSpnr6ak0vb0ELc7Eo,15043 +langchain_core/vectorstores/__init__.py,sha256=LDDEDhLj2PdemVv9zyT70mUtIsaV0BzAHy9Rrjgp5OI,804 +langchain_core/vectorstores/__pycache__/__init__.cpython-312.pyc,, +langchain_core/vectorstores/__pycache__/base.cpython-312.pyc,, +langchain_core/vectorstores/__pycache__/in_memory.cpython-312.pyc,, +langchain_core/vectorstores/__pycache__/utils.cpython-312.pyc,, +langchain_core/vectorstores/base.py,sha256=yrsqKj62P7_RlNQ1yI1Ogjb_l4nKsJe-cA08ALl9iI0,42027 +langchain_core/vectorstores/in_memory.py,sha256=rFjFWej4_0RQOnJ-Q0NYiN_4xCwW__3ngmM-MAaKIt0,18062 +langchain_core/vectorstores/utils.py,sha256=k8owmg2LDwfgbjkPZSOI_ctuzpN_vtMlUsA4tySesdQ,4269 +langchain_core/version.py,sha256=3IRfAKHh8AkEpkVV8D7LILQuwcWbnhGLBobT2yd4sAw,76 diff --git a/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/WHEEL b/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/WHEEL new file mode 100644 index 00000000..45ec8c4e --- /dev/null +++ 
b/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: pdm-backend (2.4.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/entry_points.txt b/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/entry_points.txt
new file mode 100644
index 00000000..c3ad4726
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core-0.3.59.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+[console_scripts]
+
+[gui_scripts]
+
diff --git a/venv/Lib/site-packages/langchain_core/__init__.py b/venv/Lib/site-packages/langchain_core/__init__.py
new file mode 100644
index 00000000..498f503a
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/__init__.py
@@ -0,0 +1,20 @@
+"""``langchain-core`` defines the base abstractions for the LangChain ecosystem.
+
+The interfaces for core components like chat models, LLMs, vector stores, retrievers,
+and more are defined here. The universal invocation protocol (Runnables) along with
+a syntax for combining components (LangChain Expression Language) are also defined here.
+
+No third-party integrations are defined here. The dependencies are kept purposefully
+very lightweight.
+"""
+
+from langchain_core._api import (
+    surface_langchain_beta_warnings,
+    surface_langchain_deprecation_warnings,
+)
+from langchain_core.version import VERSION
+
+__version__ = VERSION
+
+surface_langchain_deprecation_warnings()
+surface_langchain_beta_warnings()
[18 binary diffs omitted: venv/Lib/site-packages/langchain_core/__pycache__/*.cpython-312.pyc]
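The top-level `__init__.py` above does two things: it exposes `__version__` and it opts LangChain's custom warning categories back in, because Python silences `DeprecationWarning` subclasses by default. A minimal, self-contained sketch of that pattern follows; the `MyLibDeprecationWarning` name and the message are invented for illustration:

```python
import warnings


class MyLibDeprecationWarning(DeprecationWarning):
    """Library-specific deprecation category."""


def surface_mylib_deprecation_warnings() -> None:
    # DeprecationWarning subclasses are hidden by default; re-enabling
    # only this subclass surfaces the library's own deprecations without
    # un-silencing every DeprecationWarning in the interpreter.
    warnings.filterwarnings("default", category=MyLibDeprecationWarning)


surface_mylib_deprecation_warnings()
warnings.warn("old_api() is deprecated", MyLibDeprecationWarning, stacklevel=2)
```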
diff --git a/venv/Lib/site-packages/langchain_core/_api/__init__.py b/venv/Lib/site-packages/langchain_core/_api/__init__.py
new file mode 100644
index 00000000..01b52e86
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/_api/__init__.py
@@ -0,0 +1,69 @@
+"""Helper functions for managing the LangChain API.
+
+This module is only relevant for LangChain developers, not for users.
+
+.. warning::
+
+    This module and its submodules are for internal use only. Do not use them
+    in your own code. We may change the API at any time with no warning.
+
+"""
+
+from typing import TYPE_CHECKING
+
+from langchain_core._import_utils import import_attr
+
+if TYPE_CHECKING:
+    from .beta_decorator import (
+        LangChainBetaWarning,
+        beta,
+        suppress_langchain_beta_warning,
+        surface_langchain_beta_warnings,
+    )
+    from .deprecation import (
+        LangChainDeprecationWarning,
+        deprecated,
+        suppress_langchain_deprecation_warning,
+        surface_langchain_deprecation_warnings,
+        warn_deprecated,
+    )
+    from .path import as_import_path, get_relative_path
+
+__all__ = (
+    "as_import_path",
+    "beta",
+    "deprecated",
+    "get_relative_path",
+    "LangChainBetaWarning",
+    "LangChainDeprecationWarning",
+    "suppress_langchain_beta_warning",
+    "surface_langchain_beta_warnings",
+    "suppress_langchain_deprecation_warning",
+    "surface_langchain_deprecation_warnings",
+    "warn_deprecated",
+)
+
+_dynamic_imports = {
+    "LangChainBetaWarning": "beta_decorator",
+    "beta": "beta_decorator",
+    "suppress_langchain_beta_warning": "beta_decorator",
+    "surface_langchain_beta_warnings": "beta_decorator",
+    "as_import_path": "path",
+    "get_relative_path": "path",
+    "LangChainDeprecationWarning": "deprecation",
+    "deprecated": "deprecation",
+    "surface_langchain_deprecation_warnings": "deprecation",
+    "suppress_langchain_deprecation_warning": "deprecation",
+    "warn_deprecated": "deprecation",
+}
+
+
+def __getattr__(attr_name: str) -> object:
+    module_name = _dynamic_imports.get(attr_name)
+    result = import_attr(attr_name, module_name, __spec__.parent)
+    globals()[attr_name] = result
+    return result
+
+
+def __dir__() -> list[str]:
+    return list(__all__)
[5 binary diffs omitted: venv/Lib/site-packages/langchain_core/_api/__pycache__/*.cpython-312.pyc]
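This `_api/__init__.py` uses PEP 562 module-level `__getattr__` for lazy imports: nothing under `_api` is loaded until an attribute is first requested, while the `TYPE_CHECKING` block keeps the names visible to static analyzers. A standalone sketch of the same pattern under a hypothetical package layout (`mypkg` with a `heavy` submodule defining `expensive_tool`; all names invented):

```python
# mypkg/__init__.py -- illustrative only
from importlib import import_module
from typing import TYPE_CHECKING

if TYPE_CHECKING:  # type checkers resolve the real symbol
    from .heavy import expensive_tool

__all__ = ("expensive_tool",)

# attribute name -> submodule that actually defines it
_dynamic_imports = {"expensive_tool": "heavy"}


def __getattr__(attr_name: str) -> object:
    # PEP 562: invoked only when normal module lookup fails, so the
    # submodule is imported on first access rather than at import time.
    module_name = _dynamic_imports.get(attr_name)
    if module_name is None:
        msg = f"module {__name__!r} has no attribute {attr_name!r}"
        raise AttributeError(msg)
    module = import_module(f".{module_name}", package=__name__)
    result = getattr(module, attr_name)
    globals()[attr_name] = result  # cache so later lookups bypass this hook
    return result


def __dir__() -> list[str]:
    return list(__all__)
```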
diff --git a/venv/Lib/site-packages/langchain_core/_api/beta_decorator.py b/venv/Lib/site-packages/langchain_core/_api/beta_decorator.py
new file mode 100644
index 00000000..8fe8d2b8
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/_api/beta_decorator.py
@@ -0,0 +1,286 @@
+"""Helper functions for marking parts of the LangChain API as beta.
+
+This module was loosely adapted from matplotlib's _api/deprecation.py module:
+
+https://github.com/matplotlib/matplotlib/blob/main/lib/matplotlib/_api/deprecation.py
+
+.. warning::
+
+    This module is for internal use only. Do not use it in your own code.
+    We may change the API at any time with no warning.
+"""
+
+import contextlib
+import functools
+import inspect
+import warnings
+from collections.abc import Generator
+from typing import Any, Callable, TypeVar, Union, cast
+
+from langchain_core._api.internal import is_caller_internal
+
+
+class LangChainBetaWarning(DeprecationWarning):
+    """A class for issuing beta warnings for LangChain users."""
+
+
+# PUBLIC API
+
+
+T = TypeVar("T", bound=Union[Callable[..., Any], type])
+
+
+def beta(
+    *,
+    message: str = "",
+    name: str = "",
+    obj_type: str = "",
+    addendum: str = "",
+) -> Callable[[T], T]:
+    """Decorator to mark a function, a class, or a property as beta.
+
+    When marking a classmethod, a staticmethod, or a property, the
+    ``@beta`` decorator should go *under* ``@classmethod`` and
+    ``@staticmethod`` (i.e., `beta` should directly decorate the
+    underlying callable), but *over* ``@property``.
+
+    When marking a class ``C`` intended to be used as a base class in a
+    multiple inheritance hierarchy, ``C`` *must* define an ``__init__`` method
+    (if ``C`` instead inherited its ``__init__`` from its own base class, then
+    ``@beta`` would mess up ``__init__`` inheritance when installing its
+    own (annotation-emitting) ``C.__init__``).
+
+    Args:
+        message : str, optional
+            Override the default beta message. The %(since)s,
+            %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
+            and %(removal)s format specifiers will be replaced by the
+            values of the respective arguments passed to this function.
+        name : str, optional
+            The name of the beta object.
+        obj_type : str, optional
+            The object type being beta.
+        addendum : str, optional
+            Additional text appended directly to the final message.
+
+    Examples:
+
+        ..
code-block:: python + + @beta + def the_function_to_annotate(): + pass + """ + + def beta( + obj: T, + *, + _obj_type: str = obj_type, + _name: str = name, + _message: str = message, + _addendum: str = addendum, + ) -> T: + """Implementation of the decorator returned by `beta`.""" + + def emit_warning() -> None: + """Emit the warning.""" + warn_beta( + message=_message, + name=_name, + obj_type=_obj_type, + addendum=_addendum, + ) + + warned = False + + def warning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any: + """Wrapper for the original wrapped callable that emits a warning. + + Args: + *args: The positional arguments to the function. + **kwargs: The keyword arguments to the function. + + Returns: + The return value of the function being wrapped. + """ + nonlocal warned + if not warned and not is_caller_internal(): + warned = True + emit_warning() + return wrapped(*args, **kwargs) + + async def awarning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any: + """Same as warning_emitting_wrapper, but for async functions.""" + nonlocal warned + if not warned and not is_caller_internal(): + warned = True + emit_warning() + return await wrapped(*args, **kwargs) + + if isinstance(obj, type): + if not _obj_type: + _obj_type = "class" + wrapped = obj.__init__ # type: ignore[misc] + _name = _name or obj.__qualname__ + old_doc = obj.__doc__ + + def finalize(wrapper: Callable[..., Any], new_doc: str) -> T: # noqa: ARG001 + """Finalize the annotation of a class.""" + # Can't set new_doc on some extension objects. + with contextlib.suppress(AttributeError): + obj.__doc__ = new_doc + + def warn_if_direct_instance( + self: Any, *args: Any, **kwargs: Any + ) -> Any: + """Warn that the class is in beta.""" + nonlocal warned + if not warned and type(self) is obj and not is_caller_internal(): + warned = True + emit_warning() + return wrapped(self, *args, **kwargs) + + obj.__init__ = functools.wraps(obj.__init__)( # type: ignore[misc] + warn_if_direct_instance + ) + return cast("T", obj) + + elif isinstance(obj, property): + # note(erick): this block doesn't seem to be used? 
+ if not _obj_type: + _obj_type = "attribute" + wrapped = None + _name = _name or obj.fget.__qualname__ + old_doc = obj.__doc__ + + class _BetaProperty(property): + """A beta property.""" + + def __init__( + self, + fget: Union[Callable[[Any], Any], None] = None, + fset: Union[Callable[[Any, Any], None], None] = None, + fdel: Union[Callable[[Any], None], None] = None, + doc: Union[str, None] = None, + ) -> None: + super().__init__(fget, fset, fdel, doc) + self.__orig_fget = fget + self.__orig_fset = fset + self.__orig_fdel = fdel + + def __get__( + self, instance: Any, owner: Union[type, None] = None + ) -> Any: + if instance is not None or owner is not None: + emit_warning() + return self.fget(instance) + + def __set__(self, instance: Any, value: Any) -> None: + if instance is not None: + emit_warning() + return self.fset(instance, value) + + def __delete__(self, instance: Any) -> None: + if instance is not None: + emit_warning() + return self.fdel(instance) + + def __set_name__(self, owner: Union[type, None], set_name: str) -> None: + nonlocal _name + if _name == "": + _name = set_name + + def finalize(wrapper: Callable[..., Any], new_doc: str) -> Any: # noqa: ARG001 + """Finalize the property.""" + return _BetaProperty( + fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc + ) + + else: + _name = _name or obj.__qualname__ + if not _obj_type: + # edge case: when a function is within another function + # within a test, this will call it a "method" not a "function" + _obj_type = "function" if "." not in _name else "method" + wrapped = obj + old_doc = wrapped.__doc__ + + def finalize(wrapper: Callable[..., Any], new_doc: str) -> T: + """Wrap the wrapped function using the wrapper and update the docstring. + + Args: + wrapper: The wrapper function. + new_doc: The new docstring. + + Returns: + The wrapped function. + """ + wrapper = functools.wraps(wrapped)(wrapper) + wrapper.__doc__ = new_doc + return cast("T", wrapper) + + old_doc = inspect.cleandoc(old_doc or "").strip("\n") or "" + components = [message, addendum] + details = " ".join([component.strip() for component in components if component]) + new_doc = f".. beta::\n {details}\n\n{old_doc}\n" + + if inspect.iscoroutinefunction(obj): + return finalize(awarning_emitting_wrapper, new_doc) + return finalize(warning_emitting_wrapper, new_doc) + + return beta + + +@contextlib.contextmanager +def suppress_langchain_beta_warning() -> Generator[None, None, None]: + """Context manager to suppress LangChainDeprecationWarning.""" + with warnings.catch_warnings(): + warnings.simplefilter("ignore", LangChainBetaWarning) + yield + + +def warn_beta( + *, + message: str = "", + name: str = "", + obj_type: str = "", + addendum: str = "", +) -> None: + """Display a standardized beta annotation. + + Arguments: + message : str, optional + Override the default beta message. The + %(name)s, %(obj_type)s, %(addendum)s + format specifiers will be replaced by the + values of the respective arguments passed to this function. + name : str, optional + The name of the annotated object. + obj_type : str, optional + The object type being annotated. + addendum : str, optional + Additional text appended directly to the final message. + """ + if not message: + message = "" + + if obj_type: + message += f"The {obj_type} `{name}`" + else: + message += f"`{name}`" + + message += " is in beta. It is actively being worked on, so the API may change." 
+ + if addendum: + message += f" {addendum}" + + warning = LangChainBetaWarning(message) + warnings.warn(warning, category=LangChainBetaWarning, stacklevel=4) + + +def surface_langchain_beta_warnings() -> None: + """Unmute LangChain beta warnings.""" + warnings.filterwarnings( + "default", + category=LangChainBetaWarning, + ) diff --git a/venv/Lib/site-packages/langchain_core/_api/deprecation.py b/venv/Lib/site-packages/langchain_core/_api/deprecation.py new file mode 100644 index 00000000..fcbfc58b --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/_api/deprecation.py @@ -0,0 +1,575 @@ +"""Helper functions for deprecating parts of the LangChain API. + +This module was adapted from matplotlibs _api/deprecation.py module: + +https://github.com/matplotlib/matplotlib/blob/main/lib/matplotlib/_api/deprecation.py + +.. warning:: + + This module is for internal use only. Do not use it in your own code. + We may change the API at any time with no warning. +""" + +import contextlib +import functools +import inspect +import warnings +from collections.abc import Generator +from typing import ( + Any, + Callable, + TypeVar, + Union, + cast, +) + +from typing_extensions import ParamSpec + +from langchain_core._api.internal import is_caller_internal + + +class LangChainDeprecationWarning(DeprecationWarning): + """A class for issuing deprecation warnings for LangChain users.""" + + +class LangChainPendingDeprecationWarning(PendingDeprecationWarning): + """A class for issuing deprecation warnings for LangChain users.""" + + +# PUBLIC API + + +# Last Any should be FieldInfoV1 but this leads to circular imports +T = TypeVar("T", bound=Union[type, Callable[..., Any], Any]) + + +def _validate_deprecation_params( + removal: str, + alternative: str, + alternative_import: str, + *, + pending: bool, +) -> None: + """Validate the deprecation parameters.""" + if pending and removal: + msg = "A pending deprecation cannot have a scheduled removal" + raise ValueError(msg) + if alternative and alternative_import: + msg = "Cannot specify both alternative and alternative_import" + raise ValueError(msg) + + if alternative_import and "." not in alternative_import: + msg = ( + "alternative_import must be a fully qualified module path. Got " + f" {alternative_import}" + ) + raise ValueError(msg) + + +def deprecated( + since: str, + *, + message: str = "", + name: str = "", + alternative: str = "", + alternative_import: str = "", + pending: bool = False, + obj_type: str = "", + addendum: str = "", + removal: str = "", + package: str = "", +) -> Callable[[T], T]: + """Decorator to mark a function, a class, or a property as deprecated. + + When deprecating a classmethod, a staticmethod, or a property, the + ``@deprecated`` decorator should go *under* ``@classmethod`` and + ``@staticmethod`` (i.e., `deprecated` should directly decorate the + underlying callable), but *over* ``@property``. + + When deprecating a class ``C`` intended to be used as a base class in a + multiple inheritance hierarchy, ``C`` *must* define an ``__init__`` method + (if ``C`` instead inherited its ``__init__`` from its own base class, then + ``@deprecated`` would mess up ``__init__`` inheritance when installing its + own (deprecation-emitting) ``C.__init__``). + + Parameters are the same as for `warn_deprecated`, except that *obj_type* + defaults to 'class' if decorating a class, 'attribute' if decorating a + property, and 'function' otherwise. + + Args: + since : str + The release at which this API became deprecated. 
+ message : str, optional + Override the default deprecation message. The %(since)s, + %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s, + and %(removal)s format specifiers will be replaced by the + values of the respective arguments passed to this function. + name : str, optional + The name of the deprecated object. + alternative : str, optional + An alternative API that the user may use in place of the + deprecated API. The deprecation warning will tell the user + about this alternative if provided. + alternative_import: str, optional + An alternative import that the user may use instead. + pending : bool, optional + If True, uses a PendingDeprecationWarning instead of a + DeprecationWarning. Cannot be used together with removal. + obj_type : str, optional + The object type being deprecated. + addendum : str, optional + Additional text appended directly to the final message. + removal : str, optional + The expected removal version. With the default (an empty + string), a removal version is automatically computed from + since. Set to other Falsy values to not schedule a removal + date. Cannot be used together with pending. + package: str, optional + The package of the deprecated object. + + Examples: + + .. code-block:: python + + @deprecated('1.4.0') + def the_function_to_deprecate(): + pass + """ + _validate_deprecation_params( + removal, alternative, alternative_import, pending=pending + ) + + def deprecate( + obj: T, + *, + _obj_type: str = obj_type, + _name: str = name, + _message: str = message, + _alternative: str = alternative, + _alternative_import: str = alternative_import, + _pending: bool = pending, + _addendum: str = addendum, + _package: str = package, + ) -> T: + """Implementation of the decorator returned by `deprecated`.""" + from langchain_core.utils.pydantic import ( # type: ignore[attr-defined] + FieldInfoV1, + FieldInfoV2, + ) + + def emit_warning() -> None: + """Emit the warning.""" + warn_deprecated( + since, + message=_message, + name=_name, + alternative=_alternative, + alternative_import=_alternative_import, + pending=_pending, + obj_type=_obj_type, + addendum=_addendum, + removal=removal, + package=_package, + ) + + warned = False + + def warning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any: + """Wrapper for the original wrapped callable that emits a warning. + + Args: + *args: The positional arguments to the function. + **kwargs: The keyword arguments to the function. + + Returns: + The return value of the function being wrapped. + """ + nonlocal warned + if not warned and not is_caller_internal(): + warned = True + emit_warning() + return wrapped(*args, **kwargs) + + async def awarning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any: + """Same as warning_emitting_wrapper, but for async functions.""" + nonlocal warned + if not warned and not is_caller_internal(): + warned = True + emit_warning() + return await wrapped(*args, **kwargs) + + _package = _package or obj.__module__.split(".")[0].replace("_", "-") + + if isinstance(obj, type): + if not _obj_type: + _obj_type = "class" + wrapped = obj.__init__ # type: ignore[misc] + _name = _name or obj.__qualname__ + old_doc = obj.__doc__ + + def finalize(wrapper: Callable[..., Any], new_doc: str) -> T: # noqa: ARG001 + """Finalize the deprecation of a class.""" + # Can't set new_doc on some extension objects. 
+ with contextlib.suppress(AttributeError): + obj.__doc__ = new_doc + + def warn_if_direct_instance( + self: Any, *args: Any, **kwargs: Any + ) -> Any: + """Warn that the class is in beta.""" + nonlocal warned + if not warned and type(self) is obj and not is_caller_internal(): + warned = True + emit_warning() + return wrapped(self, *args, **kwargs) + + obj.__init__ = functools.wraps(obj.__init__)( # type: ignore[misc] + warn_if_direct_instance + ) + return cast("T", obj) + + elif isinstance(obj, FieldInfoV1): + wrapped = None + if not _obj_type: + _obj_type = "attribute" + if not _name: + msg = f"Field {obj} must have a name to be deprecated." + raise ValueError(msg) + old_doc = obj.description + + def finalize(wrapper: Callable[..., Any], new_doc: str) -> T: # noqa: ARG001 + return cast( + "T", + FieldInfoV1( + default=obj.default, + default_factory=obj.default_factory, + description=new_doc, + alias=obj.alias, + exclude=obj.exclude, + ), + ) + + elif isinstance(obj, FieldInfoV2): + wrapped = None + if not _obj_type: + _obj_type = "attribute" + if not _name: + msg = f"Field {obj} must have a name to be deprecated." + raise ValueError(msg) + old_doc = obj.description + + def finalize(wrapper: Callable[..., Any], new_doc: str) -> T: # noqa: ARG001 + return cast( + "T", + FieldInfoV2( + default=obj.default, + default_factory=obj.default_factory, + description=new_doc, + alias=obj.alias, + exclude=obj.exclude, + ), + ) + + elif isinstance(obj, property): + if not _obj_type: + _obj_type = "attribute" + wrapped = None + _name = _name or cast("Union[type, Callable]", obj.fget).__qualname__ + old_doc = obj.__doc__ + + class _DeprecatedProperty(property): + """A deprecated property.""" + + def __init__( + self, + fget: Union[Callable[[Any], Any], None] = None, + fset: Union[Callable[[Any, Any], None], None] = None, + fdel: Union[Callable[[Any], None], None] = None, + doc: Union[str, None] = None, + ) -> None: + super().__init__(fget, fset, fdel, doc) + self.__orig_fget = fget + self.__orig_fset = fset + self.__orig_fdel = fdel + + def __get__( + self, instance: Any, owner: Union[type, None] = None + ) -> Any: + if instance is not None or owner is not None: + emit_warning() + if self.fget is None: + return None + return self.fget(instance) + + def __set__(self, instance: Any, value: Any) -> None: + if instance is not None: + emit_warning() + if self.fset is not None: + self.fset(instance, value) + + def __delete__(self, instance: Any) -> None: + if instance is not None: + emit_warning() + if self.fdel is not None: + self.fdel(instance) + + def __set_name__(self, owner: Union[type, None], set_name: str) -> None: + nonlocal _name + if _name == "": + _name = set_name + + def finalize(wrapper: Callable[..., Any], new_doc: str) -> T: # noqa: ARG001 + """Finalize the property.""" + return cast( + "T", + _DeprecatedProperty( + fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc + ), + ) + + else: + _name = _name or cast("Union[type, Callable]", obj).__qualname__ + if not _obj_type: + # edge case: when a function is within another function + # within a test, this will call it a "method" not a "function" + _obj_type = "function" if "." not in _name else "method" + wrapped = obj + old_doc = wrapped.__doc__ + + def finalize(wrapper: Callable[..., Any], new_doc: str) -> T: + """Wrap the wrapped function using the wrapper and update the docstring. + + Args: + wrapper: The wrapper function. + new_doc: The new docstring. + + Returns: + The wrapped function. 
+ """ + wrapper = functools.wraps(wrapped)(wrapper) + wrapper.__doc__ = new_doc + return cast("T", wrapper) + + old_doc = inspect.cleandoc(old_doc or "").strip("\n") + + # old_doc can be None + if not old_doc: + old_doc = "" + + # Modify the docstring to include a deprecation notice. + if ( + _alternative + and _alternative.split(".")[-1].lower() == _alternative.split(".")[-1] + ): + _alternative = f":meth:`~{_alternative}`" + elif _alternative: + _alternative = f":class:`~{_alternative}`" + + if ( + _alternative_import + and _alternative_import.split(".")[-1].lower() + == _alternative_import.split(".")[-1] + ): + _alternative_import = f":meth:`~{_alternative_import}`" + elif _alternative_import: + _alternative_import = f":class:`~{_alternative_import}`" + + components = [ + _message, + f"Use {_alternative} instead." if _alternative else "", + f"Use ``{_alternative_import}`` instead." if _alternative_import else "", + _addendum, + ] + details = " ".join([component.strip() for component in components if component]) + package = _package or ( + _name.split(".")[0].replace("_", "-") if "." in _name else None + ) + if removal: + if removal.startswith("1.") and package and package.startswith("langchain"): + removal_str = f"It will not be removed until {package}=={removal}." + else: + removal_str = f"It will be removed in {package}=={removal}." + else: + removal_str = "" + new_doc = f"""\ +.. deprecated:: {since} {details} {removal_str} + +{old_doc}\ +""" + + if inspect.iscoroutinefunction(obj): + return finalize(awarning_emitting_wrapper, new_doc) + return finalize(warning_emitting_wrapper, new_doc) + + return deprecate + + +@contextlib.contextmanager +def suppress_langchain_deprecation_warning() -> Generator[None, None, None]: + """Context manager to suppress LangChainDeprecationWarning.""" + with warnings.catch_warnings(): + warnings.simplefilter("ignore", LangChainDeprecationWarning) + warnings.simplefilter("ignore", LangChainPendingDeprecationWarning) + yield + + +def warn_deprecated( + since: str, + *, + message: str = "", + name: str = "", + alternative: str = "", + alternative_import: str = "", + pending: bool = False, + obj_type: str = "", + addendum: str = "", + removal: str = "", + package: str = "", +) -> None: + """Display a standardized deprecation. + + Arguments: + since : str + The release at which this API became deprecated. + message : str, optional + Override the default deprecation message. The %(since)s, + %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s, + and %(removal)s format specifiers will be replaced by the + values of the respective arguments passed to this function. + name : str, optional + The name of the deprecated object. + alternative : str, optional + An alternative API that the user may use in place of the + deprecated API. The deprecation warning will tell the user + about this alternative if provided. + alternative_import: str, optional + An alternative import that the user may use instead. + pending : bool, optional + If True, uses a PendingDeprecationWarning instead of a + DeprecationWarning. Cannot be used together with removal. + obj_type : str, optional + The object type being deprecated. + addendum : str, optional + Additional text appended directly to the final message. + removal : str, optional + The expected removal version. With the default (an empty + string), a removal version is automatically computed from + since. Set to other Falsy values to not schedule a removal + date. Cannot be used together with pending. 
+ package: str, optional + The package of the deprecated object. + """ + if not pending: + if not removal: + removal = f"in {removal}" if removal else "within ?? minor releases" + msg = ( + f"Need to determine which default deprecation schedule to use. " + f"{removal}" + ) + raise NotImplementedError(msg) + removal = f"in {removal}" + + if not message: + message = "" + _package = ( + package or name.split(".")[0].replace("_", "-") + if "." in name + else "LangChain" + ) + + if obj_type: + message += f"The {obj_type} `{name}`" + else: + message += f"`{name}`" + + if pending: + message += " will be deprecated in a future version" + else: + message += f" was deprecated in {_package} {since}" + + if removal: + message += f" and will be removed {removal}" + + if alternative_import: + alt_package = alternative_import.split(".")[0].replace("_", "-") + if alt_package == _package: + message += f". Use {alternative_import} instead." + else: + alt_module, alt_name = alternative_import.rsplit(".", 1) + message += ( + f". An updated version of the {obj_type} exists in the " + f"{alt_package} package and should be used instead. To use it run " + f"`pip install -U {alt_package}` and import as " + f"`from {alt_module} import {alt_name}`." + ) + elif alternative: + message += f". Use {alternative} instead." + + if addendum: + message += f" {addendum}" + + warning_cls = ( + LangChainPendingDeprecationWarning if pending else LangChainDeprecationWarning + ) + warning = warning_cls(message) + warnings.warn(warning, category=LangChainDeprecationWarning, stacklevel=4) + + +def surface_langchain_deprecation_warnings() -> None: + """Unmute LangChain deprecation warnings.""" + warnings.filterwarnings( + "default", + category=LangChainPendingDeprecationWarning, + ) + + warnings.filterwarnings( + "default", + category=LangChainDeprecationWarning, + ) + + +_P = ParamSpec("_P") +_R = TypeVar("_R") + + +def rename_parameter( + *, + since: str, + removal: str, + old: str, + new: str, +) -> Callable[[Callable[_P, _R]], Callable[_P, _R]]: + """Decorator indicating that parameter *old* of *func* is renamed to *new*. + + The actual implementation of *func* should use *new*, not *old*. If *old* + is passed to *func*, a DeprecationWarning is emitted, and its value is + used, even if *new* is also passed by keyword. + + Example: + + .. code-block:: python + + @_api.rename_parameter("3.1", "bad_name", "good_name") + def func(good_name): ... 
+ """ + + def decorator(f: Callable[_P, _R]) -> Callable[_P, _R]: + @functools.wraps(f) + def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R: + if new in kwargs and old in kwargs: + msg = f"{f.__name__}() got multiple values for argument {new!r}" + raise TypeError(msg) + if old in kwargs: + warn_deprecated( + since, + removal=removal, + message=f"The parameter `{old}` of `{f.__name__}` was " + f"deprecated in {since} and will be removed " + f"in {removal} Use `{new}` instead.", + ) + kwargs[new] = kwargs.pop(old) + return f(*args, **kwargs) + + return wrapper + + return decorator diff --git a/venv/Lib/site-packages/langchain_core/_api/internal.py b/venv/Lib/site-packages/langchain_core/_api/internal.py new file mode 100644 index 00000000..e5ef4300 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/_api/internal.py @@ -0,0 +1,22 @@ +import inspect + + +def is_caller_internal(depth: int = 2) -> bool: + """Return whether the caller at `depth` of this function is internal.""" + try: + frame = inspect.currentframe() + except AttributeError: + return False + if frame is None: + return False + try: + for _ in range(depth): + frame = frame.f_back + if frame is None: + return False + # Directly access the module name from the frame's global variables + module_globals = frame.f_globals + caller_module_name = module_globals.get("__name__", "") + return caller_module_name.startswith("langchain") + finally: + del frame diff --git a/venv/Lib/site-packages/langchain_core/_api/path.py b/venv/Lib/site-packages/langchain_core/_api/path.py new file mode 100644 index 00000000..0589ae44 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/_api/path.py @@ -0,0 +1,36 @@ +import os +from pathlib import Path +from typing import Optional, Union + +HERE = Path(__file__).parent + +# Get directory of langchain package +PACKAGE_DIR = HERE.parent +SEPARATOR = os.sep + + +def get_relative_path( + file: Union[Path, str], *, relative_to: Path = PACKAGE_DIR +) -> str: + """Get the path of the file as a relative path to the package directory.""" + if isinstance(file, str): + file = Path(file) + return str(file.relative_to(relative_to)) + + +def as_import_path( + file: Union[Path, str], + *, + suffix: Optional[str] = None, + relative_to: Path = PACKAGE_DIR, +) -> str: + """Path of the file as a LangChain import exclude langchain top namespace.""" + if isinstance(file, str): + file = Path(file) + path = get_relative_path(file, relative_to=relative_to) + if file.is_file(): + path = path[: -len(file.suffix)] + import_path = path.replace(SEPARATOR, ".") + if suffix: + import_path += "." + suffix + return import_path diff --git a/venv/Lib/site-packages/langchain_core/_import_utils.py b/venv/Lib/site-packages/langchain_core/_import_utils.py new file mode 100644 index 00000000..6c1d99a3 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/_import_utils.py @@ -0,0 +1,34 @@ +from importlib import import_module +from typing import Union + + +def import_attr( + attr_name: str, + module_name: Union[str, None], + package: Union[str, None], +) -> object: + """Import an attribute from a module located in a package. + + This utility function is used in custom __getattr__ methods within __init__.py + files to dynamically import attributes. + + Args: + attr_name: The name of the attribute to import. + module_name: The name of the module to import from. If None, the attribute + is imported from the package itself. + package: The name of the package where the module is located. 
+ """ + if module_name == "__module__" or module_name is None: + try: + result = import_module(f".{attr_name}", package=package) + except ModuleNotFoundError: + msg = f"module '{package!r}' has no attribute {attr_name!r}" + raise AttributeError(msg) from None + else: + try: + module = import_module(f".{module_name}", package=package) + except ModuleNotFoundError: + msg = f"module '{package!r}.{module_name!r}' not found" + raise ImportError(msg) from None + result = getattr(module, attr_name) + return result diff --git a/venv/Lib/site-packages/langchain_core/agents.py b/venv/Lib/site-packages/langchain_core/agents.py new file mode 100644 index 00000000..74bfcdd4 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/agents.py @@ -0,0 +1,238 @@ +"""Schema definitions for representing agent actions, observations, and return values. + +**ATTENTION** The schema definitions are provided for backwards compatibility. + + New agents should be built using the langgraph library + (https://github.com/langchain-ai/langgraph)), which provides a simpler + and more flexible way to define agents. + + Please see the migration guide for information on how to migrate existing + agents to modern langgraph agents: + https://python.langchain.com/docs/how_to/migrate_agent/ + +Agents use language models to choose a sequence of actions to take. + +A basic agent works in the following manner: + +1. Given a prompt an agent uses an LLM to request an action to take (e.g., a tool to run). +2. The agent executes the action (e.g., runs the tool), and receives an observation. +3. The agent returns the observation to the LLM, which can then be used to generate the next action. +4. When the agent reaches a stopping condition, it returns a final return value. + +The schemas for the agents themselves are defined in langchain.agents.agent. +""" # noqa: E501 + +from __future__ import annotations + +import json +from collections.abc import Sequence +from typing import Any, Literal, Union + +from langchain_core.load.serializable import Serializable +from langchain_core.messages import ( + AIMessage, + BaseMessage, + FunctionMessage, + HumanMessage, +) + + +class AgentAction(Serializable): + """Represents a request to execute an action by an agent. + + The action consists of the name of the tool to execute and the input to pass + to the tool. The log is used to pass along extra information about the action. + """ + + tool: str + """The name of the Tool to execute.""" + tool_input: Union[str, dict] + """The input to pass in to the Tool.""" + log: str + """Additional information to log about the action. + This log can be used in a few ways. First, it can be used to audit + what exactly the LLM predicted to lead to this (tool, tool_input). + Second, it can be used in future iterations to show the LLMs prior + thoughts. This is useful when (tool, tool_input) does not contain + full information about the LLM prediction (for example, any `thought` + before the tool/tool_input).""" + type: Literal["AgentAction"] = "AgentAction" + + # Override init to support instantiation by position for backward compat. + def __init__( + self, tool: str, tool_input: Union[str, dict], log: str, **kwargs: Any + ): + """Create an AgentAction. + + Args: + tool: The name of the tool to execute. + tool_input: The input to pass in to the Tool. + log: Additional information to log about the action. 
+ """ + super().__init__(tool=tool, tool_input=tool_input, log=log, **kwargs) + + @classmethod + def is_lc_serializable(cls) -> bool: + """AgentAction is serializable. + + Returns: + True + """ + return True + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Default is ["langchain", "schema", "agent"]. + """ + return ["langchain", "schema", "agent"] + + @property + def messages(self) -> Sequence[BaseMessage]: + """Return the messages that correspond to this action.""" + return _convert_agent_action_to_messages(self) + + +class AgentActionMessageLog(AgentAction): + """Representation of an action to be executed by an agent. + + This is similar to AgentAction, but includes a message log consisting of + chat messages. This is useful when working with ChatModels, and is used + to reconstruct conversation history from the agent's perspective. + """ + + message_log: Sequence[BaseMessage] + """Similar to log, this can be used to pass along extra + information about what exact messages were predicted by the LLM + before parsing out the (tool, tool_input). This is again useful + if (tool, tool_input) cannot be used to fully recreate the LLM + prediction, and you need that LLM prediction (for future agent iteration). + Compared to `log`, this is useful when the underlying LLM is a + ChatModel (and therefore returns messages rather than a string).""" + # Ignoring type because we're overriding the type from AgentAction. + # And this is the correct thing to do in this case. + # The type literal is used for serialization purposes. + type: Literal["AgentActionMessageLog"] = "AgentActionMessageLog" # type: ignore[assignment] + + +class AgentStep(Serializable): + """Result of running an AgentAction.""" + + action: AgentAction + """The AgentAction that was executed.""" + observation: Any + """The result of the AgentAction.""" + + @property + def messages(self) -> Sequence[BaseMessage]: + """Messages that correspond to this observation.""" + return _convert_agent_observation_to_messages(self.action, self.observation) + + +class AgentFinish(Serializable): + """Final return value of an ActionAgent. + + Agents return an AgentFinish when they have reached a stopping condition. + """ + + return_values: dict + """Dictionary of return values.""" + log: str + """Additional information to log about the return value. + This is used to pass along the full LLM prediction, not just the parsed out + return value. For example, if the full LLM prediction was + `Final Answer: 2` you may want to just return `2` as a return value, but pass + along the full string as a `log` (for debugging or observability purposes). + """ + type: Literal["AgentFinish"] = "AgentFinish" + + def __init__(self, return_values: dict, log: str, **kwargs: Any): + """Override init to support instantiation by position for backward compat.""" + super().__init__(return_values=return_values, log=log, **kwargs) + + @classmethod + def is_lc_serializable(cls) -> bool: + """Return whether or not the class is serializable.""" + return True + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Default namespace is ["langchain", "schema", "agent"]. 
+ """ + return ["langchain", "schema", "agent"] + + @property + def messages(self) -> Sequence[BaseMessage]: + """Messages that correspond to this observation.""" + return [AIMessage(content=self.log)] + + +def _convert_agent_action_to_messages( + agent_action: AgentAction, +) -> Sequence[BaseMessage]: + """Convert an agent action to a message. + + This code is used to reconstruct the original AI message from the agent action. + + Args: + agent_action: Agent action to convert. + + Returns: + AIMessage that corresponds to the original tool invocation. + """ + if isinstance(agent_action, AgentActionMessageLog): + return agent_action.message_log + return [AIMessage(content=agent_action.log)] + + +def _convert_agent_observation_to_messages( + agent_action: AgentAction, observation: Any +) -> Sequence[BaseMessage]: + """Convert an agent action to a message. + + This code is used to reconstruct the original AI message from the agent action. + + Args: + agent_action: Agent action to convert. + observation: Observation to convert to a message. + + Returns: + AIMessage that corresponds to the original tool invocation. + """ + if isinstance(agent_action, AgentActionMessageLog): + return [_create_function_message(agent_action, observation)] + content = observation + if not isinstance(observation, str): + try: + content = json.dumps(observation, ensure_ascii=False) + except Exception: + content = str(observation) + return [HumanMessage(content=content)] + + +def _create_function_message( + agent_action: AgentAction, observation: Any +) -> FunctionMessage: + """Convert agent action and observation into a function message. + + Args: + agent_action: the tool invocation request from the agent. + observation: the result of the tool invocation. + + Returns: + FunctionMessage that corresponds to the original tool invocation. 
+ """ + if not isinstance(observation, str): + try: + content = json.dumps(observation, ensure_ascii=False) + except Exception: + content = str(observation) + else: + content = observation + return FunctionMessage( + name=agent_action.tool, + content=content, + ) diff --git a/venv/Lib/site-packages/langchain_core/beta/__init__.py b/venv/Lib/site-packages/langchain_core/beta/__init__.py new file mode 100644 index 00000000..7f79e3a4 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/beta/__init__.py @@ -0,0 +1 @@ +"""Some **beta** features that are not yet ready for production.""" diff --git a/venv/Lib/site-packages/langchain_core/beta/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/beta/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..acb79034 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/beta/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/beta/runnables/__init__.py b/venv/Lib/site-packages/langchain_core/beta/runnables/__init__.py new file mode 100644 index 00000000..72d18c15 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/beta/runnables/__init__.py @@ -0,0 +1 @@ +"""Runnables.""" diff --git a/venv/Lib/site-packages/langchain_core/beta/runnables/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/beta/runnables/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..8f644420 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/beta/runnables/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/beta/runnables/__pycache__/context.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/beta/runnables/__pycache__/context.cpython-312.pyc new file mode 100644 index 00000000..28a232df Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/beta/runnables/__pycache__/context.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/beta/runnables/context.py b/venv/Lib/site-packages/langchain_core/beta/runnables/context.py new file mode 100644 index 00000000..3aa76f34 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/beta/runnables/context.py @@ -0,0 +1,447 @@ +"""Context management for runnables.""" + +import asyncio +import threading +from collections import defaultdict +from collections.abc import Awaitable, Mapping, Sequence +from functools import partial +from itertools import groupby +from typing import ( + Any, + Callable, + Optional, + TypeVar, + Union, +) + +from pydantic import ConfigDict +from typing_extensions import override + +from langchain_core._api.beta_decorator import beta +from langchain_core.runnables.base import ( + Runnable, + RunnableSerializable, + coerce_to_runnable, +) +from langchain_core.runnables.config import RunnableConfig, ensure_config, patch_config +from langchain_core.runnables.utils import ConfigurableFieldSpec, Input, Output + +T = TypeVar("T") +Values = dict[Union[asyncio.Event, threading.Event], Any] +CONTEXT_CONFIG_PREFIX = "__context__/" +CONTEXT_CONFIG_SUFFIX_GET = "/get" +CONTEXT_CONFIG_SUFFIX_SET = "/set" + + +async def _asetter(done: asyncio.Event, values: Values, value: T) -> T: + values[done] = value + done.set() + return value + + +async def _agetter(done: asyncio.Event, values: Values) -> Any: + await done.wait() + return values[done] + + +def _setter(done: threading.Event, values: Values, value: T) -> T: + values[done] = value + done.set() + return value + + +def _getter(done: 
threading.Event, values: Values) -> Any: + done.wait() + return values[done] + + +def _key_from_id(id_: str) -> str: + wout_prefix = id_.split(CONTEXT_CONFIG_PREFIX, maxsplit=1)[1] + if wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_GET): + return wout_prefix[: -len(CONTEXT_CONFIG_SUFFIX_GET)] + if wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_SET): + return wout_prefix[: -len(CONTEXT_CONFIG_SUFFIX_SET)] + msg = f"Invalid context config id {id_}" + raise ValueError(msg) + + +def _config_with_context( + config: RunnableConfig, + steps: list[Runnable], + setter: Callable, + getter: Callable, + event_cls: Union[type[threading.Event], type[asyncio.Event]], +) -> RunnableConfig: + if any(k.startswith(CONTEXT_CONFIG_PREFIX) for k in config.get("configurable", {})): + return config + + context_specs = [ + (spec, i) + for i, step in enumerate(steps) + for spec in step.config_specs + if spec.id.startswith(CONTEXT_CONFIG_PREFIX) + ] + grouped_by_key = { + key: list(group) + for key, group in groupby( + sorted(context_specs, key=lambda s: s[0].id), + key=lambda s: _key_from_id(s[0].id), + ) + } + deps_by_key = { + key: { + _key_from_id(dep) for spec in group for dep in (spec[0].dependencies or []) + } + for key, group in grouped_by_key.items() + } + + values: Values = {} + events: defaultdict[str, Union[asyncio.Event, threading.Event]] = defaultdict( + event_cls + ) + context_funcs: dict[str, Callable[[], Any]] = {} + for key, group in grouped_by_key.items(): + getters = [s for s in group if s[0].id.endswith(CONTEXT_CONFIG_SUFFIX_GET)] + setters = [s for s in group if s[0].id.endswith(CONTEXT_CONFIG_SUFFIX_SET)] + + for dep in deps_by_key[key]: + if key in deps_by_key[dep]: + msg = f"Deadlock detected between context keys {key} and {dep}" + raise ValueError(msg) + if len(setters) != 1: + msg = f"Expected exactly one setter for context key {key}" + raise ValueError(msg) + setter_idx = setters[0][1] + if any(getter_idx < setter_idx for _, getter_idx in getters): + msg = f"Context setter for key {key} must be defined after all getters." + raise ValueError(msg) + + if getters: + context_funcs[getters[0][0].id] = partial(getter, events[key], values) + context_funcs[setters[0][0].id] = partial(setter, events[key], values) + + return patch_config(config, configurable=context_funcs) + + +def aconfig_with_context( + config: RunnableConfig, + steps: list[Runnable], +) -> RunnableConfig: + """Asynchronously patch a runnable config with context getters and setters. + + Args: + config: The runnable config. + steps: The runnable steps. + + Returns: + The patched runnable config. + """ + return _config_with_context(config, steps, _asetter, _agetter, asyncio.Event) + + +def config_with_context( + config: RunnableConfig, + steps: list[Runnable], +) -> RunnableConfig: + """Patch a runnable config with context getters and setters. + + Args: + config: The runnable config. + steps: The runnable steps. + + Returns: + The patched runnable config. 
+ """ + return _config_with_context(config, steps, _setter, _getter, threading.Event) + + +@beta() +class ContextGet(RunnableSerializable): + """Get a context value.""" + + prefix: str = "" + + key: Union[str, list[str]] + + @override + def __str__(self) -> str: + return f"ContextGet({_print_keys(self.key)})" + + @property + def ids(self) -> list[str]: + """The context getter ids.""" + prefix = self.prefix + "/" if self.prefix else "" + keys = self.key if isinstance(self.key, list) else [self.key] + return [ + f"{CONTEXT_CONFIG_PREFIX}{prefix}{k}{CONTEXT_CONFIG_SUFFIX_GET}" + for k in keys + ] + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + return super().config_specs + [ + ConfigurableFieldSpec( + id=id_, + annotation=Callable[[], Any], + ) + for id_ in self.ids + ] + + @override + def invoke( + self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Any: + config = ensure_config(config) + configurable = config.get("configurable", {}) + if isinstance(self.key, list): + return {key: configurable[id_]() for key, id_ in zip(self.key, self.ids)} + return configurable[self.ids[0]]() + + @override + async def ainvoke( + self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Any: + config = ensure_config(config) + configurable = config.get("configurable", {}) + if isinstance(self.key, list): + values = await asyncio.gather(*(configurable[id_]() for id_ in self.ids)) + return dict(zip(self.key, values)) + return await configurable[self.ids[0]]() + + +SetValue = Union[ + Runnable[Input, Output], + Callable[[Input], Output], + Callable[[Input], Awaitable[Output]], + Any, +] + + +def _coerce_set_value(value: SetValue) -> Runnable[Input, Output]: + if not isinstance(value, Runnable) and not callable(value): + return coerce_to_runnable(lambda _: value) + return coerce_to_runnable(value) + + +@beta() +class ContextSet(RunnableSerializable): + """Set a context value.""" + + prefix: str = "" + + keys: Mapping[str, Optional[Runnable]] + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def __init__( + self, + key: Optional[str] = None, + value: Optional[SetValue] = None, + prefix: str = "", + **kwargs: SetValue, + ): + """Create a context setter. + + Args: + key: The context setter key. + value: The context setter value. + prefix: The context setter prefix. + **kwargs: Additional context setter key-value pairs. 
+ """ + if key is not None: + kwargs[key] = value + super().__init__( # type: ignore[call-arg] + keys={ + k: _coerce_set_value(v) if v is not None else None + for k, v in kwargs.items() + }, + prefix=prefix, + ) + + @override + def __str__(self) -> str: + return f"ContextSet({_print_keys(list(self.keys.keys()))})" + + @property + def ids(self) -> list[str]: + """The context setter ids.""" + prefix = self.prefix + "/" if self.prefix else "" + return [ + f"{CONTEXT_CONFIG_PREFIX}{prefix}{key}{CONTEXT_CONFIG_SUFFIX_SET}" + for key in self.keys + ] + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + mapper_config_specs = [ + s + for mapper in self.keys.values() + if mapper is not None + for s in mapper.config_specs + ] + for spec in mapper_config_specs: + if spec.id.endswith(CONTEXT_CONFIG_SUFFIX_GET): + getter_key = spec.id.split("/")[1] + if getter_key in self.keys: + msg = f"Circular reference in context setter for key {getter_key}" + raise ValueError(msg) + return super().config_specs + [ + ConfigurableFieldSpec( + id=id_, + annotation=Callable[[], Any], + ) + for id_ in self.ids + ] + + @override + def invoke( + self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Any: + config = ensure_config(config) + configurable = config.get("configurable", {}) + for id_, mapper in zip(self.ids, self.keys.values()): + if mapper is not None: + configurable[id_](mapper.invoke(input, config)) + else: + configurable[id_](input) + return input + + @override + async def ainvoke( + self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Any: + config = ensure_config(config) + configurable = config.get("configurable", {}) + for id_, mapper in zip(self.ids, self.keys.values()): + if mapper is not None: + await configurable[id_](await mapper.ainvoke(input, config)) + else: + await configurable[id_](input) + return input + + +class Context: + """Context for a runnable. + + The `Context` class provides methods for creating context scopes, + getters, and setters within a runnable. It allows for managing + and accessing contextual information throughout the execution + of a program. + + Example: + .. code-block:: python + + from langchain_core.beta.runnables.context import Context + from langchain_core.runnables.passthrough import RunnablePassthrough + from langchain_core.prompts.prompt import PromptTemplate + from langchain_core.output_parsers.string import StrOutputParser + from tests.unit_tests.fake.llm import FakeListLLM + + chain = ( + Context.setter("input") + | { + "context": RunnablePassthrough() + | Context.setter("context"), + "question": RunnablePassthrough(), + } + | PromptTemplate.from_template("{context} {question}") + | FakeListLLM(responses=["hello"]) + | StrOutputParser() + | { + "result": RunnablePassthrough(), + "context": Context.getter("context"), + "input": Context.getter("input"), + } + ) + + # Use the chain + output = chain.invoke("What's your name?") + print(output["result"]) # Output: "hello" + print(output["context"]) # Output: "What's your name?" + print(output["input"]) # Output: "What's your name? + """ + + @staticmethod + def create_scope(scope: str, /) -> "PrefixContext": + """Create a context scope. + + Args: + scope: The scope. + + Returns: + The context scope. + """ + return PrefixContext(prefix=scope) + + @staticmethod + def getter(key: Union[str, list[str]], /) -> ContextGet: + """Return a context getter. + + Args: + key: The context getter key. 
+ """ + return ContextGet(key=key) + + @staticmethod + def setter( + _key: Optional[str] = None, + _value: Optional[SetValue] = None, + /, + **kwargs: SetValue, + ) -> ContextSet: + """Return a context setter. + + Args: + _key: The context setter key. + _value: The context setter value. + **kwargs: Additional context setter key-value pairs. + """ + return ContextSet(_key, _value, prefix="", **kwargs) + + +class PrefixContext: + """Context for a runnable with a prefix.""" + + prefix: str = "" + + def __init__(self, prefix: str = ""): + """Create a prefix context. + + Args: + prefix: The prefix. + """ + self.prefix = prefix + + def getter(self, key: Union[str, list[str]], /) -> ContextGet: + """Return a prefixed context getter. + + Args: + key: The context getter key. + """ + return ContextGet(key=key, prefix=self.prefix) + + def setter( + self, + _key: Optional[str] = None, + _value: Optional[SetValue] = None, + /, + **kwargs: SetValue, + ) -> ContextSet: + """Return a prefixed context setter. + + Args: + _key: The context setter key. + _value: The context setter value. + **kwargs: Additional context setter key-value pairs. + """ + return ContextSet(_key, _value, prefix=self.prefix, **kwargs) + + +def _print_keys(keys: Union[str, Sequence[str]]) -> str: + if isinstance(keys, str): + return f"'{keys}'" + return ", ".join(f"'{k}'" for k in keys) diff --git a/venv/Lib/site-packages/langchain_core/caches.py b/venv/Lib/site-packages/langchain_core/caches.py new file mode 100644 index 00000000..2ccbf06b --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/caches.py @@ -0,0 +1,236 @@ +"""Cache classes. + +.. warning:: + Beta Feature! + +**Cache** provides an optional caching layer for LLMs. + +Cache is useful for two reasons: + +- It can save you money by reducing the number of API calls you make to the LLM + provider if you're often requesting the same completion multiple times. +- It can speed up your application by reducing the number of API calls you make + to the LLM provider. + +Cache directly competes with Memory. See documentation for Pros and Cons. + +**Class hierarchy:** + +.. code-block:: + + BaseCache --> Cache # Examples: InMemoryCache, RedisCache, GPTCache +""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from collections.abc import Sequence +from typing import Any, Optional + +from typing_extensions import override + +from langchain_core.outputs import Generation +from langchain_core.runnables import run_in_executor + +RETURN_VAL_TYPE = Sequence[Generation] + + +class BaseCache(ABC): + """Interface for a caching layer for LLMs and Chat models. + + The cache interface consists of the following methods: + + - lookup: Look up a value based on a prompt and llm_string. + - update: Update the cache based on a prompt and llm_string. + - clear: Clear the cache. + + In addition, the cache interface provides an async version of each method. + + The default implementation of the async methods is to run the synchronous + method in an executor. It's recommended to override the async methods + and provide async implementations to avoid unnecessary overhead. + """ + + @abstractmethod + def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: + """Look up based on prompt and llm_string. + + A cache implementation is expected to generate a key from the 2-tuple + of prompt and llm_string (e.g., by concatenating them with a delimiter). + + Args: + prompt: a string representation of the prompt. 
+ In the case of a Chat model, the prompt is a non-trivial + serialization of the prompt into the language model. + llm_string: A string representation of the LLM configuration. + This is used to capture the invocation parameters of the LLM + (e.g., model name, temperature, stop tokens, max tokens, etc.). + These invocation parameters are serialized into a string + representation. + + Returns: + On a cache miss, return None. On a cache hit, return the cached value. + The cached value is a list of Generations (or subclasses). + """ + + @abstractmethod + def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: + """Update cache based on prompt and llm_string. + + The prompt and llm_string are used to generate a key for the cache. + The key should match that of the lookup method. + + Args: + prompt: a string representation of the prompt. + In the case of a Chat model, the prompt is a non-trivial + serialization of the prompt into the language model. + llm_string: A string representation of the LLM configuration. + This is used to capture the invocation parameters of the LLM + (e.g., model name, temperature, stop tokens, max tokens, etc.). + These invocation parameters are serialized into a string + representation. + return_val: The value to be cached. The value is a list of Generations + (or subclasses). + """ + + @abstractmethod + def clear(self, **kwargs: Any) -> None: + """Clear cache that can take additional keyword arguments.""" + + async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: + """Async look up based on prompt and llm_string. + + A cache implementation is expected to generate a key from the 2-tuple + of prompt and llm_string (e.g., by concatenating them with a delimiter). + + Args: + prompt: a string representation of the prompt. + In the case of a Chat model, the prompt is a non-trivial + serialization of the prompt into the language model. + llm_string: A string representation of the LLM configuration. + This is used to capture the invocation parameters of the LLM + (e.g., model name, temperature, stop tokens, max tokens, etc.). + These invocation parameters are serialized into a string + representation. + + Returns: + On a cache miss, return None. On a cache hit, return the cached value. + The cached value is a list of Generations (or subclasses). + """ + return await run_in_executor(None, self.lookup, prompt, llm_string) + + async def aupdate( + self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE + ) -> None: + """Async update cache based on prompt and llm_string. + + The prompt and llm_string are used to generate a key for the cache. + The key should match that of the look up method. + + Args: + prompt: a string representation of the prompt. + In the case of a Chat model, the prompt is a non-trivial + serialization of the prompt into the language model. + llm_string: A string representation of the LLM configuration. + This is used to capture the invocation parameters of the LLM + (e.g., model name, temperature, stop tokens, max tokens, etc.). + These invocation parameters are serialized into a string + representation. + return_val: The value to be cached. The value is a list of Generations + (or subclasses). 
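+
+        Example:
+            A minimal sketch (illustrative only, not shipped with langchain_core)
+            of a subclass that overrides the async methods directly instead of
+            relying on the executor-based defaults:
+
+            .. code-block:: python
+
+                class DictCache(BaseCache):
+                    def __init__(self) -> None:
+                        self._store: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
+
+                    def lookup(self, prompt, llm_string):
+                        return self._store.get((prompt, llm_string))
+
+                    def update(self, prompt, llm_string, return_val):
+                        self._store[(prompt, llm_string)] = return_val
+
+                    def clear(self, **kwargs):
+                        self._store.clear()
+
+                    async def alookup(self, prompt, llm_string):
+                        return self.lookup(prompt, llm_string)
+
+                    async def aupdate(self, prompt, llm_string, return_val):
+                        self.update(prompt, llm_string, return_val)
+
+                    async def aclear(self, **kwargs):
+                        self.clear()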
+ """ + return await run_in_executor(None, self.update, prompt, llm_string, return_val) + + async def aclear(self, **kwargs: Any) -> None: + """Async clear cache that can take additional keyword arguments.""" + return await run_in_executor(None, self.clear, **kwargs) + + +class InMemoryCache(BaseCache): + """Cache that stores things in memory.""" + + def __init__(self, *, maxsize: Optional[int] = None) -> None: + """Initialize with empty cache. + + Args: + maxsize: The maximum number of items to store in the cache. + If None, the cache has no maximum size. + If the cache exceeds the maximum size, the oldest items are removed. + Default is None. + + Raises: + ValueError: If maxsize is less than or equal to 0. + """ + self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {} + if maxsize is not None and maxsize <= 0: + msg = "maxsize must be greater than 0" + raise ValueError(msg) + self._maxsize = maxsize + + def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: + """Look up based on prompt and llm_string. + + Args: + prompt: a string representation of the prompt. + In the case of a Chat model, the prompt is a non-trivial + serialization of the prompt into the language model. + llm_string: A string representation of the LLM configuration. + + Returns: + On a cache miss, return None. On a cache hit, return the cached value. + """ + return self._cache.get((prompt, llm_string), None) + + def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: + """Update cache based on prompt and llm_string. + + Args: + prompt: a string representation of the prompt. + In the case of a Chat model, the prompt is a non-trivial + serialization of the prompt into the language model. + llm_string: A string representation of the LLM configuration. + return_val: The value to be cached. The value is a list of Generations + (or subclasses). + """ + if self._maxsize is not None and len(self._cache) == self._maxsize: + del self._cache[next(iter(self._cache))] + self._cache[(prompt, llm_string)] = return_val + + @override + def clear(self, **kwargs: Any) -> None: + """Clear cache.""" + self._cache = {} + + async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: + """Async look up based on prompt and llm_string. + + Args: + prompt: a string representation of the prompt. + In the case of a Chat model, the prompt is a non-trivial + serialization of the prompt into the language model. + llm_string: A string representation of the LLM configuration. + + Returns: + On a cache miss, return None. On a cache hit, return the cached value. + """ + return self.lookup(prompt, llm_string) + + async def aupdate( + self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE + ) -> None: + """Async update cache based on prompt and llm_string. + + Args: + prompt: a string representation of the prompt. + In the case of a Chat model, the prompt is a non-trivial + serialization of the prompt into the language model. + llm_string: A string representation of the LLM configuration. + return_val: The value to be cached. The value is a list of Generations + (or subclasses). 
+ """ + self.update(prompt, llm_string, return_val) + + @override + async def aclear(self, **kwargs: Any) -> None: + """Async clear cache.""" + self.clear() diff --git a/venv/Lib/site-packages/langchain_core/callbacks/__init__.py b/venv/Lib/site-packages/langchain_core/callbacks/__init__.py new file mode 100644 index 00000000..6e842b96 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/callbacks/__init__.py @@ -0,0 +1,139 @@ +"""**Callback handlers** allow listening to events in LangChain. + +**Class hierarchy:** + +.. code-block:: + + BaseCallbackHandler --> CallbackHandler # Example: AimCallbackHandler +""" + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.callbacks.base import ( + AsyncCallbackHandler, + BaseCallbackHandler, + BaseCallbackManager, + CallbackManagerMixin, + Callbacks, + ChainManagerMixin, + LLMManagerMixin, + RetrieverManagerMixin, + RunManagerMixin, + ToolManagerMixin, + ) + from langchain_core.callbacks.file import FileCallbackHandler + from langchain_core.callbacks.manager import ( + AsyncCallbackManager, + AsyncCallbackManagerForChainGroup, + AsyncCallbackManagerForChainRun, + AsyncCallbackManagerForLLMRun, + AsyncCallbackManagerForRetrieverRun, + AsyncCallbackManagerForToolRun, + AsyncParentRunManager, + AsyncRunManager, + BaseRunManager, + CallbackManager, + CallbackManagerForChainGroup, + CallbackManagerForChainRun, + CallbackManagerForLLMRun, + CallbackManagerForRetrieverRun, + CallbackManagerForToolRun, + ParentRunManager, + RunManager, + adispatch_custom_event, + dispatch_custom_event, + ) + from langchain_core.callbacks.stdout import StdOutCallbackHandler + from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler + from langchain_core.callbacks.usage import ( + UsageMetadataCallbackHandler, + get_usage_metadata_callback, + ) + +__all__ = ( + "dispatch_custom_event", + "adispatch_custom_event", + "RetrieverManagerMixin", + "LLMManagerMixin", + "ChainManagerMixin", + "ToolManagerMixin", + "Callbacks", + "CallbackManagerMixin", + "RunManagerMixin", + "BaseCallbackHandler", + "AsyncCallbackHandler", + "BaseCallbackManager", + "BaseRunManager", + "RunManager", + "ParentRunManager", + "AsyncRunManager", + "AsyncParentRunManager", + "CallbackManagerForLLMRun", + "AsyncCallbackManagerForLLMRun", + "CallbackManagerForChainRun", + "AsyncCallbackManagerForChainRun", + "CallbackManagerForToolRun", + "AsyncCallbackManagerForToolRun", + "CallbackManagerForRetrieverRun", + "AsyncCallbackManagerForRetrieverRun", + "CallbackManager", + "CallbackManagerForChainGroup", + "AsyncCallbackManager", + "AsyncCallbackManagerForChainGroup", + "StdOutCallbackHandler", + "StreamingStdOutCallbackHandler", + "FileCallbackHandler", + "UsageMetadataCallbackHandler", + "get_usage_metadata_callback", +) + +_dynamic_imports = { + "AsyncCallbackHandler": "base", + "BaseCallbackHandler": "base", + "BaseCallbackManager": "base", + "CallbackManagerMixin": "base", + "Callbacks": "base", + "ChainManagerMixin": "base", + "LLMManagerMixin": "base", + "RetrieverManagerMixin": "base", + "RunManagerMixin": "base", + "ToolManagerMixin": "base", + "FileCallbackHandler": "file", + "AsyncCallbackManager": "manager", + "AsyncCallbackManagerForChainGroup": "manager", + "AsyncCallbackManagerForChainRun": "manager", + "AsyncCallbackManagerForLLMRun": "manager", + "AsyncCallbackManagerForRetrieverRun": "manager", + "AsyncCallbackManagerForToolRun": "manager", + "AsyncParentRunManager": "manager", 
+ "AsyncRunManager": "manager", + "BaseRunManager": "manager", + "CallbackManager": "manager", + "CallbackManagerForChainGroup": "manager", + "CallbackManagerForChainRun": "manager", + "CallbackManagerForLLMRun": "manager", + "CallbackManagerForRetrieverRun": "manager", + "CallbackManagerForToolRun": "manager", + "ParentRunManager": "manager", + "RunManager": "manager", + "adispatch_custom_event": "manager", + "dispatch_custom_event": "manager", + "StdOutCallbackHandler": "stdout", + "StreamingStdOutCallbackHandler": "streaming_stdout", + "UsageMetadataCallbackHandler": "usage", + "get_usage_metadata_callback": "usage", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..44d57e04 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..f1d25ed8 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/file.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/file.cpython-312.pyc new file mode 100644 index 00000000..49b9c4b9 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/file.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/manager.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/manager.cpython-312.pyc new file mode 100644 index 00000000..be92b972 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/manager.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/stdout.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/stdout.cpython-312.pyc new file mode 100644 index 00000000..c8d1fa8d Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/stdout.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/streaming_stdout.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/streaming_stdout.cpython-312.pyc new file mode 100644 index 00000000..d5e9b824 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/streaming_stdout.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/usage.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/usage.cpython-312.pyc new file mode 100644 index 00000000..517df63b Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/callbacks/__pycache__/usage.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/callbacks/base.py b/venv/Lib/site-packages/langchain_core/callbacks/base.py new file mode 100644 index 00000000..1a792521 --- /dev/null +++ 
b/venv/Lib/site-packages/langchain_core/callbacks/base.py @@ -0,0 +1,1101 @@ +"""Base callback handler for LangChain.""" + +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING, Any, Optional, Union + +from typing_extensions import Self + +if TYPE_CHECKING: + from collections.abc import Sequence + from uuid import UUID + + from tenacity import RetryCallState + + from langchain_core.agents import AgentAction, AgentFinish + from langchain_core.documents import Document + from langchain_core.messages import BaseMessage + from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult + +_LOGGER = logging.getLogger(__name__) + + +class RetrieverManagerMixin: + """Mixin for Retriever callbacks.""" + + def on_retriever_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run when Retriever errors. + + Args: + error (BaseException): The error that occurred. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + def on_retriever_end( + self, + documents: Sequence[Document], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run when Retriever ends running. + + Args: + documents (Sequence[Document]): The documents retrieved. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + +class LLMManagerMixin: + """Mixin for LLM callbacks.""" + + def on_llm_new_token( + self, + token: str, + *, + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run on new LLM token. Only available when streaming is enabled. + + Args: + token (str): The new token. + chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk, + containing content and other information. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + def on_llm_end( + self, + response: LLMResult, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run when LLM ends running. + + Args: + response (LLMResult): The response which was generated. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + def on_llm_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run when LLM errors. + + Args: + error (BaseException): The error that occurred. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + +class ChainManagerMixin: + """Mixin for chain callbacks.""" + + def on_chain_end( + self, + outputs: dict[str, Any], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run when chain ends running. + + Args: + outputs (dict[str, Any]): The outputs of the chain. + run_id (UUID): The run ID. 
This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + def on_chain_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run when chain errors. + + Args: + error (BaseException): The error that occurred. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + def on_agent_action( + self, + action: AgentAction, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run on agent action. + + Args: + action (AgentAction): The agent action. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + def on_agent_finish( + self, + finish: AgentFinish, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run on the agent end. + + Args: + finish (AgentFinish): The agent finish. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + +class ToolManagerMixin: + """Mixin for tool callbacks.""" + + def on_tool_end( + self, + output: Any, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run when the tool ends running. + + Args: + output (Any): The output of the tool. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + def on_tool_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run when tool errors. + + Args: + error (BaseException): The error that occurred. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + +class CallbackManagerMixin: + """Mixin for callback manager.""" + + def on_llm_start( + self, + serialized: dict[str, Any], + prompts: list[str], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Run when LLM starts running. + + **ATTENTION**: This method is called for non-chat models (regular LLMs). If + you're implementing a handler for a chat model, + you should use on_chat_model_start instead. + + Args: + serialized (dict[str, Any]): The serialized LLM. + prompts (list[str]): The prompts. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + metadata (Optional[dict[str, Any]]): The metadata. + kwargs (Any): Additional keyword arguments. 
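+
+        Example:
+            A minimal sketch of a custom handler (the class name is
+            illustrative, not part of langchain_core):
+
+            .. code-block:: python
+
+                class LoggingHandler(BaseCallbackHandler):
+                    def on_llm_start(self, serialized, prompts, **kwargs):
+                        for prompt in prompts:
+                            print(f"LLM starting with prompt: {prompt!r}")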
+ """ + + def on_chat_model_start( + self, + serialized: dict[str, Any], + messages: list[list[BaseMessage]], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Run when a chat model starts running. + + **ATTENTION**: This method is called for chat models. If you're implementing + a handler for a non-chat model, you should use on_llm_start instead. + + Args: + serialized (dict[str, Any]): The serialized chat model. + messages (list[list[BaseMessage]]): The messages. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + metadata (Optional[dict[str, Any]]): The metadata. + kwargs (Any): Additional keyword arguments. + """ + # NotImplementedError is thrown intentionally + # Callback handler will fall back to on_llm_start if this is exception is thrown + msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`" + raise NotImplementedError(msg) + + def on_retriever_start( + self, + serialized: dict[str, Any], + query: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Run when the Retriever starts running. + + Args: + serialized (dict[str, Any]): The serialized Retriever. + query (str): The query. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + metadata (Optional[dict[str, Any]]): The metadata. + kwargs (Any): Additional keyword arguments. + """ + + def on_chain_start( + self, + serialized: dict[str, Any], + inputs: dict[str, Any], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Run when a chain starts running. + + Args: + serialized (dict[str, Any]): The serialized chain. + inputs (dict[str, Any]): The inputs. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + metadata (Optional[dict[str, Any]]): The metadata. + kwargs (Any): Additional keyword arguments. + """ + + def on_tool_start( + self, + serialized: dict[str, Any], + input_str: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + inputs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Run when the tool starts running. + + Args: + serialized (dict[str, Any]): The serialized tool. + input_str (str): The input string. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + metadata (Optional[dict[str, Any]]): The metadata. + inputs (Optional[dict[str, Any]]): The inputs. + kwargs (Any): Additional keyword arguments. + """ + + +class RunManagerMixin: + """Mixin for run manager.""" + + def on_text( + self, + text: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run on an arbitrary text. + + Args: + text (str): The text. + run_id (UUID): The run ID. 
This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + def on_retry( + self, + retry_state: RetryCallState, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run on a retry event. + + Args: + retry_state (RetryCallState): The retry state. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + def on_custom_event( + self, + name: str, + data: Any, + *, + run_id: UUID, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Override to define a handler for a custom event. + + Args: + name: The name of the custom event. + data: The data for the custom event. Format will match + the format specified by the user. + run_id: The ID of the run. + tags: The tags associated with the custom event + (includes inherited tags). + metadata: The metadata associated with the custom event + (includes inherited metadata). + + .. versionadded:: 0.2.15 + """ + + +class BaseCallbackHandler( + LLMManagerMixin, + ChainManagerMixin, + ToolManagerMixin, + RetrieverManagerMixin, + CallbackManagerMixin, + RunManagerMixin, +): + """Base callback handler for LangChain.""" + + raise_error: bool = False + """Whether to raise an error if an exception occurs.""" + + run_inline: bool = False + """Whether to run the callback inline.""" + + @property + def ignore_llm(self) -> bool: + """Whether to ignore LLM callbacks.""" + return False + + @property + def ignore_retry(self) -> bool: + """Whether to ignore retry callbacks.""" + return False + + @property + def ignore_chain(self) -> bool: + """Whether to ignore chain callbacks.""" + return False + + @property + def ignore_agent(self) -> bool: + """Whether to ignore agent callbacks.""" + return False + + @property + def ignore_retriever(self) -> bool: + """Whether to ignore retriever callbacks.""" + return False + + @property + def ignore_chat_model(self) -> bool: + """Whether to ignore chat model callbacks.""" + return False + + @property + def ignore_custom_event(self) -> bool: + """Ignore custom event.""" + return False + + +class AsyncCallbackHandler(BaseCallbackHandler): + """Async callback handler for LangChain.""" + + async def on_llm_start( + self, + serialized: dict[str, Any], + prompts: list[str], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """Run when LLM starts running. + + **ATTENTION**: This method is called for non-chat models (regular LLMs). If + you're implementing a handler for a chat model, + you should use on_chat_model_start instead. + + Args: + serialized (dict[str, Any]): The serialized LLM. + prompts (list[str]): The prompts. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + metadata (Optional[dict[str, Any]]): The metadata. + kwargs (Any): Additional keyword arguments. 
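+
+        Example:
+            A minimal async sketch (the class name is illustrative, not part
+            of langchain_core):
+
+            .. code-block:: python
+
+                class AsyncLoggingHandler(AsyncCallbackHandler):
+                    async def on_llm_start(self, serialized, prompts, **kwargs):
+                        for prompt in prompts:
+                            print(f"LLM starting with prompt: {prompt!r}")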
+ """ + + async def on_chat_model_start( + self, + serialized: dict[str, Any], + messages: list[list[BaseMessage]], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Run when a chat model starts running. + + **ATTENTION**: This method is called for chat models. If you're implementing + a handler for a non-chat model, you should use on_llm_start instead. + + Args: + serialized (dict[str, Any]): The serialized chat model. + messages (list[list[BaseMessage]]): The messages. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + metadata (Optional[dict[str, Any]]): The metadata. + kwargs (Any): Additional keyword arguments. + """ + # NotImplementedError is thrown intentionally + # Callback handler will fall back to on_llm_start if this is exception is thrown + msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`" + raise NotImplementedError(msg) + + async def on_llm_new_token( + self, + token: str, + *, + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run on new LLM token. Only available when streaming is enabled. + + Args: + token (str): The new token. + chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk, + containing content and other information. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_llm_end( + self, + response: LLMResult, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run when LLM ends running. + + Args: + response (LLMResult): The response which was generated. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_llm_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run when LLM errors. + + Args: + error: The error that occurred. + run_id: The run ID. This is the ID of the current run. + parent_run_id: The parent run ID. This is the ID of the parent run. + tags: The tags. + kwargs (Any): Additional keyword arguments. + - response (LLMResult): The response which was generated before + the error occurred. + """ + + async def on_chain_start( + self, + serialized: dict[str, Any], + inputs: dict[str, Any], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """Run when a chain starts running. + + Args: + serialized (dict[str, Any]): The serialized chain. + inputs (dict[str, Any]): The inputs. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + metadata (Optional[dict[str, Any]]): The metadata. 
+ kwargs (Any): Additional keyword arguments. + """ + + async def on_chain_end( + self, + outputs: dict[str, Any], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run when a chain ends running. + + Args: + outputs (dict[str, Any]): The outputs of the chain. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_chain_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run when chain errors. + + Args: + error (BaseException): The error that occurred. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_tool_start( + self, + serialized: dict[str, Any], + input_str: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + inputs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """Run when the tool starts running. + + Args: + serialized (dict[str, Any]): The serialized tool. + input_str (str): The input string. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + metadata (Optional[dict[str, Any]]): The metadata. + inputs (Optional[dict[str, Any]]): The inputs. + kwargs (Any): Additional keyword arguments. + """ + + async def on_tool_end( + self, + output: Any, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run when the tool ends running. + + Args: + output (Any): The output of the tool. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_tool_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run when tool errors. + + Args: + error (BaseException): The error that occurred. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_text( + self, + text: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run on an arbitrary text. + + Args: + text (str): The text. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_retry( + self, + retry_state: RetryCallState, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Any: + """Run on a retry event. 
+ + Args: + retry_state (RetryCallState): The retry state. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + kwargs (Any): Additional keyword arguments. + """ + + async def on_agent_action( + self, + action: AgentAction, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run on agent action. + + Args: + action (AgentAction): The agent action. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_agent_finish( + self, + finish: AgentFinish, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run on the agent end. + + Args: + finish (AgentFinish): The agent finish. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_retriever_start( + self, + serialized: dict[str, Any], + query: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """Run on the retriever start. + + Args: + serialized (dict[str, Any]): The serialized retriever. + query (str): The query. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + metadata (Optional[dict[str, Any]]): The metadata. + kwargs (Any): Additional keyword arguments. + """ + + async def on_retriever_end( + self, + documents: Sequence[Document], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run on the retriever end. + + Args: + documents (Sequence[Document]): The documents retrieved. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_retriever_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Run on retriever error. + + Args: + error (BaseException): The error that occurred. + run_id (UUID): The run ID. This is the ID of the current run. + parent_run_id (UUID): The parent run ID. This is the ID of the parent run. + tags (Optional[list[str]]): The tags. + kwargs (Any): Additional keyword arguments. + """ + + async def on_custom_event( + self, + name: str, + data: Any, + *, + run_id: UUID, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """Override to define a handler for a custom event. + + Args: + name: The name of the custom event. + data: The data for the custom event. Format will match + the format specified by the user. + run_id: The ID of the run. + tags: The tags associated with the custom event + (includes inherited tags). 
+ metadata: The metadata associated with the custom event + (includes inherited metadata). + + .. versionadded:: 0.2.15 + """ + + +class BaseCallbackManager(CallbackManagerMixin): + """Base callback manager for LangChain.""" + + def __init__( + self, + handlers: list[BaseCallbackHandler], + inheritable_handlers: Optional[list[BaseCallbackHandler]] = None, + parent_run_id: Optional[UUID] = None, + *, + tags: Optional[list[str]] = None, + inheritable_tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + inheritable_metadata: Optional[dict[str, Any]] = None, + ) -> None: + """Initialize callback manager. + + Args: + handlers (list[BaseCallbackHandler]): The handlers. + inheritable_handlers (Optional[list[BaseCallbackHandler]]): + The inheritable handlers. Default is None. + parent_run_id (Optional[UUID]): The parent run ID. Default is None. + tags (Optional[list[str]]): The tags. Default is None. + inheritable_tags (Optional[list[str]]): The inheritable tags. + Default is None. + metadata (Optional[dict[str, Any]]): The metadata. Default is None. + inheritable_metadata (Optional[dict[str, Any]]): The inheritable metadata. + Default is None. + """ + self.handlers: list[BaseCallbackHandler] = handlers + self.inheritable_handlers: list[BaseCallbackHandler] = ( + inheritable_handlers or [] + ) + self.parent_run_id: Optional[UUID] = parent_run_id + self.tags = tags or [] + self.inheritable_tags = inheritable_tags or [] + self.metadata = metadata or {} + self.inheritable_metadata = inheritable_metadata or {} + + def copy(self) -> Self: + """Copy the callback manager.""" + return self.__class__( + handlers=self.handlers.copy(), + inheritable_handlers=self.inheritable_handlers.copy(), + parent_run_id=self.parent_run_id, + tags=self.tags.copy(), + inheritable_tags=self.inheritable_tags.copy(), + metadata=self.metadata.copy(), + inheritable_metadata=self.inheritable_metadata.copy(), + ) + + def merge(self, other: BaseCallbackManager) -> Self: + """Merge the callback manager with another callback manager. + + May be overwritten in subclasses. Primarily used internally + within merge_configs. + + Returns: + BaseCallbackManager: The merged callback manager of the same type + as the current object. + + Example: Merging two callback managers. + + .. 
code-block:: python
+
+                from langchain_core.callbacks.manager import CallbackManager, trace_as_chain_group
+                from langchain_core.callbacks.stdout import StdOutCallbackHandler
+
+                manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"])
+                with trace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager:
+                    merged_manager = group_manager.merge(manager)
+                    print(merged_manager.handlers)
+                    # [
+                    #     <LangChainTracer instance>,
+                    #     <StdOutCallbackHandler instance>,
+                    # ]
+
+                    print(merged_manager.tags)
+                    # ['tag2', 'tag1']
+
+        """  # noqa: E501
+        manager = self.__class__(
+            parent_run_id=self.parent_run_id or other.parent_run_id,
+            handlers=[],
+            inheritable_handlers=[],
+            tags=list(set(self.tags + other.tags)),
+            inheritable_tags=list(set(self.inheritable_tags + other.inheritable_tags)),
+            metadata={
+                **self.metadata,
+                **other.metadata,
+            },
+        )
+
+        handlers = self.handlers + other.handlers
+        inheritable_handlers = self.inheritable_handlers + other.inheritable_handlers
+
+        for handler in handlers:
+            manager.add_handler(handler)
+
+        for handler in inheritable_handlers:
+            manager.add_handler(handler, inherit=True)
+        return manager
+
+    @property
+    def is_async(self) -> bool:
+        """Whether the callback manager is async."""
+        return False
+
+    def add_handler(
+        self,
+        handler: BaseCallbackHandler,
+        inherit: bool = True,  # noqa: FBT001,FBT002
+    ) -> None:
+        """Add a handler to the callback manager.
+
+        Args:
+            handler (BaseCallbackHandler): The handler to add.
+            inherit (bool): Whether to inherit the handler. Default is True.
+        """
+        if handler not in self.handlers:
+            self.handlers.append(handler)
+        if inherit and handler not in self.inheritable_handlers:
+            self.inheritable_handlers.append(handler)
+
+    def remove_handler(self, handler: BaseCallbackHandler) -> None:
+        """Remove a handler from the callback manager.
+
+        Args:
+            handler (BaseCallbackHandler): The handler to remove.
+        """
+        if handler in self.handlers:
+            self.handlers.remove(handler)
+        if handler in self.inheritable_handlers:
+            self.inheritable_handlers.remove(handler)
+
+    def set_handlers(
+        self,
+        handlers: list[BaseCallbackHandler],
+        inherit: bool = True,  # noqa: FBT001,FBT002
+    ) -> None:
+        """Set handlers as the only handlers on the callback manager.
+
+        Args:
+            handlers (list[BaseCallbackHandler]): The handlers to set.
+            inherit (bool): Whether to inherit the handlers. Default is True.
+        """
+        self.handlers = []
+        self.inheritable_handlers = []
+        for handler in handlers:
+            self.add_handler(handler, inherit=inherit)
+
+    def set_handler(
+        self,
+        handler: BaseCallbackHandler,
+        inherit: bool = True,  # noqa: FBT001,FBT002
+    ) -> None:
+        """Set handler as the only handler on the callback manager.
+
+        Args:
+            handler (BaseCallbackHandler): The handler to set.
+            inherit (bool): Whether to inherit the handler. Default is True.
+        """
+        self.set_handlers([handler], inherit=inherit)
+
+    def add_tags(
+        self,
+        tags: list[str],
+        inherit: bool = True,  # noqa: FBT001,FBT002
+    ) -> None:
+        """Add tags to the callback manager.
+
+        Args:
+            tags (list[str]): The tags to add.
+            inherit (bool): Whether to inherit the tags. Default is True.
+        """
+        for tag in tags:
+            if tag in self.tags:
+                self.remove_tags([tag])
+        self.tags.extend(tags)
+        if inherit:
+            self.inheritable_tags.extend(tags)
+
+    def remove_tags(self, tags: list[str]) -> None:
+        """Remove tags from the callback manager.
+
+        Args:
+            tags (list[str]): The tags to remove.
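+
+        Example:
+            Tags are removed from both the local and the inheritable lists
+            (the values are illustrative):
+
+            .. code-block:: python
+
+                manager = BaseCallbackManager(handlers=[])
+                manager.add_tags(["a", "b"])
+                manager.remove_tags(["a"])
+                # manager.tags == ["b"]; manager.inheritable_tags == ["b"]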
+ """ + for tag in tags: + self.tags.remove(tag) + self.inheritable_tags.remove(tag) + + def add_metadata( + self, + metadata: dict[str, Any], + inherit: bool = True, # noqa: FBT001,FBT002 + ) -> None: + """Add metadata to the callback manager. + + Args: + metadata (dict[str, Any]): The metadata to add. + inherit (bool): Whether to inherit the metadata. Default is True. + """ + self.metadata.update(metadata) + if inherit: + self.inheritable_metadata.update(metadata) + + def remove_metadata(self, keys: list[str]) -> None: + """Remove metadata from the callback manager. + + Args: + keys (list[str]): The keys to remove. + """ + for key in keys: + self.metadata.pop(key) + self.inheritable_metadata.pop(key) + + +Callbacks = Optional[Union[list[BaseCallbackHandler], BaseCallbackManager]] diff --git a/venv/Lib/site-packages/langchain_core/callbacks/file.py b/venv/Lib/site-packages/langchain_core/callbacks/file.py new file mode 100644 index 00000000..d2dcd0e7 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/callbacks/file.py @@ -0,0 +1,144 @@ +"""Callback Handler that writes to a file.""" + +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING, Any, Optional, TextIO, cast + +from typing_extensions import override + +from langchain_core.callbacks import BaseCallbackHandler +from langchain_core.utils.input import print_text + +if TYPE_CHECKING: + from langchain_core.agents import AgentAction, AgentFinish + + +class FileCallbackHandler(BaseCallbackHandler): + """Callback Handler that writes to a file. + + Parameters: + filename: The file to write to. + mode: The mode to open the file in. Defaults to "a". + color: The color to use for the text. + """ + + def __init__( + self, filename: str, mode: str = "a", color: Optional[str] = None + ) -> None: + """Initialize callback handler. + + Args: + filename: The filename to write to. + mode: The mode to open the file in. Defaults to "a". + color: The color to use for the text. Defaults to None. + """ + self.file = cast("TextIO", Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115 + self.color = color + + def __del__(self) -> None: + """Destructor to cleanup when done.""" + self.file.close() + + @override + def on_chain_start( + self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any + ) -> None: + """Print out that we are entering a chain. + + Args: + serialized (dict[str, Any]): The serialized chain. + inputs (dict[str, Any]): The inputs to the chain. + **kwargs (Any): Additional keyword arguments. + """ + if "name" in kwargs: + name = kwargs["name"] + elif serialized: + name = serialized.get("name", serialized.get("id", [""])[-1]) + else: + name = "" + print_text( + f"\n\n\033[1m> Entering new {name} chain...\033[0m", + end="\n", + file=self.file, + ) + + @override + def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None: + """Print out that we finished a chain. + + Args: + outputs (dict[str, Any]): The outputs of the chain. + **kwargs (Any): Additional keyword arguments. + """ + print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file) + + @override + def on_agent_action( + self, action: AgentAction, color: Optional[str] = None, **kwargs: Any + ) -> Any: + """Run on agent action. + + Args: + action (AgentAction): The agent action. + color (Optional[str], optional): The color to use for the text. + Defaults to None. + **kwargs (Any): Additional keyword arguments. 
+ """ + print_text(action.log, color=color or self.color, file=self.file) + + @override + def on_tool_end( + self, + output: str, + color: Optional[str] = None, + observation_prefix: Optional[str] = None, + llm_prefix: Optional[str] = None, + **kwargs: Any, + ) -> None: + """If not the final action, print out observation. + + Args: + output (str): The output to print. + color (Optional[str], optional): The color to use for the text. + Defaults to None. + observation_prefix (Optional[str], optional): The observation prefix. + Defaults to None. + llm_prefix (Optional[str], optional): The LLM prefix. + Defaults to None. + **kwargs (Any): Additional keyword arguments. + """ + if observation_prefix is not None: + print_text(f"\n{observation_prefix}", file=self.file) + print_text(output, color=color or self.color, file=self.file) + if llm_prefix is not None: + print_text(f"\n{llm_prefix}", file=self.file) + + @override + def on_text( + self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any + ) -> None: + """Run when the agent ends. + + Args: + text (str): The text to print. + color (Optional[str], optional): The color to use for the text. + Defaults to None. + end (str, optional): The end character. Defaults to "". + **kwargs (Any): Additional keyword arguments. + """ + print_text(text, color=color or self.color, end=end, file=self.file) + + @override + def on_agent_finish( + self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any + ) -> None: + """Run on the agent end. + + Args: + finish (AgentFinish): The agent finish. + color (Optional[str], optional): The color to use for the text. + Defaults to None. + **kwargs (Any): Additional keyword arguments. + """ + print_text(finish.log, color=color or self.color, end="\n", file=self.file) diff --git a/venv/Lib/site-packages/langchain_core/callbacks/manager.py b/venv/Lib/site-packages/langchain_core/callbacks/manager.py new file mode 100644 index 00000000..1220ed81 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/callbacks/manager.py @@ -0,0 +1,2638 @@ +"""Run managers.""" + +from __future__ import annotations + +import asyncio +import atexit +import functools +import logging +import uuid +from abc import ABC, abstractmethod +from concurrent.futures import ThreadPoolExecutor +from contextlib import asynccontextmanager, contextmanager +from contextvars import copy_context +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + TypeVar, + Union, + cast, +) +from uuid import UUID + +from langsmith.run_helpers import get_tracing_context +from typing_extensions import Self, override + +from langchain_core.callbacks.base import ( + BaseCallbackHandler, + BaseCallbackManager, + Callbacks, + ChainManagerMixin, + LLMManagerMixin, + RetrieverManagerMixin, + RunManagerMixin, + ToolManagerMixin, +) +from langchain_core.callbacks.stdout import StdOutCallbackHandler +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.tracers.schemas import Run +from langchain_core.utils.env import env_var_is_set + +if TYPE_CHECKING: + from collections.abc import AsyncGenerator, Coroutine, Generator, Sequence + + from tenacity import RetryCallState + + from langchain_core.agents import AgentAction, AgentFinish + from langchain_core.documents import Document + from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult + from langchain_core.runnables.config import RunnableConfig + +logger = logging.getLogger(__name__) + + +def _get_debug() -> bool: + from 
langchain_core.globals import get_debug + + return get_debug() + + +@contextmanager +def trace_as_chain_group( + group_name: str, + callback_manager: Optional[CallbackManager] = None, + *, + inputs: Optional[dict[str, Any]] = None, + project_name: Optional[str] = None, + example_id: Optional[Union[str, UUID]] = None, + run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, +) -> Generator[CallbackManagerForChainGroup, None, None]: + """Get a callback manager for a chain group in a context manager. + + Useful for grouping different calls together as a single run even if + they aren't composed in a single chain. + + Args: + group_name (str): The name of the chain group. + callback_manager (CallbackManager, optional): The callback manager to use. + Defaults to None. + inputs (dict[str, Any], optional): The inputs to the chain group. + Defaults to None. + project_name (str, optional): The name of the project. + Defaults to None. + example_id (str or UUID, optional): The ID of the example. + Defaults to None. + run_id (UUID, optional): The ID of the run. + tags (list[str], optional): The inheritable tags to apply to all runs. + Defaults to None. + metadata (dict[str, Any], optional): The metadata to apply to all runs. + Defaults to None. + + Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith. + + Returns: + CallbackManagerForChainGroup: The callback manager for the chain group. + + Example: + .. code-block:: python + + llm_input = "Foo" + with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager: + # Use the callback manager for the chain group + res = llm.invoke(llm_input, {"callbacks": manager}) + manager.on_chain_end({"output": res}) + """ # noqa: E501 + from langchain_core.tracers.context import _get_trace_callbacks + + cb = _get_trace_callbacks( + project_name, example_id, callback_manager=callback_manager + ) + cm = CallbackManager.configure( + inheritable_callbacks=cb, + inheritable_tags=tags, + inheritable_metadata=metadata, + ) + + run_manager = cm.on_chain_start({"name": group_name}, inputs or {}, run_id=run_id) + child_cm = run_manager.get_child() + group_cm = CallbackManagerForChainGroup( + child_cm.handlers, + child_cm.inheritable_handlers, + child_cm.parent_run_id, + parent_run_manager=run_manager, + tags=child_cm.tags, + inheritable_tags=child_cm.inheritable_tags, + metadata=child_cm.metadata, + inheritable_metadata=child_cm.inheritable_metadata, + ) + try: + yield group_cm + except Exception as e: + if not group_cm.ended: + run_manager.on_chain_error(e) + raise + else: + if not group_cm.ended: + run_manager.on_chain_end({}) + + +@asynccontextmanager +async def atrace_as_chain_group( + group_name: str, + callback_manager: Optional[AsyncCallbackManager] = None, + *, + inputs: Optional[dict[str, Any]] = None, + project_name: Optional[str] = None, + example_id: Optional[Union[str, UUID]] = None, + run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, +) -> AsyncGenerator[AsyncCallbackManagerForChainGroup, None]: + """Get an async callback manager for a chain group in a context manager. + + Useful for grouping different async calls together as a single run even if + they aren't composed in a single chain. + + Args: + group_name (str): The name of the chain group. + callback_manager (AsyncCallbackManager, optional): The async callback manager to use, + which manages tracing and other callback behavior. 
Defaults to None. + inputs (dict[str, Any], optional): The inputs to the chain group. + Defaults to None. + project_name (str, optional): The name of the project. + Defaults to None. + example_id (str or UUID, optional): The ID of the example. + Defaults to None. + run_id (UUID, optional): The ID of the run. + tags (list[str], optional): The inheritable tags to apply to all runs. + Defaults to None. + metadata (dict[str, Any], optional): The metadata to apply to all runs. + Defaults to None. + + Returns: + AsyncCallbackManager: The async callback manager for the chain group. + + Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith. + + Example: + .. code-block:: python + + llm_input = "Foo" + async with atrace_as_chain_group("group_name", inputs={"input": llm_input}) as manager: + # Use the async callback manager for the chain group + res = await llm.ainvoke(llm_input, {"callbacks": manager}) + await manager.on_chain_end({"output": res}) + """ # noqa: E501 + from langchain_core.tracers.context import _get_trace_callbacks + + cb = _get_trace_callbacks( + project_name, example_id, callback_manager=callback_manager + ) + cm = AsyncCallbackManager.configure( + inheritable_callbacks=cb, inheritable_tags=tags, inheritable_metadata=metadata + ) + + run_manager = await cm.on_chain_start( + {"name": group_name}, inputs or {}, run_id=run_id + ) + child_cm = run_manager.get_child() + group_cm = AsyncCallbackManagerForChainGroup( + child_cm.handlers, + child_cm.inheritable_handlers, + child_cm.parent_run_id, + parent_run_manager=run_manager, + tags=child_cm.tags, + inheritable_tags=child_cm.inheritable_tags, + metadata=child_cm.metadata, + inheritable_metadata=child_cm.inheritable_metadata, + ) + try: + yield group_cm + except Exception as e: + if not group_cm.ended: + await run_manager.on_chain_error(e) + raise + else: + if not group_cm.ended: + await run_manager.on_chain_end({}) + + +Func = TypeVar("Func", bound=Callable) + + +def shielded(func: Func) -> Func: + """Makes so an awaitable method is always shielded from cancellation. + + Args: + func (Callable): The function to shield. + + Returns: + Callable: The shielded function + """ + + @functools.wraps(func) + async def wrapped(*args: Any, **kwargs: Any) -> Any: + return await asyncio.shield(func(*args, **kwargs)) + + return cast("Func", wrapped) + + +def handle_event( + handlers: list[BaseCallbackHandler], + event_name: str, + ignore_condition_name: Optional[str], + *args: Any, + **kwargs: Any, +) -> None: + """Generic event handler for CallbackManager. + + Note: This function is used by LangServe to handle events. + + Args: + handlers: The list of handlers that will handle the event. + event_name: The name of the event (e.g., "on_llm_start"). + ignore_condition_name: Name of the attribute defined on handler + that if True will cause the handler to be skipped for the given event. + *args: The arguments to pass to the event handler. 
+ **kwargs: The keyword arguments to pass to the event handler + """ + coros: list[Coroutine[Any, Any, Any]] = [] + + try: + message_strings: Optional[list[str]] = None + for handler in handlers: + try: + if ignore_condition_name is None or not getattr( + handler, ignore_condition_name + ): + event = getattr(handler, event_name)(*args, **kwargs) + if asyncio.iscoroutine(event): + coros.append(event) + except NotImplementedError as e: + if event_name == "on_chat_model_start": + if message_strings is None: + message_strings = [get_buffer_string(m) for m in args[1]] + handle_event( + [handler], + "on_llm_start", + "ignore_llm", + args[0], + message_strings, + *args[2:], + **kwargs, + ) + else: + handler_name = handler.__class__.__name__ + logger.warning( + "NotImplementedError in %s.%s callback: %s", + handler_name, + event_name, + repr(e), + ) + except Exception as e: + logger.warning( + "Error in %s.%s callback: %s", + handler.__class__.__name__, + event_name, + repr(e), + ) + if handler.raise_error: + raise + finally: + if coros: + try: + # Raises RuntimeError if there is no current event loop. + asyncio.get_running_loop() + loop_running = True + except RuntimeError: + loop_running = False + + if loop_running: + # If we try to submit this coroutine to the running loop + # we end up in a deadlock, as we'd have gotten here from a + # running coroutine, which we cannot interrupt to run this one. + # The solution is to run the synchronous function on the globally shared + # thread pool executor to avoid blocking the main event loop. + _executor().submit( + cast("Callable", copy_context().run), _run_coros, coros + ).result() + else: + # If there's no running loop, we can run the coroutines directly. + _run_coros(coros) + + +def _run_coros(coros: list[Coroutine[Any, Any, Any]]) -> None: + if hasattr(asyncio, "Runner"): + # Python 3.11+ + # Run the coroutines in a new event loop, taking care to + # - install signal handlers + # - run pending tasks scheduled by `coros` + # - close asyncgens and executors + # - close the loop + with asyncio.Runner() as runner: + # Run the coroutine, get the result + for coro in coros: + try: + runner.run(coro) + except Exception as e: + logger.warning("Error in callback coroutine: %s", repr(e)) + + # Run pending tasks scheduled by coros until they are all done + while pending := asyncio.all_tasks(runner.get_loop()): + runner.run(asyncio.wait(pending)) + else: + # Before Python 3.11 we need to run each coroutine in a new event loop + # as the Runner api is not available. 
+ for coro in coros: + try: + asyncio.run(coro) + except Exception as e: + logger.warning("Error in callback coroutine: %s", repr(e)) + + +async def _ahandle_event_for_handler( + handler: BaseCallbackHandler, + event_name: str, + ignore_condition_name: Optional[str], + *args: Any, + **kwargs: Any, +) -> None: + try: + if ignore_condition_name is None or not getattr(handler, ignore_condition_name): + event = getattr(handler, event_name) + if asyncio.iscoroutinefunction(event): + await event(*args, **kwargs) + elif handler.run_inline: + event(*args, **kwargs) + else: + await asyncio.get_event_loop().run_in_executor( + None, + cast( + "Callable", + functools.partial(copy_context().run, event, *args, **kwargs), + ), + ) + except NotImplementedError as e: + if event_name == "on_chat_model_start": + message_strings = [get_buffer_string(m) for m in args[1]] + await _ahandle_event_for_handler( + handler, + "on_llm_start", + "ignore_llm", + args[0], + message_strings, + *args[2:], + **kwargs, + ) + else: + logger.warning( + "NotImplementedError in %s.%s callback: %s", + handler.__class__.__name__, + event_name, + repr(e), + ) + except Exception as e: + logger.warning( + "Error in %s.%s callback: %s", + handler.__class__.__name__, + event_name, + repr(e), + ) + if handler.raise_error: + raise + + +async def ahandle_event( + handlers: list[BaseCallbackHandler], + event_name: str, + ignore_condition_name: Optional[str], + *args: Any, + **kwargs: Any, +) -> None: + """Async generic event handler for AsyncCallbackManager. + + Note: This function is used by LangServe to handle events. + + Args: + handlers: The list of handlers that will handle the event. + event_name: The name of the event (e.g., "on_llm_start"). + ignore_condition_name: Name of the attribute defined on handler + that if True will cause the handler to be skipped for the given event. + *args: The arguments to pass to the event handler. + **kwargs: The keyword arguments to pass to the event handler. + """ + for handler in [h for h in handlers if h.run_inline]: + await _ahandle_event_for_handler( + handler, event_name, ignore_condition_name, *args, **kwargs + ) + await asyncio.gather( + *( + _ahandle_event_for_handler( + handler, + event_name, + ignore_condition_name, + *args, + **kwargs, + ) + for handler in handlers + if not handler.run_inline + ) + ) + + +class BaseRunManager(RunManagerMixin): + """Base class for run manager (a bound callback manager).""" + + def __init__( + self, + *, + run_id: UUID, + handlers: list[BaseCallbackHandler], + inheritable_handlers: list[BaseCallbackHandler], + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + inheritable_tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + inheritable_metadata: Optional[dict[str, Any]] = None, + ) -> None: + """Initialize the run manager. + + Args: + run_id (UUID): The ID of the run. + handlers (list[BaseCallbackHandler]): The list of handlers. + inheritable_handlers (list[BaseCallbackHandler]): + The list of inheritable handlers. + parent_run_id (UUID, optional): The ID of the parent run. + Defaults to None. + tags (Optional[list[str]]): The list of tags. Defaults to None. + inheritable_tags (Optional[list[str]]): The list of inheritable tags. + Defaults to None. + metadata (Optional[dict[str, Any]]): The metadata. + Defaults to None. + inheritable_metadata (Optional[dict[str, Any]]): The inheritable metadata. + Defaults to None. 
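
        Example (an illustrative sketch, not part of the upstream docstring): the
        ordering that ``ahandle_event`` implements above. ``Recorder`` is a
        hypothetical handler; its ``run_inline`` flag controls whether it is
        awaited sequentially or gathered concurrently with the other handlers.

        .. code-block:: python

            import asyncio

            from langchain_core.callbacks.base import AsyncCallbackHandler
            from langchain_core.callbacks.manager import ahandle_event

            class Recorder(AsyncCallbackHandler):
                def __init__(self, name: str, log: list, inline: bool) -> None:
                    self.name, self.log = name, log
                    self.run_inline = inline

                async def on_text(self, text: str, **kwargs) -> None:
                    self.log.append((self.name, text))

            async def main() -> None:
                log: list = []
                handlers = [Recorder("inline", log, True), Recorder("pooled", log, False)]
                # Inline handlers are awaited one by one first; the remainder
                # run concurrently via asyncio.gather.
                await ahandle_event(handlers, "on_text", None, "hello", run_id=None)
                assert log[0] == ("inline", "hello")

            asyncio.run(main())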
+ """ + self.run_id = run_id + self.handlers = handlers + self.inheritable_handlers = inheritable_handlers + self.parent_run_id = parent_run_id + self.tags = tags or [] + self.inheritable_tags = inheritable_tags or [] + self.metadata = metadata or {} + self.inheritable_metadata = inheritable_metadata or {} + + @classmethod + def get_noop_manager(cls) -> Self: + """Return a manager that doesn't perform any operations. + + Returns: + BaseRunManager: The noop manager. + """ + return cls( + run_id=uuid.uuid4(), + handlers=[], + inheritable_handlers=[], + tags=[], + inheritable_tags=[], + metadata={}, + inheritable_metadata={}, + ) + + +class RunManager(BaseRunManager): + """Sync Run Manager.""" + + def on_text( + self, + text: str, + **kwargs: Any, + ) -> Any: + """Run when a text is received. + + Args: + text (str): The received text. + **kwargs (Any): Additional keyword arguments. + + Returns: + Any: The result of the callback. + """ + handle_event( + self.handlers, + "on_text", + None, + text, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + def on_retry( + self, + retry_state: RetryCallState, + **kwargs: Any, + ) -> None: + """Run when a retry is received. + + Args: + retry_state (RetryCallState): The retry state. + **kwargs (Any): Additional keyword arguments. + """ + handle_event( + self.handlers, + "on_retry", + "ignore_retry", + retry_state, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + +class ParentRunManager(RunManager): + """Sync Parent Run Manager.""" + + def get_child(self, tag: Optional[str] = None) -> CallbackManager: + """Get a child callback manager. + + Args: + tag (str, optional): The tag for the child callback manager. + Defaults to None. + + Returns: + CallbackManager: The child callback manager. + """ + manager = CallbackManager(handlers=[], parent_run_id=self.run_id) + manager.set_handlers(self.inheritable_handlers) + manager.add_tags(self.inheritable_tags) + manager.add_metadata(self.inheritable_metadata) + if tag is not None: + manager.add_tags([tag], inherit=False) + return manager + + +class AsyncRunManager(BaseRunManager, ABC): + """Async Run Manager.""" + + @abstractmethod + def get_sync(self) -> RunManager: + """Get the equivalent sync RunManager. + + Returns: + RunManager: The sync RunManager. + """ + + async def on_text( + self, + text: str, + **kwargs: Any, + ) -> Any: + """Run when a text is received. + + Args: + text (str): The received text. + **kwargs (Any): Additional keyword arguments. + + Returns: + Any: The result of the callback. + """ + await ahandle_event( + self.handlers, + "on_text", + None, + text, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + async def on_retry( + self, + retry_state: RetryCallState, + **kwargs: Any, + ) -> None: + """Async run when a retry is received. + + Args: + retry_state (RetryCallState): The retry state. + **kwargs (Any): Additional keyword arguments. + """ + await ahandle_event( + self.handlers, + "on_retry", + "ignore_retry", + retry_state, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + +class AsyncParentRunManager(AsyncRunManager): + """Async Parent Run Manager.""" + + def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager: + """Get a child callback manager. + + Args: + tag (str, optional): The tag for the child callback manager. + Defaults to None. 
+ + Returns: + AsyncCallbackManager: The child callback manager. + """ + manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id) + manager.set_handlers(self.inheritable_handlers) + manager.add_tags(self.inheritable_tags) + manager.add_metadata(self.inheritable_metadata) + if tag is not None: + manager.add_tags([tag], inherit=False) + return manager + + +class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): + """Callback manager for LLM run.""" + + def on_llm_new_token( + self, + token: str, + *, + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + **kwargs: Any, + ) -> None: + """Run when LLM generates a new token. + + Args: + token (str): The new token. + chunk (Optional[Union[GenerationChunk, ChatGenerationChunk]], optional): + The chunk. Defaults to None. + **kwargs (Any): Additional keyword arguments. + """ + handle_event( + self.handlers, + "on_llm_new_token", + "ignore_llm", + token=token, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + chunk=chunk, + **kwargs, + ) + + def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + """Run when LLM ends running. + + Args: + response (LLMResult): The LLM result. + **kwargs (Any): Additional keyword arguments. + """ + handle_event( + self.handlers, + "on_llm_end", + "ignore_llm", + response, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + def on_llm_error( + self, + error: BaseException, + **kwargs: Any, + ) -> None: + """Run when LLM errors. + + Args: + error (Exception or KeyboardInterrupt): The error. + kwargs (Any): Additional keyword arguments. + - response (LLMResult): The response which was generated before + the error occurred. + """ + handle_event( + self.handlers, + "on_llm_error", + "ignore_llm", + error, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + +class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): + """Async callback manager for LLM run.""" + + def get_sync(self) -> CallbackManagerForLLMRun: + """Get the equivalent sync RunManager. + + Returns: + CallbackManagerForLLMRun: The sync RunManager. + """ + return CallbackManagerForLLMRun( + run_id=self.run_id, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + + @shielded + async def on_llm_new_token( + self, + token: str, + *, + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + **kwargs: Any, + ) -> None: + """Run when LLM generates a new token. + + Args: + token (str): The new token. + chunk (Optional[Union[GenerationChunk, ChatGenerationChunk]], optional): + The chunk. Defaults to None. + **kwargs (Any): Additional keyword arguments. + """ + await ahandle_event( + self.handlers, + "on_llm_new_token", + "ignore_llm", + token, + chunk=chunk, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + @shielded + async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + """Run when LLM ends running. + + Args: + response (LLMResult): The LLM result. + **kwargs (Any): Additional keyword arguments. 
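
        Example (an illustrative sketch, not part of the upstream docstring): a
        manual streaming run; ``fake-llm`` is an arbitrary serialized name and
        no real model is involved.

        .. code-block:: python

            import asyncio

            from langchain_core.callbacks.manager import AsyncCallbackManager
            from langchain_core.callbacks.streaming_stdout import (
                StreamingStdOutCallbackHandler,
            )
            from langchain_core.outputs import LLMResult

            async def main() -> None:
                cm = AsyncCallbackManager(handlers=[StreamingStdOutCallbackHandler()])
                # One run manager is returned per prompt.
                (run,) = await cm.on_llm_start({"name": "fake-llm"}, ["Hi there"])
                for token in ["Hel", "lo", "!"]:
                    await run.on_llm_new_token(token)  # streamed to stdout
                await run.on_llm_end(LLMResult(generations=[[]]))

            asyncio.run(main())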
+ """ + await ahandle_event( + self.handlers, + "on_llm_end", + "ignore_llm", + response, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + @shielded + async def on_llm_error( + self, + error: BaseException, + **kwargs: Any, + ) -> None: + """Run when LLM errors. + + Args: + error (Exception or KeyboardInterrupt): The error. + kwargs (Any): Additional keyword arguments. + - response (LLMResult): The response which was generated before + the error occurred. + + + + """ + await ahandle_event( + self.handlers, + "on_llm_error", + "ignore_llm", + error, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + +class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin): + """Callback manager for chain run.""" + + def on_chain_end(self, outputs: Union[dict[str, Any], Any], **kwargs: Any) -> None: + """Run when chain ends running. + + Args: + outputs (Union[dict[str, Any], Any]): The outputs of the chain. + **kwargs (Any): Additional keyword arguments. + """ + handle_event( + self.handlers, + "on_chain_end", + "ignore_chain", + outputs, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + def on_chain_error( + self, + error: BaseException, + **kwargs: Any, + ) -> None: + """Run when chain errors. + + Args: + error (Exception or KeyboardInterrupt): The error. + **kwargs (Any): Additional keyword arguments. + """ + handle_event( + self.handlers, + "on_chain_error", + "ignore_chain", + error, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: + """Run when agent action is received. + + Args: + action (AgentAction): The agent action. + **kwargs (Any): Additional keyword arguments. + + Returns: + Any: The result of the callback. + """ + handle_event( + self.handlers, + "on_agent_action", + "ignore_agent", + action, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: + """Run when agent finish is received. + + Args: + finish (AgentFinish): The agent finish. + **kwargs (Any): Additional keyword arguments. + + Returns: + Any: The result of the callback. + """ + handle_event( + self.handlers, + "on_agent_finish", + "ignore_agent", + finish, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + +class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin): + """Async callback manager for chain run.""" + + def get_sync(self) -> CallbackManagerForChainRun: + """Get the equivalent sync RunManager. + + Returns: + CallbackManagerForChainRun: The sync RunManager. + """ + return CallbackManagerForChainRun( + run_id=self.run_id, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + + @shielded + async def on_chain_end( + self, outputs: Union[dict[str, Any], Any], **kwargs: Any + ) -> None: + """Run when a chain ends running. + + Args: + outputs (Union[dict[str, Any], Any]): The outputs of the chain. + **kwargs (Any): Additional keyword arguments. 
+ """ + await ahandle_event( + self.handlers, + "on_chain_end", + "ignore_chain", + outputs, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + @shielded + async def on_chain_error( + self, + error: BaseException, + **kwargs: Any, + ) -> None: + """Run when chain errors. + + Args: + error (Exception or KeyboardInterrupt): The error. + **kwargs (Any): Additional keyword arguments. + """ + await ahandle_event( + self.handlers, + "on_chain_error", + "ignore_chain", + error, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + @shielded + async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: + """Run when agent action is received. + + Args: + action (AgentAction): The agent action. + **kwargs (Any): Additional keyword arguments. + + Returns: + Any: The result of the callback. + """ + await ahandle_event( + self.handlers, + "on_agent_action", + "ignore_agent", + action, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + @shielded + async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: + """Run when agent finish is received. + + Args: + finish (AgentFinish): The agent finish. + **kwargs (Any): Additional keyword arguments. + + Returns: + Any: The result of the callback. + """ + await ahandle_event( + self.handlers, + "on_agent_finish", + "ignore_agent", + finish, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + +class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin): + """Callback manager for tool run.""" + + def on_tool_end( + self, + output: Any, + **kwargs: Any, + ) -> None: + """Run when the tool ends running. + + Args: + output (Any): The output of the tool. + **kwargs (Any): Additional keyword arguments. + """ + handle_event( + self.handlers, + "on_tool_end", + "ignore_agent", + output, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + def on_tool_error( + self, + error: BaseException, + **kwargs: Any, + ) -> None: + """Run when tool errors. + + Args: + error (Exception or KeyboardInterrupt): The error. + **kwargs (Any): Additional keyword arguments. + """ + handle_event( + self.handlers, + "on_tool_error", + "ignore_agent", + error, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + +class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin): + """Async callback manager for tool run.""" + + def get_sync(self) -> CallbackManagerForToolRun: + """Get the equivalent sync RunManager. + + Returns: + CallbackManagerForToolRun: The sync RunManager. + """ + return CallbackManagerForToolRun( + run_id=self.run_id, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + + @shielded + async def on_tool_end(self, output: Any, **kwargs: Any) -> None: + """Async run when the tool ends running. + + Args: + output (Any): The output of the tool. + **kwargs (Any): Additional keyword arguments. 
+ """ + await ahandle_event( + self.handlers, + "on_tool_end", + "ignore_agent", + output, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + @shielded + async def on_tool_error( + self, + error: BaseException, + **kwargs: Any, + ) -> None: + """Run when tool errors. + + Args: + error (Exception or KeyboardInterrupt): The error. + **kwargs (Any): Additional keyword arguments. + """ + await ahandle_event( + self.handlers, + "on_tool_error", + "ignore_agent", + error, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + +class CallbackManagerForRetrieverRun(ParentRunManager, RetrieverManagerMixin): + """Callback manager for retriever run.""" + + def on_retriever_end( + self, + documents: Sequence[Document], + **kwargs: Any, + ) -> None: + """Run when retriever ends running. + + Args: + documents (Sequence[Document]): The retrieved documents. + **kwargs (Any): Additional keyword arguments. + """ + handle_event( + self.handlers, + "on_retriever_end", + "ignore_retriever", + documents, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + def on_retriever_error( + self, + error: BaseException, + **kwargs: Any, + ) -> None: + """Run when retriever errors. + + Args: + error (BaseException): The error. + **kwargs (Any): Additional keyword arguments. + """ + handle_event( + self.handlers, + "on_retriever_error", + "ignore_retriever", + error, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + +class AsyncCallbackManagerForRetrieverRun( + AsyncParentRunManager, + RetrieverManagerMixin, +): + """Async callback manager for retriever run.""" + + def get_sync(self) -> CallbackManagerForRetrieverRun: + """Get the equivalent sync RunManager. + + Returns: + CallbackManagerForRetrieverRun: The sync RunManager. + """ + return CallbackManagerForRetrieverRun( + run_id=self.run_id, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + + @shielded + async def on_retriever_end( + self, documents: Sequence[Document], **kwargs: Any + ) -> None: + """Run when the retriever ends running. + + Args: + documents (Sequence[Document]): The retrieved documents. + **kwargs (Any): Additional keyword arguments. + """ + await ahandle_event( + self.handlers, + "on_retriever_end", + "ignore_retriever", + documents, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + @shielded + async def on_retriever_error( + self, + error: BaseException, + **kwargs: Any, + ) -> None: + """Run when retriever errors. + + Args: + error (BaseException): The error. + **kwargs (Any): Additional keyword arguments. + """ + await ahandle_event( + self.handlers, + "on_retriever_error", + "ignore_retriever", + error, + run_id=self.run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + **kwargs, + ) + + +class CallbackManager(BaseCallbackManager): + """Callback manager for LangChain.""" + + def on_llm_start( + self, + serialized: dict[str, Any], + prompts: list[str], + run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> list[CallbackManagerForLLMRun]: + """Run when LLM starts running. + + Args: + serialized (dict[str, Any]): The serialized LLM. + prompts (list[str]): The list of prompts. + run_id (UUID, optional): The ID of the run. 
Defaults to None. + **kwargs (Any): Additional keyword arguments. + + Returns: + list[CallbackManagerForLLMRun]: A callback manager for each + prompt as an LLM run. + """ + managers = [] + for i, prompt in enumerate(prompts): + # Can't have duplicate runs with the same run ID (if provided) + run_id_ = run_id if i == 0 and run_id is not None else uuid.uuid4() + handle_event( + self.handlers, + "on_llm_start", + "ignore_llm", + serialized, + [prompt], + run_id=run_id_, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + + managers.append( + CallbackManagerForLLMRun( + run_id=run_id_, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + ) + + return managers + + def on_chat_model_start( + self, + serialized: dict[str, Any], + messages: list[list[BaseMessage]], + run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> list[CallbackManagerForLLMRun]: + """Run when chat model starts running. + + Args: + serialized (dict[str, Any]): The serialized LLM. + messages (list[list[BaseMessage]]): The list of messages. + run_id (UUID, optional): The ID of the run. Defaults to None. + **kwargs (Any): Additional keyword arguments. + + Returns: + list[CallbackManagerForLLMRun]: A callback manager for each + list of messages as an LLM run. + """ + managers = [] + for message_list in messages: + if run_id is not None: + run_id_ = run_id + run_id = None + else: + run_id_ = uuid.uuid4() + handle_event( + self.handlers, + "on_chat_model_start", + "ignore_chat_model", + serialized, + [message_list], + run_id=run_id_, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + + managers.append( + CallbackManagerForLLMRun( + run_id=run_id_, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + ) + + return managers + + def on_chain_start( + self, + serialized: Optional[dict[str, Any]], + inputs: Union[dict[str, Any], Any], + run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> CallbackManagerForChainRun: + """Run when chain starts running. + + Args: + serialized (Optional[dict[str, Any]]): The serialized chain. + inputs (Union[dict[str, Any], Any]): The inputs to the chain. + run_id (UUID, optional): The ID of the run. Defaults to None. + **kwargs (Any): Additional keyword arguments. + + Returns: + CallbackManagerForChainRun: The callback manager for the chain run. 
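
        Example (an illustrative sketch, not part of the upstream docstring): a
        minimal sync chain lifecycle; ``demo-chain`` is an arbitrary name.

        .. code-block:: python

            from langchain_core.callbacks.manager import CallbackManager
            from langchain_core.callbacks.stdout import StdOutCallbackHandler

            cm = CallbackManager.configure(local_callbacks=[StdOutCallbackHandler()])
            run = cm.on_chain_start({"name": "demo-chain"}, {"question": "hi"})
            # ... the chain's actual work happens here ...
            run.on_chain_end({"answer": "hello"})
            # prints the "Entering new demo-chain chain..." and
            # "Finished chain." banners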
+ """ + if run_id is None: + run_id = uuid.uuid4() + handle_event( + self.handlers, + "on_chain_start", + "ignore_chain", + serialized, + inputs, + run_id=run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + + return CallbackManagerForChainRun( + run_id=run_id, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + + @override + def on_tool_start( + self, + serialized: Optional[dict[str, Any]], + input_str: str, + run_id: Optional[UUID] = None, + parent_run_id: Optional[UUID] = None, + inputs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> CallbackManagerForToolRun: + """Run when tool starts running. + + Args: + serialized: Serialized representation of the tool. + input_str: The input to the tool as a string. + Non-string inputs are cast to strings. + run_id: ID for the run. Defaults to None. + parent_run_id: The ID of the parent run. Defaults to None. + inputs: The original input to the tool if provided. + Recommended for usage instead of input_str when the original + input is needed. + If provided, the inputs are expected to be formatted as a dict. + The keys will correspond to the named-arguments in the tool. + **kwargs (Any): Additional keyword arguments. + + Returns: + CallbackManagerForToolRun: The callback manager for the tool run. + """ + if run_id is None: + run_id = uuid.uuid4() + + handle_event( + self.handlers, + "on_tool_start", + "ignore_agent", + serialized, + input_str, + run_id=run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + inputs=inputs, + **kwargs, + ) + + return CallbackManagerForToolRun( + run_id=run_id, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + + @override + def on_retriever_start( + self, + serialized: Optional[dict[str, Any]], + query: str, + run_id: Optional[UUID] = None, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> CallbackManagerForRetrieverRun: + """Run when the retriever starts running. + + Args: + serialized (Optional[dict[str, Any]]): The serialized retriever. + query (str): The query. + run_id (UUID, optional): The ID of the run. Defaults to None. + parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. + **kwargs (Any): Additional keyword arguments. + """ + if run_id is None: + run_id = uuid.uuid4() + + handle_event( + self.handlers, + "on_retriever_start", + "ignore_retriever", + serialized, + query, + run_id=run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + + return CallbackManagerForRetrieverRun( + run_id=run_id, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + + def on_custom_event( + self, + name: str, + data: Any, + run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> None: + """Dispatch an adhoc event to the handlers (async version). + + This event should NOT be used in any internal LangChain code. 
The event + is meant specifically for users of the library to dispatch custom + events that are tailored to their application. + + Args: + name: The name of the adhoc event. + data: The data for the adhoc event. + run_id: The ID of the run. Defaults to None. + + .. versionadded:: 0.2.14 + """ + if kwargs: + msg = ( + "The dispatcher API does not accept additional keyword arguments." + "Please do not pass any additional keyword arguments, instead " + "include them in the data field." + ) + raise ValueError(msg) + if run_id is None: + run_id = uuid.uuid4() + + handle_event( + self.handlers, + "on_custom_event", + "ignore_custom_event", + name, + data, + run_id=run_id, + tags=self.tags, + metadata=self.metadata, + ) + + @classmethod + def configure( + cls, + inheritable_callbacks: Callbacks = None, + local_callbacks: Callbacks = None, + verbose: bool = False, # noqa: FBT001,FBT002 + inheritable_tags: Optional[list[str]] = None, + local_tags: Optional[list[str]] = None, + inheritable_metadata: Optional[dict[str, Any]] = None, + local_metadata: Optional[dict[str, Any]] = None, + ) -> CallbackManager: + """Configure the callback manager. + + Args: + inheritable_callbacks (Optional[Callbacks], optional): The inheritable + callbacks. Defaults to None. + local_callbacks (Optional[Callbacks], optional): The local callbacks. + Defaults to None. + verbose (bool, optional): Whether to enable verbose mode. Defaults to False. + inheritable_tags (Optional[list[str]], optional): The inheritable tags. + Defaults to None. + local_tags (Optional[list[str]], optional): The local tags. + Defaults to None. + inheritable_metadata (Optional[dict[str, Any]], optional): The inheritable + metadata. Defaults to None. + local_metadata (Optional[dict[str, Any]], optional): The local metadata. + Defaults to None. + + Returns: + CallbackManager: The configured callback manager. + """ + return _configure( + cls, + inheritable_callbacks, + local_callbacks, + inheritable_tags, + local_tags, + inheritable_metadata, + local_metadata, + verbose=verbose, + ) + + +class CallbackManagerForChainGroup(CallbackManager): + """Callback manager for the chain group.""" + + def __init__( + self, + handlers: list[BaseCallbackHandler], + inheritable_handlers: Optional[list[BaseCallbackHandler]] = None, + parent_run_id: Optional[UUID] = None, + *, + parent_run_manager: CallbackManagerForChainRun, + **kwargs: Any, + ) -> None: + """Initialize the callback manager. + + Args: + handlers (list[BaseCallbackHandler]): The list of handlers. + inheritable_handlers (Optional[list[BaseCallbackHandler]]): The list of + inheritable handlers. Defaults to None. + parent_run_id (Optional[UUID]): The ID of the parent run. Defaults to None. + parent_run_manager (CallbackManagerForChainRun): The parent run manager. + **kwargs (Any): Additional keyword arguments. 
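
        Example (an illustrative sketch, not part of the upstream docstring):
        receiving the custom events dispatched through ``on_custom_event`` above;
        ``EventLogger`` and the event name ``progress`` are hypothetical.

        .. code-block:: python

            from typing import Any
            from uuid import UUID

            from langchain_core.callbacks.base import BaseCallbackHandler
            from langchain_core.callbacks.manager import CallbackManager

            class EventLogger(BaseCallbackHandler):
                def on_custom_event(
                    self, name: str, data: Any, *, run_id: UUID, **kwargs: Any
                ) -> None:
                    print(f"custom event {name!r}: {data}")

            cm = CallbackManager(handlers=[EventLogger()])
            cm.on_custom_event("progress", {"done": 0.5})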
+ """ + super().__init__( + handlers, + inheritable_handlers, + parent_run_id, + **kwargs, + ) + self.parent_run_manager = parent_run_manager + self.ended = False + + def copy(self) -> CallbackManagerForChainGroup: + """Copy the callback manager.""" + return self.__class__( + handlers=self.handlers.copy(), + inheritable_handlers=self.inheritable_handlers.copy(), + parent_run_id=self.parent_run_id, + tags=self.tags.copy(), + inheritable_tags=self.inheritable_tags.copy(), + metadata=self.metadata.copy(), + inheritable_metadata=self.inheritable_metadata.copy(), + parent_run_manager=self.parent_run_manager, + ) + + def merge( + self: CallbackManagerForChainGroup, other: BaseCallbackManager + ) -> CallbackManagerForChainGroup: + """Merge the group callback manager with another callback manager. + + Overwrites the merge method in the base class to ensure that the + parent run manager is preserved. Keeps the parent_run_manager + from the current object. + + Returns: + CallbackManagerForChainGroup: A copy of the current object with the + handlers, tags, and other attributes merged from the other object. + + Example: Merging two callback managers. + + .. code-block:: python + + from langchain_core.callbacks.manager import CallbackManager, trace_as_chain_group + from langchain_core.callbacks.stdout import StdOutCallbackHandler + + manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"]) + with trace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager: + merged_manager = group_manager.merge(manager) + print(type(merged_manager)) + # + + print(merged_manager.handlers) + # [ + # , + # , + # ] + + print(merged_manager.tags) + # ['tag2', 'tag1'] + + """ # noqa: E501 + manager = self.__class__( + parent_run_id=self.parent_run_id or other.parent_run_id, + handlers=[], + inheritable_handlers=[], + tags=list(set(self.tags + other.tags)), + inheritable_tags=list(set(self.inheritable_tags + other.inheritable_tags)), + metadata={ + **self.metadata, + **other.metadata, + }, + parent_run_manager=self.parent_run_manager, + ) + + handlers = self.handlers + other.handlers + inheritable_handlers = self.inheritable_handlers + other.inheritable_handlers + + for handler in handlers: + manager.add_handler(handler) + + for handler in inheritable_handlers: + manager.add_handler(handler, inherit=True) + return manager + + def on_chain_end(self, outputs: Union[dict[str, Any], Any], **kwargs: Any) -> None: + """Run when traced chain group ends. + + Args: + outputs (Union[dict[str, Any], Any]): The outputs of the chain. + **kwargs (Any): Additional keyword arguments. + """ + self.ended = True + return self.parent_run_manager.on_chain_end(outputs, **kwargs) + + def on_chain_error( + self, + error: BaseException, + **kwargs: Any, + ) -> None: + """Run when chain errors. + + Args: + error (Exception or KeyboardInterrupt): The error. + **kwargs (Any): Additional keyword arguments. + """ + self.ended = True + return self.parent_run_manager.on_chain_error(error, **kwargs) + + +class AsyncCallbackManager(BaseCallbackManager): + """Async callback manager that handles callbacks from LangChain.""" + + @property + def is_async(self) -> bool: + """Return whether the handler is async.""" + return True + + async def on_llm_start( + self, + serialized: dict[str, Any], + prompts: list[str], + run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> list[AsyncCallbackManagerForLLMRun]: + """Run when LLM starts running. + + Args: + serialized (dict[str, Any]): The serialized LLM. 
+ prompts (list[str]): The list of prompts. + run_id (UUID, optional): The ID of the run. Defaults to None. + **kwargs (Any): Additional keyword arguments. + + Returns: + list[AsyncCallbackManagerForLLMRun]: The list of async + callback managers, one for each LLM Run corresponding + to each prompt. + """ + inline_tasks = [] + non_inline_tasks = [] + inline_handlers = [handler for handler in self.handlers if handler.run_inline] + non_inline_handlers = [ + handler for handler in self.handlers if not handler.run_inline + ] + managers = [] + + for prompt in prompts: + if run_id is not None: + run_id_ = run_id + run_id = None + else: + run_id_ = uuid.uuid4() + + if inline_handlers: + inline_tasks.append( + ahandle_event( + inline_handlers, + "on_llm_start", + "ignore_llm", + serialized, + [prompt], + run_id=run_id_, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + ) + else: + non_inline_tasks.append( + ahandle_event( + non_inline_handlers, + "on_llm_start", + "ignore_llm", + serialized, + [prompt], + run_id=run_id_, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + ) + + managers.append( + AsyncCallbackManagerForLLMRun( + run_id=run_id_, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + ) + + # Run inline tasks sequentially + for inline_task in inline_tasks: + await inline_task + + # Run non-inline tasks concurrently + if non_inline_tasks: + await asyncio.gather(*non_inline_tasks) + + return managers + + async def on_chat_model_start( + self, + serialized: dict[str, Any], + messages: list[list[BaseMessage]], + run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> list[AsyncCallbackManagerForLLMRun]: + """Async run when LLM starts running. + + Args: + serialized (dict[str, Any]): The serialized LLM. + messages (list[list[BaseMessage]]): The list of messages. + run_id (UUID, optional): The ID of the run. Defaults to None. + **kwargs (Any): Additional keyword arguments. + + Returns: + list[AsyncCallbackManagerForLLMRun]: The list of + async callback managers, one for each LLM Run + corresponding to each inner message list. 
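
        Example (an illustrative sketch, not part of the upstream docstring): one
        run manager is created per inner message list; ``fake-chat`` is an
        arbitrary serialized name.

        .. code-block:: python

            import asyncio

            from langchain_core.callbacks.manager import AsyncCallbackManager
            from langchain_core.messages import HumanMessage
            from langchain_core.outputs import LLMResult

            async def main() -> None:
                cm = AsyncCallbackManager(handlers=[])
                runs = await cm.on_chat_model_start(
                    {"name": "fake-chat"},
                    [[HumanMessage(content="hi")], [HumanMessage(content="bye")]],
                )
                assert len(runs) == 2  # one per message list
                for run in runs:
                    await run.on_llm_end(LLMResult(generations=[[]]))

            asyncio.run(main())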
+ """ + inline_tasks = [] + non_inline_tasks = [] + managers = [] + + for message_list in messages: + if run_id is not None: + run_id_ = run_id + run_id = None + else: + run_id_ = uuid.uuid4() + + for handler in self.handlers: + task = ahandle_event( + [handler], + "on_chat_model_start", + "ignore_chat_model", + serialized, + [message_list], + run_id=run_id_, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + if handler.run_inline: + inline_tasks.append(task) + else: + non_inline_tasks.append(task) + + managers.append( + AsyncCallbackManagerForLLMRun( + run_id=run_id_, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + ) + + # Run inline tasks sequentially + for task in inline_tasks: + await task + + # Run non-inline tasks concurrently + if non_inline_tasks: + await asyncio.gather(*non_inline_tasks) + + return managers + + async def on_chain_start( + self, + serialized: Optional[dict[str, Any]], + inputs: Union[dict[str, Any], Any], + run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> AsyncCallbackManagerForChainRun: + """Async run when chain starts running. + + Args: + serialized (Optional[dict[str, Any]]): The serialized chain. + inputs (Union[dict[str, Any], Any]): The inputs to the chain. + run_id (UUID, optional): The ID of the run. Defaults to None. + **kwargs (Any): Additional keyword arguments. + + Returns: + AsyncCallbackManagerForChainRun: The async callback manager + for the chain run. + """ + if run_id is None: + run_id = uuid.uuid4() + + await ahandle_event( + self.handlers, + "on_chain_start", + "ignore_chain", + serialized, + inputs, + run_id=run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + + return AsyncCallbackManagerForChainRun( + run_id=run_id, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + + @override + async def on_tool_start( + self, + serialized: Optional[dict[str, Any]], + input_str: str, + run_id: Optional[UUID] = None, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> AsyncCallbackManagerForToolRun: + """Run when the tool starts running. + + Args: + serialized (Optional[dict[str, Any]]): The serialized tool. + input_str (str): The input to the tool. + run_id (UUID, optional): The ID of the run. Defaults to None. + parent_run_id (UUID, optional): The ID of the parent run. + Defaults to None. + **kwargs (Any): Additional keyword arguments. + + Returns: + AsyncCallbackManagerForToolRun: The async callback manager + for the tool run. 
+ """ + if run_id is None: + run_id = uuid.uuid4() + + await ahandle_event( + self.handlers, + "on_tool_start", + "ignore_agent", + serialized, + input_str, + run_id=run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + + return AsyncCallbackManagerForToolRun( + run_id=run_id, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + + async def on_custom_event( + self, + name: str, + data: Any, + run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> None: + """Dispatch an adhoc event to the handlers (async version). + + This event should NOT be used in any internal LangChain code. The event + is meant specifically for users of the library to dispatch custom + events that are tailored to their application. + + Args: + name: The name of the adhoc event. + data: The data for the adhoc event. + run_id: The ID of the run. Defaults to None. + + .. versionadded:: 0.2.14 + """ + if run_id is None: + run_id = uuid.uuid4() + + if kwargs: + msg = ( + "The dispatcher API does not accept additional keyword arguments." + "Please do not pass any additional keyword arguments, instead " + "include them in the data field." + ) + raise ValueError(msg) + await ahandle_event( + self.handlers, + "on_custom_event", + "ignore_custom_event", + name, + data, + run_id=run_id, + tags=self.tags, + metadata=self.metadata, + ) + + @override + async def on_retriever_start( + self, + serialized: Optional[dict[str, Any]], + query: str, + run_id: Optional[UUID] = None, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> AsyncCallbackManagerForRetrieverRun: + """Run when the retriever starts running. + + Args: + serialized (Optional[dict[str, Any]]): The serialized retriever. + query (str): The query. + run_id (UUID, optional): The ID of the run. Defaults to None. + parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. + **kwargs (Any): Additional keyword arguments. + + Returns: + AsyncCallbackManagerForRetrieverRun: The async callback manager + for the retriever run. + """ + if run_id is None: + run_id = uuid.uuid4() + + await ahandle_event( + self.handlers, + "on_retriever_start", + "ignore_retriever", + serialized, + query, + run_id=run_id, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + + return AsyncCallbackManagerForRetrieverRun( + run_id=run_id, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + + @classmethod + def configure( + cls, + inheritable_callbacks: Callbacks = None, + local_callbacks: Callbacks = None, + verbose: bool = False, # noqa: FBT001,FBT002 + inheritable_tags: Optional[list[str]] = None, + local_tags: Optional[list[str]] = None, + inheritable_metadata: Optional[dict[str, Any]] = None, + local_metadata: Optional[dict[str, Any]] = None, + ) -> AsyncCallbackManager: + """Configure the async callback manager. + + Args: + inheritable_callbacks (Optional[Callbacks], optional): The inheritable + callbacks. Defaults to None. + local_callbacks (Optional[Callbacks], optional): The local callbacks. + Defaults to None. + verbose (bool, optional): Whether to enable verbose mode. 
Defaults to False. + inheritable_tags (Optional[list[str]], optional): The inheritable tags. + Defaults to None. + local_tags (Optional[list[str]], optional): The local tags. + Defaults to None. + inheritable_metadata (Optional[dict[str, Any]], optional): The inheritable + metadata. Defaults to None. + local_metadata (Optional[dict[str, Any]], optional): The local metadata. + Defaults to None. + + Returns: + AsyncCallbackManager: The configured async callback manager. + """ + return _configure( + cls, + inheritable_callbacks, + local_callbacks, + inheritable_tags, + local_tags, + inheritable_metadata, + local_metadata, + verbose=verbose, + ) + + +class AsyncCallbackManagerForChainGroup(AsyncCallbackManager): + """Async callback manager for the chain group.""" + + def __init__( + self, + handlers: list[BaseCallbackHandler], + inheritable_handlers: Optional[list[BaseCallbackHandler]] = None, + parent_run_id: Optional[UUID] = None, + *, + parent_run_manager: AsyncCallbackManagerForChainRun, + **kwargs: Any, + ) -> None: + """Initialize the async callback manager. + + Args: + handlers (list[BaseCallbackHandler]): The list of handlers. + inheritable_handlers (Optional[list[BaseCallbackHandler]]): The list of + inheritable handlers. Defaults to None. + parent_run_id (Optional[UUID]): The ID of the parent run. Defaults to None. + parent_run_manager (AsyncCallbackManagerForChainRun): + The parent run manager. + **kwargs (Any): Additional keyword arguments. + """ + super().__init__( + handlers, + inheritable_handlers, + parent_run_id, + **kwargs, + ) + self.parent_run_manager = parent_run_manager + self.ended = False + + def copy(self) -> AsyncCallbackManagerForChainGroup: + """Copy the async callback manager.""" + return self.__class__( + handlers=self.handlers.copy(), + inheritable_handlers=self.inheritable_handlers.copy(), + parent_run_id=self.parent_run_id, + tags=self.tags.copy(), + inheritable_tags=self.inheritable_tags.copy(), + metadata=self.metadata.copy(), + inheritable_metadata=self.inheritable_metadata.copy(), + parent_run_manager=self.parent_run_manager, + ) + + def merge( + self: AsyncCallbackManagerForChainGroup, other: BaseCallbackManager + ) -> AsyncCallbackManagerForChainGroup: + """Merge the group callback manager with another callback manager. + + Overwrites the merge method in the base class to ensure that the + parent run manager is preserved. Keeps the parent_run_manager + from the current object. + + Returns: + AsyncCallbackManagerForChainGroup: A copy of the current AsyncCallbackManagerForChainGroup + with the handlers, tags, etc. of the other callback manager merged in. + + Example: Merging two callback managers. + + .. 
code-block:: python
+
+                from langchain_core.callbacks.manager import CallbackManager, atrace_as_chain_group
+                from langchain_core.callbacks.stdout import StdOutCallbackHandler
+
+                manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"])
+                async with atrace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager:
+                    merged_manager = group_manager.merge(manager)
+                    print(type(merged_manager))
+                    # <class 'langchain_core.callbacks.manager.AsyncCallbackManagerForChainGroup'>
+
+                    print(merged_manager.handlers)
+                    # [
+                    #     <StdOutCallbackHandler object at 0x...>,
+                    #     <LangChainTracer object at 0x...>,
+                    # ]
+
+                    print(merged_manager.tags)
+                    # ['tag2', 'tag1']
+
+        """  # noqa: E501
+        manager = self.__class__(
+            parent_run_id=self.parent_run_id or other.parent_run_id,
+            handlers=[],
+            inheritable_handlers=[],
+            tags=list(set(self.tags + other.tags)),
+            inheritable_tags=list(set(self.inheritable_tags + other.inheritable_tags)),
+            metadata={
+                **self.metadata,
+                **other.metadata,
+            },
+            parent_run_manager=self.parent_run_manager,
+        )
+
+        handlers = self.handlers + other.handlers
+        inheritable_handlers = self.inheritable_handlers + other.inheritable_handlers
+
+        for handler in handlers:
+            manager.add_handler(handler)
+
+        for handler in inheritable_handlers:
+            manager.add_handler(handler, inherit=True)
+        return manager
+
+    async def on_chain_end(
+        self, outputs: Union[dict[str, Any], Any], **kwargs: Any
+    ) -> None:
+        """Run when traced chain group ends.
+
+        Args:
+            outputs (Union[dict[str, Any], Any]): The outputs of the chain.
+            **kwargs (Any): Additional keyword arguments.
+        """
+        self.ended = True
+        await self.parent_run_manager.on_chain_end(outputs, **kwargs)
+
+    async def on_chain_error(
+        self,
+        error: BaseException,
+        **kwargs: Any,
+    ) -> None:
+        """Run when chain errors.
+
+        Args:
+            error (Exception or KeyboardInterrupt): The error.
+            **kwargs (Any): Additional keyword arguments.
+        """
+        self.ended = True
+        await self.parent_run_manager.on_chain_error(error, **kwargs)
+
+
+T = TypeVar("T", CallbackManager, AsyncCallbackManager)
+
+
+def _configure(
+    callback_manager_cls: type[T],
+    inheritable_callbacks: Callbacks = None,
+    local_callbacks: Callbacks = None,
+    inheritable_tags: Optional[list[str]] = None,
+    local_tags: Optional[list[str]] = None,
+    inheritable_metadata: Optional[dict[str, Any]] = None,
+    local_metadata: Optional[dict[str, Any]] = None,
+    *,
+    verbose: bool = False,
+) -> T:
+    """Configure the callback manager.
+
+    Args:
+        callback_manager_cls (Type[T]): The callback manager class.
+        inheritable_callbacks (Optional[Callbacks], optional): The inheritable
+            callbacks. Defaults to None.
+        local_callbacks (Optional[Callbacks], optional): The local callbacks.
+            Defaults to None.
+        verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
+        inheritable_tags (Optional[list[str]], optional): The inheritable tags.
+            Defaults to None.
+        local_tags (Optional[list[str]], optional): The local tags. Defaults to None.
+        inheritable_metadata (Optional[dict[str, Any]], optional): The inheritable
+            metadata. Defaults to None.
+        local_metadata (Optional[dict[str, Any]], optional): The local metadata.
+            Defaults to None.
+
+    Returns:
+        T: The configured callback manager.
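+
+    Example: A minimal sketch of how this helper is typically reached through
+        the public ``configure`` classmethods rather than called directly (the
+        handler and tag values below are illustrative placeholders):
+
+        .. code-block:: python
+
+            from langchain_core.callbacks.manager import CallbackManager
+            from langchain_core.callbacks.stdout import StdOutCallbackHandler
+
+            # CallbackManager.configure / AsyncCallbackManager.configure both
+            # delegate to _configure with their own class as the first argument.
+            manager = CallbackManager.configure(
+                inheritable_callbacks=[StdOutCallbackHandler()],
+                local_tags=["my-tag"],
+                verbose=True,
+            )
+            assert isinstance(manager, CallbackManager)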
+ """ + from langchain_core.tracers.context import ( + _configure_hooks, + _get_tracer_project, + _tracing_v2_is_enabled, + tracing_v2_callback_var, + ) + + tracing_context = get_tracing_context() + tracing_metadata = tracing_context["metadata"] + tracing_tags = tracing_context["tags"] + run_tree: Optional[Run] = tracing_context["parent"] + parent_run_id = None if run_tree is None else run_tree.id + callback_manager = callback_manager_cls( + handlers=[], + parent_run_id=parent_run_id, + ) + if inheritable_callbacks or local_callbacks: + if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None: + inheritable_callbacks_ = inheritable_callbacks or [] + callback_manager = callback_manager_cls( + handlers=inheritable_callbacks_.copy(), + inheritable_handlers=inheritable_callbacks_.copy(), + parent_run_id=parent_run_id, + ) + else: + parent_run_id_ = inheritable_callbacks.parent_run_id + # Break ties between the external tracing context and inherited context + if parent_run_id is not None and ( + parent_run_id_ is None + # If the LC parent has already been reflected + # in the run tree, we know the run_tree is either the + # same parent or a child of the parent. + or (run_tree and str(parent_run_id_) in run_tree.dotted_order) + ): + parent_run_id_ = parent_run_id + # Otherwise, we assume the LC context has progressed + # beyond the run tree and we should not inherit the parent. + callback_manager = callback_manager_cls( + handlers=inheritable_callbacks.handlers.copy(), + inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(), + parent_run_id=parent_run_id_, + tags=inheritable_callbacks.tags.copy(), + inheritable_tags=inheritable_callbacks.inheritable_tags.copy(), + metadata=inheritable_callbacks.metadata.copy(), + inheritable_metadata=inheritable_callbacks.inheritable_metadata.copy(), + ) + local_handlers_ = ( + local_callbacks + if isinstance(local_callbacks, list) + else (local_callbacks.handlers if local_callbacks else []) + ) + for handler in local_handlers_: + callback_manager.add_handler(handler, inherit=False) + if inheritable_tags or local_tags: + callback_manager.add_tags(inheritable_tags or []) + callback_manager.add_tags(local_tags or [], inherit=False) + if inheritable_metadata or local_metadata: + callback_manager.add_metadata(inheritable_metadata or {}) + callback_manager.add_metadata(local_metadata or {}, inherit=False) + if tracing_metadata: + callback_manager.add_metadata(tracing_metadata.copy()) + if tracing_tags: + callback_manager.add_tags(tracing_tags.copy()) + + v1_tracing_enabled_ = env_var_is_set("LANGCHAIN_TRACING") or env_var_is_set( + "LANGCHAIN_HANDLER" + ) + + tracer_v2 = tracing_v2_callback_var.get() + tracing_v2_enabled_ = _tracing_v2_is_enabled() + + if v1_tracing_enabled_ and not tracing_v2_enabled_: + # if both are enabled, can silently ignore the v1 tracer + msg = ( + "Tracing using LangChainTracerV1 is no longer supported. " + "Please set the LANGCHAIN_TRACING_V2 environment variable to enable " + "tracing instead." 
+        )
+        raise RuntimeError(msg)
+
+    tracer_project = _get_tracer_project()
+    debug = _get_debug()
+    if verbose or debug or tracing_v2_enabled_:
+        from langchain_core.tracers.langchain import LangChainTracer
+        from langchain_core.tracers.stdout import ConsoleCallbackHandler
+
+        if verbose and not any(
+            isinstance(handler, StdOutCallbackHandler)
+            for handler in callback_manager.handlers
+        ):
+            if debug:
+                pass
+            else:
+                callback_manager.add_handler(StdOutCallbackHandler(), inherit=False)
+        if debug and not any(
+            isinstance(handler, ConsoleCallbackHandler)
+            for handler in callback_manager.handlers
+        ):
+            callback_manager.add_handler(ConsoleCallbackHandler())
+        if tracing_v2_enabled_ and not any(
+            isinstance(handler, LangChainTracer)
+            for handler in callback_manager.handlers
+        ):
+            if tracer_v2:
+                callback_manager.add_handler(tracer_v2)
+            else:
+                try:
+                    handler = LangChainTracer(
+                        project_name=tracer_project,
+                        client=(
+                            run_tree.client
+                            if run_tree is not None
+                            else tracing_context["client"]
+                        ),
+                        tags=tracing_tags,
+                    )
+                    callback_manager.add_handler(handler)
+                except Exception as e:
+                    logger.warning(
+                        "Unable to load requested LangChainTracer."
+                        " To disable this warning,"
+                        " unset the LANGCHAIN_TRACING_V2 environment variables.\n"
+                        "%s",
+                        repr(e),
+                    )
+    if run_tree is not None:
+        for handler in callback_manager.handlers:
+            if isinstance(handler, LangChainTracer):
+                handler.order_map[run_tree.id] = (
+                    run_tree.trace_id,
+                    run_tree.dotted_order,
+                )
+                handler.run_map[str(run_tree.id)] = run_tree
+    for var, inheritable, handler_class, env_var in _configure_hooks:
+        create_one = (
+            env_var is not None
+            and env_var_is_set(env_var)
+            and handler_class is not None
+        )
+        if var.get() is not None or create_one:
+            var_handler = (
+                var.get() or cast("type[BaseCallbackHandler]", handler_class)()
+            )
+            if handler_class is None:
+                if not any(
+                    handler is var_handler  # direct pointer comparison
+                    for handler in callback_manager.handlers
+                ):
+                    callback_manager.add_handler(var_handler, inheritable)
+            elif not any(
+                isinstance(handler, handler_class)
+                for handler in callback_manager.handlers
+            ):
+                callback_manager.add_handler(var_handler, inheritable)
+    return callback_manager
+
+
+async def adispatch_custom_event(
+    name: str, data: Any, *, config: Optional[RunnableConfig] = None
+) -> None:
+    """Dispatch an adhoc event to the handlers.
+
+    Args:
+        name: The name of the adhoc event.
+        data: The data for the adhoc event. Free form data. Ideally should be
+            JSON serializable to avoid serialization issues downstream, but
+            this is not enforced.
+        config: Optional config object. Can usually be omitted, but is required
+            on Python <= 3.10 when running async (see the warning below).
+
+    Example:
+
+        .. code-block:: python
+
+            from langchain_core.callbacks import (
+                AsyncCallbackHandler,
+                adispatch_custom_event
+            )
+            from langchain_core.runnables import RunnableLambda
+
+            class CustomCallbackManager(AsyncCallbackHandler):
+                async def on_custom_event(
+                    self,
+                    name: str,
+                    data: Any,
+                    *,
+                    run_id: UUID,
+                    tags: Optional[list[str]] = None,
+                    metadata: Optional[dict[str, Any]] = None,
+                    **kwargs: Any,
+                ) -> None:
+                    print(f"Received custom event: {name} with data: {data}")
+
+            callback = CustomCallbackManager()
+
+            async def foo(inputs):
+                await adispatch_custom_event("my_event", {"bar": "buzz"})
+                return inputs
+
+            foo_ = RunnableLambda(foo)
+            await foo_.ainvoke({"a": "1"}, {"callbacks": [callback]})
+
+    Example: Use with astream events
+        .. code-block:: python
+
+            from langchain_core.callbacks import (
+                AsyncCallbackHandler,
+                adispatch_custom_event
+            )
+            from langchain_core.runnables import RunnableLambda
+
+            class CustomCallbackManager(AsyncCallbackHandler):
+                async def on_custom_event(
+                    self,
+                    name: str,
+                    data: Any,
+                    *,
+                    run_id: UUID,
+                    tags: Optional[list[str]] = None,
+                    metadata: Optional[dict[str, Any]] = None,
+                    **kwargs: Any,
+                ) -> None:
+                    print(f"Received custom event: {name} with data: {data}")
+
+            callback = CustomCallbackManager()
+
+            async def foo(inputs):
+                await adispatch_custom_event("event_type_1", {"bar": "buzz"})
+                await adispatch_custom_event("event_type_2", 5)
+                return inputs
+
+            foo_ = RunnableLambda(foo)
+
+            async for event in foo_.astream_events(
+                {"a": "1"},
+                version="v2",
+                config={"callbacks": [callback]}
+            ):
+                print(event)
+
+    .. warning::
+        If using python <= 3.10 and async, you MUST
+        specify the `config` parameter or the function will raise an error.
+        This is due to a limitation in asyncio for python <= 3.10 that prevents
+        LangChain from automatically propagating the config object on the user's
+        behalf.
+
+    .. versionadded:: 0.2.15
+    """
+    from langchain_core.runnables.config import (
+        ensure_config,
+        get_async_callback_manager_for_config,
+    )
+
+    config = ensure_config(config)
+    callback_manager = get_async_callback_manager_for_config(config)
+    # We want to get the callback manager for the parent run.
+    # This is a work-around for now to be able to dispatch adhoc events from
+    # within a tool or a lambda and have the metadata events associated
+    # with the parent run rather than have a new run id generated for each.
+    if callback_manager.parent_run_id is None:
+        msg = (
+            "Unable to dispatch an adhoc event without a parent run id. "
+            "This function can only be called from within an existing run (e.g., "
+            "inside a tool or a RunnableLambda or a RunnableGenerator). "
+            "If you are doing that and still seeing this error, try explicitly "
+            "passing the config parameter to this function."
+        )
+        raise RuntimeError(msg)
+
+    await callback_manager.on_custom_event(
+        name,
+        data,
+        run_id=callback_manager.parent_run_id,
+    )
+
+
+def dispatch_custom_event(
+    name: str, data: Any, *, config: Optional[RunnableConfig] = None
+) -> None:
+    """Dispatch an adhoc event.
+
+    Args:
+        name: The name of the adhoc event.
+        data: The data for the adhoc event. Free form data. Ideally should be
+            JSON serializable to avoid serialization issues downstream, but
+            this is not enforced.
+        config: Optional config object. Mirrors the async API but not strictly needed.
+
+    Example:
+
+        .. code-block:: python
+
+            from langchain_core.callbacks import BaseCallbackHandler
+            from langchain_core.callbacks import dispatch_custom_event
+            from langchain_core.runnables import RunnableLambda
+
+            class CustomCallbackManager(BaseCallbackHandler):
+                def on_custom_event(
+                    self,
+                    name: str,
+                    data: Any,
+                    *,
+                    run_id: UUID,
+                    tags: Optional[list[str]] = None,
+                    metadata: Optional[dict[str, Any]] = None,
+                    **kwargs: Any,
+                ) -> None:
+                    print(f"Received custom event: {name} with data: {data}")
+
+            def foo(inputs):
+                dispatch_custom_event("my_event", {"bar": "buzz"})
+                return inputs
+
+            foo_ = RunnableLambda(foo)
+            foo_.invoke({"a": "1"}, {"callbacks": [CustomCallbackManager()]})
+    .. versionadded:: 0.2.15
+    """
+    from langchain_core.runnables.config import (
+        ensure_config,
+        get_callback_manager_for_config,
+    )
+
+    config = ensure_config(config)
+    callback_manager = get_callback_manager_for_config(config)
+    # We want to get the callback manager for the parent run.
+    # This is a work-around for now to be able to dispatch adhoc events from
+    # within a tool or a lambda and have the metadata events associated
+    # with the parent run rather than have a new run id generated for each.
+    if callback_manager.parent_run_id is None:
+        msg = (
+            "Unable to dispatch an adhoc event without a parent run id. "
+            "This function can only be called from within an existing run (e.g., "
+            "inside a tool or a RunnableLambda or a RunnableGenerator). "
+            "If you are doing that and still seeing this error, try explicitly "
+            "passing the config parameter to this function."
+        )
+        raise RuntimeError(msg)
+    callback_manager.on_custom_event(
+        name,
+        data,
+        run_id=callback_manager.parent_run_id,
+    )
+
+
+@functools.lru_cache(maxsize=1)
+def _executor() -> ThreadPoolExecutor:
+    # If the user is specifying ASYNC callback handlers to be run from a
+    # SYNC context, and an event loop is already running,
+    # we cannot submit the coroutine to the running loop, because it
+    # would result in a deadlock. Instead we have to schedule them
+    # on a background thread. To avoid creating & shutting down
+    # a new executor every time, we use a lazily-created, shared
+    # executor. If you're using regular langchain parallelism (batch, etc.)
+    # you'd only ever need 1 worker, but we permit more for now to reduce the chance
+    # of slowdown if you are mixing with your own executor.
+    executor = ThreadPoolExecutor(max_workers=10)
+    atexit.register(executor.shutdown, wait=True)
+    return executor
diff --git a/venv/Lib/site-packages/langchain_core/callbacks/stdout.py b/venv/Lib/site-packages/langchain_core/callbacks/stdout.py
new file mode 100644
index 00000000..f72ea656
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/callbacks/stdout.py
@@ -0,0 +1,124 @@
+"""Callback Handler that prints to std out."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Optional
+
+from typing_extensions import override
+
+from langchain_core.callbacks.base import BaseCallbackHandler
+from langchain_core.utils import print_text
+
+if TYPE_CHECKING:
+    from langchain_core.agents import AgentAction, AgentFinish
+
+
+class StdOutCallbackHandler(BaseCallbackHandler):
+    """Callback Handler that prints to std out."""
+
+    def __init__(self, color: Optional[str] = None) -> None:
+        """Initialize callback handler.
+
+        Args:
+            color: The color to use for the text. Defaults to None.
+        """
+        self.color = color
+
+    @override
+    def on_chain_start(
+        self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
+    ) -> None:
+        """Print out that we are entering a chain.
+
+        Args:
+            serialized (dict[str, Any]): The serialized chain.
+            inputs (dict[str, Any]): The inputs to the chain.
+            **kwargs (Any): Additional keyword arguments.
+        """
+        if "name" in kwargs:
+            name = kwargs["name"]
+        elif serialized:
+            name = serialized.get("name", serialized.get("id", [""])[-1])
+        else:
+            name = ""
+        print(f"\n\n\033[1m> Entering new {name} chain...\033[0m")  # noqa: T201
+
+    @override
+    def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
+        """Print out that we finished a chain.
+
+        Args:
+            outputs (dict[str, Any]): The outputs of the chain.
+            **kwargs (Any): Additional keyword arguments.
+ """ + print("\n\033[1m> Finished chain.\033[0m") # noqa: T201 + + @override + def on_agent_action( + self, action: AgentAction, color: Optional[str] = None, **kwargs: Any + ) -> Any: + """Run on agent action. + + Args: + action (AgentAction): The agent action. + color (Optional[str]): The color to use for the text. Defaults to None. + **kwargs (Any): Additional keyword arguments. + """ + print_text(action.log, color=color or self.color) + + @override + def on_tool_end( + self, + output: Any, + color: Optional[str] = None, + observation_prefix: Optional[str] = None, + llm_prefix: Optional[str] = None, + **kwargs: Any, + ) -> None: + """If not the final action, print out observation. + + Args: + output (Any): The output to print. + color (Optional[str]): The color to use for the text. Defaults to None. + observation_prefix (Optional[str]): The observation prefix. + Defaults to None. + llm_prefix (Optional[str]): The LLM prefix. Defaults to None. + **kwargs (Any): Additional keyword arguments. + """ + output = str(output) + if observation_prefix is not None: + print_text(f"\n{observation_prefix}") + print_text(output, color=color or self.color) + if llm_prefix is not None: + print_text(f"\n{llm_prefix}") + + @override + def on_text( + self, + text: str, + color: Optional[str] = None, + end: str = "", + **kwargs: Any, + ) -> None: + """Run when the agent ends. + + Args: + text (str): The text to print. + color (Optional[str]): The color to use for the text. Defaults to None. + end (str): The end character to use. Defaults to "". + **kwargs (Any): Additional keyword arguments. + """ + print_text(text, color=color or self.color, end=end) + + @override + def on_agent_finish( + self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any + ) -> None: + """Run on the agent end. + + Args: + finish (AgentFinish): The agent finish. + color (Optional[str]): The color to use for the text. Defaults to None. + **kwargs (Any): Additional keyword arguments. + """ + print_text(finish.log, color=color or self.color, end="\n") diff --git a/venv/Lib/site-packages/langchain_core/callbacks/streaming_stdout.py b/venv/Lib/site-packages/langchain_core/callbacks/streaming_stdout.py new file mode 100644 index 00000000..f8dbe518 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/callbacks/streaming_stdout.py @@ -0,0 +1,149 @@ +"""Callback Handler streams to stdout on new llm token.""" + +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING, Any + +from typing_extensions import override + +from langchain_core.callbacks.base import BaseCallbackHandler + +if TYPE_CHECKING: + from langchain_core.agents import AgentAction, AgentFinish + from langchain_core.messages import BaseMessage + from langchain_core.outputs import LLMResult + + +class StreamingStdOutCallbackHandler(BaseCallbackHandler): + """Callback handler for streaming. Only works with LLMs that support streaming.""" + + def on_llm_start( + self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any + ) -> None: + """Run when LLM starts running. + + Args: + serialized (dict[str, Any]): The serialized LLM. + prompts (list[str]): The prompts to run. + **kwargs (Any): Additional keyword arguments. + """ + + def on_chat_model_start( + self, + serialized: dict[str, Any], + messages: list[list[BaseMessage]], + **kwargs: Any, + ) -> None: + """Run when LLM starts running. + + Args: + serialized (dict[str, Any]): The serialized LLM. + messages (list[list[BaseMessage]]): The messages to run. 
+ **kwargs (Any): Additional keyword arguments. + """ + + @override + def on_llm_new_token(self, token: str, **kwargs: Any) -> None: + """Run on new LLM token. Only available when streaming is enabled. + + Args: + token (str): The new token. + **kwargs (Any): Additional keyword arguments. + """ + sys.stdout.write(token) + sys.stdout.flush() + + def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + """Run when LLM ends running. + + Args: + response (LLMResult): The response from the LLM. + **kwargs (Any): Additional keyword arguments. + """ + + def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: + """Run when LLM errors. + + Args: + error (BaseException): The error that occurred. + **kwargs (Any): Additional keyword arguments. + """ + + def on_chain_start( + self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any + ) -> None: + """Run when a chain starts running. + + Args: + serialized (dict[str, Any]): The serialized chain. + inputs (dict[str, Any]): The inputs to the chain. + **kwargs (Any): Additional keyword arguments. + """ + + def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None: + """Run when a chain ends running. + + Args: + outputs (dict[str, Any]): The outputs of the chain. + **kwargs (Any): Additional keyword arguments. + """ + + def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: + """Run when chain errors. + + Args: + error (BaseException): The error that occurred. + **kwargs (Any): Additional keyword arguments. + """ + + def on_tool_start( + self, serialized: dict[str, Any], input_str: str, **kwargs: Any + ) -> None: + """Run when the tool starts running. + + Args: + serialized (dict[str, Any]): The serialized tool. + input_str (str): The input string. + **kwargs (Any): Additional keyword arguments. + """ + + def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: + """Run on agent action. + + Args: + action (AgentAction): The agent action. + **kwargs (Any): Additional keyword arguments. + """ + + def on_tool_end(self, output: Any, **kwargs: Any) -> None: + """Run when tool ends running. + + Args: + output (Any): The output of the tool. + **kwargs (Any): Additional keyword arguments. + """ + + def on_tool_error(self, error: BaseException, **kwargs: Any) -> None: + """Run when tool errors. + + Args: + error (BaseException): The error that occurred. + **kwargs (Any): Additional keyword arguments. + """ + + def on_text(self, text: str, **kwargs: Any) -> None: + """Run on an arbitrary text. + + Args: + text (str): The text to print. + **kwargs (Any): Additional keyword arguments. + """ + + def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: + """Run on the agent end. + + Args: + finish (AgentFinish): The agent finish. + **kwargs (Any): Additional keyword arguments. 
+ """ diff --git a/venv/Lib/site-packages/langchain_core/callbacks/usage.py b/venv/Lib/site-packages/langchain_core/callbacks/usage.py new file mode 100644 index 00000000..8a04b046 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/callbacks/usage.py @@ -0,0 +1,140 @@ +"""Callback Handler that tracks AIMessage.usage_metadata.""" + +import threading +from collections.abc import Generator +from contextlib import contextmanager +from contextvars import ContextVar +from typing import Any, Optional + +from typing_extensions import override + +from langchain_core.callbacks import BaseCallbackHandler +from langchain_core.messages import AIMessage +from langchain_core.messages.ai import UsageMetadata, add_usage +from langchain_core.outputs import ChatGeneration, LLMResult + + +class UsageMetadataCallbackHandler(BaseCallbackHandler): + """Callback Handler that tracks AIMessage.usage_metadata. + + Example: + .. code-block:: python + + from langchain.chat_models import init_chat_model + from langchain_core.callbacks import UsageMetadataCallbackHandler + + llm_1 = init_chat_model(model="openai:gpt-4o-mini") + llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + + callback = UsageMetadataCallbackHandler() + result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]}) + result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]}) + callback.usage_metadata + + .. code-block:: none + + {'gpt-4o-mini-2024-07-18': {'input_tokens': 8, + 'output_tokens': 10, + 'total_tokens': 18, + 'input_token_details': {'audio': 0, 'cache_read': 0}, + 'output_token_details': {'audio': 0, 'reasoning': 0}}, + 'claude-3-5-haiku-20241022': {'input_tokens': 8, + 'output_tokens': 21, + 'total_tokens': 29, + 'input_token_details': {'cache_read': 0, 'cache_creation': 0}}} + + .. versionadded:: 0.3.49 + """ + + def __init__(self) -> None: + """Initialize the UsageMetadataCallbackHandler.""" + super().__init__() + self._lock = threading.Lock() + self.usage_metadata: dict[str, UsageMetadata] = {} + + @override + def __repr__(self) -> str: + return str(self.usage_metadata) + + @override + def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + """Collect token usage.""" + # Check for usage_metadata (langchain-core >= 0.2.2) + try: + generation = response.generations[0][0] + except IndexError: + generation = None + + usage_metadata = None + model_name = None + if isinstance(generation, ChatGeneration): + try: + message = generation.message + if isinstance(message, AIMessage): + usage_metadata = message.usage_metadata + model_name = message.response_metadata.get("model_name") + except AttributeError: + pass + + # update shared state behind lock + if usage_metadata and model_name: + with self._lock: + if model_name not in self.usage_metadata: + self.usage_metadata[model_name] = usage_metadata + else: + self.usage_metadata[model_name] = add_usage( + self.usage_metadata[model_name], usage_metadata + ) + + +@contextmanager +def get_usage_metadata_callback( + name: str = "usage_metadata_callback", +) -> Generator[UsageMetadataCallbackHandler, None, None]: + """Get usage metadata callback. + + Get context manager for tracking usage metadata across chat model calls using + ``AIMessage.usage_metadata``. + + Args: + name (str): The name of the context variable. Defaults to + ``"usage_metadata_callback"``. + + Example: + .. 
code-block:: python + + from langchain.chat_models import init_chat_model + from langchain_core.callbacks import get_usage_metadata_callback + + llm_1 = init_chat_model(model="openai:gpt-4o-mini") + llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + + with get_usage_metadata_callback() as cb: + llm_1.invoke("Hello") + llm_2.invoke("Hello") + print(cb.usage_metadata) + + .. code-block:: none + + {'gpt-4o-mini-2024-07-18': {'input_tokens': 8, + 'output_tokens': 10, + 'total_tokens': 18, + 'input_token_details': {'audio': 0, 'cache_read': 0}, + 'output_token_details': {'audio': 0, 'reasoning': 0}}, + 'claude-3-5-haiku-20241022': {'input_tokens': 8, + 'output_tokens': 21, + 'total_tokens': 29, + 'input_token_details': {'cache_read': 0, 'cache_creation': 0}}} + + .. versionadded:: 0.3.49 + """ + from langchain_core.tracers.context import register_configure_hook + + usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = ( + ContextVar(name, default=None) + ) + register_configure_hook(usage_metadata_callback_var, inheritable=True) + cb = UsageMetadataCallbackHandler() + usage_metadata_callback_var.set(cb) + yield cb + usage_metadata_callback_var.set(None) diff --git a/venv/Lib/site-packages/langchain_core/chat_history.py b/venv/Lib/site-packages/langchain_core/chat_history.py new file mode 100644 index 00000000..9e74dfca --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/chat_history.py @@ -0,0 +1,247 @@ +"""**Chat message history** stores a history of the message interactions in a chat. + +**Class hierarchy:** + +.. code-block:: + + BaseChatMessageHistory --> ChatMessageHistory # Examples: FileChatMessageHistory, PostgresChatMessageHistory + +**Main helpers:** + +.. code-block:: + + AIMessage, HumanMessage, BaseMessage + +""" # noqa: E501 + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Union + +from pydantic import BaseModel, Field + +from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, + get_buffer_string, +) + +if TYPE_CHECKING: + from collections.abc import Sequence + + +class BaseChatMessageHistory(ABC): + """Abstract base class for storing chat message history. + + Implementations guidelines: + + Implementations are expected to over-ride all or some of the following methods: + + * add_messages: sync variant for bulk addition of messages + * aadd_messages: async variant for bulk addition of messages + * messages: sync variant for getting messages + * aget_messages: async variant for getting messages + * clear: sync variant for clearing messages + * aclear: async variant for clearing messages + + add_messages contains a default implementation that calls add_message + for each message in the sequence. This is provided for backwards compatibility + with existing implementations which only had add_message. + + Async variants all have default implementations that call the sync variants. + Implementers can choose to over-ride the async implementations to provide + truly async implementations. + + Usage guidelines: + + When used for updating history, users should favor usage of `add_messages` + over `add_message` or other variants like `add_user_message` and `add_ai_message` + to avoid unnecessary round-trips to the underlying persistence layer. + + Example: Shows a default implementation. + + .. 
code-block:: python
+
+            class FileChatMessageHistory(BaseChatMessageHistory):
+                storage_path: str
+                session_id: str
+
+                @property
+                def messages(self):
+                    path = os.path.join(self.storage_path, self.session_id)
+                    with open(path, encoding="utf-8") as f:
+                        messages = json.loads(f.read())
+                    return messages_from_dict(messages)
+
+                def add_messages(self, messages: Sequence[BaseMessage]) -> None:
+                    all_messages = list(self.messages)  # Existing messages
+                    all_messages.extend(messages)  # Add new messages
+
+                    serialized = [message_to_dict(message) for message in all_messages]
+                    # Can be further optimized by only writing new messages
+                    # using append mode.
+                    path = os.path.join(self.storage_path, self.session_id)
+                    with open(path, 'w') as f:
+                        json.dump(serialized, f)
+
+                def clear(self):
+                    path = os.path.join(self.storage_path, self.session_id)
+                    with open(path, 'w') as f:
+                        f.write("[]")
+    """
+
+    messages: list[BaseMessage]
+    """A property or attribute that returns a list of messages.
+
+    In general, getting the messages may involve IO to the underlying
+    persistence layer, so this operation is expected to incur some
+    latency.
+    """
+
+    async def aget_messages(self) -> list[BaseMessage]:
+        """Async version of getting messages.
+
+        Can over-ride this method to provide an efficient async implementation.
+
+        In general, fetching messages may involve IO to the underlying
+        persistence layer.
+        """
+        from langchain_core.runnables.config import run_in_executor
+
+        return await run_in_executor(None, lambda: self.messages)
+
+    def add_user_message(self, message: Union[HumanMessage, str]) -> None:
+        """Convenience method for adding a human message string to the store.
+
+        Please note that this is a convenience method. Code should favor the
+        bulk add_messages interface instead to save on round-trips to the underlying
+        persistence layer.
+
+        This method may be deprecated in a future release.
+
+        Args:
+            message: The human message to add to the store.
+        """
+        if isinstance(message, HumanMessage):
+            self.add_message(message)
+        else:
+            self.add_message(HumanMessage(content=message))
+
+    def add_ai_message(self, message: Union[AIMessage, str]) -> None:
+        """Convenience method for adding an AI message string to the store.
+
+        Please note that this is a convenience method. Code should favor the bulk
+        add_messages interface instead to save on round-trips to the underlying
+        persistence layer.
+
+        This method may be deprecated in a future release.
+
+        Args:
+            message: The AI message to add.
+        """
+        if isinstance(message, AIMessage):
+            self.add_message(message)
+        else:
+            self.add_message(AIMessage(content=message))
+
+    def add_message(self, message: BaseMessage) -> None:
+        """Add a Message object to the store.
+
+        Args:
+            message: A BaseMessage object to store.
+
+        Raises:
+            NotImplementedError: If the sub-class has not implemented an efficient
+                add_messages method.
+        """
+        if type(self).add_messages != BaseChatMessageHistory.add_messages:
+            # This means that the sub-class has implemented an efficient add_messages
+            # method, so we should use it.
+            self.add_messages([message])
+        else:
+            msg = (
+                "add_message is not implemented for this class. "
+                "Please implement add_message or add_messages."
+            )
+            raise NotImplementedError(msg)
+
+    def add_messages(self, messages: Sequence[BaseMessage]) -> None:
+        """Add a list of messages.
+
+        Implementations should over-ride this method to handle bulk addition
+        of messages in an efficient manner to avoid unnecessary round-trips
+        to the underlying store.
+
+        Args:
+            messages: A sequence of BaseMessage objects to store.
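+
+        Example: A minimal sketch using the in-memory implementation defined
+            below (the message contents are placeholders):
+
+            .. code-block:: python
+
+                from langchain_core.chat_history import InMemoryChatMessageHistory
+                from langchain_core.messages import AIMessage, HumanMessage
+
+                history = InMemoryChatMessageHistory()
+                # One bulk call instead of two separate add_message round-trips.
+                history.add_messages(
+                    [HumanMessage(content="hi"), AIMessage(content="hello")]
+                )
+                assert len(history.messages) == 2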
+ """ + for message in messages: + self.add_message(message) + + async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None: + """Async add a list of messages. + + Args: + messages: A sequence of BaseMessage objects to store. + """ + from langchain_core.runnables.config import run_in_executor + + await run_in_executor(None, self.add_messages, messages) + + @abstractmethod + def clear(self) -> None: + """Remove all messages from the store.""" + + async def aclear(self) -> None: + """Async remove all messages from the store.""" + from langchain_core.runnables.config import run_in_executor + + await run_in_executor(None, self.clear) + + def __str__(self) -> str: + """Return a string representation of the chat history.""" + return get_buffer_string(self.messages) + + +class InMemoryChatMessageHistory(BaseChatMessageHistory, BaseModel): + """In memory implementation of chat message history. + + Stores messages in a memory list. + """ + + messages: list[BaseMessage] = Field(default_factory=list) + """A list of messages stored in memory.""" + + async def aget_messages(self) -> list[BaseMessage]: + """Async version of getting messages. + + Can over-ride this method to provide an efficient async implementation. + In general, fetching messages may involve IO to the underlying + persistence layer. + + Returns: + List of messages. + """ + return self.messages + + def add_message(self, message: BaseMessage) -> None: + """Add a self-created message to the store. + + Args: + message: The message to add. + """ + self.messages.append(message) + + async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None: + """Async add messages to the store. + + Args: + messages: The messages to add. + """ + self.add_messages(messages) + + def clear(self) -> None: + """Clear all messages from the store.""" + self.messages = [] + + async def aclear(self) -> None: + """Async clear all messages from the store.""" + self.clear() diff --git a/venv/Lib/site-packages/langchain_core/chat_loaders.py b/venv/Lib/site-packages/langchain_core/chat_loaders.py new file mode 100644 index 00000000..dfb01eb8 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/chat_loaders.py @@ -0,0 +1,26 @@ +"""Chat loaders.""" + +from abc import ABC, abstractmethod +from collections.abc import Iterator + +from langchain_core.chat_sessions import ChatSession + + +class BaseChatLoader(ABC): + """Base class for chat loaders.""" + + @abstractmethod + def lazy_load(self) -> Iterator[ChatSession]: + """Lazy load the chat sessions. + + Returns: + An iterator of chat sessions. + """ + + def load(self) -> list[ChatSession]: + """Eagerly load the chat sessions into memory. + + Returns: + A list of chat sessions. + """ + return list(self.lazy_load()) diff --git a/venv/Lib/site-packages/langchain_core/chat_sessions.py b/venv/Lib/site-packages/langchain_core/chat_sessions.py new file mode 100644 index 00000000..23c60f3f --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/chat_sessions.py @@ -0,0 +1,18 @@ +"""**Chat Sessions** are a collection of messages and function calls.""" + +from collections.abc import Sequence +from typing import TypedDict + +from langchain_core.messages import BaseMessage + + +class ChatSession(TypedDict, total=False): + """Chat Session. + + Chat Session represents a single conversation, channel, or other group of messages. 
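+
+    Example: A minimal sketch of building a session by hand (the message
+        contents are placeholders):
+
+    .. code-block:: python
+
+        from langchain_core.chat_sessions import ChatSession
+        from langchain_core.messages import AIMessage, HumanMessage
+
+        session = ChatSession(
+            messages=[HumanMessage(content="hi"), AIMessage(content="hello")]
+        )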
+ """ + + messages: Sequence[BaseMessage] + """A sequence of the LangChain chat messages loaded from the source.""" + functions: Sequence[dict] + """A sequence of the function calling specs for the messages.""" diff --git a/venv/Lib/site-packages/langchain_core/document_loaders/__init__.py b/venv/Lib/site-packages/langchain_core/document_loaders/__init__.py new file mode 100644 index 00000000..f4225154 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/document_loaders/__init__.py @@ -0,0 +1,39 @@ +"""Document loaders.""" + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.document_loaders.base import BaseBlobParser, BaseLoader + from langchain_core.document_loaders.blob_loaders import Blob, BlobLoader, PathLike + from langchain_core.document_loaders.langsmith import LangSmithLoader + +__all__ = ( + "BaseBlobParser", + "BaseLoader", + "Blob", + "BlobLoader", + "PathLike", + "LangSmithLoader", +) + +_dynamic_imports = { + "BaseBlobParser": "base", + "BaseLoader": "base", + "Blob": "blob_loaders", + "BlobLoader": "blob_loaders", + "PathLike": "blob_loaders", + "LangSmithLoader": "langsmith", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..6dd74cf6 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..8c00342d Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/blob_loaders.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/blob_loaders.cpython-312.pyc new file mode 100644 index 00000000..76391dbf Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/blob_loaders.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/langsmith.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/langsmith.cpython-312.pyc new file mode 100644 index 00000000..d7a8aa3a Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/document_loaders/__pycache__/langsmith.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/document_loaders/base.py b/venv/Lib/site-packages/langchain_core/document_loaders/base.py new file mode 100644 index 00000000..f76edc87 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/document_loaders/base.py @@ -0,0 +1,127 @@ +"""Abstract interface for document loader implementations.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Optional + +from langchain_core.runnables import run_in_executor + +if TYPE_CHECKING: + from collections.abc import AsyncIterator, 
Iterator + + from langchain_text_splitters import TextSplitter + + from langchain_core.documents import Document + from langchain_core.documents.base import Blob + + +class BaseLoader(ABC): # noqa: B024 + """Interface for Document Loader. + + Implementations should implement the lazy-loading method using generators + to avoid loading all Documents into memory at once. + + `load` is provided just for user convenience and should not be overridden. + """ + + # Sub-classes should not implement this method directly. Instead, they + # should implement the lazy load method. + def load(self) -> list[Document]: + """Load data into Document objects.""" + return list(self.lazy_load()) + + async def aload(self) -> list[Document]: + """Load data into Document objects.""" + return [document async for document in self.alazy_load()] + + def load_and_split( + self, text_splitter: Optional[TextSplitter] = None + ) -> list[Document]: + """Load Documents and split into chunks. Chunks are returned as Documents. + + Do not override this method. It should be considered to be deprecated! + + Args: + text_splitter: TextSplitter instance to use for splitting documents. + Defaults to RecursiveCharacterTextSplitter. + + Returns: + List of Documents. + """ + if text_splitter is None: + try: + from langchain_text_splitters import RecursiveCharacterTextSplitter + except ImportError as e: + msg = ( + "Unable to import from langchain_text_splitters. Please specify " + "text_splitter or install langchain_text_splitters with " + "`pip install -U langchain-text-splitters`." + ) + raise ImportError(msg) from e + + _text_splitter: TextSplitter = RecursiveCharacterTextSplitter() + else: + _text_splitter = text_splitter + docs = self.load() + return _text_splitter.split_documents(docs) + + # Attention: This method will be upgraded into an abstractmethod once it's + # implemented in all the existing subclasses. + def lazy_load(self) -> Iterator[Document]: + """A lazy loader for Documents.""" + if type(self).load != BaseLoader.load: + return iter(self.load()) + msg = f"{self.__class__.__name__} does not implement lazy_load()" + raise NotImplementedError(msg) + + async def alazy_load(self) -> AsyncIterator[Document]: + """A lazy loader for Documents.""" + iterator = await run_in_executor(None, self.lazy_load) + done = object() + while True: + doc = await run_in_executor(None, next, iterator, done) + if doc is done: + break + yield doc # type: ignore[misc] + + +class BaseBlobParser(ABC): + """Abstract interface for blob parsers. + + A blob parser provides a way to parse raw data stored in a blob into one + or more documents. + + The parser can be composed with blob loaders, making it easy to reuse + a parser independent of how the blob was originally loaded. + """ + + @abstractmethod + def lazy_parse(self, blob: Blob) -> Iterator[Document]: + """Lazy parsing interface. + + Subclasses are required to implement this method. + + Args: + blob: Blob instance + + Returns: + Generator of documents + """ + + def parse(self, blob: Blob) -> list[Document]: + """Eagerly parse the blob into a document or documents. + + This is a convenience method for interactive development environment. + + Production applications should favor the lazy_parse method instead. + + Subclasses should generally not over-ride this parse method. 
+ + Args: + blob: Blob instance + + Returns: + List of documents + """ + return list(self.lazy_parse(blob)) diff --git a/venv/Lib/site-packages/langchain_core/document_loaders/blob_loaders.py b/venv/Lib/site-packages/langchain_core/document_loaders/blob_loaders.py new file mode 100644 index 00000000..6f26106e --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/document_loaders/blob_loaders.py @@ -0,0 +1,39 @@ +"""Schema for Blobs and Blob Loaders. + +The goal is to facilitate decoupling of content loading from content parsing code. + +In addition, content loading code should provide a lazy loading interface by default. +""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING + +# Re-export Blob and PathLike for backwards compatibility +from langchain_core.documents.base import Blob, PathLike + +if TYPE_CHECKING: + from collections.abc import Iterable + + +class BlobLoader(ABC): + """Abstract interface for blob loaders implementation. + + Implementer should be able to load raw content from a storage system according + to some criteria and return the raw content lazily as a stream of blobs. + """ + + @abstractmethod + def yield_blobs( + self, + ) -> Iterable[Blob]: + """A lazy loader for raw data represented by LangChain's Blob object. + + Returns: + A generator over blobs + """ + + +# Re-export Blob and Pathlike for backwards compatibility +__all__ = ["Blob", "BlobLoader", "PathLike"] diff --git a/venv/Lib/site-packages/langchain_core/document_loaders/langsmith.py b/venv/Lib/site-packages/langchain_core/document_loaders/langsmith.py new file mode 100644 index 00000000..259260f7 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/document_loaders/langsmith.py @@ -0,0 +1,134 @@ +"""LangSmith document loader.""" + +import datetime +import json +import uuid +from collections.abc import Iterator, Sequence +from typing import Any, Callable, Optional, Union + +from langsmith import Client as LangSmithClient +from typing_extensions import override + +from langchain_core.document_loaders.base import BaseLoader +from langchain_core.documents import Document + + +class LangSmithLoader(BaseLoader): + """Load LangSmith Dataset examples as Documents. + + Loads the example inputs as the Document page content and places the entire example + into the Document metadata. This allows you to easily create few-shot example + retrievers from the loaded documents. + + .. dropdown:: Lazy load + + .. code-block:: python + + from langchain_core.document_loaders import LangSmithLoader + + loader = LangSmithLoader(dataset_id="...", limit=100) + docs = [] + for doc in loader.lazy_load(): + docs.append(doc) + + .. code-block:: pycon + + # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...] + + .. versionadded:: 0.2.34 + """ # noqa: E501 + + def __init__( + self, + *, + dataset_id: Optional[Union[uuid.UUID, str]] = None, + dataset_name: Optional[str] = None, + example_ids: Optional[Sequence[Union[uuid.UUID, str]]] = None, + as_of: Optional[Union[datetime.datetime, str]] = None, + splits: Optional[Sequence[str]] = None, + inline_s3_urls: bool = True, + offset: int = 0, + limit: Optional[int] = None, + metadata: Optional[dict] = None, + filter: Optional[str] = None, + content_key: str = "", + format_content: Optional[Callable[..., str]] = None, + client: Optional[LangSmithClient] = None, + **client_kwargs: Any, + ) -> None: + """Create a LangSmith loader. + + Args: + dataset_id: The ID of the dataset to filter by. 
Defaults to None.
+            dataset_name: The name of the dataset to filter by. Defaults to None.
+            content_key: The inputs key to set as Document page content. ``"."`` characters
+                are interpreted as nested keys. E.g. ``content_key="first.second"`` will
+                result in
+                ``Document(page_content=format_content(example.inputs["first"]["second"]))``
+            format_content: Function for converting the content extracted from the example
+                inputs into a string. Defaults to JSON-encoding the contents.
+            example_ids: The IDs of the examples to filter by. Defaults to None.
+            as_of: The dataset version tag OR
+                timestamp to retrieve the examples as of.
+                Response examples will only be those that were present at the time
+                of the tagged (or timestamped) version.
+            splits: A list of dataset splits, which are
+                divisions of your dataset such as 'train', 'test', or 'validation'.
+                Returns examples only from the specified splits.
+            inline_s3_urls: Whether to inline S3 URLs. Defaults to True.
+            offset: The offset to start from. Defaults to 0.
+            limit: The maximum number of examples to return.
+            metadata: Metadata to filter by. Defaults to None.
+            filter: A structured filter string to apply to the examples.
+            client: LangSmith Client. If not provided will be initialized from below args.
+            client_kwargs: Keyword args to pass to LangSmith client init. Should only be
+                specified if ``client`` isn't.
+        """  # noqa: E501
+        if client and client_kwargs:
+            msg = "Pass either client or client_kwargs, not both."
+            raise ValueError(msg)
+        self._client = client or LangSmithClient(**client_kwargs)
+        self.content_key = list(content_key.split(".")) if content_key else []
+        self.format_content = format_content or _stringify
+        self.dataset_id = dataset_id
+        self.dataset_name = dataset_name
+        self.example_ids = example_ids
+        self.as_of = as_of
+        self.splits = splits
+        self.inline_s3_urls = inline_s3_urls
+        self.offset = offset
+        self.limit = limit
+        self.metadata = metadata
+        self.filter = filter
+
+    @override
+    def lazy_load(self) -> Iterator[Document]:
+        for example in self._client.list_examples(
+            dataset_id=self.dataset_id,
+            dataset_name=self.dataset_name,
+            example_ids=self.example_ids,
+            as_of=self.as_of,
+            splits=self.splits,
+            inline_s3_urls=self.inline_s3_urls,
+            offset=self.offset,
+            limit=self.limit,
+            metadata=self.metadata,
+            filter=self.filter,
+        ):
+            content: Any = example.inputs
+            for key in self.content_key:
+                content = content[key]
+            content_str = self.format_content(content)
+            metadata = example.dict()
+            # Stringify datetime and UUID types.
+            for k in ("dataset_id", "created_at", "modified_at", "source_run_id", "id"):
+                metadata[k] = str(metadata[k]) if metadata[k] else metadata[k]
+            yield Document(content_str, metadata=metadata)
+
+
+def _stringify(x: Union[str, dict]) -> str:
+    if isinstance(x, str):
+        return x
+    try:
+        return json.dumps(x, indent=2)
+    except Exception:
+        return str(x)
diff --git a/venv/Lib/site-packages/langchain_core/documents/__init__.py b/venv/Lib/site-packages/langchain_core/documents/__init__.py
new file mode 100644
index 00000000..bc79a7a0
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/documents/__init__.py
@@ -0,0 +1,34 @@
+"""Documents module.
+
+**Document** module is a collection of classes that handle documents
+and their transformations.
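+
+A typical entry point (illustrative):
+
+.. code-block:: python
+
+    from langchain_core.documents import Document
+
+    doc = Document(page_content="Hello, world!", metadata={"source": "example"})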
+ +""" + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from .base import Document + from .compressor import BaseDocumentCompressor + from .transformers import BaseDocumentTransformer + +__all__ = ("Document", "BaseDocumentTransformer", "BaseDocumentCompressor") + +_dynamic_imports = { + "Document": "base", + "BaseDocumentCompressor": "compressor", + "BaseDocumentTransformer": "transformers", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/documents/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/documents/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..cf5e45a1 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/documents/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/documents/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/documents/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..517de729 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/documents/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/documents/__pycache__/compressor.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/documents/__pycache__/compressor.cpython-312.pyc new file mode 100644 index 00000000..0a71f295 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/documents/__pycache__/compressor.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/documents/__pycache__/transformers.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/documents/__pycache__/transformers.cpython-312.pyc new file mode 100644 index 00000000..d5a85fdf Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/documents/__pycache__/transformers.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/documents/base.py b/venv/Lib/site-packages/langchain_core/documents/base.py new file mode 100644 index 00000000..fba997f4 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/documents/base.py @@ -0,0 +1,316 @@ +"""Base classes for media and documents.""" + +from __future__ import annotations + +import contextlib +import mimetypes +from io import BufferedReader, BytesIO +from pathlib import Path, PurePath +from typing import TYPE_CHECKING, Any, Literal, Optional, Union, cast + +from pydantic import ConfigDict, Field, field_validator, model_validator + +from langchain_core.load.serializable import Serializable + +if TYPE_CHECKING: + from collections.abc import Generator + +PathLike = Union[str, PurePath] + + +class BaseMedia(Serializable): + """Use to represent media content. + + Media objects can be used to represent raw data, such as text or binary data. + + LangChain Media objects allow associating metadata and an optional identifier + with the content. + + The presence of an ID and metadata make it easier to store, index, and search + over the content in a structured way. + """ + + # The ID field is optional at the moment. + # It will likely become required in a future major release after + # it has been adopted by enough vectorstore implementations. + id: Optional[str] = None + """An optional identifier for the document. 
+ + Ideally this should be unique across the document collection and formatted + as a UUID, but this will not be enforced. + + .. versionadded:: 0.2.11 + """ + + metadata: dict = Field(default_factory=dict) + """Arbitrary metadata associated with the content.""" + + @field_validator("id", mode="before") + def cast_id_to_str(cls, id_value: Any) -> Optional[str]: + """Coerce the id field to a string. + + Args: + id_value: The id value to coerce. + """ + if id_value is not None: + return str(id_value) + return id_value + + +class Blob(BaseMedia): + """Blob represents raw data by either reference or value. + + Provides an interface to materialize the blob in different representations, and + help to decouple the development of data loaders from the downstream parsing of + the raw data. + + Inspired by: https://developer.mozilla.org/en-US/docs/Web/API/Blob + + Example: Initialize a blob from in-memory data + + .. code-block:: python + + from langchain_core.documents import Blob + + blob = Blob.from_data("Hello, world!") + + # Read the blob as a string + print(blob.as_string()) + + # Read the blob as bytes + print(blob.as_bytes()) + + # Read the blob as a byte stream + with blob.as_bytes_io() as f: + print(f.read()) + + Example: Load from memory and specify mime-type and metadata + + .. code-block:: python + + from langchain_core.documents import Blob + + blob = Blob.from_data( + data="Hello, world!", + mime_type="text/plain", + metadata={"source": "https://example.com"} + ) + + Example: Load the blob from a file + + .. code-block:: python + + from langchain_core.documents import Blob + + blob = Blob.from_path("path/to/file.txt") + + # Read the blob as a string + print(blob.as_string()) + + # Read the blob as bytes + print(blob.as_bytes()) + + # Read the blob as a byte stream + with blob.as_bytes_io() as f: + print(f.read()) + """ + + data: Union[bytes, str, None] = None + """Raw data associated with the blob.""" + mimetype: Optional[str] = None + """MimeType not to be confused with a file extension.""" + encoding: str = "utf-8" + """Encoding to use if decoding the bytes into a string. + + Use utf-8 as default encoding, if decoding to string. + """ + path: Optional[PathLike] = None + """Location where the original content was found.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + + @property + def source(self) -> Optional[str]: + """The source location of the blob as string if known otherwise none. + + If a path is associated with the blob, it will default to the path location. + + Unless explicitly set via a metadata field called "source", in which + case that value will be used instead. 
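+
+        A small sketch of the precedence (paths and URLs are placeholders):
+
+        .. code-block:: python
+
+            from langchain_core.documents import Blob
+
+            blob = Blob.from_path("data/report.txt")
+            assert blob.source == "data/report.txt"
+
+            blob = Blob.from_path(
+                "data/report.txt", metadata={"source": "https://example.com"}
+            )
+            assert blob.source == "https://example.com"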
+ """ + if self.metadata and "source" in self.metadata: + return cast("Optional[str]", self.metadata["source"]) + return str(self.path) if self.path else None + + @model_validator(mode="before") + @classmethod + def check_blob_is_valid(cls, values: dict[str, Any]) -> Any: + """Verify that either data or path is provided.""" + if "data" not in values and "path" not in values: + msg = "Either data or path must be provided" + raise ValueError(msg) + return values + + def as_string(self) -> str: + """Read data as a string.""" + if self.data is None and self.path: + return Path(self.path).read_text(encoding=self.encoding) + if isinstance(self.data, bytes): + return self.data.decode(self.encoding) + if isinstance(self.data, str): + return self.data + msg = f"Unable to get string for blob {self}" + raise ValueError(msg) + + def as_bytes(self) -> bytes: + """Read data as bytes.""" + if isinstance(self.data, bytes): + return self.data + if isinstance(self.data, str): + return self.data.encode(self.encoding) + if self.data is None and self.path: + return Path(self.path).read_bytes() + msg = f"Unable to get bytes for blob {self}" + raise ValueError(msg) + + @contextlib.contextmanager + def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]: + """Read data as a byte stream.""" + if isinstance(self.data, bytes): + yield BytesIO(self.data) + elif self.data is None and self.path: + with Path(self.path).open("rb") as f: + yield f + else: + msg = f"Unable to convert blob {self}" + raise NotImplementedError(msg) + + @classmethod + def from_path( + cls, + path: PathLike, + *, + encoding: str = "utf-8", + mime_type: Optional[str] = None, + guess_type: bool = True, + metadata: Optional[dict] = None, + ) -> Blob: + """Load the blob from a path like object. + + Args: + path: path like object to file to be read + encoding: Encoding to use if decoding the bytes into a string + mime_type: if provided, will be set as the mime-type of the data + guess_type: If True, the mimetype will be guessed from the file extension, + if a mime-type was not provided + metadata: Metadata to associate with the blob + + Returns: + Blob instance + """ + if mime_type is None and guess_type: + _mimetype = mimetypes.guess_type(path)[0] if guess_type else None + else: + _mimetype = mime_type + # We do not load the data immediately, instead we treat the blob as a + # reference to the underlying data. + return cls( + data=None, + mimetype=_mimetype, + encoding=encoding, + path=path, + metadata=metadata if metadata is not None else {}, + ) + + @classmethod + def from_data( + cls, + data: Union[str, bytes], + *, + encoding: str = "utf-8", + mime_type: Optional[str] = None, + path: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> Blob: + """Initialize the blob from in-memory data. 
+ + Args: + data: the in-memory data associated with the blob + encoding: Encoding to use if decoding the bytes into a string + mime_type: if provided, will be set as the mime-type of the data + path: if provided, will be set as the source from which the data came + metadata: Metadata to associate with the blob + + Returns: + Blob instance + """ + return cls( + data=data, + mimetype=mime_type, + encoding=encoding, + path=path, + metadata=metadata if metadata is not None else {}, + ) + + def __repr__(self) -> str: + """Define the blob representation.""" + str_repr = f"Blob {id(self)}" + if self.source: + str_repr += f" {self.source}" + return str_repr + + +class Document(BaseMedia): + """Class for storing a piece of text and associated metadata. + + Example: + + .. code-block:: python + + from langchain_core.documents import Document + + document = Document( + page_content="Hello, world!", + metadata={"source": "https://example.com"} + ) + """ + + page_content: str + """String text.""" + type: Literal["Document"] = "Document" + + def __init__(self, page_content: str, **kwargs: Any) -> None: + """Pass page_content in as positional or named arg.""" + # my-py is complaining that page_content is not defined on the base class. + # Here, we're relying on pydantic base class to handle the validation. + super().__init__(page_content=page_content, **kwargs) # type: ignore[call-arg] + + @classmethod + def is_lc_serializable(cls) -> bool: + """Return whether this class is serializable.""" + return True + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Default namespace is ["langchain", "schema", "document"]. + """ + return ["langchain", "schema", "document"] + + def __str__(self) -> str: + """Override __str__ to restrict it to page_content and metadata.""" + # The format matches pydantic format for __str__. + # + # The purpose of this change is to make sure that user code that + # feeds Document objects directly into prompts remains unchanged + # due to the addition of the id field (or any other fields in the future). + # + # This override will likely be removed in the future in favor of + # a more general solution of formatting content directly inside the prompts. + if self.metadata: + return f"page_content='{self.page_content}' metadata={self.metadata}" + return f"page_content='{self.page_content}'" diff --git a/venv/Lib/site-packages/langchain_core/documents/compressor.py b/venv/Lib/site-packages/langchain_core/documents/compressor.py new file mode 100644 index 00000000..707d96cf --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/documents/compressor.py @@ -0,0 +1,71 @@ +"""Document compressor.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Optional + +from pydantic import BaseModel + +from langchain_core.runnables import run_in_executor + +if TYPE_CHECKING: + from collections.abc import Sequence + + from langchain_core.callbacks import Callbacks + from langchain_core.documents import Document + + +class BaseDocumentCompressor(BaseModel, ABC): + """Base class for document compressors. + + This abstraction is primarily used for + post-processing of retrieved documents. + + Documents matching a given query are first retrieved. + Then the list of documents can be further processed. + + For example, one could re-rank the retrieved documents + using an LLM. + + **Note** users should favor using a RunnableLambda + instead of sub-classing from this interface. 
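+
+    Illustrative sketch (hypothetical subclass, for exposition only): a
+    trivial compressor that keeps just the first ``k`` documents::
+
+        class TopKCompressor(BaseDocumentCompressor):
+            k: int = 3
+
+            def compress_documents(self, documents, query, callbacks=None):
+                # Keep only the first k retrieved documents.
+                return list(documents)[: self.k]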
+ """ + + @abstractmethod + def compress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Compress retrieved documents given the query context. + + Args: + documents: The retrieved documents. + query: The query context. + callbacks: Optional callbacks to run during compression. + + Returns: + The compressed documents. + """ + + async def acompress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + """Async compress retrieved documents given the query context. + + Args: + documents: The retrieved documents. + query: The query context. + callbacks: Optional callbacks to run during compression. + + Returns: + The compressed documents. + """ + return await run_in_executor( + None, self.compress_documents, documents, query, callbacks + ) diff --git a/venv/Lib/site-packages/langchain_core/documents/transformers.py b/venv/Lib/site-packages/langchain_core/documents/transformers.py new file mode 100644 index 00000000..171a98b4 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/documents/transformers.py @@ -0,0 +1,78 @@ +"""Document transformers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any + +from langchain_core.runnables.config import run_in_executor + +if TYPE_CHECKING: + from collections.abc import Sequence + + from langchain_core.documents import Document + + +class BaseDocumentTransformer(ABC): + """Abstract base class for document transformation. + + A document transformation takes a sequence of Documents and returns a + sequence of transformed Documents. + + Example: + .. code-block:: python + + class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel): + embeddings: Embeddings + similarity_fn: Callable = cosine_similarity + similarity_threshold: float = 0.95 + + class Config: + arbitrary_types_allowed = True + + def transform_documents( + self, documents: Sequence[Document], **kwargs: Any + ) -> Sequence[Document]: + stateful_documents = get_stateful_documents(documents) + embedded_documents = _get_embeddings_from_stateful_docs( + self.embeddings, stateful_documents + ) + included_idxs = _filter_similar_embeddings( + embedded_documents, self.similarity_fn, self.similarity_threshold + ) + return [stateful_documents[i] for i in sorted(included_idxs)] + + async def atransform_documents( + self, documents: Sequence[Document], **kwargs: Any + ) -> Sequence[Document]: + raise NotImplementedError + + """ # noqa: E501 + + @abstractmethod + def transform_documents( + self, documents: Sequence[Document], **kwargs: Any + ) -> Sequence[Document]: + """Transform a list of documents. + + Args: + documents: A sequence of Documents to be transformed. + + Returns: + A sequence of transformed Documents. + """ + + async def atransform_documents( + self, documents: Sequence[Document], **kwargs: Any + ) -> Sequence[Document]: + """Asynchronously transform a list of documents. + + Args: + documents: A sequence of Documents to be transformed. + + Returns: + A sequence of transformed Documents. 
+ """ + return await run_in_executor( + None, self.transform_documents, documents, **kwargs + ) diff --git a/venv/Lib/site-packages/langchain_core/embeddings/__init__.py b/venv/Lib/site-packages/langchain_core/embeddings/__init__.py new file mode 100644 index 00000000..66acae12 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/embeddings/__init__.py @@ -0,0 +1,31 @@ +"""Embeddings.""" + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.embeddings.embeddings import Embeddings + from langchain_core.embeddings.fake import ( + DeterministicFakeEmbedding, + FakeEmbeddings, + ) + +__all__ = ("DeterministicFakeEmbedding", "Embeddings", "FakeEmbeddings") + +_dynamic_imports = { + "Embeddings": "embeddings", + "DeterministicFakeEmbedding": "fake", + "FakeEmbeddings": "fake", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/embeddings/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/embeddings/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..f58f7c00 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/embeddings/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/embeddings/__pycache__/embeddings.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/embeddings/__pycache__/embeddings.cpython-312.pyc new file mode 100644 index 00000000..28d991ea Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/embeddings/__pycache__/embeddings.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/embeddings/__pycache__/fake.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/embeddings/__pycache__/fake.cpython-312.pyc new file mode 100644 index 00000000..accd79b4 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/embeddings/__pycache__/fake.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/embeddings/embeddings.py b/venv/Lib/site-packages/langchain_core/embeddings/embeddings.py new file mode 100644 index 00000000..39c0eb42 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/embeddings/embeddings.py @@ -0,0 +1,78 @@ +"""**Embeddings** interface.""" + +from abc import ABC, abstractmethod + +from langchain_core.runnables.config import run_in_executor + + +class Embeddings(ABC): + """Interface for embedding models. + + This is an interface meant for implementing text embedding models. + + Text embedding models are used to map text to a vector (a point in n-dimensional + space). + + Texts that are similar will usually be mapped to points that are close to each + other in this space. The exact details of what's considered "similar" and how + "distance" is measured in this space are dependent on the specific embedding model. + + This abstraction contains a method for embedding a list of documents and a method + for embedding a query text. The embedding of a query text is expected to be a single + vector, while the embedding of a list of documents is expected to be a list of + vectors. + + Usually the query embedding is identical to the document embedding, but the + abstraction allows treating them independently. 
+ + In addition to the synchronous methods, this interface also provides asynchronous + versions of the methods. + + By default, the asynchronous methods are implemented using the synchronous methods; + however, implementations may choose to override the asynchronous methods with + an async native implementation for performance reasons. + """ + + @abstractmethod + def embed_documents(self, texts: list[str]) -> list[list[float]]: + """Embed search docs. + + Args: + texts: List of text to embed. + + Returns: + List of embeddings. + """ + + @abstractmethod + def embed_query(self, text: str) -> list[float]: + """Embed query text. + + Args: + text: Text to embed. + + Returns: + Embedding. + """ + + async def aembed_documents(self, texts: list[str]) -> list[list[float]]: + """Asynchronous Embed search docs. + + Args: + texts: List of text to embed. + + Returns: + List of embeddings. + """ + return await run_in_executor(None, self.embed_documents, texts) + + async def aembed_query(self, text: str) -> list[float]: + """Asynchronous Embed query text. + + Args: + text: Text to embed. + + Returns: + Embedding. + """ + return await run_in_executor(None, self.embed_query, text) diff --git a/venv/Lib/site-packages/langchain_core/embeddings/fake.py b/venv/Lib/site-packages/langchain_core/embeddings/fake.py new file mode 100644 index 00000000..d788416d --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/embeddings/fake.py @@ -0,0 +1,128 @@ +"""Module contains a few fake embedding models for testing purposes.""" + +# Please do not add additional fake embedding model implementations here. +import hashlib + +from pydantic import BaseModel +from typing_extensions import override + +from langchain_core.embeddings import Embeddings + + +class FakeEmbeddings(Embeddings, BaseModel): + """Fake embedding model for unit testing purposes. + + This embedding model creates embeddings by sampling from a normal distribution. + + Do not use this outside of testing, as it is not a real embedding model. + + Instantiate: + .. code-block:: python + + from langchain_core.embeddings import FakeEmbeddings + embed = FakeEmbeddings(size=100) + + Embed single text: + .. code-block:: python + + input_text = "The meaning of life is 42" + vector = embed.embed_query(input_text) + print(vector[:3]) + + .. code-block:: python + + [-0.700234640213188, -0.581266257710429, -1.1328482266445354] + + Embed multiple texts: + .. code-block:: python + + input_texts = ["Document 1...", "Document 2..."] + vectors = embed.embed_documents(input_texts) + print(len(vectors)) + # The first 3 coordinates for the first vector + print(vectors[0][:3]) + + .. code-block:: python + + 2 + [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257] + """ + + size: int + """The size of the embedding vector.""" + + def _get_embedding(self) -> list[float]: + import numpy as np + + return list(np.random.default_rng().normal(size=self.size)) + + @override + def embed_documents(self, texts: list[str]) -> list[list[float]]: + return [self._get_embedding() for _ in texts] + + @override + def embed_query(self, text: str) -> list[float]: + return self._get_embedding() + + +class DeterministicFakeEmbedding(Embeddings, BaseModel): + """Deterministic fake embedding model for unit testing purposes. + + This embedding model creates embeddings by sampling from a normal distribution + with a seed based on the hash of the text. + + Do not use this outside of testing, as it is not a real embedding model. + + Instantiate: + .. 
code-block:: python + + from langchain_core.embeddings import DeterministicFakeEmbedding + embed = DeterministicFakeEmbedding(size=100) + + Embed single text: + .. code-block:: python + + input_text = "The meaning of life is 42" + vector = embed.embed_query(input_text) + print(vector[:3]) + + .. code-block:: python + + [-0.700234640213188, -0.581266257710429, -1.1328482266445354] + + Embed multiple texts: + .. code-block:: python + + input_texts = ["Document 1...", "Document 2..."] + vectors = embed.embed_documents(input_texts) + print(len(vectors)) + # The first 3 coordinates for the first vector + print(vectors[0][:3]) + + .. code-block:: python + + 2 + [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257] + """ + + size: int + """The size of the embedding vector.""" + + def _get_embedding(self, seed: int) -> list[float]: + import numpy as np + + # set the seed for the random generator + rng = np.random.default_rng(seed) + return list(rng.normal(size=self.size)) + + def _get_seed(self, text: str) -> int: + """Get a seed for the random generator, using the hash of the text.""" + return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8 + + @override + def embed_documents(self, texts: list[str]) -> list[list[float]]: + return [self._get_embedding(seed=self._get_seed(_)) for _ in texts] + + @override + def embed_query(self, text: str) -> list[float]: + return self._get_embedding(seed=self._get_seed(text)) diff --git a/venv/Lib/site-packages/langchain_core/env.py b/venv/Lib/site-packages/langchain_core/env.py new file mode 100644 index 00000000..fd619bc1 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/env.py @@ -0,0 +1,23 @@ +"""Utilities for getting information about the runtime environment.""" + +import platform +from functools import lru_cache + + +@lru_cache(maxsize=1) +def get_runtime_environment() -> dict: + """Get information about the LangChain runtime environment. + + Returns: + A dictionary with information about the runtime environment. + """ + # Lazy import to avoid circular imports + from langchain_core import __version__ + + return { + "library_version": __version__, + "library": "langchain-core", + "platform": platform.platform(), + "runtime": "python", + "runtime_version": platform.python_version(), + } diff --git a/venv/Lib/site-packages/langchain_core/example_selectors/__init__.py b/venv/Lib/site-packages/langchain_core/example_selectors/__init__.py new file mode 100644 index 00000000..db079c9f --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/example_selectors/__init__.py @@ -0,0 +1,47 @@ +"""Example selectors. + +**Example selector** implements logic for selecting examples to include them in prompts. +This allows us to select examples that are most relevant to the input. 
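+
+Illustrative usage (``selector`` stands for any concrete implementation)::
+
+    selector.add_example({"input": "2 + 2", "output": "4"})
+    examples = selector.select_examples({"input": "3 + 5"})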
+""" + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.example_selectors.base import BaseExampleSelector + from langchain_core.example_selectors.length_based import ( + LengthBasedExampleSelector, + ) + from langchain_core.example_selectors.semantic_similarity import ( + MaxMarginalRelevanceExampleSelector, + SemanticSimilarityExampleSelector, + sorted_values, + ) + +__all__ = ( + "BaseExampleSelector", + "LengthBasedExampleSelector", + "MaxMarginalRelevanceExampleSelector", + "SemanticSimilarityExampleSelector", + "sorted_values", +) + +_dynamic_imports = { + "BaseExampleSelector": "base", + "LengthBasedExampleSelector": "length_based", + "MaxMarginalRelevanceExampleSelector": "semantic_similarity", + "SemanticSimilarityExampleSelector": "semantic_similarity", + "sorted_values": "semantic_similarity", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..d131c362 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..ddf2a6b3 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/length_based.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/length_based.cpython-312.pyc new file mode 100644 index 00000000..1cf4b660 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/length_based.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/semantic_similarity.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/semantic_similarity.cpython-312.pyc new file mode 100644 index 00000000..75e06f74 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/example_selectors/__pycache__/semantic_similarity.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/example_selectors/base.py b/venv/Lib/site-packages/langchain_core/example_selectors/base.py new file mode 100644 index 00000000..e344e805 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/example_selectors/base.py @@ -0,0 +1,46 @@ +"""Interface for selecting examples to include in prompts.""" + +from abc import ABC, abstractmethod +from typing import Any + +from langchain_core.runnables import run_in_executor + + +class BaseExampleSelector(ABC): + """Interface for selecting examples to include in prompts.""" + + @abstractmethod + def add_example(self, example: dict[str, str]) -> Any: + """Add new example to store. + + Args: + example: A dictionary with keys as input variables + and values as their values. 
+ """ + + async def aadd_example(self, example: dict[str, str]) -> Any: + """Async add new example to store. + + Args: + example: A dictionary with keys as input variables + and values as their values. + """ + return await run_in_executor(None, self.add_example, example) + + @abstractmethod + def select_examples(self, input_variables: dict[str, str]) -> list[dict]: + """Select which examples to use based on the inputs. + + Args: + input_variables: A dictionary with keys as input variables + and values as their values. + """ + + async def aselect_examples(self, input_variables: dict[str, str]) -> list[dict]: + """Async select which examples to use based on the inputs. + + Args: + input_variables: A dictionary with keys as input variables + and values as their values. + """ + return await run_in_executor(None, self.select_examples, input_variables) diff --git a/venv/Lib/site-packages/langchain_core/example_selectors/length_based.py b/venv/Lib/site-packages/langchain_core/example_selectors/length_based.py new file mode 100644 index 00000000..ec9566d7 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/example_selectors/length_based.py @@ -0,0 +1,97 @@ +"""Select examples based on length.""" + +import re +from typing import Callable + +from pydantic import BaseModel, Field, model_validator +from typing_extensions import Self + +from langchain_core.example_selectors.base import BaseExampleSelector +from langchain_core.prompts.prompt import PromptTemplate + + +def _get_length_based(text: str) -> int: + return len(re.split("\n| ", text)) + + +class LengthBasedExampleSelector(BaseExampleSelector, BaseModel): + """Select examples based on length.""" + + examples: list[dict] + """A list of the examples that the prompt template expects.""" + + example_prompt: PromptTemplate + """Prompt template used to format the examples.""" + + get_text_length: Callable[[str], int] = _get_length_based + """Function to measure prompt length. Defaults to word count.""" + + max_length: int = 2048 + """Max length for the prompt, beyond which examples are cut.""" + + example_text_lengths: list[int] = Field(default_factory=list) # :meta private: + """Length of each example.""" + + def add_example(self, example: dict[str, str]) -> None: + """Add new example to list. + + Args: + example: A dictionary with keys as input variables + and values as their values. + """ + self.examples.append(example) + string_example = self.example_prompt.format(**example) + self.example_text_lengths.append(self.get_text_length(string_example)) + + async def aadd_example(self, example: dict[str, str]) -> None: + """Async add new example to list. + + Args: + example: A dictionary with keys as input variables + and values as their values. + """ + self.add_example(example) + + @model_validator(mode="after") + def post_init(self) -> Self: + """Validate that the examples are formatted correctly.""" + if self.example_text_lengths: + return self + string_examples = [self.example_prompt.format(**eg) for eg in self.examples] + self.example_text_lengths = [self.get_text_length(eg) for eg in string_examples] + return self + + def select_examples(self, input_variables: dict[str, str]) -> list[dict]: + """Select which examples to use based on the input lengths. + + Args: + input_variables: A dictionary with keys as input variables + and values as their values. + + Returns: + A list of examples to include in the prompt. 
+ """ + inputs = " ".join(input_variables.values()) + remaining_length = self.max_length - self.get_text_length(inputs) + i = 0 + examples = [] + while remaining_length > 0 and i < len(self.examples): + new_length = remaining_length - self.example_text_lengths[i] + if new_length < 0: + break + examples.append(self.examples[i]) + remaining_length = new_length + i += 1 + return examples + + async def aselect_examples(self, input_variables: dict[str, str]) -> list[dict]: + """Async select which examples to use based on the input lengths. + + Args: + input_variables: A dictionary with keys as input variables + and values as their values. + + Returns: + A list of examples to include in the prompt. + """ + return self.select_examples(input_variables) diff --git a/venv/Lib/site-packages/langchain_core/example_selectors/semantic_similarity.py b/venv/Lib/site-packages/langchain_core/example_selectors/semantic_similarity.py new file mode 100644 index 00000000..b0362728 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/example_selectors/semantic_similarity.py @@ -0,0 +1,362 @@ +"""Example selector that selects examples based on SemanticSimilarity.""" + +from __future__ import annotations + +from abc import ABC +from typing import TYPE_CHECKING, Any, Optional + +from pydantic import BaseModel, ConfigDict + +from langchain_core.example_selectors.base import BaseExampleSelector +from langchain_core.vectorstores import VectorStore + +if TYPE_CHECKING: + from langchain_core.documents import Document + from langchain_core.embeddings import Embeddings + + +def sorted_values(values: dict[str, str]) -> list[Any]: + """Return a list of values in dict sorted by key. + + Args: + values: A dictionary with keys as input variables + and values as their values. + + Returns: + A list of values in dict sorted by key. + """ + return [values[val] for val in sorted(values)] + + +class _VectorStoreExampleSelector(BaseExampleSelector, BaseModel, ABC): + """Example selector that selects examples based on SemanticSimilarity.""" + + vectorstore: VectorStore + """VectorStore that contains information about examples.""" + k: int = 4 + """Number of examples to select.""" + example_keys: Optional[list[str]] = None + """Optional keys to filter examples to.""" + input_keys: Optional[list[str]] = None + """Optional keys to filter input to. If provided, the search is based on + the input variables instead of all variables.""" + vectorstore_kwargs: Optional[dict[str, Any]] = None + """Extra arguments passed to similarity_search function of the vectorstore.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @staticmethod + def _example_to_text( + example: dict[str, str], input_keys: Optional[list[str]] + ) -> str: + if input_keys: + return " ".join(sorted_values({key: example[key] for key in input_keys})) + return " ".join(sorted_values(example)) + + def _documents_to_examples(self, documents: list[Document]) -> list[dict]: + # Get the examples from the metadata. + # This assumes that examples are stored in metadata. + examples = [dict(e.metadata) for e in documents] + # If example keys are provided, filter examples to those keys. + if self.example_keys: + examples = [{k: eg[k] for k in self.example_keys} for eg in examples] + return examples + + def add_example(self, example: dict[str, str]) -> str: + """Add a new example to vectorstore. + + Args: + example: A dictionary with keys as input variables + and values as their values. + + Returns: + The ID of the added example. 
+ """ + ids = self.vectorstore.add_texts( + [self._example_to_text(example, self.input_keys)], metadatas=[example] + ) + return ids[0] + + async def aadd_example(self, example: dict[str, str]) -> str: + """Async add new example to vectorstore. + + Args: + example: A dictionary with keys as input variables + and values as their values. + + Returns: + The ID of the added example. + """ + ids = await self.vectorstore.aadd_texts( + [self._example_to_text(example, self.input_keys)], metadatas=[example] + ) + return ids[0] + + +class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector): + """Select examples based on semantic similarity.""" + + def select_examples(self, input_variables: dict[str, str]) -> list[dict]: + """Select examples based on semantic similarity. + + Args: + input_variables: The input variables to use for search. + + Returns: + The selected examples. + """ + # Get the docs with the highest similarity. + vectorstore_kwargs = self.vectorstore_kwargs or {} + example_docs = self.vectorstore.similarity_search( + self._example_to_text(input_variables, self.input_keys), + k=self.k, + **vectorstore_kwargs, + ) + return self._documents_to_examples(example_docs) + + async def aselect_examples(self, input_variables: dict[str, str]) -> list[dict]: + """Asynchronously select examples based on semantic similarity. + + Args: + input_variables: The input variables to use for search. + + Returns: + The selected examples. + """ + # Get the docs with the highest similarity. + vectorstore_kwargs = self.vectorstore_kwargs or {} + example_docs = await self.vectorstore.asimilarity_search( + self._example_to_text(input_variables, self.input_keys), + k=self.k, + **vectorstore_kwargs, + ) + return self._documents_to_examples(example_docs) + + @classmethod + def from_examples( + cls, + examples: list[dict], + embeddings: Embeddings, + vectorstore_cls: type[VectorStore], + k: int = 4, + input_keys: Optional[list[str]] = None, + *, + example_keys: Optional[list[str]] = None, + vectorstore_kwargs: Optional[dict] = None, + **vectorstore_cls_kwargs: Any, + ) -> SemanticSimilarityExampleSelector: + """Create k-shot example selector using example list and embeddings. + + Reshuffles examples dynamically based on query similarity. + + Args: + examples: List of examples to use in the prompt. + embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings(). + vectorstore_cls: A vector store DB interface class, e.g. FAISS. + k: Number of examples to select. Default is 4. + input_keys: If provided, the search is based on the input variables + instead of all variables. + example_keys: If provided, keys to filter examples to. + vectorstore_kwargs: Extra arguments passed to similarity_search function + of the vectorstore. + vectorstore_cls_kwargs: optional kwargs containing url for vector store + + Returns: + The ExampleSelector instantiated, backed by a vector store. 
+ """ + string_examples = [cls._example_to_text(eg, input_keys) for eg in examples] + vectorstore = vectorstore_cls.from_texts( + string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs + ) + return cls( + vectorstore=vectorstore, + k=k, + input_keys=input_keys, + example_keys=example_keys, + vectorstore_kwargs=vectorstore_kwargs, + ) + + @classmethod + async def afrom_examples( + cls, + examples: list[dict], + embeddings: Embeddings, + vectorstore_cls: type[VectorStore], + k: int = 4, + input_keys: Optional[list[str]] = None, + *, + example_keys: Optional[list[str]] = None, + vectorstore_kwargs: Optional[dict] = None, + **vectorstore_cls_kwargs: Any, + ) -> SemanticSimilarityExampleSelector: + """Async create k-shot example selector using example list and embeddings. + + Reshuffles examples dynamically based on query similarity. + + Args: + examples: List of examples to use in the prompt. + embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings(). + vectorstore_cls: A vector store DB interface class, e.g. FAISS. + k: Number of examples to select. Default is 4. + input_keys: If provided, the search is based on the input variables + instead of all variables. + example_keys: If provided, keys to filter examples to. + vectorstore_kwargs: Extra arguments passed to similarity_search function + of the vectorstore. + vectorstore_cls_kwargs: optional kwargs containing url for vector store + + Returns: + The ExampleSelector instantiated, backed by a vector store. + """ + string_examples = [cls._example_to_text(eg, input_keys) for eg in examples] + vectorstore = await vectorstore_cls.afrom_texts( + string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs + ) + return cls( + vectorstore=vectorstore, + k=k, + input_keys=input_keys, + example_keys=example_keys, + vectorstore_kwargs=vectorstore_kwargs, + ) + + +class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector): + """Select examples based on Max Marginal Relevance. + + This was shown to improve performance in this paper: + https://arxiv.org/pdf/2211.13892.pdf + """ + + fetch_k: int = 20 + """Number of examples to fetch to rerank.""" + + def select_examples(self, input_variables: dict[str, str]) -> list[dict]: + """Select examples based on Max Marginal Relevance. + + Args: + input_variables: The input variables to use for search. + + Returns: + The selected examples. + """ + example_docs = self.vectorstore.max_marginal_relevance_search( + self._example_to_text(input_variables, self.input_keys), + k=self.k, + fetch_k=self.fetch_k, + ) + return self._documents_to_examples(example_docs) + + async def aselect_examples(self, input_variables: dict[str, str]) -> list[dict]: + """Asynchronously select examples based on Max Marginal Relevance. + + Args: + input_variables: The input variables to use for search. + + Returns: + The selected examples. 
+ """ + example_docs = await self.vectorstore.amax_marginal_relevance_search( + self._example_to_text(input_variables, self.input_keys), + k=self.k, + fetch_k=self.fetch_k, + ) + return self._documents_to_examples(example_docs) + + @classmethod + def from_examples( + cls, + examples: list[dict], + embeddings: Embeddings, + vectorstore_cls: type[VectorStore], + k: int = 4, + input_keys: Optional[list[str]] = None, + fetch_k: int = 20, + example_keys: Optional[list[str]] = None, + vectorstore_kwargs: Optional[dict] = None, + **vectorstore_cls_kwargs: Any, + ) -> MaxMarginalRelevanceExampleSelector: + """Create k-shot example selector using example list and embeddings. + + Reshuffles examples dynamically based on Max Marginal Relevance. + + Args: + examples: List of examples to use in the prompt. + embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings(). + vectorstore_cls: A vector store DB interface class, e.g. FAISS. + k: Number of examples to select. Default is 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + Default is 20. + input_keys: If provided, the search is based on the input variables + instead of all variables. + example_keys: If provided, keys to filter examples to. + vectorstore_kwargs: Extra arguments passed to similarity_search function + of the vectorstore. + vectorstore_cls_kwargs: optional kwargs containing url for vector store + + Returns: + The ExampleSelector instantiated, backed by a vector store. + """ + string_examples = [cls._example_to_text(eg, input_keys) for eg in examples] + vectorstore = vectorstore_cls.from_texts( + string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs + ) + return cls( + vectorstore=vectorstore, + k=k, + fetch_k=fetch_k, + input_keys=input_keys, + example_keys=example_keys, + vectorstore_kwargs=vectorstore_kwargs, + ) + + @classmethod + async def afrom_examples( + cls, + examples: list[dict], + embeddings: Embeddings, + vectorstore_cls: type[VectorStore], + *, + k: int = 4, + input_keys: Optional[list[str]] = None, + fetch_k: int = 20, + example_keys: Optional[list[str]] = None, + vectorstore_kwargs: Optional[dict] = None, + **vectorstore_cls_kwargs: Any, + ) -> MaxMarginalRelevanceExampleSelector: + """Create k-shot example selector using example list and embeddings. + + Reshuffles examples dynamically based on Max Marginal Relevance. + + Args: + examples: List of examples to use in the prompt. + embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings(). + vectorstore_cls: A vector store DB interface class, e.g. FAISS. + k: Number of examples to select. Default is 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + Default is 20. + input_keys: If provided, the search is based on the input variables + instead of all variables. + example_keys: If provided, keys to filter examples to. + vectorstore_kwargs: Extra arguments passed to similarity_search function + of the vectorstore. + vectorstore_cls_kwargs: optional kwargs containing url for vector store + + Returns: + The ExampleSelector instantiated, backed by a vector store. 
+ """ + string_examples = [cls._example_to_text(eg, input_keys) for eg in examples] + vectorstore = await vectorstore_cls.afrom_texts( + string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs + ) + return cls( + vectorstore=vectorstore, + k=k, + fetch_k=fetch_k, + input_keys=input_keys, + example_keys=example_keys, + vectorstore_kwargs=vectorstore_kwargs, + ) diff --git a/venv/Lib/site-packages/langchain_core/exceptions.py b/venv/Lib/site-packages/langchain_core/exceptions.py new file mode 100644 index 00000000..cde6d6ff --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/exceptions.py @@ -0,0 +1,85 @@ +"""Custom **exceptions** for LangChain.""" + +from enum import Enum +from typing import Any, Optional + + +class LangChainException(Exception): # noqa: N818 + """General LangChain exception.""" + + +class TracerException(LangChainException): + """Base class for exceptions in tracers module.""" + + +class OutputParserException(ValueError, LangChainException): # noqa: N818 + """Exception that output parsers should raise to signify a parsing error. + + This exists to differentiate parsing errors from other code or execution errors + that also may arise inside the output parser. OutputParserExceptions will be + available to catch and handle in ways to fix the parsing error, while other + errors will be raised. + """ + + def __init__( + self, + error: Any, + observation: Optional[str] = None, + llm_output: Optional[str] = None, + send_to_llm: bool = False, # noqa: FBT001,FBT002 + ): + """Create an OutputParserException. + + Args: + error: The error that's being re-raised or an error message. + observation: String explanation of error which can be passed to a + model to try and remediate the issue. Defaults to None. + llm_output: String model output which is error-ing. + Defaults to None. + send_to_llm: Whether to send the observation and llm_output back to an Agent + after an OutputParserException has been raised. + This gives the underlying model driving the agent the context that the + previous output was improperly structured, in the hopes that it will + update the output to the correct format. + Defaults to False. + """ + if isinstance(error, str): + error = create_message( + message=error, error_code=ErrorCode.OUTPUT_PARSING_FAILURE + ) + super().__init__(error) + if send_to_llm and (observation is None or llm_output is None): + msg = ( + "Arguments 'observation' & 'llm_output'" + " are required if 'send_to_llm' is True" + ) + raise ValueError(msg) + self.observation = observation + self.llm_output = llm_output + self.send_to_llm = send_to_llm + + +class ErrorCode(Enum): + """Error codes.""" + + INVALID_PROMPT_INPUT = "INVALID_PROMPT_INPUT" + INVALID_TOOL_RESULTS = "INVALID_TOOL_RESULTS" + MESSAGE_COERCION_FAILURE = "MESSAGE_COERCION_FAILURE" + MODEL_AUTHENTICATION = "MODEL_AUTHENTICATION" + MODEL_NOT_FOUND = "MODEL_NOT_FOUND" + MODEL_RATE_LIMIT = "MODEL_RATE_LIMIT" + OUTPUT_PARSING_FAILURE = "OUTPUT_PARSING_FAILURE" + + +def create_message(*, message: str, error_code: ErrorCode) -> str: + """Create a message with a link to the LangChain troubleshooting guide. + + Args: + message: The message to display. + error_code: The error code to display. 
+ """ + return ( + f"{message}\n" + "For troubleshooting, visit: https://python.langchain.com/docs/" + f"troubleshooting/errors/{error_code.value} " + ) diff --git a/venv/Lib/site-packages/langchain_core/globals.py b/venv/Lib/site-packages/langchain_core/globals.py new file mode 100644 index 00000000..77375a2d --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/globals.py @@ -0,0 +1,231 @@ +"""Global values and configuration that apply to all of LangChain.""" + +import warnings +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from langchain_core.caches import BaseCache + + +# DO NOT USE THESE VALUES DIRECTLY! +# Use them only via `get_()` and `set_()` below, +# or else your code may behave unexpectedly with other uses of these global settings: +# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 +_verbose: bool = False +_debug: bool = False +_llm_cache: Optional["BaseCache"] = None + + +def set_verbose(value: bool) -> None: # noqa: FBT001 + """Set a new value for the `verbose` global setting. + + Args: + value: The new value for the `verbose` global setting. + """ + try: + import langchain # type: ignore[import-not-found] + + # We're about to run some deprecated code, don't report warnings from it. + # The user called the correct (non-deprecated) code path and shouldn't get + # warnings. + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=( + "Importing verbose from langchain root module " + "is no longer supported" + ), + ) + # N.B.: This is a workaround for an unfortunate quirk of Python's + # module-level `__getattr__()` implementation: + # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 + # + # Remove it once `langchain.verbose` is no longer supported, and once all + # users have migrated to using `set_verbose()` here. + langchain.verbose = value + except ImportError: + pass + + global _verbose # noqa: PLW0603 + _verbose = value + + +def get_verbose() -> bool: + """Get the value of the `verbose` global setting. + + Returns: + The value of the `verbose` global setting. + """ + try: + import langchain + + # We're about to run some deprecated code, don't report warnings from it. + # The user called the correct (non-deprecated) code path and shouldn't get + # warnings. + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=( + ".*Importing verbose from langchain root module " + "is no longer supported" + ), + ) + # N.B.: This is a workaround for an unfortunate quirk of Python's + # module-level `__getattr__()` implementation: + # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 + # + # Remove it once `langchain.verbose` is no longer supported, and once all + # users have migrated to using `set_verbose()` here. + # + # In the meantime, the `verbose` setting is considered True if either the + # old or the new value are True. This accommodates users who haven't + # migrated to using `set_verbose()` yet. Those users are getting + # deprecation warnings directing them to use `set_verbose()` when they + # import `langchain.verbose`. + old_verbose = langchain.verbose + except ImportError: + old_verbose = False + + return _verbose or old_verbose + + +def set_debug(value: bool) -> None: # noqa: FBT001 + """Set a new value for the `debug` global setting. + + Args: + value: The new value for the `debug` global setting. + """ + try: + import langchain + + # We're about to run some deprecated code, don't report warnings from it. 
+ # The user called the correct (non-deprecated) code path and shouldn't get + # warnings. + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message="Importing debug from langchain root module " + "is no longer supported", + ) + # N.B.: This is a workaround for an unfortunate quirk of Python's + # module-level `__getattr__()` implementation: + # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 + # + # Remove it once `langchain.debug` is no longer supported, and once all + # users have migrated to using `set_debug()` here. + langchain.debug = value + except ImportError: + pass + + global _debug # noqa: PLW0603 + _debug = value + + +def get_debug() -> bool: + """Get the value of the `debug` global setting. + + Returns: + The value of the `debug` global setting. + """ + try: + import langchain + + # We're about to run some deprecated code, don't report warnings from it. + # The user called the correct (non-deprecated) code path and shouldn't get + # warnings. + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message="Importing debug from langchain root module " + "is no longer supported", + ) + # N.B.: This is a workaround for an unfortunate quirk of Python's + # module-level `__getattr__()` implementation: + # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 + # + # Remove it once `langchain.debug` is no longer supported, and once all + # users have migrated to using `set_debug()` here. + # + # In the meantime, the `debug` setting is considered True if either the old + # or the new value are True. This accommodates users who haven't migrated + # to using `set_debug()` yet. Those users are getting deprecation warnings + # directing them to use `set_debug()` when they import `langchain.debug`. + old_debug = langchain.debug + except ImportError: + old_debug = False + + return _debug or old_debug + + +def set_llm_cache(value: Optional["BaseCache"]) -> None: + """Set a new LLM cache, overwriting the previous value, if any. + + Args: + value: The new LLM cache to use. If `None`, the LLM cache is disabled. + """ + try: + import langchain + + # We're about to run some deprecated code, don't report warnings from it. + # The user called the correct (non-deprecated) code path and shouldn't get + # warnings. + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=( + "Importing llm_cache from langchain root module " + "is no longer supported" + ), + ) + # N.B.: This is a workaround for an unfortunate quirk of Python's + # module-level `__getattr__()` implementation: + # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 + # + # Remove it once `langchain.llm_cache` is no longer supported, and + # once all users have migrated to using `set_llm_cache()` here. + langchain.llm_cache = value + except ImportError: + pass + + global _llm_cache # noqa: PLW0603 + _llm_cache = value + + +def get_llm_cache() -> "BaseCache": + """Get the value of the `llm_cache` global setting. + + Returns: + The value of the `llm_cache` global setting. + """ + try: + import langchain + + # We're about to run some deprecated code, don't report warnings from it. + # The user called the correct (non-deprecated) code path and shouldn't get + # warnings. 
+ with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=( + "Importing llm_cache from langchain root module " + "is no longer supported" + ), + ) + # N.B.: This is a workaround for an unfortunate quirk of Python's + # module-level `__getattr__()` implementation: + # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004 + # + # Remove it once `langchain.llm_cache` is no longer supported, and + # once all users have migrated to using `set_llm_cache()` here. + # + # In the meantime, the `llm_cache` setting returns whichever of + # its two backing sources is truthy (not `None` and non-empty), + # or the old value if both are falsy. This accommodates users + # who haven't migrated to using `set_llm_cache()` yet. + # Those users are getting deprecation warnings directing them + # to use `set_llm_cache()` when they import `langchain.llm_cache`. + old_llm_cache = langchain.llm_cache + except ImportError: + old_llm_cache = None + + return _llm_cache or old_llm_cache diff --git a/venv/Lib/site-packages/langchain_core/indexing/__init__.py b/venv/Lib/site-packages/langchain_core/indexing/__init__.py new file mode 100644 index 00000000..2a64cc25 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/indexing/__init__.py @@ -0,0 +1,53 @@ +"""Code to help indexing data into a vectorstore. + +This package contains helper logic to help deal with indexing data into +a vectorstore while avoiding duplicated content and over-writing content +if it's unchanged. +""" + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.indexing.api import IndexingResult, aindex, index + from langchain_core.indexing.base import ( + DeleteResponse, + DocumentIndex, + InMemoryRecordManager, + RecordManager, + UpsertResponse, + ) + +__all__ = ( + "aindex", + "DeleteResponse", + "DocumentIndex", + "index", + "IndexingResult", + "InMemoryRecordManager", + "RecordManager", + "UpsertResponse", +) + +_dynamic_imports = { + "aindex": "api", + "index": "api", + "IndexingResult": "api", + "DeleteResponse": "base", + "DocumentIndex": "base", + "InMemoryRecordManager": "base", + "RecordManager": "base", + "UpsertResponse": "base", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/indexing/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/indexing/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..7f3804a3 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/indexing/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/indexing/__pycache__/api.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/indexing/__pycache__/api.cpython-312.pyc new file mode 100644 index 00000000..691f3894 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/indexing/__pycache__/api.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/indexing/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/indexing/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..96cc60d5 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/indexing/__pycache__/base.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain_core/indexing/__pycache__/in_memory.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/indexing/__pycache__/in_memory.cpython-312.pyc new file mode 100644 index 00000000..00a028e7 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/indexing/__pycache__/in_memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/indexing/api.py b/venv/Lib/site-packages/langchain_core/indexing/api.py new file mode 100644 index 00000000..084fe9b8 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/indexing/api.py @@ -0,0 +1,813 @@ +"""Module contains logic for indexing documents into vector stores.""" + +from __future__ import annotations + +import hashlib +import json +import uuid +from collections.abc import AsyncIterable, AsyncIterator, Iterable, Iterator, Sequence +from itertools import islice +from typing import ( + Any, + Callable, + Literal, + Optional, + TypedDict, + TypeVar, + Union, + cast, +) + +from pydantic import model_validator + +from langchain_core.document_loaders.base import BaseLoader +from langchain_core.documents import Document +from langchain_core.exceptions import LangChainException +from langchain_core.indexing.base import DocumentIndex, RecordManager +from langchain_core.vectorstores import VectorStore + +# Magic UUID to use as a namespace for hashing. +# Used to try and generate a unique UUID for each document +# from hashing the document content and metadata. +NAMESPACE_UUID = uuid.UUID(int=1984) + + +T = TypeVar("T") + + +def _hash_string_to_uuid(input_string: str) -> uuid.UUID: + """Hashes a string and returns the corresponding UUID.""" + hash_value = hashlib.sha1( + input_string.encode("utf-8"), usedforsecurity=False + ).hexdigest() + return uuid.uuid5(NAMESPACE_UUID, hash_value) + + +def _hash_nested_dict_to_uuid(data: dict[Any, Any]) -> uuid.UUID: + """Hashes a nested dictionary and returns the corresponding UUID.""" + serialized_data = json.dumps(data, sort_keys=True) + hash_value = hashlib.sha1( + serialized_data.encode("utf-8"), usedforsecurity=False + ).hexdigest() + return uuid.uuid5(NAMESPACE_UUID, hash_value) + + +class _HashedDocument(Document): + """A hashed document with a unique ID.""" + + uid: str + hash_: str + """The hash of the document including content and metadata.""" + content_hash: str + """The hash of the document content.""" + metadata_hash: str + """The hash of the document metadata.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + return False + + @model_validator(mode="before") + @classmethod + def calculate_hashes(cls, values: dict[str, Any]) -> Any: + """Root validator to calculate content and metadata hash.""" + content = values.get("page_content", "") + metadata = values.get("metadata", {}) + + forbidden_keys = ("hash_", "content_hash", "metadata_hash") + + for key in forbidden_keys: + if key in metadata: + msg = ( + f"Metadata cannot contain key {key} as it " + f"is reserved for internal use." + ) + raise ValueError(msg) + + content_hash = str(_hash_string_to_uuid(content)) + + try: + metadata_hash = str(_hash_nested_dict_to_uuid(metadata)) + except Exception as e: + msg = ( + f"Failed to hash metadata: {e}. " + f"Please use a dict that can be serialized using json." 
+ ) + raise ValueError(msg) from e + + values["content_hash"] = content_hash + values["metadata_hash"] = metadata_hash + values["hash_"] = str(_hash_string_to_uuid(content_hash + metadata_hash)) + + _uid = values.get("uid") + + if _uid is None: + values["uid"] = values["hash_"] + return values + + def to_document(self) -> Document: + """Return a Document object.""" + return Document( + id=self.uid, + page_content=self.page_content, + metadata=self.metadata, + ) + + @classmethod + def from_document( + cls, document: Document, *, uid: Optional[str] = None + ) -> _HashedDocument: + """Create a HashedDocument from a Document.""" + return cls( # type: ignore[call-arg] + uid=uid, # type: ignore[arg-type] + page_content=document.page_content, + metadata=document.metadata, + ) + + +def _batch(size: int, iterable: Iterable[T]) -> Iterator[list[T]]: + """Utility batching function.""" + it = iter(iterable) + while True: + chunk = list(islice(it, size)) + if not chunk: + return + yield chunk + + +async def _abatch(size: int, iterable: AsyncIterable[T]) -> AsyncIterator[list[T]]: + """Utility batching function.""" + batch: list[T] = [] + async for element in iterable: + if len(batch) < size: + batch.append(element) + + if len(batch) >= size: + yield batch + batch = [] + + if batch: + yield batch + + +def _get_source_id_assigner( + source_id_key: Union[str, Callable[[Document], str], None], +) -> Callable[[Document], Union[str, None]]: + """Get the source id from the document.""" + if source_id_key is None: + return lambda _doc: None + if isinstance(source_id_key, str): + return lambda doc: doc.metadata[source_id_key] + if callable(source_id_key): + return source_id_key + msg = ( + f"source_id_key should be either None, a string or a callable. " + f"Got {source_id_key} of type {type(source_id_key)}." + ) + raise ValueError(msg) + + +def _deduplicate_in_order( + hashed_documents: Iterable[_HashedDocument], +) -> Iterator[_HashedDocument]: + """Deduplicate a list of hashed documents while preserving order.""" + seen: set[str] = set() + + for hashed_doc in hashed_documents: + if hashed_doc.hash_ not in seen: + seen.add(hashed_doc.hash_) + yield hashed_doc + + +class IndexingException(LangChainException): + """Raised when an indexing operation fails.""" + + +def _delete( + vector_store: Union[VectorStore, DocumentIndex], + ids: list[str], +) -> None: + if isinstance(vector_store, VectorStore): + delete_ok = vector_store.delete(ids) + if delete_ok is not None and delete_ok is False: + msg = "The delete operation to VectorStore failed." + raise IndexingException(msg) + elif isinstance(vector_store, DocumentIndex): + delete_response = vector_store.delete(ids) + if "num_failed" in delete_response and delete_response["num_failed"] > 0: + msg = "The delete operation to DocumentIndex failed." + raise IndexingException(msg) + else: + msg = ( + f"Vectorstore should be either a VectorStore or a DocumentIndex. " + f"Got {type(vector_store)}." 
+ ) + raise TypeError(msg) + + +# PUBLIC API + + +class IndexingResult(TypedDict): + """Return a detailed a breakdown of the result of the indexing operation.""" + + num_added: int + """Number of added documents.""" + num_updated: int + """Number of updated documents because they were not up to date.""" + num_deleted: int + """Number of deleted documents.""" + num_skipped: int + """Number of skipped documents because they were already up to date.""" + + +def index( + docs_source: Union[BaseLoader, Iterable[Document]], + record_manager: RecordManager, + vector_store: Union[VectorStore, DocumentIndex], + *, + batch_size: int = 100, + cleanup: Literal["incremental", "full", "scoped_full", None] = None, + source_id_key: Union[str, Callable[[Document], str], None] = None, + cleanup_batch_size: int = 1_000, + force_update: bool = False, + upsert_kwargs: Optional[dict[str, Any]] = None, +) -> IndexingResult: + """Index data from the loader into the vector store. + + Indexing functionality uses a manager to keep track of which documents + are in the vector store. + + This allows us to keep track of which documents were updated, and which + documents were deleted, which documents should be skipped. + + For the time being, documents are indexed using their hashes, and users + are not able to specify the uid of the document. + + Important: + * In full mode, the loader should be returning + the entire dataset, and not just a subset of the dataset. + Otherwise, the auto_cleanup will remove documents that it is not + supposed to. + * In incremental mode, if documents associated with a particular + source id appear across different batches, the indexing API + will do some redundant work. This will still result in the + correct end state of the index, but will unfortunately not be + 100% efficient. For example, if a given document is split into 15 + chunks, and we index them using a batch size of 5, we'll have 3 batches + all with the same source id. In general, to avoid doing too much + redundant work select as big a batch size as possible. + * The `scoped_full` mode is suitable if determining an appropriate batch size + is challenging or if your data loader cannot return the entire dataset at + once. This mode keeps track of source IDs in memory, which should be fine + for most use cases. If your dataset is large (10M+ docs), you will likely + need to parallelize the indexing process regardless. + + Args: + docs_source: Data loader or iterable of documents to index. + record_manager: Timestamped set to keep track of which documents were + updated. + vector_store: VectorStore or DocumentIndex to index the documents into. + batch_size: Batch size to use when indexing. Default is 100. + cleanup: How to handle clean up of documents. Default is None. + - incremental: Cleans up all documents that haven't been updated AND + that are associated with source ids that were seen + during indexing. + Clean up is done continuously during indexing helping + to minimize the probability of users seeing duplicated + content. + - full: Delete all documents that have not been returned by the loader + during this run of indexing. + Clean up runs after all documents have been indexed. + This means that users may see duplicated content during indexing. + - scoped_full: Similar to Full, but only deletes all documents + that haven't been updated AND that are associated with + source ids that were seen during indexing. + - None: Do not delete any documents. 
+        source_id_key: Optional key that helps identify the original source
+            of the document. Default is None.
+        cleanup_batch_size: Batch size to use when cleaning up documents.
+            Default is 1_000.
+        force_update: Force update documents even if they are present in the
+            record manager. Useful if you are re-indexing with updated embeddings.
+            Default is False.
+        upsert_kwargs: Additional keyword arguments to pass to the add_documents
+            method of the VectorStore or the upsert method of the
+            DocumentIndex. For example, you can use this to
+            specify a custom vector_field:
+            upsert_kwargs={"vector_field": "embedding"}
+            .. versionadded:: 0.3.10
+
+    Returns:
+        Indexing result which contains information about how many documents
+        were added, updated, deleted, or skipped.
+
+    Raises:
+        ValueError: If cleanup mode is not one of 'incremental', 'full',
+            'scoped_full' or None.
+        ValueError: If cleanup mode is incremental or scoped_full and
+            source_id_key is None.
+        ValueError: If vectorstore does not have
+            "delete" and "add_documents" required methods.
+        ValueError: If source_id_key is not None, but is not a string or callable.
+
+    .. versionchanged:: 0.3.25
+
+        * Added `scoped_full` cleanup mode.
+    """
+    if cleanup not in {"incremental", "full", "scoped_full", None}:
+        msg = (
+            f"cleanup should be one of 'incremental', 'full', 'scoped_full' or None. "
+            f"Got {cleanup}."
+        )
+        raise ValueError(msg)
+
+    if (cleanup in {"incremental", "scoped_full"}) and source_id_key is None:
+        msg = (
+            "Source id key is required when cleanup mode is incremental or scoped_full."
+        )
+        raise ValueError(msg)
+
+    destination = vector_store  # Renaming internally for clarity
+
+    # If it's a vectorstore, let's check if it has the required methods.
+    if isinstance(destination, VectorStore):
+        # Check that the Vectorstore has required methods implemented
+        methods = ["delete", "add_documents"]
+
+        for method in methods:
+            if not hasattr(destination, method):
+                msg = (
+                    f"Vectorstore {destination} does not have required method {method}"
+                )
+                raise ValueError(msg)
+
+        if type(destination).delete == VectorStore.delete:
+            # Checking if the vectorstore has overridden the default delete method
+            # implementation which just raises a NotImplementedError
+            msg = "Vectorstore has not implemented the delete method"
+            raise ValueError(msg)
+    elif isinstance(destination, DocumentIndex):
+        pass
+    else:
+        msg = (
+            f"Vectorstore should be either a VectorStore or a DocumentIndex. "
+            f"Got {type(destination)}."
+        )
+        raise TypeError(msg)
+
+    if isinstance(docs_source, BaseLoader):
+        try:
+            doc_iterator = docs_source.lazy_load()
+        except NotImplementedError:
+            doc_iterator = iter(docs_source.load())
+    else:
+        doc_iterator = iter(docs_source)
+
+    source_id_assigner = _get_source_id_assigner(source_id_key)
+
+    # Mark when the update started.
+    index_start_dt = record_manager.get_time()
+    num_added = 0
+    num_skipped = 0
+    num_updated = 0
+    num_deleted = 0
+    scoped_full_cleanup_source_ids: set[str] = set()
+
+    for doc_batch in _batch(batch_size, doc_iterator):
+        hashed_docs = list(
+            _deduplicate_in_order(
+                [_HashedDocument.from_document(doc) for doc in doc_batch]
+            )
+        )
+
+        source_ids: Sequence[Optional[str]] = [
+            source_id_assigner(doc) for doc in hashed_docs
+        ]
+
+        if cleanup in {"incremental", "scoped_full"}:
+            # source ids are required.
+            for source_id, hashed_doc in zip(source_ids, hashed_docs):
+                if source_id is None:
+                    msg = (
+                        f"Source ids are required when cleanup mode is "
+                        f"incremental or scoped_full. "
" + f"Document that starts with " + f"content: {hashed_doc.page_content[:100]} was not assigned " + f"as source id." + ) + raise ValueError(msg) + if cleanup == "scoped_full": + scoped_full_cleanup_source_ids.add(source_id) + # source ids cannot be None after for loop above. + source_ids = cast("Sequence[str]", source_ids) + + exists_batch = record_manager.exists([doc.uid for doc in hashed_docs]) + + # Filter out documents that already exist in the record store. + uids = [] + docs_to_index = [] + uids_to_refresh = [] + seen_docs: set[str] = set() + for hashed_doc, doc_exists in zip(hashed_docs, exists_batch): + if doc_exists: + if force_update: + seen_docs.add(hashed_doc.uid) + else: + uids_to_refresh.append(hashed_doc.uid) + continue + uids.append(hashed_doc.uid) + docs_to_index.append(hashed_doc.to_document()) + + # Update refresh timestamp + if uids_to_refresh: + record_manager.update(uids_to_refresh, time_at_least=index_start_dt) + num_skipped += len(uids_to_refresh) + + # Be pessimistic and assume that all vector store write will fail. + # First write to vector store + if docs_to_index: + if isinstance(destination, VectorStore): + destination.add_documents( + docs_to_index, + ids=uids, + batch_size=batch_size, + **(upsert_kwargs or {}), + ) + elif isinstance(destination, DocumentIndex): + destination.upsert( + docs_to_index, + **(upsert_kwargs or {}), + ) + + num_added += len(docs_to_index) - len(seen_docs) + num_updated += len(seen_docs) + + # And only then update the record store. + # Update ALL records, even if they already exist since we want to refresh + # their timestamp. + record_manager.update( + [doc.uid for doc in hashed_docs], + group_ids=source_ids, + time_at_least=index_start_dt, + ) + + # If source IDs are provided, we can do the deletion incrementally! + if cleanup == "incremental": + # Get the uids of the documents that were not returned by the loader. + + # mypy isn't good enough to determine that source ids cannot be None + # here due to a check that's happening above, so we check again. + for source_id in source_ids: + if source_id is None: + msg = ( + "source_id cannot be None at this point. " + "Reached unreachable code." + ) + raise AssertionError(msg) + + _source_ids = cast("Sequence[str]", source_ids) + + uids_to_delete = record_manager.list_keys( + group_ids=_source_ids, before=index_start_dt + ) + if uids_to_delete: + # Then delete from vector store. + _delete(destination, uids_to_delete) + # First delete from record store. + record_manager.delete_keys(uids_to_delete) + num_deleted += len(uids_to_delete) + + if cleanup == "full" or ( + cleanup == "scoped_full" and scoped_full_cleanup_source_ids + ): + delete_group_ids: Optional[Sequence[str]] = None + if cleanup == "scoped_full": + delete_group_ids = list(scoped_full_cleanup_source_ids) + while uids_to_delete := record_manager.list_keys( + group_ids=delete_group_ids, before=index_start_dt, limit=cleanup_batch_size + ): + # First delete from record store. + _delete(destination, uids_to_delete) + # Then delete from record manager. 
+            record_manager.delete_keys(uids_to_delete)
+            num_deleted += len(uids_to_delete)
+
+    return {
+        "num_added": num_added,
+        "num_updated": num_updated,
+        "num_skipped": num_skipped,
+        "num_deleted": num_deleted,
+    }
+
+
+# Define an asynchronous generator function
+async def _to_async_iterator(iterator: Iterable[T]) -> AsyncIterator[T]:
+    """Convert an iterable to an async iterator."""
+    for item in iterator:
+        yield item
+
+
+async def _adelete(
+    vector_store: Union[VectorStore, DocumentIndex],
+    ids: list[str],
+) -> None:
+    if isinstance(vector_store, VectorStore):
+        delete_ok = await vector_store.adelete(ids)
+        if delete_ok is not None and delete_ok is False:
+            msg = "The delete operation to VectorStore failed."
+            raise IndexingException(msg)
+    elif isinstance(vector_store, DocumentIndex):
+        delete_response = await vector_store.adelete(ids)
+        if "num_failed" in delete_response and delete_response["num_failed"] > 0:
+            msg = "The delete operation to DocumentIndex failed."
+            raise IndexingException(msg)
+    else:
+        msg = (
+            f"Vectorstore should be either a VectorStore or a DocumentIndex. "
+            f"Got {type(vector_store)}."
+        )
+        raise TypeError(msg)
+
+
+async def aindex(
+    docs_source: Union[BaseLoader, Iterable[Document], AsyncIterator[Document]],
+    record_manager: RecordManager,
+    vector_store: Union[VectorStore, DocumentIndex],
+    *,
+    batch_size: int = 100,
+    cleanup: Literal["incremental", "full", "scoped_full", None] = None,
+    source_id_key: Union[str, Callable[[Document], str], None] = None,
+    cleanup_batch_size: int = 1_000,
+    force_update: bool = False,
+    upsert_kwargs: Optional[dict[str, Any]] = None,
+) -> IndexingResult:
+    """Async index data from the loader into the vector store.
+
+    Indexing functionality uses a manager to keep track of which documents
+    are in the vector store.
+
+    This allows us to keep track of which documents were updated, which
+    documents were deleted, and which documents should be skipped.
+
+    For the time being, documents are indexed using their hashes, and users
+    are not able to specify the uid of the document.
+
+    Important:
+       * In full mode, the loader should be returning
+         the entire dataset, and not just a subset of the dataset.
+         Otherwise, the auto_cleanup will remove documents that it is not
+         supposed to.
+       * In incremental mode, if documents associated with a particular
+         source id appear across different batches, the indexing API
+         will do some redundant work. This will still result in the
+         correct end state of the index, but will unfortunately not be
+         100% efficient. For example, if a given document is split into 15
+         chunks, and we index them using a batch size of 5, we'll have 3 batches
+         all with the same source id. In general, to avoid doing too much
+         redundant work select as big a batch size as possible.
+       * The `scoped_full` mode is suitable if determining an appropriate batch size
+         is challenging or if your data loader cannot return the entire dataset at
+         once. This mode keeps track of source IDs in memory, which should be fine
+         for most use cases. If your dataset is large (10M+ docs), you will likely
+         need to parallelize the indexing process regardless.
+
+    Args:
+        docs_source: Data loader or iterable of documents to index.
+        record_manager: Timestamped set to keep track of which documents were
+            updated.
+        vector_store: VectorStore or DocumentIndex to index the documents into.
+        batch_size: Batch size to use when indexing. Default is 100.
+        cleanup: How to handle clean up of documents. Default is None.
+            - incremental: Cleans up all documents that haven't been updated AND
+              that are associated with source ids that were seen
+              during indexing.
+              Clean up is done continuously during indexing helping
+              to minimize the probability of users seeing duplicated
+              content.
+            - full: Delete all documents that have not been returned by the loader
+              during this run of indexing.
+              Clean up runs after all documents have been indexed.
+              This means that users may see duplicated content during indexing.
+            - scoped_full: Similar to full, but only deletes all documents
+              that haven't been updated AND that are associated with
+              source ids that were seen during indexing.
+            - None: Do not delete any documents.
+        source_id_key: Optional key that helps identify the original source
+            of the document. Default is None.
+        cleanup_batch_size: Batch size to use when cleaning up documents.
+            Default is 1_000.
+        force_update: Force update documents even if they are present in the
+            record manager. Useful if you are re-indexing with updated embeddings.
+            Default is False.
+        upsert_kwargs: Additional keyword arguments to pass to the aadd_documents
+            method of the VectorStore or the aupsert method of the
+            DocumentIndex. For example, you can use this to
+            specify a custom vector_field:
+            upsert_kwargs={"vector_field": "embedding"}
+            .. versionadded:: 0.3.10
+
+    Returns:
+        Indexing result which contains information about how many documents
+        were added, updated, deleted, or skipped.
+
+    Raises:
+        ValueError: If cleanup mode is not one of 'incremental', 'full',
+            'scoped_full' or None.
+        ValueError: If cleanup mode is incremental or scoped_full and
+            source_id_key is None.
+        ValueError: If vectorstore does not have
+            "adelete" and "aadd_documents" required methods.
+        ValueError: If source_id_key is not None, but is not a string or callable.
+
+    .. versionchanged:: 0.3.25
+
+        * Added `scoped_full` cleanup mode.
+    """
+    if cleanup not in {"incremental", "full", "scoped_full", None}:
+        msg = (
+            f"cleanup should be one of 'incremental', 'full', 'scoped_full' or None. "
+            f"Got {cleanup}."
+        )
+        raise ValueError(msg)
+
+    if (cleanup in {"incremental", "scoped_full"}) and source_id_key is None:
+        msg = (
+            "Source id key is required when cleanup mode is incremental or scoped_full."
+        )
+        raise ValueError(msg)
+
+    destination = vector_store  # Renaming internally for clarity
+
+    # If it's a vectorstore, let's check if it has the required methods.
+    if isinstance(destination, VectorStore):
+        # Check that the Vectorstore has required methods implemented
+        methods = ["adelete", "aadd_documents"]
+
+        for method in methods:
+            if not hasattr(destination, method):
+                msg = (
+                    f"Vectorstore {destination} does not have required method {method}"
+                )
+                raise ValueError(msg)
+
+        if type(destination).adelete == VectorStore.adelete:
+            # Checking if the vectorstore has overridden the default delete method
+            # implementation which just raises a NotImplementedError
+            msg = "Vectorstore has not implemented the delete method"
+            raise ValueError(msg)
+    elif isinstance(destination, DocumentIndex):
+        pass
+    else:
+        msg = (
+            f"Vectorstore should be either a VectorStore or a DocumentIndex. "
+            f"Got {type(destination)}."
+        )
+        raise TypeError(msg)
+    async_doc_iterator: AsyncIterator[Document]
+    if isinstance(docs_source, BaseLoader):
+        try:
+            async_doc_iterator = docs_source.alazy_load()
+        except NotImplementedError:
+            # Exception triggered when neither lazy_load nor alazy_load are implemented.
+            # * The default implementation of alazy_load uses lazy_load.
+            # * The default implementation of lazy_load raises NotImplementedError.
+            # In such a case, we use the load method and convert it to an async
+            # iterator.
+            async_doc_iterator = _to_async_iterator(docs_source.load())
+    elif hasattr(docs_source, "__aiter__"):
+        async_doc_iterator = docs_source  # type: ignore[assignment]
+    else:
+        async_doc_iterator = _to_async_iterator(docs_source)
+
+    source_id_assigner = _get_source_id_assigner(source_id_key)
+
+    # Mark when the update started.
+    index_start_dt = await record_manager.aget_time()
+    num_added = 0
+    num_skipped = 0
+    num_updated = 0
+    num_deleted = 0
+    scoped_full_cleanup_source_ids: set[str] = set()
+
+    async for doc_batch in _abatch(batch_size, async_doc_iterator):
+        hashed_docs = list(
+            _deduplicate_in_order(
+                [_HashedDocument.from_document(doc) for doc in doc_batch]
+            )
+        )
+
+        source_ids: Sequence[Optional[str]] = [
+            source_id_assigner(doc) for doc in hashed_docs
+        ]
+
+        if cleanup in {"incremental", "scoped_full"}:
+            # If the cleanup mode is incremental or scoped_full, source ids
+            # are required.
+            for source_id, hashed_doc in zip(source_ids, hashed_docs):
+                if source_id is None:
+                    msg = (
+                        f"Source ids are required when cleanup mode is "
+                        f"incremental or scoped_full. "
+                        f"Document that starts with "
+                        f"content: {hashed_doc.page_content[:100]} was not assigned "
+                        f"a source id."
+                    )
+                    raise ValueError(msg)
+                if cleanup == "scoped_full":
+                    scoped_full_cleanup_source_ids.add(source_id)
+            # source ids cannot be None after for loop above.
+            source_ids = cast("Sequence[str]", source_ids)
+
+        exists_batch = await record_manager.aexists([doc.uid for doc in hashed_docs])
+
+        # Filter out documents that already exist in the record store.
+        uids: list[str] = []
+        docs_to_index: list[Document] = []
+        uids_to_refresh = []
+        seen_docs: set[str] = set()
+        for hashed_doc, doc_exists in zip(hashed_docs, exists_batch):
+            if doc_exists:
+                if force_update:
+                    seen_docs.add(hashed_doc.uid)
+                else:
+                    uids_to_refresh.append(hashed_doc.uid)
+                    continue
+            uids.append(hashed_doc.uid)
+            docs_to_index.append(hashed_doc.to_document())
+
+        if uids_to_refresh:
+            # Must be updated to refresh timestamp.
+            await record_manager.aupdate(uids_to_refresh, time_at_least=index_start_dt)
+            num_skipped += len(uids_to_refresh)
+
+        # Be pessimistic and assume that all vector store writes will fail.
+        # First write to vector store
+        if docs_to_index:
+            if isinstance(destination, VectorStore):
+                await destination.aadd_documents(
+                    docs_to_index,
+                    ids=uids,
+                    batch_size=batch_size,
+                    **(upsert_kwargs or {}),
+                )
+            elif isinstance(destination, DocumentIndex):
+                await destination.aupsert(
+                    docs_to_index,
+                    **(upsert_kwargs or {}),
+                )
+            num_added += len(docs_to_index) - len(seen_docs)
+            num_updated += len(seen_docs)
+
+        # And only then update the record store.
+        # Update ALL records, even if they already exist since we want to refresh
+        # their timestamp.
+        await record_manager.aupdate(
+            [doc.uid for doc in hashed_docs],
+            group_ids=source_ids,
+            time_at_least=index_start_dt,
+        )
+
+        # If source IDs are provided, we can do the deletion incrementally!
+
+        if cleanup == "incremental":
+            # Get the uids of the documents that were not returned by the loader.
+
+            # mypy isn't good enough to determine that source ids cannot be None
+            # here due to a check that's happening above, so we check again.
+            for source_id in source_ids:
+                if source_id is None:
+                    msg = (
+                        "source_id cannot be None at this point. "
" + "Reached unreachable code." + ) + raise AssertionError(msg) + + _source_ids = cast("Sequence[str]", source_ids) + + uids_to_delete = await record_manager.alist_keys( + group_ids=_source_ids, before=index_start_dt + ) + if uids_to_delete: + # Then delete from vector store. + await _adelete(destination, uids_to_delete) + # First delete from record store. + await record_manager.adelete_keys(uids_to_delete) + num_deleted += len(uids_to_delete) + + if cleanup == "full" or ( + cleanup == "scoped_full" and scoped_full_cleanup_source_ids + ): + delete_group_ids: Optional[Sequence[str]] = None + if cleanup == "scoped_full": + delete_group_ids = list(scoped_full_cleanup_source_ids) + while uids_to_delete := await record_manager.alist_keys( + group_ids=delete_group_ids, before=index_start_dt, limit=cleanup_batch_size + ): + # First delete from record store. + await _adelete(destination, uids_to_delete) + # Then delete from record manager. + await record_manager.adelete_keys(uids_to_delete) + num_deleted += len(uids_to_delete) + + return { + "num_added": num_added, + "num_updated": num_updated, + "num_skipped": num_skipped, + "num_deleted": num_deleted, + } diff --git a/venv/Lib/site-packages/langchain_core/indexing/base.py b/venv/Lib/site-packages/langchain_core/indexing/base.py new file mode 100644 index 00000000..4ed4772e --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/indexing/base.py @@ -0,0 +1,666 @@ +"""Base classes for indexing.""" + +from __future__ import annotations + +import abc +import time +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, Optional, TypedDict + +from langchain_core._api import beta +from langchain_core.retrievers import BaseRetriever +from langchain_core.runnables import run_in_executor + +if TYPE_CHECKING: + from collections.abc import Sequence + + from langchain_core.documents import Document + + +class RecordManager(ABC): + """Abstract base class representing the interface for a record manager. + + The record manager abstraction is used by the langchain indexing API. + + The record manager keeps track of which documents have been + written into a vectorstore and when they were written. + + The indexing API computes hashes for each document and stores the hash + together with the write time and the source id in the record manager. + + On subsequent indexing runs, the indexing API can check the record manager + to determine which documents have already been indexed and which have not. + + This allows the indexing API to avoid re-indexing documents that have + already been indexed, and to only index new documents. + + The main benefit of this abstraction is that it works across many vectorstores. + To be supported, a vectorstore needs to only support the ability to add and + delete documents by ID. Using the record manager, the indexing API will + be able to delete outdated documents and avoid redundant indexing of documents + that have already been indexed. + + The main constraints of this abstraction are: + + 1. It relies on the time-stamps to determine which documents have been + indexed and which have not. This means that the time-stamps must be + monotonically increasing. The timestamp should be the timestamp + as measured by the server to minimize issues. + 2. The record manager is currently implemented separately from the + vectorstore, which means that the overall system becomes distributed + and may create issues with consistency. 
For example, writing to + record manager succeeds, but corresponding writing to vectorstore fails. + """ + + def __init__( + self, + namespace: str, + ) -> None: + """Initialize the record manager. + + Args: + namespace (str): The namespace for the record manager. + """ + self.namespace = namespace + + @abstractmethod + def create_schema(self) -> None: + """Create the database schema for the record manager.""" + + @abstractmethod + async def acreate_schema(self) -> None: + """Asynchronously create the database schema for the record manager.""" + + @abstractmethod + def get_time(self) -> float: + """Get the current server time as a high resolution timestamp! + + It's important to get this from the server to ensure a monotonic clock, + otherwise there may be data loss when cleaning up old documents! + + Returns: + The current server time as a float timestamp. + """ + + @abstractmethod + async def aget_time(self) -> float: + """Asynchronously get the current server time as a high resolution timestamp. + + It's important to get this from the server to ensure a monotonic clock, + otherwise there may be data loss when cleaning up old documents! + + Returns: + The current server time as a float timestamp. + """ + + @abstractmethod + def update( + self, + keys: Sequence[str], + *, + group_ids: Optional[Sequence[Optional[str]]] = None, + time_at_least: Optional[float] = None, + ) -> None: + """Upsert records into the database. + + Args: + keys: A list of record keys to upsert. + group_ids: A list of group IDs corresponding to the keys. + time_at_least: Optional timestamp. Implementation can use this + to optionally verify that the timestamp IS at least this time + in the system that stores the data. + + e.g., use to validate that the time in the postgres database + is equal to or larger than the given timestamp, if not + raise an error. + + This is meant to help prevent time-drift issues since + time may not be monotonically increasing! + + Raises: + ValueError: If the length of keys doesn't match the length of group_ids. + """ + + @abstractmethod + async def aupdate( + self, + keys: Sequence[str], + *, + group_ids: Optional[Sequence[Optional[str]]] = None, + time_at_least: Optional[float] = None, + ) -> None: + """Asynchronously upsert records into the database. + + Args: + keys: A list of record keys to upsert. + group_ids: A list of group IDs corresponding to the keys. + time_at_least: Optional timestamp. Implementation can use this + to optionally verify that the timestamp IS at least this time + in the system that stores the data. + + e.g., use to validate that the time in the postgres database + is equal to or larger than the given timestamp, if not + raise an error. + + This is meant to help prevent time-drift issues since + time may not be monotonically increasing! + + Raises: + ValueError: If the length of keys doesn't match the length of group_ids. + """ + + @abstractmethod + def exists(self, keys: Sequence[str]) -> list[bool]: + """Check if the provided keys exist in the database. + + Args: + keys: A list of keys to check. + + Returns: + A list of boolean values indicating the existence of each key. + """ + + @abstractmethod + async def aexists(self, keys: Sequence[str]) -> list[bool]: + """Asynchronously check if the provided keys exist in the database. + + Args: + keys: A list of keys to check. + + Returns: + A list of boolean values indicating the existence of each key. 
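+
+        Example:
+            .. code-block:: python
+
+                # Illustrative sketch: one boolean per key, in input order.
+                await manager.aexists(["uid-1", "uid-2"])  # -> [True, False]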
+ """ + + @abstractmethod + def list_keys( + self, + *, + before: Optional[float] = None, + after: Optional[float] = None, + group_ids: Optional[Sequence[str]] = None, + limit: Optional[int] = None, + ) -> list[str]: + """List records in the database based on the provided filters. + + Args: + before: Filter to list records updated before this time. + after: Filter to list records updated after this time. + group_ids: Filter to list records with specific group IDs. + limit: optional limit on the number of records to return. + + Returns: + A list of keys for the matching records. + """ + + @abstractmethod + async def alist_keys( + self, + *, + before: Optional[float] = None, + after: Optional[float] = None, + group_ids: Optional[Sequence[str]] = None, + limit: Optional[int] = None, + ) -> list[str]: + """Asynchronously list records in the database based on the provided filters. + + Args: + before: Filter to list records updated before this time. + after: Filter to list records updated after this time. + group_ids: Filter to list records with specific group IDs. + limit: optional limit on the number of records to return. + + Returns: + A list of keys for the matching records. + """ + + @abstractmethod + def delete_keys(self, keys: Sequence[str]) -> None: + """Delete specified records from the database. + + Args: + keys: A list of keys to delete. + """ + + @abstractmethod + async def adelete_keys(self, keys: Sequence[str]) -> None: + """Asynchronously delete specified records from the database. + + Args: + keys: A list of keys to delete. + """ + + +class _Record(TypedDict): + group_id: Optional[str] + updated_at: float + + +class InMemoryRecordManager(RecordManager): + """An in-memory record manager for testing purposes.""" + + def __init__(self, namespace: str) -> None: + """Initialize the in-memory record manager. + + Args: + namespace (str): The namespace for the record manager. + """ + super().__init__(namespace) + # Each key points to a dictionary + # of {'group_id': group_id, 'updated_at': timestamp} + self.records: dict[str, _Record] = {} + self.namespace = namespace + + def create_schema(self) -> None: + """In-memory schema creation is simply ensuring the structure is initialized.""" + + async def acreate_schema(self) -> None: + """Async in-memory schema creation is simply ensuring the structure is initialized.""" # noqa: E501 + + def get_time(self) -> float: + """Get the current server time as a high resolution timestamp!""" + return time.time() + + async def aget_time(self) -> float: + """Async get the current server time as a high resolution timestamp!""" + return self.get_time() + + def update( + self, + keys: Sequence[str], + *, + group_ids: Optional[Sequence[Optional[str]]] = None, + time_at_least: Optional[float] = None, + ) -> None: + """Upsert records into the database. + + Args: + keys: A list of record keys to upsert. + group_ids: A list of group IDs corresponding to the keys. + Defaults to None. + time_at_least: Optional timestamp. Implementation can use this + to optionally verify that the timestamp IS at least this time + in the system that stores. Defaults to None. + E.g., use to validate that the time in the postgres database + is equal to or larger than the given timestamp, if not + raise an error. + This is meant to help prevent time-drift issues since + time may not be monotonically increasing! + + Raises: + ValueError: If the length of keys doesn't match the length of group + ids. + ValueError: If time_at_least is in the future. 
+ """ + if group_ids and len(keys) != len(group_ids): + msg = "Length of keys must match length of group_ids" + raise ValueError(msg) + for index, key in enumerate(keys): + group_id = group_ids[index] if group_ids else None + if time_at_least and time_at_least > self.get_time(): + msg = "time_at_least must be in the past" + raise ValueError(msg) + self.records[key] = {"group_id": group_id, "updated_at": self.get_time()} + + async def aupdate( + self, + keys: Sequence[str], + *, + group_ids: Optional[Sequence[Optional[str]]] = None, + time_at_least: Optional[float] = None, + ) -> None: + """Async upsert records into the database. + + Args: + keys: A list of record keys to upsert. + group_ids: A list of group IDs corresponding to the keys. + Defaults to None. + time_at_least: Optional timestamp. Implementation can use this + to optionally verify that the timestamp IS at least this time + in the system that stores. Defaults to None. + E.g., use to validate that the time in the postgres database + is equal to or larger than the given timestamp, if not + raise an error. + This is meant to help prevent time-drift issues since + time may not be monotonically increasing! + + Raises: + ValueError: If the length of keys doesn't match the length of group + ids. + ValueError: If time_at_least is in the future. + """ + self.update(keys, group_ids=group_ids, time_at_least=time_at_least) + + def exists(self, keys: Sequence[str]) -> list[bool]: + """Check if the provided keys exist in the database. + + Args: + keys: A list of keys to check. + + Returns: + A list of boolean values indicating the existence of each key. + """ + return [key in self.records for key in keys] + + async def aexists(self, keys: Sequence[str]) -> list[bool]: + """Async check if the provided keys exist in the database. + + Args: + keys: A list of keys to check. + + Returns: + A list of boolean values indicating the existence of each key. + """ + return self.exists(keys) + + def list_keys( + self, + *, + before: Optional[float] = None, + after: Optional[float] = None, + group_ids: Optional[Sequence[str]] = None, + limit: Optional[int] = None, + ) -> list[str]: + """List records in the database based on the provided filters. + + Args: + before: Filter to list records updated before this time. + Defaults to None. + after: Filter to list records updated after this time. + Defaults to None. + group_ids: Filter to list records with specific group IDs. + Defaults to None. + limit: optional limit on the number of records to return. + Defaults to None. + + Returns: + A list of keys for the matching records. + """ + result = [] + for key, data in self.records.items(): + if before and data["updated_at"] >= before: + continue + if after and data["updated_at"] <= after: + continue + if group_ids and data["group_id"] not in group_ids: + continue + result.append(key) + if limit: + return result[:limit] + return result + + async def alist_keys( + self, + *, + before: Optional[float] = None, + after: Optional[float] = None, + group_ids: Optional[Sequence[str]] = None, + limit: Optional[int] = None, + ) -> list[str]: + """Async list records in the database based on the provided filters. + + Args: + before: Filter to list records updated before this time. + Defaults to None. + after: Filter to list records updated after this time. + Defaults to None. + group_ids: Filter to list records with specific group IDs. + Defaults to None. + limit: optional limit on the number of records to return. + Defaults to None. 
+
+        Returns:
+            A list of keys for the matching records.
+        """
+        return self.list_keys(
+            before=before, after=after, group_ids=group_ids, limit=limit
+        )
+
+    def delete_keys(self, keys: Sequence[str]) -> None:
+        """Delete specified records from the database.
+
+        Args:
+            keys: A list of keys to delete.
+        """
+        for key in keys:
+            if key in self.records:
+                del self.records[key]
+
+    async def adelete_keys(self, keys: Sequence[str]) -> None:
+        """Async delete specified records from the database.
+
+        Args:
+            keys: A list of keys to delete.
+        """
+        self.delete_keys(keys)
+
+
+class UpsertResponse(TypedDict):
+    """A generic response for upsert operations.
+
+    The upsert response will be used by abstractions that implement an upsert
+    operation for content that can be upserted by ID.
+
+    Upsert APIs that accept inputs with IDs and generate IDs internally
+    will return a response that includes the IDs that succeeded and the IDs
+    that failed.
+
+    If there are no failures, the failed list will be empty, and the order
+    of the IDs in the succeeded list will match the order of the input documents.
+
+    If there are failures, the response becomes ill-defined, and a user of the API
+    cannot determine which generated ID corresponds to which input document.
+
+    It is recommended that users explicitly attach the IDs to the items being
+    indexed to avoid this issue.
+    """
+
+    succeeded: list[str]
+    """The IDs that were successfully indexed."""
+    failed: list[str]
+    """The IDs that failed to index."""
+
+
+class DeleteResponse(TypedDict, total=False):
+    """A generic response for delete operation.
+
+    The fields in this response are optional and whether the vectorstore
+    returns them or not is up to the implementation.
+    """
+
+    num_deleted: int
+    """The number of items that were successfully deleted.
+
+    If returned, this should only include *actual* deletions.
+
+    If the ID did not exist to begin with,
+    it should not be included in this count.
+    """
+
+    succeeded: Sequence[str]
+    """The IDs that were successfully deleted.
+
+    If returned, this should only include *actual* deletions.
+
+    If the ID did not exist to begin with,
+    it should not be included in this list.
+    """
+
+    failed: Sequence[str]
+    """The IDs that failed to be deleted.
+
+    Please note that deleting an ID that
+    does not exist is **NOT** considered a failure.
+    """
+
+    num_failed: int
+    """The number of items that failed to be deleted."""
+
+
+@beta(message="Added in 0.2.29. The abstraction is subject to change.")
+class DocumentIndex(BaseRetriever):
+    """A document retriever that supports indexing operations.
+
+    This indexing interface is designed to be a generic abstraction for storing and
+    querying documents that have an ID and metadata associated with them.
+
+    The interface is designed to be agnostic to the underlying implementation of the
+    indexing system.
+
+    The interface is designed to support the following operations:
+
+    1. Storing documents in the index.
+    2. Fetching documents by ID.
+    3. Searching for documents using a query.
+
+    .. versionadded:: 0.2.29
+    """
+
+    @abc.abstractmethod
+    def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
+        """Upsert documents into the index.
+
+        The upsert functionality should utilize the ID field of the content object
+        if it is provided. If the ID is not provided, the upsert method is free
+        to generate an ID for the content.
+
+        When an ID is specified and the content already exists in the vectorstore,
+        the upsert method should update the content with the new data.
If the content + does not exist, the upsert method should add the item to the vectorstore. + + Args: + items: Sequence of documents to add to the vectorstore. + **kwargs: Additional keyword arguments. + + Returns: + UpsertResponse: A response object that contains the list of IDs that were + successfully added or updated in the vectorstore and the list of IDs that + failed to be added or updated. + """ + + async def aupsert( + self, items: Sequence[Document], /, **kwargs: Any + ) -> UpsertResponse: + """Add or update documents in the vectorstore. Async version of upsert. + + The upsert functionality should utilize the ID field of the item + if it is provided. If the ID is not provided, the upsert method is free + to generate an ID for the item. + + When an ID is specified and the item already exists in the vectorstore, + the upsert method should update the item with the new data. If the item + does not exist, the upsert method should add the item to the vectorstore. + + Args: + items: Sequence of documents to add to the vectorstore. + **kwargs: Additional keyword arguments. + + Returns: + UpsertResponse: A response object that contains the list of IDs that were + successfully added or updated in the vectorstore and the list of IDs that + failed to be added or updated. + """ + return await run_in_executor( + None, + self.upsert, + items, + **kwargs, + ) + + @abc.abstractmethod + def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse: + """Delete by IDs or other criteria. + + Calling delete without any input parameters should raise a ValueError! + + Args: + ids: List of ids to delete. + kwargs: Additional keyword arguments. This is up to the implementation. + For example, can include an option to delete the entire index, + or else issue a non-blocking delete etc. + + Returns: + DeleteResponse: A response object that contains the list of IDs that were + successfully deleted and the list of IDs that failed to be deleted. + """ + + async def adelete( + self, ids: Optional[list[str]] = None, **kwargs: Any + ) -> DeleteResponse: + """Delete by IDs or other criteria. Async variant. + + Calling adelete without any input parameters should raise a ValueError! + + Args: + ids: List of ids to delete. + kwargs: Additional keyword arguments. This is up to the implementation. + For example, can include an option to delete the entire index. + + Returns: + DeleteResponse: A response object that contains the list of IDs that were + successfully deleted and the list of IDs that failed to be deleted. + """ + return await run_in_executor( + None, + self.delete, + ids, + **kwargs, + ) + + @abc.abstractmethod + def get( + self, + ids: Sequence[str], + /, + **kwargs: Any, + ) -> list[Document]: + """Get documents by id. + + Fewer documents may be returned than requested if some IDs are not found or + if there are duplicated IDs. + + Users should not assume that the order of the returned documents matches + the order of the input IDs. Instead, users should rely on the ID field of the + returned documents. + + This method should **NOT** raise exceptions if no documents are found for + some IDs. + + Args: + ids: List of IDs to get. + kwargs: Additional keyword arguments. These are up to the implementation. + + Returns: + list[Document]: List of documents that were found. + """ + + async def aget( + self, + ids: Sequence[str], + /, + **kwargs: Any, + ) -> list[Document]: + """Get documents by id. 
+
+        Fewer documents may be returned than requested if some IDs are not found or
+        if there are duplicated IDs.
+
+        Users should not assume that the order of the returned documents matches
+        the order of the input IDs. Instead, users should rely on the ID field of the
+        returned documents.
+
+        This method should **NOT** raise exceptions if no documents are found for
+        some IDs.
+
+        Args:
+            ids: List of IDs to get.
+            kwargs: Additional keyword arguments. These are up to the implementation.
+
+        Returns:
+            list[Document]: List of documents that were found.
+        """
+        return await run_in_executor(
+            None,
+            self.get,
+            ids,
+            **kwargs,
+        )
diff --git a/venv/Lib/site-packages/langchain_core/indexing/in_memory.py b/venv/Lib/site-packages/langchain_core/indexing/in_memory.py
new file mode 100644
index 00000000..34609a03
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/indexing/in_memory.py
@@ -0,0 +1,86 @@
+"""In memory document index."""
+
+import operator
+import uuid
+from collections.abc import Sequence
+from typing import Any, Optional, cast
+
+from pydantic import Field
+from typing_extensions import override
+
+from langchain_core._api import beta
+from langchain_core.callbacks import CallbackManagerForRetrieverRun
+from langchain_core.documents import Document
+from langchain_core.indexing import UpsertResponse
+from langchain_core.indexing.base import DeleteResponse, DocumentIndex
+
+
+@beta(message="Introduced in version 0.2.29. Underlying abstraction subject to change.")
+class InMemoryDocumentIndex(DocumentIndex):
+    """In memory document index.
+
+    This is an in-memory document index that stores documents in a dictionary.
+
+    It provides a simple search API that returns documents ranked by the number
+    of times the given query appears in the document.
+
+    .. versionadded:: 0.2.29
+    """
+
+    store: dict[str, Document] = Field(default_factory=dict)
+    top_k: int = 4
+
+    @override
+    def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
+        """Upsert items into the index."""
+        ok_ids = []
+
+        for item in items:
+            if item.id is None:
+                id_ = str(uuid.uuid4())
+                item_ = item.model_copy()
+                item_.id = id_
+            else:
+                item_ = item
+                id_ = item.id
+
+            self.store[id_] = item_
+            ok_ids.append(cast("str", item_.id))
+
+        return UpsertResponse(succeeded=ok_ids, failed=[])
+
+    @override
+    def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
+        """Delete by ID."""
+        if ids is None:
+            msg = "IDs must be provided for deletion"
+            raise ValueError(msg)
+
+        ok_ids = []
+
+        for id_ in ids:
+            if id_ in self.store:
+                del self.store[id_]
+                ok_ids.append(id_)
+
+        return DeleteResponse(
+            succeeded=ok_ids, num_deleted=len(ok_ids), num_failed=0, failed=[]
+        )
+
+    @override
+    def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
+        """Get by ids."""
+        return [self.store[id_] for id_ in ids if id_ in self.store]
+
+    @override
+    def _get_relevant_documents(
+        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
+    ) -> list[Document]:
+        counts_by_doc = []
+
+        for document in self.store.values():
+            count = document.page_content.count(query)
+            counts_by_doc.append((document, count))
+
+        counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
+        return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
diff --git a/venv/Lib/site-packages/langchain_core/language_models/__init__.py b/venv/Lib/site-packages/langchain_core/language_models/__init__.py
new file mode 100644
index 00000000..8ed64aea
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/language_models/__init__.py
@@ -0,0 +1,117 @@
+"""Language models.
+
+**Language Model** is a type of model that can generate text or complete
+text prompts.
+
+LangChain has two main classes to work with language models: **Chat Models**
+and "old-fashioned" **LLMs**.
+
+**Chat Models**
+
+Language models that use a sequence of messages as inputs and return chat messages
+as outputs (as opposed to using plain text). These are traditionally newer models
+(older models are generally LLMs, see below). Chat models support the assignment of
+distinct roles to conversation messages, helping to distinguish messages from the AI,
+users, and instructions such as system messages.
+
+The key abstraction for chat models is `BaseChatModel`. Implementations
+should inherit from this class. Please see LangChain how-to guides for more
+information on how to implement a custom chat model.
+
+To implement a custom Chat Model, inherit from `BaseChatModel`. See
+the following guide for more information on how to implement a custom Chat Model:
+
+https://python.langchain.com/docs/how_to/custom_chat_model/
+
+**LLMs**
+
+Language models that take a string as input and return a string.
+These are traditionally older models (newer models generally are Chat Models, see
+below).
+
+Although the underlying models are string in, string out, the LangChain wrappers
+also allow these models to take messages as input. This gives them the same interface
+as Chat Models. When messages are passed in as input, they will be formatted into a
+string under the hood before being passed to the underlying model.
+
+To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
+Please see the following guide for more information on how to implement a custom LLM: + +https://python.langchain.com/docs/how_to/custom_llm/ + + +""" # noqa: E501 + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.language_models.base import ( + BaseLanguageModel, + LangSmithParams, + LanguageModelInput, + LanguageModelLike, + LanguageModelOutput, + get_tokenizer, + ) + from langchain_core.language_models.chat_models import ( + BaseChatModel, + SimpleChatModel, + ) + from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM + from langchain_core.language_models.fake_chat_models import ( + FakeListChatModel, + FakeMessagesListChatModel, + GenericFakeChatModel, + ParrotFakeChatModel, + ) + from langchain_core.language_models.llms import LLM, BaseLLM + +__all__ = ( + "BaseLanguageModel", + "BaseChatModel", + "SimpleChatModel", + "BaseLLM", + "LLM", + "LanguageModelInput", + "get_tokenizer", + "LangSmithParams", + "LanguageModelOutput", + "LanguageModelLike", + "FakeListLLM", + "FakeStreamingListLLM", + "FakeListChatModel", + "FakeMessagesListChatModel", + "GenericFakeChatModel", + "ParrotFakeChatModel", +) + +_dynamic_imports = { + "BaseLanguageModel": "base", + "LangSmithParams": "base", + "LanguageModelInput": "base", + "LanguageModelLike": "base", + "LanguageModelOutput": "base", + "get_tokenizer": "base", + "BaseChatModel": "chat_models", + "SimpleChatModel": "chat_models", + "FakeListLLM": "fake", + "FakeStreamingListLLM": "fake", + "FakeListChatModel": "fake_chat_models", + "FakeMessagesListChatModel": "fake_chat_models", + "GenericFakeChatModel": "fake_chat_models", + "ParrotFakeChatModel": "fake_chat_models", + "LLM": "llms", + "BaseLLM": "llms", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/language_models/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..cc2df103 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/language_models/__pycache__/_utils.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/_utils.cpython-312.pyc new file mode 100644 index 00000000..3ec5da1f Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/_utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/language_models/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..656a2a33 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/language_models/__pycache__/chat_models.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/chat_models.cpython-312.pyc new file mode 100644 index 00000000..d7157f6e Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/chat_models.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain_core/language_models/__pycache__/fake.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/fake.cpython-312.pyc
new file mode 100644
index 00000000..10047e08
Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/fake.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain_core/language_models/__pycache__/fake_chat_models.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/fake_chat_models.cpython-312.pyc
new file mode 100644
index 00000000..0e9921b6
Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/fake_chat_models.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain_core/language_models/__pycache__/llms.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/llms.cpython-312.pyc
new file mode 100644
index 00000000..dd575946
Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/language_models/__pycache__/llms.cpython-312.pyc differ
diff --git a/venv/Lib/site-packages/langchain_core/language_models/_utils.py b/venv/Lib/site-packages/langchain_core/language_models/_utils.py
new file mode 100644
index 00000000..f54d6972
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/language_models/_utils.py
@@ -0,0 +1,139 @@
+import re
+from collections.abc import Sequence
+from typing import Optional
+
+from langchain_core.messages import BaseMessage
+
+
+def _is_openai_data_block(block: dict) -> bool:
+    """Check if the block contains multimodal data in OpenAI Chat Completions format."""
+    if block.get("type") == "image_url":
+        if (
+            (set(block.keys()) <= {"type", "image_url", "detail"})
+            and (image_url := block.get("image_url"))
+            and isinstance(image_url, dict)
+        ):
+            url = image_url.get("url")
+            if isinstance(url, str):
+                return True
+
+    elif block.get("type") == "file":
+        if (file := block.get("file")) and isinstance(file, dict):
+            file_data = file.get("file_data")
+            if isinstance(file_data, str):
+                return True
+
+    elif block.get("type") == "input_audio":  # noqa: SIM102
+        if (input_audio := block.get("input_audio")) and isinstance(input_audio, dict):
+            audio_data = input_audio.get("data")
+            audio_format = input_audio.get("format")
+            if isinstance(audio_data, str) and isinstance(audio_format, str):
+                return True
+
+    else:
+        return False
+
+    return False
+
+
+def _parse_data_uri(uri: str) -> Optional[dict]:
+    """Parse a data URI into its components. If parsing fails, return None.
+
+    Example:
+
+        .. code-block:: python
+
+            data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
+            parsed = _parse_data_uri(data_uri)
+
+            assert parsed == {
+                "source_type": "base64",
+                "mime_type": "image/jpeg",
+                "data": "/9j/4AAQSkZJRg...",
+            }
+    """
+    regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
+    match = re.match(regex, uri)
+    if match is None:
+        return None
+    return {
+        "source_type": "base64",
+        "data": match.group("data"),
+        "mime_type": match.group("mime_type"),
+    }
+
+
+def _convert_openai_format_to_data_block(block: dict) -> dict:
+    """Convert OpenAI image content block to standard data content block.
+
+    If parsing fails, pass-through.
+
+    Args:
+        block: The OpenAI image content block to convert.
+
+    Returns:
+        The converted standard data content block.
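+
+    Example (illustrative; the base64 payload is elided):
+
+        .. code-block:: python
+
+            block = {
+                "type": "image_url",
+                "image_url": {"url": "data:image/jpeg;base64,/9j/4AAQ..."},
+            }
+            _convert_openai_format_to_data_block(block)
+            # -> {"type": "image", "source_type": "base64",
+            #     "mime_type": "image/jpeg", "data": "/9j/4AAQ..."}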
+ """ + if block["type"] == "image_url": + parsed = _parse_data_uri(block["image_url"]["url"]) + if parsed is not None: + parsed["type"] = "image" + return parsed + return block + + if block["type"] == "file": + parsed = _parse_data_uri(block["file"]["file_data"]) + if parsed is not None: + parsed["type"] = "file" + if filename := block["file"].get("filename"): + parsed["filename"] = filename + return parsed + return block + + if block["type"] == "input_audio": + data = block["input_audio"].get("data") + format = block["input_audio"].get("format") + if data and format: + return { + "type": "audio", + "source_type": "base64", + "data": data, + "mime_type": f"audio/{format}", + } + return block + + return block + + +def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]: + """Extend support for message formats. + + Chat models implement support for images in OpenAI Chat Completions format, as well + as other multimodal data as standard data blocks. This function extends support to + audio and file data in OpenAI Chat Completions format by converting them to standard + data blocks. + """ + formatted_messages = [] + for message in messages: + formatted_message = message + if isinstance(message.content, list): + for idx, block in enumerate(message.content): + if ( + isinstance(block, dict) + # Subset to (PDF) files and audio, as most relevant chat models + # support images in OAI format (and some may not yet support the + # standard data block format) + and block.get("type") in ("file", "input_audio") + and _is_openai_data_block(block) + ): + if formatted_message is message: + formatted_message = message.model_copy() + # Also shallow-copy content + formatted_message.content = list(formatted_message.content) + + formatted_message.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy + _convert_openai_format_to_data_block(block) + ) + formatted_messages.append(formatted_message) + + return formatted_messages diff --git a/venv/Lib/site-packages/langchain_core/language_models/base.py b/venv/Lib/site-packages/langchain_core/language_models/base.py new file mode 100644 index 00000000..a4d3442d --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/language_models/base.py @@ -0,0 +1,401 @@ +"""Base language models class.""" + +from __future__ import annotations + +import warnings +from abc import ABC, abstractmethod +from collections.abc import Mapping, Sequence +from functools import cache +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Optional, + TypeVar, + Union, +) + +from pydantic import BaseModel, ConfigDict, Field, field_validator +from typing_extensions import TypeAlias, TypedDict, override + +from langchain_core._api import deprecated +from langchain_core.caches import BaseCache +from langchain_core.callbacks import Callbacks +from langchain_core.messages import ( + AnyMessage, + BaseMessage, + MessageLikeRepresentation, + get_buffer_string, +) +from langchain_core.prompt_values import PromptValue +from langchain_core.runnables import Runnable, RunnableSerializable +from langchain_core.utils import get_pydantic_field_names + +if TYPE_CHECKING: + from langchain_core.outputs import LLMResult + + +class LangSmithParams(TypedDict, total=False): + """LangSmith parameters for tracing.""" + + ls_provider: str + """Provider of the model.""" + ls_model_name: str + """Name of the model.""" + ls_model_type: Literal["chat", "llm"] + """Type of the model. 
Should be 'chat' or 'llm'."""
+    ls_temperature: Optional[float]
+    """Temperature for generation."""
+    ls_max_tokens: Optional[int]
+    """Max tokens for generation."""
+    ls_stop: Optional[list[str]]
+    """Stop words for generation."""
+
+
+@cache  # Cache the tokenizer
+def get_tokenizer() -> Any:
+    """Get a GPT-2 tokenizer instance.
+
+    This function is cached to avoid re-loading the tokenizer
+    every time it is called.
+    """
+    try:
+        from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
+    except ImportError as e:
+        msg = (
+            "Could not import transformers python package. "
+            "This is needed in order to calculate get_token_ids. "
+            "Please install it with `pip install transformers`."
+        )
+        raise ImportError(msg) from e
+    # create a GPT-2 tokenizer instance
+    return GPT2TokenizerFast.from_pretrained("gpt2")
+
+
+def _get_token_ids_default_method(text: str) -> list[int]:
+    """Encode the text into token IDs."""
+    # get the cached tokenizer
+    tokenizer = get_tokenizer()
+
+    # tokenize the text using the GPT-2 tokenizer
+    return tokenizer.encode(text)
+
+
+LanguageModelInput = Union[PromptValue, str, Sequence[MessageLikeRepresentation]]
+LanguageModelOutput = Union[BaseMessage, str]
+LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
+LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", BaseMessage, str)
+
+
+def _get_verbosity() -> bool:
+    from langchain_core.globals import get_verbose
+
+    return get_verbose()
+
+
+class BaseLanguageModel(
+    RunnableSerializable[LanguageModelInput, LanguageModelOutputVar], ABC
+):
+    """Abstract base class for interfacing with language models.
+
+    All language model wrappers inherit from BaseLanguageModel.
+    """
+
+    cache: Union[BaseCache, bool, None] = Field(default=None, exclude=True)
+    """Whether to cache the response.
+
+    * If true, will use the global cache.
+    * If false, will not use a cache
+    * If None, will use the global cache if it's set, otherwise no cache.
+    * If instance of BaseCache, will use the provided cache.
+
+    Caching is not currently supported for streaming methods of models.
+    """
+    verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
+    """Whether to print out response text."""
+    callbacks: Callbacks = Field(default=None, exclude=True)
+    """Callbacks to add to the run trace."""
+    tags: Optional[list[str]] = Field(default=None, exclude=True)
+    """Tags to add to the run trace."""
+    metadata: Optional[dict[str, Any]] = Field(default=None, exclude=True)
+    """Metadata to add to the run trace."""
+    custom_get_token_ids: Optional[Callable[[str], list[int]]] = Field(
+        default=None, exclude=True
+    )
+    """Optional encoder to use for counting tokens."""
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )
+
+    @field_validator("verbose", mode="before")
+    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+        """If verbose is None, set it.
+
+        This allows users to pass in None as verbose to access the global setting.
+
+        Args:
+            verbose: The verbosity setting to use.
+
+        Returns:
+            The verbosity setting to use.
+        """
+        if verbose is None:
+            return _get_verbosity()
+        return verbose
+
+    @property
+    @override
+    def InputType(self) -> TypeAlias:
+        """Get the input type for this runnable."""
+        from langchain_core.prompt_values import (
+            ChatPromptValueConcrete,
+            StringPromptValue,
+        )
+
+        # This is a version of LanguageModelInput which replaces the abstract
+        # base class BaseMessage with a union of its subclasses, which makes
+        # for a much better schema.
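+        # (e.g. a raw string, a prompt value, or a list of concrete message
+        # types such as HumanMessage / AIMessage all validate as input)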
+ return Union[ + str, + Union[StringPromptValue, ChatPromptValueConcrete], + list[AnyMessage], + ] + + @abstractmethod + def generate_prompt( + self, + prompts: list[PromptValue], + stop: Optional[list[str]] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> LLMResult: + """Pass a sequence of prompts to the model and return model generations. + + This method should make use of batched calls for models that expose a batched + API. + + Use this method when you want to: + 1. take advantage of batched calls, + 2. need more output from the model than just the top generated value, + 3. are building chains that are agnostic to the underlying language model + type (e.g., pure text completion models vs chat models). + + Args: + prompts: List of PromptValues. A PromptValue is an object that can be + converted to match the format of any language model (string for pure + text generation models and BaseMessages for chat models). + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + callbacks: Callbacks to pass through. Used for executing additional + functionality, such as logging or streaming, throughout generation. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + An LLMResult, which contains a list of candidate Generations for each input + prompt and additional model provider-specific output. + """ + + @abstractmethod + async def agenerate_prompt( + self, + prompts: list[PromptValue], + stop: Optional[list[str]] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> LLMResult: + """Asynchronously pass a sequence of prompts and return model generations. + + This method should make use of batched calls for models that expose a batched + API. + + Use this method when you want to: + 1. take advantage of batched calls, + 2. need more output from the model than just the top generated value, + 3. are building chains that are agnostic to the underlying language model + type (e.g., pure text completion models vs chat models). + + Args: + prompts: List of PromptValues. A PromptValue is an object that can be + converted to match the format of any language model (string for pure + text generation models and BaseMessages for chat models). + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + callbacks: Callbacks to pass through. Used for executing additional + functionality, such as logging or streaming, throughout generation. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + An LLMResult, which contains a list of candidate Generations for each input + prompt and additional model provider-specific output. + """ + + def with_structured_output( + self, schema: Union[dict, type], **kwargs: Any + ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]: + """Not implemented on this class.""" + # Implement this on child class if there is a way of steering the model to + # generate responses that match a given schema. + raise NotImplementedError + + @deprecated("0.1.7", alternative="invoke", removal="1.0") + @abstractmethod + def predict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: + """Pass a single string input to the model and return a string. + + Use this method when passing in raw text. If you want to pass in specific + types of chat messages, use predict_messages. 
+ + Args: + text: String input to pass to the model. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + Top model prediction as a string. + """ + + @deprecated("0.1.7", alternative="invoke", removal="1.0") + @abstractmethod + def predict_messages( + self, + messages: list[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + """Pass a message sequence to the model and return a message. + + Use this method when passing in chat messages. If you want to pass in raw text, + use predict. + + Args: + messages: A sequence of chat messages corresponding to a single model input. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + Top model prediction as a message. + """ + + @deprecated("0.1.7", alternative="ainvoke", removal="1.0") + @abstractmethod + async def apredict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: + """Asynchronously pass a string to the model and return a string. + + Use this method when calling pure text generation models and only the top + candidate generation is needed. + + Args: + text: String input to pass to the model. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + Top model prediction as a string. + """ + + @deprecated("0.1.7", alternative="ainvoke", removal="1.0") + @abstractmethod + async def apredict_messages( + self, + messages: list[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + """Asynchronously pass messages to the model and return a message. + + Use this method when calling chat models and only the top + candidate generation is needed. + + Args: + messages: A sequence of chat messages corresponding to a single model input. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + Top model prediction as a message. + """ + + @property + def _identifying_params(self) -> Mapping[str, Any]: + """Get the identifying parameters.""" + return self.lc_attributes + + def get_token_ids(self, text: str) -> list[int]: + """Return the ordered ids of the tokens in a text. + + Args: + text: The string input to tokenize. + + Returns: + A list of ids corresponding to the tokens in the text, in order they occur + in the text. + """ + if self.custom_get_token_ids is not None: + return self.custom_get_token_ids(text) + return _get_token_ids_default_method(text) + + def get_num_tokens(self, text: str) -> int: + """Get the number of tokens present in the text. + + Useful for checking if an input fits in a model's context window. + + Args: + text: The string input to tokenize. + + Returns: + The integer number of tokens in the text. 
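+
+        Example (an illustrative sketch; assumes the default GPT-2 tokenizer
+        from ``transformers`` is installed and no ``custom_get_token_ids``
+        override is set):
+
+        .. code-block:: python
+
+            num_tokens = model.get_num_tokens("hello world")
+            assert isinstance(num_tokens, int)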
+ """ + return len(self.get_token_ids(text)) + + def get_num_tokens_from_messages( + self, + messages: list[BaseMessage], + tools: Optional[Sequence] = None, + ) -> int: + """Get the number of tokens in the messages. + + Useful for checking if an input fits in a model's context window. + + **Note**: the base implementation of get_num_tokens_from_messages ignores + tool schemas. + + Args: + messages: The message inputs to tokenize. + tools: If provided, sequence of dict, BaseModel, function, or BaseTools + to be converted to tool schemas. + + Returns: + The sum of the number of tokens across the messages. + """ + if tools is not None: + warnings.warn( + "Counting tokens in tool schemas is not yet supported. Ignoring tools.", + stacklevel=2, + ) + return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages) + + @classmethod + def _all_required_field_names(cls) -> set: + """DEPRECATED: Kept for backwards compatibility. + + Use get_pydantic_field_names. + """ + return get_pydantic_field_names(cls) diff --git a/venv/Lib/site-packages/langchain_core/language_models/chat_models.py b/venv/Lib/site-packages/langchain_core/language_models/chat_models.py new file mode 100644 index 00000000..71227177 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/language_models/chat_models.py @@ -0,0 +1,1557 @@ +"""Chat models for conversational AI.""" + +from __future__ import annotations + +import asyncio +import inspect +import json +import typing +import warnings +from abc import ABC, abstractmethod +from collections.abc import AsyncIterator, Iterator, Sequence +from functools import cached_property +from operator import itemgetter +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Optional, + Union, + cast, +) + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + model_validator, +) +from typing_extensions import override + +from langchain_core._api import deprecated +from langchain_core.caches import BaseCache +from langchain_core.callbacks import ( + AsyncCallbackManager, + AsyncCallbackManagerForLLMRun, + BaseCallbackManager, + CallbackManager, + CallbackManagerForLLMRun, + Callbacks, +) +from langchain_core.globals import get_llm_cache +from langchain_core.language_models._utils import _normalize_messages +from langchain_core.language_models.base import ( + BaseLanguageModel, + LangSmithParams, + LanguageModelInput, +) +from langchain_core.load import dumpd, dumps +from langchain_core.messages import ( + AIMessage, + AnyMessage, + BaseMessage, + BaseMessageChunk, + HumanMessage, + convert_to_messages, + convert_to_openai_image_block, + is_data_content_block, + message_chunk_to_message, +) +from langchain_core.messages.ai import _LC_ID_PREFIX +from langchain_core.outputs import ( + ChatGeneration, + ChatGenerationChunk, + ChatResult, + LLMResult, + RunInfo, +) +from langchain_core.prompt_values import ChatPromptValue, PromptValue, StringPromptValue +from langchain_core.rate_limiters import BaseRateLimiter +from langchain_core.runnables import RunnableMap, RunnablePassthrough +from langchain_core.runnables.config import ensure_config, run_in_executor +from langchain_core.tracers._streaming import _StreamingCallbackHandler +from langchain_core.utils.function_calling import ( + convert_to_json_schema, + convert_to_openai_tool, +) +from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass + +if TYPE_CHECKING: + import uuid + + from langchain_core.output_parsers.base import OutputParserLike + from langchain_core.runnables import Runnable, 
RunnableConfig + from langchain_core.tools import BaseTool + + +def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]: + if hasattr(error, "response"): + response = error.response + metadata: dict = {} + if hasattr(response, "headers"): + try: + metadata["headers"] = dict(response.headers) + except Exception: + metadata["headers"] = None + if hasattr(response, "status_code"): + metadata["status_code"] = response.status_code + if hasattr(error, "request_id"): + metadata["request_id"] = error.request_id + generations = [ + ChatGeneration(message=AIMessage(content="", response_metadata=metadata)) + ] + else: + generations = [] + + return generations + + +def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]: + """Format messages for tracing in on_chat_model_start. + + For backward compatibility, we update image content blocks to OpenAI Chat + Completions format. + + Args: + messages: List of messages to format. + + Returns: + List of messages formatted for tracing. + """ + messages_to_trace = [] + for message in messages: + message_to_trace = message + if isinstance(message.content, list): + for idx, block in enumerate(message.content): + if ( + isinstance(block, dict) + and block.get("type") == "image" + and is_data_content_block(block) + ): + if message_to_trace is message: + message_to_trace = message.model_copy() + # Also shallow-copy content + message_to_trace.content = list(message_to_trace.content) + + message_to_trace.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy + convert_to_openai_image_block(block) + ) + messages_to_trace.append(message_to_trace) + + return messages_to_trace + + +def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult: + """Generate from a stream. + + Args: + stream: Iterator of ChatGenerationChunk. + + Returns: + ChatResult: Chat result. + """ + generation = next(stream, None) + if generation: + generation += list(stream) + if generation is None: + msg = "No generations found in stream." + raise ValueError(msg) + return ChatResult( + generations=[ + ChatGeneration( + message=message_chunk_to_message(generation.message), + generation_info=generation.generation_info, + ) + ] + ) + + +async def agenerate_from_stream( + stream: AsyncIterator[ChatGenerationChunk], +) -> ChatResult: + """Async generate from a stream. + + Args: + stream: Iterator of ChatGenerationChunk. + + Returns: + ChatResult: Chat result. + """ + chunks = [chunk async for chunk in stream] + return await run_in_executor(None, generate_from_stream, iter(chunks)) + + +def _format_ls_structured_output(ls_structured_output_format: Optional[dict]) -> dict: + if ls_structured_output_format: + try: + ls_structured_output_format_dict = { + "ls_structured_output_format": { + "kwargs": ls_structured_output_format.get("kwargs", {}), + "schema": convert_to_json_schema( + ls_structured_output_format["schema"] + ), + } + } + except ValueError: + ls_structured_output_format_dict = {} + else: + ls_structured_output_format_dict = {} + + return ls_structured_output_format_dict + + +class BaseChatModel(BaseLanguageModel[BaseMessage], ABC): + """Base class for chat models. + + Key imperative methods: + Methods that actually call the underlying model. 
+ + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | Method | Input | Output | Description | + +===========================+================================================================+=====================================================================+==================================================================================================+ + | `invoke` | str | list[dict | tuple | BaseMessage] | PromptValue | BaseMessage | A single chat model call. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `ainvoke` | ''' | BaseMessage | Defaults to running invoke in an async executor. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `stream` | ''' | Iterator[BaseMessageChunk] | Defaults to yielding output of invoke. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `astream` | ''' | AsyncIterator[BaseMessageChunk] | Defaults to yielding output of ainvoke. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `astream_events` | ''' | AsyncIterator[StreamEvent] | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `batch` | list['''] | list[BaseMessage] | Defaults to running invoke in concurrent threads. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `abatch` | list['''] | list[BaseMessage] | Defaults to running ainvoke in concurrent threads. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `batch_as_completed` | list['''] | Iterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running invoke in concurrent threads. 
| + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `abatch_as_completed` | list['''] | AsyncIterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running ainvoke in concurrent threads. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + + This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation. + + Key declarative methods: + Methods for creating another Runnable using the ChatModel. + + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | Method | Description | + +==================================+===========================================================================================================+ + | `bind_tools` | Create ChatModel that can call tools. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `with_structured_output` | Create wrapper that structures model output using schema. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `with_retry` | Create wrapper that retries model calls on failure. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `with_fallbacks` | Create wrapper that falls back to other models on failure. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `configurable_fields` | Specify init args of the model that can be configured at runtime via the RunnableConfig. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + + This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation. + + Creating custom chat model: + Custom chat model implementations should inherit from this class. + Please reference the table below for information about which + methods and properties are required or optional for implementations. 
+ + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | Method/Property | Description | Required/Optional | + +==================================+====================================================================+===================+ + | `_generate` | Use to generate a chat result from a prompt | Required | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | `_stream` | Use to implement streaming | Optional | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | `_agenerate` | Use to implement a native async method | Optional | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | `_astream` | Use to implement async version of `_stream` | Optional | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + + Follow the guide for more information on how to implement a custom Chat Model: + [Guide](https://python.langchain.com/docs/how_to/custom_chat_model/). + + """ # noqa: E501 + + callback_manager: Optional[BaseCallbackManager] = deprecated( + name="callback_manager", since="0.1.7", removal="1.0", alternative="callbacks" + )( + Field( + default=None, + exclude=True, + description="Callback manager to add to the run trace.", + ) + ) + + rate_limiter: Optional[BaseRateLimiter] = Field(default=None, exclude=True) + "An optional rate limiter to use for limiting the number of requests." + + disable_streaming: Union[bool, Literal["tool_calling"]] = False + """Whether to disable streaming for this model. + + If streaming is bypassed, then ``stream()``/``astream()``/``astream_events()`` will + defer to ``invoke()``/``ainvoke()``. + + - If True, will always bypass streaming case. + - If "tool_calling", will bypass streaming case only when the model is called + with a ``tools`` keyword argument. + - If False (default), will always use streaming case if available. + """ + + @model_validator(mode="before") + @classmethod + def raise_deprecation(cls, values: dict) -> Any: + """Raise deprecation warning if callback_manager is used. + + Args: + values (Dict): Values to validate. + + Returns: + Dict: Validated values. + + Raises: + DeprecationWarning: If callback_manager is used. + """ + if values.get("callback_manager") is not None: + warnings.warn( + "callback_manager is deprecated. 
Please use callbacks instead.", + DeprecationWarning, + stacklevel=5, + ) + values["callbacks"] = values.pop("callback_manager", None) + return values + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @cached_property + def _serialized(self) -> dict[str, Any]: + return dumpd(self) + + # --- Runnable methods --- + + @property + @override + def OutputType(self) -> Any: + """Get the output type for this runnable.""" + return AnyMessage + + def _convert_input(self, input: LanguageModelInput) -> PromptValue: + if isinstance(input, PromptValue): + return input + if isinstance(input, str): + return StringPromptValue(text=input) + if isinstance(input, Sequence): + return ChatPromptValue(messages=convert_to_messages(input)) + msg = ( + f"Invalid input type {type(input)}. " + "Must be a PromptValue, str, or list of BaseMessages." + ) + raise ValueError(msg) # noqa: TRY004 + + @override + def invoke( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + config = ensure_config(config) + return cast( + "ChatGeneration", + self.generate_prompt( + [self._convert_input(input)], + stop=stop, + callbacks=config.get("callbacks"), + tags=config.get("tags"), + metadata=config.get("metadata"), + run_name=config.get("run_name"), + run_id=config.pop("run_id", None), + **kwargs, + ).generations[0][0], + ).message + + @override + async def ainvoke( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + config = ensure_config(config) + llm_result = await self.agenerate_prompt( + [self._convert_input(input)], + stop=stop, + callbacks=config.get("callbacks"), + tags=config.get("tags"), + metadata=config.get("metadata"), + run_name=config.get("run_name"), + run_id=config.pop("run_id", None), + **kwargs, + ) + return cast("ChatGeneration", llm_result.generations[0][0]).message + + def _should_stream( + self, + *, + async_api: bool, + run_manager: Optional[ + Union[CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun] + ] = None, + **kwargs: Any, + ) -> bool: + """Determine if a given model call should hit the streaming API.""" + sync_not_implemented = type(self)._stream == BaseChatModel._stream + async_not_implemented = type(self)._astream == BaseChatModel._astream + + # Check if streaming is implemented. + if (not async_api) and sync_not_implemented: + return False + # Note, since async falls back to sync we check both here. + if async_api and async_not_implemented and sync_not_implemented: + return False + + # Check if streaming has been disabled on this instance. + if self.disable_streaming is True: + return False + # We assume tools are passed in via "tools" kwarg in all models. + if self.disable_streaming == "tool_calling" and kwargs.get("tools"): + return False + + # Check if a runtime streaming flag has been passed in. + if "stream" in kwargs: + return kwargs["stream"] + + # Check if any streaming callback handlers have been passed in. 
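+        # (Illustration: astream_events() / astream_log() attach a
+        # streaming-aware handler to the run manager, so calls made through
+        # those APIs take the streaming path even without an explicit
+        # stream=True kwarg.)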
+ handlers = run_manager.handlers if run_manager else [] + return any(isinstance(h, _StreamingCallbackHandler) for h in handlers) + + @override + def stream( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> Iterator[BaseMessageChunk]: + if not self._should_stream(async_api=False, **{**kwargs, "stream": True}): + # model doesn't implement streaming, so use default implementation + yield cast( + "BaseMessageChunk", + self.invoke(input, config=config, stop=stop, **kwargs), + ) + else: + config = ensure_config(config) + messages = self._convert_input(input).to_messages() + ls_structured_output_format = kwargs.pop( + "ls_structured_output_format", None + ) or kwargs.pop("structured_output_format", None) + ls_structured_output_format_dict = _format_ls_structured_output( + ls_structured_output_format + ) + + params = self._get_invocation_params(stop=stop, **kwargs) + options = {"stop": stop, **kwargs, **ls_structured_output_format_dict} + inheritable_metadata = { + **(config.get("metadata") or {}), + **self._get_ls_params(stop=stop, **kwargs), + } + callback_manager = CallbackManager.configure( + config.get("callbacks"), + self.callbacks, + self.verbose, + config.get("tags"), + self.tags, + inheritable_metadata, + self.metadata, + ) + (run_manager,) = callback_manager.on_chat_model_start( + self._serialized, + [_format_for_tracing(messages)], + invocation_params=params, + options=options, + name=config.get("run_name"), + run_id=config.pop("run_id", None), + batch_size=1, + ) + generation: Optional[ChatGenerationChunk] = None + + if self.rate_limiter: + self.rate_limiter.acquire(blocking=True) + + try: + input_messages = _normalize_messages(messages) + for chunk in self._stream(input_messages, stop=stop, **kwargs): + if chunk.message.id is None: + chunk.message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}" + chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk) + run_manager.on_llm_new_token( + cast("str", chunk.message.content), chunk=chunk + ) + yield chunk.message + if generation is None: + generation = chunk + else: + generation += chunk + except BaseException as e: + generations_with_error_metadata = _generate_response_from_error(e) + if generation: + generations = [[generation], generations_with_error_metadata] + else: + generations = [generations_with_error_metadata] + run_manager.on_llm_error(e, response=LLMResult(generations=generations)) # type: ignore[arg-type] + raise + + if generation is None: + err = ValueError("No generation chunks were returned") + run_manager.on_llm_error(err, response=LLMResult(generations=[])) + raise err + + run_manager.on_llm_end(LLMResult(generations=[[generation]])) + + @override + async def astream( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> AsyncIterator[BaseMessageChunk]: + if not self._should_stream(async_api=True, **{**kwargs, "stream": True}): + # No async or sync stream is implemented, so fall back to ainvoke + yield cast( + "BaseMessageChunk", + await self.ainvoke(input, config=config, stop=stop, **kwargs), + ) + return + + config = ensure_config(config) + messages = self._convert_input(input).to_messages() + + ls_structured_output_format = kwargs.pop( + "ls_structured_output_format", None + ) or kwargs.pop("structured_output_format", None) + ls_structured_output_format_dict = _format_ls_structured_output( + ls_structured_output_format + ) + + 
params = self._get_invocation_params(stop=stop, **kwargs) + options = {"stop": stop, **kwargs, **ls_structured_output_format_dict} + inheritable_metadata = { + **(config.get("metadata") or {}), + **self._get_ls_params(stop=stop, **kwargs), + } + callback_manager = AsyncCallbackManager.configure( + config.get("callbacks"), + self.callbacks, + self.verbose, + config.get("tags"), + self.tags, + inheritable_metadata, + self.metadata, + ) + (run_manager,) = await callback_manager.on_chat_model_start( + self._serialized, + [_format_for_tracing(messages)], + invocation_params=params, + options=options, + name=config.get("run_name"), + run_id=config.pop("run_id", None), + batch_size=1, + ) + + if self.rate_limiter: + await self.rate_limiter.aacquire(blocking=True) + + generation: Optional[ChatGenerationChunk] = None + try: + input_messages = _normalize_messages(messages) + async for chunk in self._astream( + input_messages, + stop=stop, + **kwargs, + ): + if chunk.message.id is None: + chunk.message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}" + chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk) + await run_manager.on_llm_new_token( + cast("str", chunk.message.content), chunk=chunk + ) + yield chunk.message + if generation is None: + generation = chunk + else: + generation += chunk + except BaseException as e: + generations_with_error_metadata = _generate_response_from_error(e) + if generation: + generations = [[generation], generations_with_error_metadata] + else: + generations = [generations_with_error_metadata] + await run_manager.on_llm_error( + e, + response=LLMResult(generations=generations), # type: ignore[arg-type] + ) + raise + + if generation is None: + err = ValueError("No generation chunks were returned") + await run_manager.on_llm_error(err, response=LLMResult(generations=[])) + raise err + + await run_manager.on_llm_end( + LLMResult(generations=[[generation]]), + ) + + # --- Custom methods --- + + def _combine_llm_outputs(self, llm_outputs: list[Optional[dict]]) -> dict: # noqa: ARG002 + return {} + + def _get_invocation_params( + self, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> dict: + params = self.dict() + params["stop"] = stop + return {**params, **kwargs} + + def _get_ls_params( + self, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> LangSmithParams: + """Get standard params for tracing.""" + # get default provider from class name + default_provider = self.__class__.__name__ + if default_provider.startswith("Chat"): + default_provider = default_provider[4:].lower() + elif default_provider.endswith("Chat"): + default_provider = default_provider[:-4] + default_provider = default_provider.lower() + + ls_params = LangSmithParams(ls_provider=default_provider, ls_model_type="chat") + if stop: + ls_params["ls_stop"] = stop + + # model + if hasattr(self, "model") and isinstance(self.model, str): + ls_params["ls_model_name"] = self.model + elif hasattr(self, "model_name") and isinstance(self.model_name, str): + ls_params["ls_model_name"] = self.model_name + + # temperature + if "temperature" in kwargs and isinstance(kwargs["temperature"], float): + ls_params["ls_temperature"] = kwargs["temperature"] + elif hasattr(self, "temperature") and isinstance(self.temperature, float): + ls_params["ls_temperature"] = self.temperature + + # max_tokens + if "max_tokens" in kwargs and isinstance(kwargs["max_tokens"], int): + ls_params["ls_max_tokens"] = kwargs["max_tokens"] + elif hasattr(self, "max_tokens") and isinstance(self.max_tokens, int): + 
ls_params["ls_max_tokens"] = self.max_tokens + + return ls_params + + def _get_llm_string(self, stop: Optional[list[str]] = None, **kwargs: Any) -> str: + if self.is_lc_serializable(): + params = {**kwargs, "stop": stop} + param_string = str(sorted(params.items())) + # This code is not super efficient as it goes back and forth between + # json and dict. + serialized_repr = self._serialized + _cleanup_llm_representation(serialized_repr, 1) + llm_string = json.dumps(serialized_repr, sort_keys=True) + return llm_string + "---" + param_string + params = self._get_invocation_params(stop=stop, **kwargs) + params = {**params, **kwargs} + return str(sorted(params.items())) + + def generate( + self, + messages: list[list[BaseMessage]], + stop: Optional[list[str]] = None, + callbacks: Callbacks = None, + *, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + run_name: Optional[str] = None, + run_id: Optional[uuid.UUID] = None, + **kwargs: Any, + ) -> LLMResult: + """Pass a sequence of prompts to the model and return model generations. + + This method should make use of batched calls for models that expose a batched + API. + + Use this method when you want to: + 1. take advantage of batched calls, + 2. need more output from the model than just the top generated value, + 3. are building chains that are agnostic to the underlying language model + type (e.g., pure text completion models vs chat models). + + Args: + messages: List of list of messages. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + callbacks: Callbacks to pass through. Used for executing additional + functionality, such as logging or streaming, throughout generation. + tags: The tags to apply. + metadata: The metadata to apply. + run_name: The name of the run. + run_id: The ID of the run. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + An LLMResult, which contains a list of candidate Generations for each input + prompt and additional model provider-specific output. 
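+
+        Example (an illustrative sketch; ``MyChatModel`` stands in for any
+        concrete ``BaseChatModel`` subclass):
+
+        .. code-block:: python
+
+            from langchain_core.messages import HumanMessage
+
+            llm = MyChatModel()
+            result = llm.generate([[HumanMessage(content="Hello!")]])
+            print(result.generations[0][0].message.content)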
+ """ + ls_structured_output_format = kwargs.pop( + "ls_structured_output_format", None + ) or kwargs.pop("structured_output_format", None) + ls_structured_output_format_dict = _format_ls_structured_output( + ls_structured_output_format + ) + + params = self._get_invocation_params(stop=stop, **kwargs) + options = {"stop": stop, **ls_structured_output_format_dict} + inheritable_metadata = { + **(metadata or {}), + **self._get_ls_params(stop=stop, **kwargs), + } + + callback_manager = CallbackManager.configure( + callbacks, + self.callbacks, + self.verbose, + tags, + self.tags, + inheritable_metadata, + self.metadata, + ) + messages_to_trace = [ + _format_for_tracing(message_list) for message_list in messages + ] + run_managers = callback_manager.on_chat_model_start( + self._serialized, + messages_to_trace, + invocation_params=params, + options=options, + name=run_name, + run_id=run_id, + batch_size=len(messages), + ) + results = [] + input_messages = [ + _normalize_messages(message_list) for message_list in messages + ] + for i, m in enumerate(input_messages): + try: + results.append( + self._generate_with_cache( + m, + stop=stop, + run_manager=run_managers[i] if run_managers else None, + **kwargs, + ) + ) + except BaseException as e: + if run_managers: + generations_with_error_metadata = _generate_response_from_error(e) + run_managers[i].on_llm_error( + e, + response=LLMResult( + generations=[generations_with_error_metadata] # type: ignore[list-item] + ), + ) + raise + flattened_outputs = [ + LLMResult(generations=[res.generations], llm_output=res.llm_output) # type: ignore[list-item] + for res in results + ] + llm_output = self._combine_llm_outputs([res.llm_output for res in results]) + generations = [res.generations for res in results] + output = LLMResult(generations=generations, llm_output=llm_output) # type: ignore[arg-type] + if run_managers: + run_infos = [] + for manager, flattened_output in zip(run_managers, flattened_outputs): + manager.on_llm_end(flattened_output) + run_infos.append(RunInfo(run_id=manager.run_id)) + output.run = run_infos + return output + + async def agenerate( + self, + messages: list[list[BaseMessage]], + stop: Optional[list[str]] = None, + callbacks: Callbacks = None, + *, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + run_name: Optional[str] = None, + run_id: Optional[uuid.UUID] = None, + **kwargs: Any, + ) -> LLMResult: + """Asynchronously pass a sequence of prompts to a model and return generations. + + This method should make use of batched calls for models that expose a batched + API. + + Use this method when you want to: + 1. take advantage of batched calls, + 2. need more output from the model than just the top generated value, + 3. are building chains that are agnostic to the underlying language model + type (e.g., pure text completion models vs chat models). + + Args: + messages: List of list of messages. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + callbacks: Callbacks to pass through. Used for executing additional + functionality, such as logging or streaming, throughout generation. + tags: The tags to apply. + metadata: The metadata to apply. + run_name: The name of the run. + run_id: The ID of the run. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. 
+ + Returns: + An LLMResult, which contains a list of candidate Generations for each input + prompt and additional model provider-specific output. + """ + ls_structured_output_format = kwargs.pop( + "ls_structured_output_format", None + ) or kwargs.pop("structured_output_format", None) + ls_structured_output_format_dict = _format_ls_structured_output( + ls_structured_output_format + ) + + params = self._get_invocation_params(stop=stop, **kwargs) + options = {"stop": stop, **ls_structured_output_format_dict} + inheritable_metadata = { + **(metadata or {}), + **self._get_ls_params(stop=stop, **kwargs), + } + + callback_manager = AsyncCallbackManager.configure( + callbacks, + self.callbacks, + self.verbose, + tags, + self.tags, + inheritable_metadata, + self.metadata, + ) + + messages_to_trace = [ + _format_for_tracing(message_list) for message_list in messages + ] + run_managers = await callback_manager.on_chat_model_start( + self._serialized, + messages_to_trace, + invocation_params=params, + options=options, + name=run_name, + batch_size=len(messages), + run_id=run_id, + ) + + input_messages = [ + _normalize_messages(message_list) for message_list in messages + ] + results = await asyncio.gather( + *[ + self._agenerate_with_cache( + m, + stop=stop, + run_manager=run_managers[i] if run_managers else None, + **kwargs, + ) + for i, m in enumerate(input_messages) + ], + return_exceptions=True, + ) + exceptions = [] + for i, res in enumerate(results): + if isinstance(res, BaseException): + if run_managers: + generations_with_error_metadata = _generate_response_from_error(res) + await run_managers[i].on_llm_error( + res, + response=LLMResult( + generations=[generations_with_error_metadata] # type: ignore[list-item] + ), + ) + exceptions.append(res) + if exceptions: + if run_managers: + await asyncio.gather( + *[ + run_manager.on_llm_end( + LLMResult( + generations=[res.generations], # type: ignore[list-item, union-attr] + llm_output=res.llm_output, # type: ignore[union-attr] + ) + ) + for run_manager, res in zip(run_managers, results) + if not isinstance(res, Exception) + ] + ) + raise exceptions[0] + flattened_outputs = [ + LLMResult(generations=[res.generations], llm_output=res.llm_output) # type: ignore[list-item, union-attr] + for res in results + ] + llm_output = self._combine_llm_outputs([res.llm_output for res in results]) # type: ignore[union-attr] + generations = [res.generations for res in results] # type: ignore[union-attr] + output = LLMResult(generations=generations, llm_output=llm_output) # type: ignore[arg-type] + await asyncio.gather( + *[ + run_manager.on_llm_end(flattened_output) + for run_manager, flattened_output in zip( + run_managers, flattened_outputs + ) + ] + ) + if run_managers: + output.run = [ + RunInfo(run_id=run_manager.run_id) for run_manager in run_managers + ] + return output + + @override + def generate_prompt( + self, + prompts: list[PromptValue], + stop: Optional[list[str]] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> LLMResult: + prompt_messages = [p.to_messages() for p in prompts] + return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs) + + @override + async def agenerate_prompt( + self, + prompts: list[PromptValue], + stop: Optional[list[str]] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> LLMResult: + prompt_messages = [p.to_messages() for p in prompts] + return await self.agenerate( + prompt_messages, stop=stop, callbacks=callbacks, **kwargs + ) + + def _generate_with_cache( + self, + messages: 
list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + llm_cache = self.cache if isinstance(self.cache, BaseCache) else get_llm_cache() + # We should check the cache unless it's explicitly set to False + # A None cache means we should use the default global cache + # if it's configured. + check_cache = self.cache or self.cache is None + if check_cache: + if llm_cache: + llm_string = self._get_llm_string(stop=stop, **kwargs) + prompt = dumps(messages) + cache_val = llm_cache.lookup(prompt, llm_string) + if isinstance(cache_val, list): + return ChatResult(generations=cache_val) + elif self.cache is None: + pass + else: + msg = "Asked to cache, but no cache found at `langchain.cache`." + raise ValueError(msg) + + # Apply the rate limiter after checking the cache, since + # we usually don't want to rate limit cache lookups, but + # we do want to rate limit API requests. + if self.rate_limiter: + self.rate_limiter.acquire(blocking=True) + + # If stream is not explicitly set, check if implicitly requested by + # astream_events() or astream_log(). Bail out if _stream not implemented + if self._should_stream( + async_api=False, + run_manager=run_manager, + **kwargs, + ): + chunks: list[ChatGenerationChunk] = [] + for chunk in self._stream(messages, stop=stop, **kwargs): + chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk) + if run_manager: + if chunk.message.id is None: + chunk.message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}" + run_manager.on_llm_new_token( + cast("str", chunk.message.content), chunk=chunk + ) + chunks.append(chunk) + result = generate_from_stream(iter(chunks)) + elif inspect.signature(self._generate).parameters.get("run_manager"): + result = self._generate( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + else: + result = self._generate(messages, stop=stop, **kwargs) + + # Add response metadata to each generation + for idx, generation in enumerate(result.generations): + if run_manager and generation.message.id is None: + generation.message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}-{idx}" + generation.message.response_metadata = _gen_info_and_msg_metadata( + generation + ) + if len(result.generations) == 1 and result.llm_output is not None: + result.generations[0].message.response_metadata = { + **result.llm_output, + **result.generations[0].message.response_metadata, + } + if check_cache and llm_cache: + llm_cache.update(prompt, llm_string, result.generations) + return result + + async def _agenerate_with_cache( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + llm_cache = self.cache if isinstance(self.cache, BaseCache) else get_llm_cache() + # We should check the cache unless it's explicitly set to False + # A None cache means we should use the default global cache + # if it's configured. + check_cache = self.cache or self.cache is None + if check_cache: + if llm_cache: + llm_string = self._get_llm_string(stop=stop, **kwargs) + prompt = dumps(messages) + cache_val = await llm_cache.alookup(prompt, llm_string) + if isinstance(cache_val, list): + return ChatResult(generations=cache_val) + elif self.cache is None: + pass + else: + msg = "Asked to cache, but no cache found at `langchain.cache`." 
+ raise ValueError(msg) + + # Apply the rate limiter after checking the cache, since + # we usually don't want to rate limit cache lookups, but + # we do want to rate limit API requests. + if self.rate_limiter: + await self.rate_limiter.aacquire(blocking=True) + + # If stream is not explicitly set, check if implicitly requested by + # astream_events() or astream_log(). Bail out if _astream not implemented + if self._should_stream( + async_api=True, + run_manager=run_manager, + **kwargs, + ): + chunks: list[ChatGenerationChunk] = [] + async for chunk in self._astream(messages, stop=stop, **kwargs): + chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk) + if run_manager: + if chunk.message.id is None: + chunk.message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}" + await run_manager.on_llm_new_token( + cast("str", chunk.message.content), chunk=chunk + ) + chunks.append(chunk) + result = generate_from_stream(iter(chunks)) + elif inspect.signature(self._agenerate).parameters.get("run_manager"): + result = await self._agenerate( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + else: + result = await self._agenerate(messages, stop=stop, **kwargs) + + # Add response metadata to each generation + for idx, generation in enumerate(result.generations): + if run_manager and generation.message.id is None: + generation.message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}-{idx}" + generation.message.response_metadata = _gen_info_and_msg_metadata( + generation + ) + if len(result.generations) == 1 and result.llm_output is not None: + result.generations[0].message.response_metadata = { + **result.llm_output, + **result.generations[0].message.response_metadata, + } + if check_cache and llm_cache: + await llm_cache.aupdate(prompt, llm_string, result.generations) + return result + + @abstractmethod + def _generate( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + """Top Level call.""" + + async def _agenerate( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + """Top Level call.""" + return await run_in_executor( + None, + self._generate, + messages, + stop, + run_manager.get_sync() if run_manager else None, + **kwargs, + ) + + def _stream( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + raise NotImplementedError + + async def _astream( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[ChatGenerationChunk]: + iterator = await run_in_executor( + None, + self._stream, + messages, + stop, + run_manager.get_sync() if run_manager else None, + **kwargs, + ) + done = object() + while True: + item = await run_in_executor( + None, + next, + iterator, + done, + ) + if item is done: + break + yield item # type: ignore[misc] + + @deprecated("0.1.7", alternative="invoke", removal="1.0") + def __call__( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> BaseMessage: + """Call the model. + + Args: + messages: List of messages. + stop: Stop words to use when generating. 
Model output is cut off at the + first occurrence of any of these substrings. + callbacks: Callbacks to pass through. Used for executing additional + functionality, such as logging or streaming, throughout generation. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + The model output message. + """ + generation = self.generate( + [messages], stop=stop, callbacks=callbacks, **kwargs + ).generations[0][0] + if isinstance(generation, ChatGeneration): + return generation.message + msg = "Unexpected generation type" + raise ValueError(msg) # noqa: TRY004 + + async def _call_async( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> BaseMessage: + result = await self.agenerate( + [messages], stop=stop, callbacks=callbacks, **kwargs + ) + generation = result.generations[0][0] + if isinstance(generation, ChatGeneration): + return generation.message + msg = "Unexpected generation type" + raise ValueError(msg) # noqa: TRY004 + + @deprecated("0.1.7", alternative="invoke", removal="1.0") + def call_as_llm( + self, message: str, stop: Optional[list[str]] = None, **kwargs: Any + ) -> str: + """Call the model. + + Args: + message: The input message. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + The model output string. + """ + return self.predict(message, stop=stop, **kwargs) + + @deprecated("0.1.7", alternative="invoke", removal="1.0") + @override + def predict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: + """Predict the next message. + + Args: + text: The input message. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + The predicted output string. + """ + _stop = None if stop is None else list(stop) + result = self([HumanMessage(content=text)], stop=_stop, **kwargs) + if isinstance(result.content, str): + return result.content + msg = "Cannot use predict when output is not a string." + raise ValueError(msg) # noqa: TRY004 + + @deprecated("0.1.7", alternative="invoke", removal="1.0") + @override + def predict_messages( + self, + messages: list[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + _stop = None if stop is None else list(stop) + return self(messages, stop=_stop, **kwargs) + + @deprecated("0.1.7", alternative="ainvoke", removal="1.0") + @override + async def apredict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: + _stop = None if stop is None else list(stop) + result = await self._call_async( + [HumanMessage(content=text)], stop=_stop, **kwargs + ) + if isinstance(result.content, str): + return result.content + msg = "Cannot use predict when output is not a string." 
+ raise ValueError(msg) # noqa: TRY004 + + @deprecated("0.1.7", alternative="ainvoke", removal="1.0") + @override + async def apredict_messages( + self, + messages: list[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + _stop = None if stop is None else list(stop) + return await self._call_async(messages, stop=_stop, **kwargs) + + @property + @abstractmethod + def _llm_type(self) -> str: + """Return type of chat model.""" + + @override + def dict(self, **kwargs: Any) -> dict: + """Return a dictionary of the LLM.""" + starter_dict = dict(self._identifying_params) + starter_dict["_type"] = self._llm_type + return starter_dict + + def bind_tools( + self, + tools: Sequence[ + Union[typing.Dict[str, Any], type, Callable, BaseTool] # noqa: UP006 + ], + *, + tool_choice: Optional[Union[str]] = None, + **kwargs: Any, + ) -> Runnable[LanguageModelInput, BaseMessage]: + """Bind tools to the model. + + Args: + tools: Sequence of tools to bind to the model. + tool_choice: The tool to use. If "any" then any tool can be used. + + Returns: + A Runnable that returns a message. + """ + raise NotImplementedError + + def with_structured_output( + self, + schema: Union[typing.Dict, type], # noqa: UP006 + *, + include_raw: bool = False, + **kwargs: Any, + ) -> Runnable[LanguageModelInput, Union[typing.Dict, BaseModel]]: # noqa: UP006 + """Model wrapper that returns outputs formatted to match the given schema. + + Args: + schema: + The output schema. Can be passed in as: + - an OpenAI function/tool schema, + - a JSON Schema, + - a TypedDict class, + - or a Pydantic class. + If ``schema`` is a Pydantic class then the model output will be a + Pydantic instance of that class, and the model-generated fields will be + validated by the Pydantic class. Otherwise the model output will be a + dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool` + for more on how to properly specify types and descriptions of + schema fields when specifying a Pydantic or TypedDict class. + + include_raw: + If False then only the parsed structured output is returned. If + an error occurs during model output parsing it will be raised. If True + then both the raw model response (a BaseMessage) and the parsed model + response will be returned. If an error occurs during output parsing it + will be caught and returned as well. The final output is always a dict + with keys "raw", "parsed", and "parsing_error". + + Returns: + A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`. + + If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs + an instance of ``schema`` (i.e., a Pydantic object). + + Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + + If ``include_raw`` is True, then Runnable outputs a dict with keys: + - ``"raw"``: BaseMessage + - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``"parsing_error"``: Optional[BaseException] + + Example: Pydantic schema (include_raw=False): + .. 
code-block:: python + + from pydantic import BaseModel + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + answer: str + justification: str + + llm = ChatModel(model="model-name", temperature=0) + structured_llm = llm.with_structured_output(AnswerWithJustification) + + structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + + # -> AnswerWithJustification( + # answer='They weigh the same', + # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.' + # ) + + Example: Pydantic schema (include_raw=True): + .. code-block:: python + + from pydantic import BaseModel + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + answer: str + justification: str + + llm = ChatModel(model="model-name", temperature=0) + structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True) + + structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + # -> { + # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}), + # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'), + # 'parsing_error': None + # } + + Example: Dict schema (include_raw=False): + .. code-block:: python + + from pydantic import BaseModel + from langchain_core.utils.function_calling import convert_to_openai_tool + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + answer: str + justification: str + + dict_schema = convert_to_openai_tool(AnswerWithJustification) + llm = ChatModel(model="model-name", temperature=0) + structured_llm = llm.with_structured_output(dict_schema) + + structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + # -> { + # 'answer': 'They weigh the same', + # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' + # } + + .. versionchanged:: 0.2.26 + + Added support for TypedDict class. + """ # noqa: E501 + _ = kwargs.pop("method", None) + _ = kwargs.pop("strict", None) + if kwargs: + msg = f"Received unsupported arguments {kwargs}" + raise ValueError(msg) + + from langchain_core.output_parsers.openai_tools import ( + JsonOutputKeyToolsParser, + PydanticToolsParser, + ) + + if self.bind_tools is BaseChatModel.bind_tools: + msg = "with_structured_output is not implemented for this model." 
+ raise NotImplementedError(msg) + + llm = self.bind_tools( + [schema], + tool_choice="any", + ls_structured_output_format={ + "kwargs": {"method": "function_calling"}, + "schema": schema, + }, + ) + if isinstance(schema, type) and is_basemodel_subclass(schema): + output_parser: OutputParserLike = PydanticToolsParser( + tools=[cast("TypeBaseModel", schema)], first_tool_only=True + ) + else: + key_name = convert_to_openai_tool(schema)["function"]["name"] + output_parser = JsonOutputKeyToolsParser( + key_name=key_name, first_tool_only=True + ) + if include_raw: + parser_assign = RunnablePassthrough.assign( + parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None + ) + parser_none = RunnablePassthrough.assign(parsed=lambda _: None) + parser_with_fallback = parser_assign.with_fallbacks( + [parser_none], exception_key="parsing_error" + ) + return RunnableMap(raw=llm) | parser_with_fallback + return llm | output_parser + + +class SimpleChatModel(BaseChatModel): + """Simplified implementation for a chat model to inherit from. + + **Note** This implementation is primarily here for backwards compatibility. + For new implementations, please use `BaseChatModel` directly. + """ + + def _generate( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs) + message = AIMessage(content=output_str) + generation = ChatGeneration(message=message) + return ChatResult(generations=[generation]) + + @abstractmethod + def _call( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> str: + """Simpler interface.""" + + async def _agenerate( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + return await run_in_executor( + None, + self._generate, + messages, + stop=stop, + run_manager=run_manager.get_sync() if run_manager else None, + **kwargs, + ) + + +def _gen_info_and_msg_metadata( + generation: Union[ChatGeneration, ChatGenerationChunk], +) -> dict: + return { + **(generation.generation_info or {}), + **generation.message.response_metadata, + } + + +def _cleanup_llm_representation(serialized: Any, depth: int) -> None: + """Remove non-serializable objects from a serialized object.""" + if depth > 100: # Don't cooperate for pathological cases + return + + if not isinstance(serialized, dict): + return + + if ( + "type" in serialized + and serialized["type"] == "not_implemented" + and "repr" in serialized + ): + del serialized["repr"] + + if "graph" in serialized: + del serialized["graph"] + + if "kwargs" in serialized: + kwargs = serialized["kwargs"] + + for value in kwargs.values(): + _cleanup_llm_representation(value, depth + 1) diff --git a/venv/Lib/site-packages/langchain_core/language_models/fake.py b/venv/Lib/site-packages/langchain_core/language_models/fake.py new file mode 100644 index 00000000..72366302 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/language_models/fake.py @@ -0,0 +1,137 @@ +"""Fake LLMs for testing purposes.""" + +import asyncio +import time +from collections.abc import AsyncIterator, Iterator, Mapping +from typing import Any, Optional + +from typing_extensions import override + +from langchain_core.callbacks import ( + 
AsyncCallbackManagerForLLMRun, + CallbackManagerForLLMRun, +) +from langchain_core.language_models import LanguageModelInput +from langchain_core.language_models.llms import LLM +from langchain_core.runnables import RunnableConfig + + +class FakeListLLM(LLM): + """Fake LLM for testing purposes.""" + + responses: list[str] + """List of responses to return in order.""" + # This parameter should be removed from FakeListLLM since + # it's only used by sub-classes. + sleep: Optional[float] = None + """Sleep time in seconds between responses. + + Ignored by FakeListLLM, but used by sub-classes. + """ + i: int = 0 + """Internally incremented after every model invocation. + + Useful primarily for testing purposes. + """ + + @property + @override + def _llm_type(self) -> str: + """Return type of llm.""" + return "fake-list" + + @override + def _call( + self, + prompt: str, + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> str: + """Return next response.""" + response = self.responses[self.i] + if self.i < len(self.responses) - 1: + self.i += 1 + else: + self.i = 0 + return response + + @override + async def _acall( + self, + prompt: str, + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> str: + """Return next response.""" + response = self.responses[self.i] + if self.i < len(self.responses) - 1: + self.i += 1 + else: + self.i = 0 + return response + + @property + @override + def _identifying_params(self) -> Mapping[str, Any]: + return {"responses": self.responses} + + +class FakeListLLMError(Exception): + """Fake error for testing purposes.""" + + +class FakeStreamingListLLM(FakeListLLM): + """Fake streaming list LLM for testing purposes. + + An LLM that will return responses from a list in order. + + This model also supports optionally sleeping between successive + chunks in a streaming implementation. 
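+
+    A minimal usage sketch (the prompt text is arbitrary, since the responses
+    are canned):
+
+        .. code-block:: python
+
+            llm = FakeStreamingListLLM(responses=["hello"], sleep=0.01)
+            for chunk in llm.stream("any prompt"):
+                print(chunk, end="")  # emits "hello" one character at a time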
+ """ + + error_on_chunk_number: Optional[int] = None + """If set, will raise an exception on the specified chunk number.""" + + @override + def stream( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> Iterator[str]: + result = self.invoke(input, config) + for i_c, c in enumerate(result): + if self.sleep is not None: + time.sleep(self.sleep) + + if ( + self.error_on_chunk_number is not None + and i_c == self.error_on_chunk_number + ): + raise FakeListLLMError + yield c + + @override + async def astream( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> AsyncIterator[str]: + result = await self.ainvoke(input, config) + for i_c, c in enumerate(result): + if self.sleep is not None: + await asyncio.sleep(self.sleep) + + if ( + self.error_on_chunk_number is not None + and i_c == self.error_on_chunk_number + ): + raise FakeListLLMError + yield c diff --git a/venv/Lib/site-packages/langchain_core/language_models/fake_chat_models.py b/venv/Lib/site-packages/langchain_core/language_models/fake_chat_models.py new file mode 100644 index 00000000..a1cdd49a --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/language_models/fake_chat_models.py @@ -0,0 +1,365 @@ +"""Fake ChatModel for testing purposes.""" + +import asyncio +import re +import time +from collections.abc import AsyncIterator, Iterator +from typing import Any, Optional, Union, cast + +from typing_extensions import override + +from langchain_core.callbacks import ( + AsyncCallbackManagerForLLMRun, + CallbackManagerForLLMRun, +) +from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel +from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage +from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult +from langchain_core.runnables import RunnableConfig + + +class FakeMessagesListChatModel(BaseChatModel): + """Fake ChatModel for testing purposes.""" + + responses: list[BaseMessage] + """List of responses to **cycle** through in order.""" + sleep: Optional[float] = None + """Sleep time in seconds between responses.""" + i: int = 0 + """Internally incremented after every model invocation.""" + + @override + def _generate( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + response = self.responses[self.i] + if self.i < len(self.responses) - 1: + self.i += 1 + else: + self.i = 0 + generation = ChatGeneration(message=response) + return ChatResult(generations=[generation]) + + @property + @override + def _llm_type(self) -> str: + return "fake-messages-list-chat-model" + + +class FakeListChatModelError(Exception): + """Fake error for testing purposes.""" + + +class FakeListChatModel(SimpleChatModel): + """Fake ChatModel for testing purposes.""" + + responses: list[str] + """List of responses to **cycle** through in order.""" + sleep: Optional[float] = None + i: int = 0 + """List of responses to **cycle** through in order.""" + error_on_chunk_number: Optional[int] = None + """Internally incremented after every model invocation.""" + + @property + @override + def _llm_type(self) -> str: + return "fake-list-chat-model" + + @override + def _call( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = 
None, + **kwargs: Any, + ) -> str: + """First try to lookup in queries, else return 'foo' or 'bar'.""" + response = self.responses[self.i] + if self.i < len(self.responses) - 1: + self.i += 1 + else: + self.i = 0 + return response + + @override + def _stream( + self, + messages: list[BaseMessage], + stop: Union[list[str], None] = None, + run_manager: Union[CallbackManagerForLLMRun, None] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + response = self.responses[self.i] + if self.i < len(self.responses) - 1: + self.i += 1 + else: + self.i = 0 + for i_c, c in enumerate(response): + if self.sleep is not None: + time.sleep(self.sleep) + if ( + self.error_on_chunk_number is not None + and i_c == self.error_on_chunk_number + ): + raise FakeListChatModelError + + yield ChatGenerationChunk(message=AIMessageChunk(content=c)) + + @override + async def _astream( + self, + messages: list[BaseMessage], + stop: Union[list[str], None] = None, + run_manager: Union[AsyncCallbackManagerForLLMRun, None] = None, + **kwargs: Any, + ) -> AsyncIterator[ChatGenerationChunk]: + response = self.responses[self.i] + if self.i < len(self.responses) - 1: + self.i += 1 + else: + self.i = 0 + for i_c, c in enumerate(response): + if self.sleep is not None: + await asyncio.sleep(self.sleep) + if ( + self.error_on_chunk_number is not None + and i_c == self.error_on_chunk_number + ): + raise FakeListChatModelError + yield ChatGenerationChunk(message=AIMessageChunk(content=c)) + + @property + @override + def _identifying_params(self) -> dict[str, Any]: + return {"responses": self.responses} + + @override + # manually override batch to preserve batch ordering with no concurrency + def batch( + self, + inputs: list[Any], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Any, + ) -> list[BaseMessage]: + if isinstance(config, list): + return [self.invoke(m, c, **kwargs) for m, c in zip(inputs, config)] + return [self.invoke(m, config, **kwargs) for m in inputs] + + @override + async def abatch( + self, + inputs: list[Any], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Any, + ) -> list[BaseMessage]: + if isinstance(config, list): + # do Not use an async iterator here because need explicit ordering + return [await self.ainvoke(m, c, **kwargs) for m, c in zip(inputs, config)] + # do Not use an async iterator here because need explicit ordering + return [await self.ainvoke(m, config, **kwargs) for m in inputs] + + +class FakeChatModel(SimpleChatModel): + """Fake Chat Model wrapper for testing purposes.""" + + @override + def _call( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> str: + return "fake response" + + @override + async def _agenerate( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + output_str = "fake response" + message = AIMessage(content=output_str) + generation = ChatGeneration(message=message) + return ChatResult(generations=[generation]) + + @property + def _llm_type(self) -> str: + return "fake-chat-model" + + @property + def _identifying_params(self) -> dict[str, Any]: + return {"key": "fake"} + + +class GenericFakeChatModel(BaseChatModel): + """Generic fake chat model that can be used to test the chat model interface. 
+
+    * Chat model should be usable in both sync and async tests
+    * Invokes on_llm_new_token to allow for testing of callback-related code
+      for new tokens.
+    * Includes logic to break messages into message chunks to facilitate
+      testing of streaming.
+    """
+
+    messages: Iterator[Union[AIMessage, str]]
+    """Get an iterator over messages.
+
+    This can be expanded to accept other types like Callables / dicts / strings
+    to make the interface more generic if needed.
+
+    Note: if you want to pass a list, you can use `iter` to convert it to an iterator.
+
+    Streaming is implemented by delegating to `_generate` and then breaking the
+    resulting output into message chunks.
+    """
+
+    @override
+    def _generate(
+        self,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        """Top Level call."""
+        message = next(self.messages)
+        message_ = AIMessage(content=message) if isinstance(message, str) else message
+        generation = ChatGeneration(message=message_)
+        return ChatResult(generations=[generation])
+
+    def _stream(
+        self,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        """Stream the output of the model."""
+        chat_result = self._generate(
+            messages, stop=stop, run_manager=run_manager, **kwargs
+        )
+        if not isinstance(chat_result, ChatResult):
+            msg = (
+                f"Expected generate to return a ChatResult, "
+                f"but got {type(chat_result)} instead."
+            )
+            raise ValueError(msg)  # noqa: TRY004
+
+        message = chat_result.generations[0].message
+
+        if not isinstance(message, AIMessage):
+            msg = (
+                f"Expected invoke to return an AIMessage, "
+                f"but got {type(message)} instead."
+            )
+            raise ValueError(msg)  # noqa: TRY004
+
+        content = message.content
+
+        if content:
+            # Use a regular expression to split on whitespace with a capture group
+            # so that we can preserve the whitespace in the output.
+            if not isinstance(content, str):
+                msg = "Expected content to be a string."
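+                # Non-string content (e.g. a list of content blocks) cannot be
+                # re-split into whitespace-delimited chunks below.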
+ raise ValueError(msg) + + content_chunks = cast("list[str]", re.split(r"(\s)", content)) + + for token in content_chunks: + chunk = ChatGenerationChunk( + message=AIMessageChunk(content=token, id=message.id) + ) + if run_manager: + run_manager.on_llm_new_token(token, chunk=chunk) + yield chunk + + if message.additional_kwargs: + for key, value in message.additional_kwargs.items(): + # We should further break down the additional kwargs into chunks + # Special case for function call + if key == "function_call": + for fkey, fvalue in value.items(): + if isinstance(fvalue, str): + # Break function call by `,` + fvalue_chunks = cast("list[str]", re.split(r"(,)", fvalue)) + for fvalue_chunk in fvalue_chunks: + chunk = ChatGenerationChunk( + message=AIMessageChunk( + id=message.id, + content="", + additional_kwargs={ + "function_call": {fkey: fvalue_chunk} + }, + ) + ) + if run_manager: + run_manager.on_llm_new_token( + "", + chunk=chunk, # No token for function call + ) + yield chunk + else: + chunk = ChatGenerationChunk( + message=AIMessageChunk( + id=message.id, + content="", + additional_kwargs={"function_call": {fkey: fvalue}}, + ) + ) + if run_manager: + run_manager.on_llm_new_token( + "", + chunk=chunk, # No token for function call + ) + yield chunk + else: + chunk = ChatGenerationChunk( + message=AIMessageChunk( + id=message.id, content="", additional_kwargs={key: value} + ) + ) + if run_manager: + run_manager.on_llm_new_token( + "", + chunk=chunk, # No token for function call + ) + yield chunk + + @property + def _llm_type(self) -> str: + return "generic-fake-chat-model" + + +class ParrotFakeChatModel(BaseChatModel): + """Generic fake chat model that can be used to test the chat model interface. + + * Chat model should be usable in both sync and async tests + """ + + @override + def _generate( + self, + messages: list[BaseMessage], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + """Top Level call.""" + return ChatResult(generations=[ChatGeneration(message=messages[-1])]) + + @property + def _llm_type(self) -> str: + return "parrot-fake-chat-model" diff --git a/venv/Lib/site-packages/langchain_core/language_models/llms.py b/venv/Lib/site-packages/langchain_core/language_models/llms.py new file mode 100644 index 00000000..87f3974a --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/language_models/llms.py @@ -0,0 +1,1569 @@ +"""Base interface for large language models to expose.""" + +from __future__ import annotations + +import asyncio +import functools +import inspect +import json +import logging +import warnings +from abc import ABC, abstractmethod +from collections.abc import AsyncIterator, Iterator, Sequence +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Union, + cast, +) + +import yaml +from pydantic import ConfigDict, Field, model_validator +from tenacity import ( + RetryCallState, + before_sleep_log, + retry, + retry_base, + retry_if_exception_type, + stop_after_attempt, + wait_exponential, +) +from typing_extensions import override + +from langchain_core._api import deprecated +from langchain_core.caches import BaseCache +from langchain_core.callbacks import ( + AsyncCallbackManager, + AsyncCallbackManagerForLLMRun, + BaseCallbackManager, + CallbackManager, + CallbackManagerForLLMRun, + Callbacks, +) +from langchain_core.globals import get_llm_cache +from langchain_core.language_models.base import ( + BaseLanguageModel, + LangSmithParams, + 
LanguageModelInput, +) +from langchain_core.load import dumpd +from langchain_core.messages import ( + AIMessage, + BaseMessage, + convert_to_messages, + get_buffer_string, +) +from langchain_core.outputs import Generation, GenerationChunk, LLMResult, RunInfo +from langchain_core.prompt_values import ChatPromptValue, PromptValue, StringPromptValue +from langchain_core.runnables import RunnableConfig, ensure_config, get_config_list +from langchain_core.runnables.config import run_in_executor + +if TYPE_CHECKING: + import uuid + +logger = logging.getLogger(__name__) + + +@functools.lru_cache +def _log_error_once(msg: str) -> None: + """Log an error once.""" + logger.error(msg) + + +def create_base_retry_decorator( + error_types: list[type[BaseException]], + max_retries: int = 1, + run_manager: Optional[ + Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] + ] = None, +) -> Callable[[Any], Any]: + """Create a retry decorator for a given LLM and provided a list of error types. + + Args: + error_types: List of error types to retry on. + max_retries: Number of retries. Default is 1. + run_manager: Callback manager for the run. Default is None. + + Returns: + A retry decorator. + + Raises: + ValueError: If the cache is not set and cache is True. + """ + _logging = before_sleep_log(logger, logging.WARNING) + + def _before_sleep(retry_state: RetryCallState) -> None: + _logging(retry_state) + if run_manager: + if isinstance(run_manager, AsyncCallbackManagerForLLMRun): + coro = run_manager.on_retry(retry_state) + try: + loop = asyncio.get_event_loop() + if loop.is_running(): + loop.create_task(coro) + else: + asyncio.run(coro) + except Exception as e: + _log_error_once(f"Error in on_retry: {e}") + else: + run_manager.on_retry(retry_state) + + min_seconds = 4 + max_seconds = 10 + # Wait 2^x * 1 second between each retry starting with + # 4 seconds, then up to 10 seconds, then 10 seconds afterwards + retry_instance: retry_base = retry_if_exception_type(error_types[0]) + for error in error_types[1:]: + retry_instance = retry_instance | retry_if_exception_type(error) + return retry( + reraise=True, + stop=stop_after_attempt(max_retries), + wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), + retry=retry_instance, + before_sleep=_before_sleep, + ) + + +def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]: + """Resolve the cache.""" + if isinstance(cache, BaseCache): + llm_cache = cache + elif cache is None: + llm_cache = get_llm_cache() + elif cache is True: + llm_cache = get_llm_cache() + if llm_cache is None: + msg = ( + "No global cache was configured. Use `set_llm_cache`." + "to set a global cache if you want to use a global cache." + "Otherwise either pass a cache object or set cache to False/None" + ) + raise ValueError(msg) + elif cache is False: + llm_cache = None + else: + msg = f"Unsupported cache value {cache}" + raise ValueError(msg) + return llm_cache + + +def get_prompts( + params: dict[str, Any], + prompts: list[str], + cache: Optional[Union[BaseCache, bool, None]] = None, +) -> tuple[dict[int, list], str, list[int], list[str]]: + """Get prompts that are already cached. + + Args: + params: Dictionary of parameters. + prompts: List of prompts. + cache: Cache object. Default is None. + + Returns: + A tuple of existing prompts, llm_string, missing prompt indexes, + and missing prompts. + + Raises: + ValueError: If the cache is not set and cache is True. 
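+
+    A minimal sketch of the no-cache path (illustrative; the params dict is
+    arbitrary):
+
+        .. code-block:: python
+
+            existing, llm_string, missing_idxs, missing = get_prompts(
+                {"model_name": "fake"}, ["p1", "p2"], cache=False
+            )
+            # With caching disabled, every prompt is reported as missing:
+            # existing == {}, missing_idxs == [0, 1], missing == ["p1", "p2"]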
+ """ + llm_string = str(sorted(params.items())) + missing_prompts = [] + missing_prompt_idxs = [] + existing_prompts = {} + + llm_cache = _resolve_cache(cache) + for i, prompt in enumerate(prompts): + if llm_cache: + cache_val = llm_cache.lookup(prompt, llm_string) + if isinstance(cache_val, list): + existing_prompts[i] = cache_val + else: + missing_prompts.append(prompt) + missing_prompt_idxs.append(i) + return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts + + +async def aget_prompts( + params: dict[str, Any], + prompts: list[str], + cache: Optional[Union[BaseCache, bool, None]] = None, +) -> tuple[dict[int, list], str, list[int], list[str]]: + """Get prompts that are already cached. Async version. + + Args: + params: Dictionary of parameters. + prompts: List of prompts. + cache: Cache object. Default is None. + + Returns: + A tuple of existing prompts, llm_string, missing prompt indexes, + and missing prompts. + + Raises: + ValueError: If the cache is not set and cache is True. + """ + llm_string = str(sorted(params.items())) + missing_prompts = [] + missing_prompt_idxs = [] + existing_prompts = {} + llm_cache = _resolve_cache(cache) + for i, prompt in enumerate(prompts): + if llm_cache: + cache_val = await llm_cache.alookup(prompt, llm_string) + if isinstance(cache_val, list): + existing_prompts[i] = cache_val + else: + missing_prompts.append(prompt) + missing_prompt_idxs.append(i) + return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts + + +def update_cache( + cache: Union[BaseCache, bool, None], + existing_prompts: dict[int, list], + llm_string: str, + missing_prompt_idxs: list[int], + new_results: LLMResult, + prompts: list[str], +) -> Optional[dict]: + """Update the cache and get the LLM output. + + Args: + cache: Cache object. + existing_prompts: Dictionary of existing prompts. + llm_string: LLM string. + missing_prompt_idxs: List of missing prompt indexes. + new_results: LLMResult object. + prompts: List of prompts. + + Returns: + LLM output. + + Raises: + ValueError: If the cache is not set and cache is True. + """ + llm_cache = _resolve_cache(cache) + for i, result in enumerate(new_results.generations): + existing_prompts[missing_prompt_idxs[i]] = result + prompt = prompts[missing_prompt_idxs[i]] + if llm_cache is not None: + llm_cache.update(prompt, llm_string, result) + return new_results.llm_output + + +async def aupdate_cache( + cache: Union[BaseCache, bool, None], + existing_prompts: dict[int, list], + llm_string: str, + missing_prompt_idxs: list[int], + new_results: LLMResult, + prompts: list[str], +) -> Optional[dict]: + """Update the cache and get the LLM output. Async version. + + Args: + cache: Cache object. + existing_prompts: Dictionary of existing prompts. + llm_string: LLM string. + missing_prompt_idxs: List of missing prompt indexes. + new_results: LLMResult object. + prompts: List of prompts. + + Returns: + LLM output. + + Raises: + ValueError: If the cache is not set and cache is True. + """ + llm_cache = _resolve_cache(cache) + for i, result in enumerate(new_results.generations): + existing_prompts[missing_prompt_idxs[i]] = result + prompt = prompts[missing_prompt_idxs[i]] + if llm_cache: + await llm_cache.aupdate(prompt, llm_string, result) + return new_results.llm_output + + +class BaseLLM(BaseLanguageModel[str], ABC): + """Base LLM abstract interface. + + It should take in a prompt and return a string. 
+ """ + + callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) + """[DEPRECATED]""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @model_validator(mode="before") + @classmethod + def raise_deprecation(cls, values: dict) -> Any: + """Raise deprecation warning if callback_manager is used.""" + if values.get("callback_manager") is not None: + warnings.warn( + "callback_manager is deprecated. Please use callbacks instead.", + DeprecationWarning, + stacklevel=5, + ) + values["callbacks"] = values.pop("callback_manager", None) + return values + + @functools.cached_property + def _serialized(self) -> dict[str, Any]: + return dumpd(self) + + # --- Runnable methods --- + + @property + @override + def OutputType(self) -> type[str]: + """Get the input type for this runnable.""" + return str + + def _convert_input(self, input: LanguageModelInput) -> PromptValue: + if isinstance(input, PromptValue): + return input + if isinstance(input, str): + return StringPromptValue(text=input) + if isinstance(input, Sequence): + return ChatPromptValue(messages=convert_to_messages(input)) + msg = ( + f"Invalid input type {type(input)}. " + "Must be a PromptValue, str, or list of BaseMessages." + ) + raise ValueError(msg) # noqa: TRY004 + + def _get_ls_params( + self, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> LangSmithParams: + """Get standard params for tracing.""" + # get default provider from class name + default_provider = self.__class__.__name__ + default_provider = default_provider.removesuffix("LLM") + default_provider = default_provider.lower() + + ls_params = LangSmithParams(ls_provider=default_provider, ls_model_type="llm") + if stop: + ls_params["ls_stop"] = stop + + # model + if hasattr(self, "model") and isinstance(self.model, str): + ls_params["ls_model_name"] = self.model + elif hasattr(self, "model_name") and isinstance(self.model_name, str): + ls_params["ls_model_name"] = self.model_name + + # temperature + if "temperature" in kwargs and isinstance(kwargs["temperature"], float): + ls_params["ls_temperature"] = kwargs["temperature"] + elif hasattr(self, "temperature") and isinstance(self.temperature, float): + ls_params["ls_temperature"] = self.temperature + + # max_tokens + if "max_tokens" in kwargs and isinstance(kwargs["max_tokens"], int): + ls_params["ls_max_tokens"] = kwargs["max_tokens"] + elif hasattr(self, "max_tokens") and isinstance(self.max_tokens, int): + ls_params["ls_max_tokens"] = self.max_tokens + + return ls_params + + @override + def invoke( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> str: + config = ensure_config(config) + return ( + self.generate_prompt( + [self._convert_input(input)], + stop=stop, + callbacks=config.get("callbacks"), + tags=config.get("tags"), + metadata=config.get("metadata"), + run_name=config.get("run_name"), + run_id=config.pop("run_id", None), + **kwargs, + ) + .generations[0][0] + .text + ) + + @override + async def ainvoke( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> str: + config = ensure_config(config) + llm_result = await self.agenerate_prompt( + [self._convert_input(input)], + stop=stop, + callbacks=config.get("callbacks"), + tags=config.get("tags"), + metadata=config.get("metadata"), + run_name=config.get("run_name"), + run_id=config.pop("run_id", None), + **kwargs, + ) + return 
llm_result.generations[0][0].text + + @override + def batch( + self, + inputs: list[LanguageModelInput], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Any, + ) -> list[str]: + if not inputs: + return [] + + config = get_config_list(config, len(inputs)) + max_concurrency = config[0].get("max_concurrency") + + if max_concurrency is None: + try: + llm_result = self.generate_prompt( + [self._convert_input(input) for input in inputs], + callbacks=[c.get("callbacks") for c in config], + tags=[c.get("tags") for c in config], + metadata=[c.get("metadata") for c in config], + run_name=[c.get("run_name") for c in config], + **kwargs, + ) + return [g[0].text for g in llm_result.generations] + except Exception as e: + if return_exceptions: + return cast("list[str]", [e for _ in inputs]) + raise + else: + batches = [ + inputs[i : i + max_concurrency] + for i in range(0, len(inputs), max_concurrency) + ] + config = [{**c, "max_concurrency": None} for c in config] + return [ + output + for i, batch in enumerate(batches) + for output in self.batch( + batch, + config=config[i * max_concurrency : (i + 1) * max_concurrency], + return_exceptions=return_exceptions, + **kwargs, + ) + ] + + @override + async def abatch( + self, + inputs: list[LanguageModelInput], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Any, + ) -> list[str]: + if not inputs: + return [] + config = get_config_list(config, len(inputs)) + max_concurrency = config[0].get("max_concurrency") + + if max_concurrency is None: + try: + llm_result = await self.agenerate_prompt( + [self._convert_input(input) for input in inputs], + callbacks=[c.get("callbacks") for c in config], + tags=[c.get("tags") for c in config], + metadata=[c.get("metadata") for c in config], + run_name=[c.get("run_name") for c in config], + **kwargs, + ) + return [g[0].text for g in llm_result.generations] + except Exception as e: + if return_exceptions: + return cast("list[str]", [e for _ in inputs]) + raise + else: + batches = [ + inputs[i : i + max_concurrency] + for i in range(0, len(inputs), max_concurrency) + ] + config = [{**c, "max_concurrency": None} for c in config] + return [ + output + for i, batch in enumerate(batches) + for output in await self.abatch( + batch, + config=config[i * max_concurrency : (i + 1) * max_concurrency], + return_exceptions=return_exceptions, + **kwargs, + ) + ] + + @override + def stream( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> Iterator[str]: + if type(self)._stream == BaseLLM._stream: + # model doesn't implement streaming, so use default implementation + yield self.invoke(input, config=config, stop=stop, **kwargs) + else: + prompt = self._convert_input(input).to_string() + config = ensure_config(config) + params = self.dict() + params["stop"] = stop + params = {**params, **kwargs} + options = {"stop": stop} + inheritable_metadata = { + **(config.get("metadata") or {}), + **self._get_ls_params(stop=stop, **kwargs), + } + callback_manager = CallbackManager.configure( + config.get("callbacks"), + self.callbacks, + self.verbose, + config.get("tags"), + self.tags, + inheritable_metadata, + self.metadata, + ) + (run_manager,) = callback_manager.on_llm_start( + self._serialized, + [prompt], + invocation_params=params, + options=options, + name=config.get("run_name"), + 
run_id=config.pop("run_id", None), + batch_size=1, + ) + generation: Optional[GenerationChunk] = None + try: + for chunk in self._stream( + prompt, stop=stop, run_manager=run_manager, **kwargs + ): + yield chunk.text + if generation is None: + generation = chunk + else: + generation += chunk + except BaseException as e: + run_manager.on_llm_error( + e, + response=LLMResult( + generations=[[generation]] if generation else [] + ), + ) + raise + + if generation is None: + err = ValueError("No generation chunks were returned") + run_manager.on_llm_error(err, response=LLMResult(generations=[])) + raise err + + run_manager.on_llm_end(LLMResult(generations=[[generation]])) + + @override + async def astream( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + *, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> AsyncIterator[str]: + if ( + type(self)._astream is BaseLLM._astream + and type(self)._stream is BaseLLM._stream + ): + yield await self.ainvoke(input, config=config, stop=stop, **kwargs) + return + + prompt = self._convert_input(input).to_string() + config = ensure_config(config) + params = self.dict() + params["stop"] = stop + params = {**params, **kwargs} + options = {"stop": stop} + inheritable_metadata = { + **(config.get("metadata") or {}), + **self._get_ls_params(stop=stop, **kwargs), + } + callback_manager = AsyncCallbackManager.configure( + config.get("callbacks"), + self.callbacks, + self.verbose, + config.get("tags"), + self.tags, + inheritable_metadata, + self.metadata, + ) + (run_manager,) = await callback_manager.on_llm_start( + self._serialized, + [prompt], + invocation_params=params, + options=options, + name=config.get("run_name"), + run_id=config.pop("run_id", None), + batch_size=1, + ) + generation: Optional[GenerationChunk] = None + try: + async for chunk in self._astream( + prompt, + stop=stop, + run_manager=run_manager, + **kwargs, + ): + yield chunk.text + if generation is None: + generation = chunk + else: + generation += chunk + except BaseException as e: + await run_manager.on_llm_error( + e, + response=LLMResult(generations=[[generation]] if generation else []), + ) + raise + + if generation is None: + err = ValueError("No generation chunks were returned") + await run_manager.on_llm_error(err, response=LLMResult(generations=[])) + raise err + + await run_manager.on_llm_end(LLMResult(generations=[[generation]])) + + # --- Custom methods --- + + @abstractmethod + def _generate( + self, + prompts: list[str], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> LLMResult: + """Run the LLM on the given prompts.""" + + async def _agenerate( + self, + prompts: list[str], + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> LLMResult: + """Run the LLM on the given prompts.""" + return await run_in_executor( + None, + self._generate, + prompts, + stop, + run_manager.get_sync() if run_manager else None, + **kwargs, + ) + + def _stream( + self, + prompt: str, + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[GenerationChunk]: + """Stream the LLM on the given prompt. + + This method should be overridden by subclasses that support streaming. + + If not implemented, the default behavior of calls to stream will be to + fallback to the non-streaming version of the model and return + the output as a single chunk. 
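+
+        A minimal override sketch (illustrative; assumes the full completion
+        is cheap to compute up front and is simply re-chunked word by word):
+
+        .. code-block:: python
+
+            def _stream(self, prompt, stop=None, run_manager=None, **kwargs):
+                for word in "some canned output".split():
+                    chunk = GenerationChunk(text=word + " ")
+                    if run_manager:
+                        run_manager.on_llm_new_token(chunk.text, chunk=chunk)
+                    yield chunk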
+ + Args: + prompt: The prompt to generate from. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + run_manager: Callback manager for the run. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + An iterator of GenerationChunks. + """ + raise NotImplementedError + + async def _astream( + self, + prompt: str, + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[GenerationChunk]: + """An async version of the _stream method. + + The default implementation uses the synchronous _stream method and wraps it in + an async iterator. Subclasses that need to provide a true async implementation + should override this method. + + Args: + prompt: The prompt to generate from. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + run_manager: Callback manager for the run. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + An async iterator of GenerationChunks. + """ + iterator = await run_in_executor( + None, + self._stream, + prompt, + stop, + run_manager.get_sync() if run_manager else None, + **kwargs, + ) + done = object() + while True: + item = await run_in_executor( + None, + next, + iterator, + done, + ) + if item is done: + break + yield item # type: ignore[misc] + + @override + def generate_prompt( + self, + prompts: list[PromptValue], + stop: Optional[list[str]] = None, + callbacks: Optional[Union[Callbacks, list[Callbacks]]] = None, + **kwargs: Any, + ) -> LLMResult: + prompt_strings = [p.to_string() for p in prompts] + return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs) + + @override + async def agenerate_prompt( + self, + prompts: list[PromptValue], + stop: Optional[list[str]] = None, + callbacks: Optional[Union[Callbacks, list[Callbacks]]] = None, + **kwargs: Any, + ) -> LLMResult: + prompt_strings = [p.to_string() for p in prompts] + return await self.agenerate( + prompt_strings, stop=stop, callbacks=callbacks, **kwargs + ) + + def _generate_helper( + self, + prompts: list[str], + stop: Optional[list[str]], + run_managers: list[CallbackManagerForLLMRun], + *, + new_arg_supported: bool, + **kwargs: Any, + ) -> LLMResult: + try: + output = ( + self._generate( + prompts, + stop=stop, + # TODO: support multiple run managers + run_manager=run_managers[0] if run_managers else None, + **kwargs, + ) + if new_arg_supported + else self._generate(prompts, stop=stop) + ) + except BaseException as e: + for run_manager in run_managers: + run_manager.on_llm_error(e, response=LLMResult(generations=[])) + raise + flattened_outputs = output.flatten() + for manager, flattened_output in zip(run_managers, flattened_outputs): + manager.on_llm_end(flattened_output) + if run_managers: + output.run = [ + RunInfo(run_id=run_manager.run_id) for run_manager in run_managers + ] + return output + + def generate( + self, + prompts: list[str], + stop: Optional[list[str]] = None, + callbacks: Optional[Union[Callbacks, list[Callbacks]]] = None, + *, + tags: Optional[Union[list[str], list[list[str]]]] = None, + metadata: Optional[Union[dict[str, Any], list[dict[str, Any]]]] = None, + run_name: Optional[Union[str, list[str]]] = None, + run_id: Optional[Union[uuid.UUID, list[Optional[uuid.UUID]]]] = None, + **kwargs: Any, + ) -> 
LLMResult: + """Pass a sequence of prompts to a model and return generations. + + This method should make use of batched calls for models that expose a batched + API. + + Use this method when you want to: + 1. take advantage of batched calls, + 2. need more output from the model than just the top generated value, + 3. are building chains that are agnostic to the underlying language model + type (e.g., pure text completion models vs chat models). + + Args: + prompts: List of string prompts. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + callbacks: Callbacks to pass through. Used for executing additional + functionality, such as logging or streaming, throughout generation. + tags: List of tags to associate with each prompt. If provided, the length + of the list must match the length of the prompts list. + metadata: List of metadata dictionaries to associate with each prompt. If + provided, the length of the list must match the length of the prompts + list. + run_name: List of run names to associate with each prompt. If provided, the + length of the list must match the length of the prompts list. + run_id: List of run IDs to associate with each prompt. If provided, the + length of the list must match the length of the prompts list. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + An LLMResult, which contains a list of candidate Generations for each input + prompt and additional model provider-specific output. + """ + if not isinstance(prompts, list): + msg = ( + "Argument 'prompts' is expected to be of type list[str], received" + f" argument of type {type(prompts)}." + ) + raise ValueError(msg) # noqa: TRY004 + # Create callback managers + if isinstance(metadata, list): + metadata = [ + { + **(meta or {}), + **self._get_ls_params(stop=stop, **kwargs), + } + for meta in metadata + ] + elif isinstance(metadata, dict): + metadata = { + **(metadata or {}), + **self._get_ls_params(stop=stop, **kwargs), + } + else: + pass + if ( + isinstance(callbacks, list) + and callbacks + and ( + isinstance(callbacks[0], (list, BaseCallbackManager)) + or callbacks[0] is None + ) + ): + # We've received a list of callbacks args to apply to each input + if len(callbacks) != len(prompts): + msg = "callbacks must be the same length as prompts" + raise ValueError(msg) + if tags is not None and not ( + isinstance(tags, list) and len(tags) == len(prompts) + ): + msg = "tags must be a list of the same length as prompts" + raise ValueError(msg) + if metadata is not None and not ( + isinstance(metadata, list) and len(metadata) == len(prompts) + ): + msg = "metadata must be a list of the same length as prompts" + raise ValueError(msg) + if run_name is not None and not ( + isinstance(run_name, list) and len(run_name) == len(prompts) + ): + msg = "run_name must be a list of the same length as prompts" + raise ValueError(msg) + callbacks = cast("list[Callbacks]", callbacks) + tags_list = cast( + "list[Optional[list[str]]]", tags or ([None] * len(prompts)) + ) + metadata_list = cast( + "list[Optional[dict[str, Any]]]", metadata or ([{}] * len(prompts)) + ) + run_name_list = run_name or cast( + "list[Optional[str]]", ([None] * len(prompts)) + ) + callback_managers = [ + CallbackManager.configure( + callback, + self.callbacks, + self.verbose, + tag, + self.tags, + meta, + self.metadata, + ) + for callback, tag, meta in zip(callbacks, tags_list, metadata_list) + ] + else: + # 
We've received a single callbacks arg to apply to all inputs + callback_managers = [ + CallbackManager.configure( + cast("Callbacks", callbacks), + self.callbacks, + self.verbose, + cast("list[str]", tags), + self.tags, + cast("dict[str, Any]", metadata), + self.metadata, + ) + ] * len(prompts) + run_name_list = [cast("Optional[str]", run_name)] * len(prompts) + run_ids_list = self._get_run_ids_list(run_id, prompts) + params = self.dict() + params["stop"] = stop + options = {"stop": stop} + ( + existing_prompts, + llm_string, + missing_prompt_idxs, + missing_prompts, + ) = get_prompts(params, prompts, self.cache) + new_arg_supported = inspect.signature(self._generate).parameters.get( + "run_manager" + ) + if (self.cache is None and get_llm_cache() is None) or self.cache is False: + run_managers = [ + callback_manager.on_llm_start( + self._serialized, + [prompt], + invocation_params=params, + options=options, + name=run_name, + batch_size=len(prompts), + run_id=run_id_, + )[0] + for callback_manager, prompt, run_name, run_id_ in zip( + callback_managers, prompts, run_name_list, run_ids_list + ) + ] + return self._generate_helper( + prompts, + stop, + run_managers, + new_arg_supported=bool(new_arg_supported), + **kwargs, + ) + if len(missing_prompts) > 0: + run_managers = [ + callback_managers[idx].on_llm_start( + self._serialized, + [prompts[idx]], + invocation_params=params, + options=options, + name=run_name_list[idx], + batch_size=len(missing_prompts), + )[0] + for idx in missing_prompt_idxs + ] + new_results = self._generate_helper( + missing_prompts, + stop, + run_managers, + new_arg_supported=bool(new_arg_supported), + **kwargs, + ) + llm_output = update_cache( + self.cache, + existing_prompts, + llm_string, + missing_prompt_idxs, + new_results, + prompts, + ) + run_info = ( + [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers] + if run_managers + else None + ) + else: + llm_output = {} + run_info = None + generations = [existing_prompts[i] for i in range(len(prompts))] + return LLMResult(generations=generations, llm_output=llm_output, run=run_info) + + @staticmethod + def _get_run_ids_list( + run_id: Optional[Union[uuid.UUID, list[Optional[uuid.UUID]]]], prompts: list + ) -> list: + if run_id is None: + return [None] * len(prompts) + if isinstance(run_id, list): + if len(run_id) != len(prompts): + msg = ( + "Number of manually provided run_id's does not match batch length." 
+ f" {len(run_id)} != {len(prompts)}" + ) + raise ValueError(msg) + return run_id + return [run_id] + [None] * (len(prompts) - 1) + + async def _agenerate_helper( + self, + prompts: list[str], + stop: Optional[list[str]], + run_managers: list[AsyncCallbackManagerForLLMRun], + *, + new_arg_supported: bool, + **kwargs: Any, + ) -> LLMResult: + try: + output = ( + await self._agenerate( + prompts, + stop=stop, + run_manager=run_managers[0] if run_managers else None, + **kwargs, + ) + if new_arg_supported + else await self._agenerate(prompts, stop=stop) + ) + except BaseException as e: + await asyncio.gather( + *[ + run_manager.on_llm_error(e, response=LLMResult(generations=[])) + for run_manager in run_managers + ] + ) + raise + flattened_outputs = output.flatten() + await asyncio.gather( + *[ + run_manager.on_llm_end(flattened_output) + for run_manager, flattened_output in zip( + run_managers, flattened_outputs + ) + ] + ) + if run_managers: + output.run = [ + RunInfo(run_id=run_manager.run_id) for run_manager in run_managers + ] + return output + + async def agenerate( + self, + prompts: list[str], + stop: Optional[list[str]] = None, + callbacks: Optional[Union[Callbacks, list[Callbacks]]] = None, + *, + tags: Optional[Union[list[str], list[list[str]]]] = None, + metadata: Optional[Union[dict[str, Any], list[dict[str, Any]]]] = None, + run_name: Optional[Union[str, list[str]]] = None, + run_id: Optional[Union[uuid.UUID, list[Optional[uuid.UUID]]]] = None, + **kwargs: Any, + ) -> LLMResult: + """Asynchronously pass a sequence of prompts to a model and return generations. + + This method should make use of batched calls for models that expose a batched + API. + + Use this method when you want to: + 1. take advantage of batched calls, + 2. need more output from the model than just the top generated value, + 3. are building chains that are agnostic to the underlying language model + type (e.g., pure text completion models vs chat models). + + Args: + prompts: List of string prompts. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + callbacks: Callbacks to pass through. Used for executing additional + functionality, such as logging or streaming, throughout generation. + tags: List of tags to associate with each prompt. If provided, the length + of the list must match the length of the prompts list. + metadata: List of metadata dictionaries to associate with each prompt. If + provided, the length of the list must match the length of the prompts + list. + run_name: List of run names to associate with each prompt. If provided, the + length of the list must match the length of the prompts list. + run_id: List of run IDs to associate with each prompt. If provided, the + length of the list must match the length of the prompts list. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + An LLMResult, which contains a list of candidate Generations for each input + prompt and additional model provider-specific output. 
+ """ + if isinstance(metadata, list): + metadata = [ + { + **(meta or {}), + **self._get_ls_params(stop=stop, **kwargs), + } + for meta in metadata + ] + elif isinstance(metadata, dict): + metadata = { + **(metadata or {}), + **self._get_ls_params(stop=stop, **kwargs), + } + else: + pass + # Create callback managers + if isinstance(callbacks, list) and ( + isinstance(callbacks[0], (list, BaseCallbackManager)) + or callbacks[0] is None + ): + # We've received a list of callbacks args to apply to each input + if len(callbacks) != len(prompts): + msg = "callbacks must be the same length as prompts" + raise ValueError(msg) + if tags is not None and not ( + isinstance(tags, list) and len(tags) == len(prompts) + ): + msg = "tags must be a list of the same length as prompts" + raise ValueError(msg) + if metadata is not None and not ( + isinstance(metadata, list) and len(metadata) == len(prompts) + ): + msg = "metadata must be a list of the same length as prompts" + raise ValueError(msg) + if run_name is not None and not ( + isinstance(run_name, list) and len(run_name) == len(prompts) + ): + msg = "run_name must be a list of the same length as prompts" + raise ValueError(msg) + callbacks = cast("list[Callbacks]", callbacks) + tags_list = cast( + "list[Optional[list[str]]]", tags or ([None] * len(prompts)) + ) + metadata_list = cast( + "list[Optional[dict[str, Any]]]", metadata or ([{}] * len(prompts)) + ) + run_name_list = run_name or cast( + "list[Optional[str]]", ([None] * len(prompts)) + ) + callback_managers = [ + AsyncCallbackManager.configure( + callback, + self.callbacks, + self.verbose, + tag, + self.tags, + meta, + self.metadata, + ) + for callback, tag, meta in zip(callbacks, tags_list, metadata_list) + ] + else: + # We've received a single callbacks arg to apply to all inputs + callback_managers = [ + AsyncCallbackManager.configure( + cast("Callbacks", callbacks), + self.callbacks, + self.verbose, + cast("list[str]", tags), + self.tags, + cast("dict[str, Any]", metadata), + self.metadata, + ) + ] * len(prompts) + run_name_list = [cast("Optional[str]", run_name)] * len(prompts) + run_ids_list = self._get_run_ids_list(run_id, prompts) + params = self.dict() + params["stop"] = stop + options = {"stop": stop} + ( + existing_prompts, + llm_string, + missing_prompt_idxs, + missing_prompts, + ) = await aget_prompts(params, prompts, self.cache) + + # Verify whether the cache is set, and if the cache is set, + # verify whether the cache is available. 
+ new_arg_supported = inspect.signature(self._agenerate).parameters.get( + "run_manager" + ) + if (self.cache is None and get_llm_cache() is None) or self.cache is False: + run_managers = await asyncio.gather( + *[ + callback_manager.on_llm_start( + self._serialized, + [prompt], + invocation_params=params, + options=options, + name=run_name, + batch_size=len(prompts), + run_id=run_id_, + ) + for callback_manager, prompt, run_name, run_id_ in zip( + callback_managers, prompts, run_name_list, run_ids_list + ) + ] + ) + run_managers = [r[0] for r in run_managers] # type: ignore[misc] + return await self._agenerate_helper( + prompts, + stop, + run_managers, # type: ignore[arg-type] + new_arg_supported=bool(new_arg_supported), + **kwargs, + ) + if len(missing_prompts) > 0: + run_managers = await asyncio.gather( + *[ + callback_managers[idx].on_llm_start( + self._serialized, + [prompts[idx]], + invocation_params=params, + options=options, + name=run_name_list[idx], + batch_size=len(missing_prompts), + ) + for idx in missing_prompt_idxs + ] + ) + run_managers = [r[0] for r in run_managers] # type: ignore[misc] + new_results = await self._agenerate_helper( + missing_prompts, + stop, + run_managers, # type: ignore[arg-type] + new_arg_supported=bool(new_arg_supported), + **kwargs, + ) + llm_output = await aupdate_cache( + self.cache, + existing_prompts, + llm_string, + missing_prompt_idxs, + new_results, + prompts, + ) + run_info = ( + [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers] # type: ignore[attr-defined] + if run_managers + else None + ) + else: + llm_output = {} + run_info = None + generations = [existing_prompts[i] for i in range(len(prompts))] + return LLMResult(generations=generations, llm_output=llm_output, run=run_info) + + @deprecated("0.1.7", alternative="invoke", removal="1.0") + def __call__( + self, + prompt: str, + stop: Optional[list[str]] = None, + callbacks: Callbacks = None, + *, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> str: + """Check Cache and run the LLM on the given prompt and input. + + Args: + prompt: The prompt to generate from. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + callbacks: Callbacks to pass through. Used for executing additional + functionality, such as logging or streaming, throughout generation. + tags: List of tags to associate with the prompt. + metadata: Metadata to associate with the prompt. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + The generated text. + + Raises: + ValueError: If the prompt is not a string. + """ + if not isinstance(prompt, str): + msg = ( + "Argument `prompt` is expected to be a string. Instead found " + f"{type(prompt)}. If you want to run the LLM on multiple prompts, use " + "`generate` instead." 
+ ) + raise ValueError(msg) # noqa: TRY004 + return ( + self.generate( + [prompt], + stop=stop, + callbacks=callbacks, + tags=tags, + metadata=metadata, + **kwargs, + ) + .generations[0][0] + .text + ) + + async def _call_async( + self, + prompt: str, + stop: Optional[list[str]] = None, + callbacks: Callbacks = None, + *, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> str: + """Check Cache and run the LLM on the given prompt and input.""" + result = await self.agenerate( + [prompt], + stop=stop, + callbacks=callbacks, + tags=tags, + metadata=metadata, + **kwargs, + ) + return result.generations[0][0].text + + @deprecated("0.1.7", alternative="invoke", removal="1.0") + @override + def predict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: + _stop = None if stop is None else list(stop) + return self(text, stop=_stop, **kwargs) + + @deprecated("0.1.7", alternative="invoke", removal="1.0") + @override + def predict_messages( + self, + messages: list[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + text = get_buffer_string(messages) + _stop = None if stop is None else list(stop) + content = self(text, stop=_stop, **kwargs) + return AIMessage(content=content) + + @deprecated("0.1.7", alternative="ainvoke", removal="1.0") + @override + async def apredict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: + _stop = None if stop is None else list(stop) + return await self._call_async(text, stop=_stop, **kwargs) + + @deprecated("0.1.7", alternative="ainvoke", removal="1.0") + @override + async def apredict_messages( + self, + messages: list[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + text = get_buffer_string(messages) + _stop = None if stop is None else list(stop) + content = await self._call_async(text, stop=_stop, **kwargs) + return AIMessage(content=content) + + def __str__(self) -> str: + """Get a string representation of the object for printing.""" + cls_name = f"\033[1m{self.__class__.__name__}\033[0m" + return f"{cls_name}\nParams: {self._identifying_params}" + + @property + @abstractmethod + def _llm_type(self) -> str: + """Return type of llm.""" + + @override + def dict(self, **kwargs: Any) -> dict: + """Return a dictionary of the LLM.""" + starter_dict = dict(self._identifying_params) + starter_dict["_type"] = self._llm_type + return starter_dict + + def save(self, file_path: Union[Path, str]) -> None: + """Save the LLM. + + Args: + file_path: Path to file to save the LLM to. + + Raises: + ValueError: If the file path is not a string or Path object. + + Example: + .. code-block:: python + + llm.save(file_path="path/llm.yaml") + """ + # Convert file to Path object. + save_path = Path(file_path) + + directory_path = save_path.parent + directory_path.mkdir(parents=True, exist_ok=True) + + # Fetch dictionary to save + prompt_dict = self.dict() + + if save_path.suffix == ".json": + with save_path.open("w") as f: + json.dump(prompt_dict, f, indent=4) + elif save_path.suffix.endswith((".yaml", ".yml")): + with save_path.open("w") as f: + yaml.dump(prompt_dict, f, default_flow_style=False) + else: + msg = f"{save_path} must be json or yaml" + raise ValueError(msg) + + +class LLM(BaseLLM): + """Simple interface for implementing a custom LLM. 
+
+    You should subclass this class and implement the following:
+
+    - `_call` method: Run the LLM on the given prompt and input (used by `invoke`).
+    - `_identifying_params` property: Return a dictionary of the identifying
+        parameters. This is critical for caching and tracing purposes. The
+        identifying parameters are a dict that identifies the LLM; it should
+        usually include a `model_name`.
+
+    Optional: Override the following methods to provide more optimizations:
+
+    - `_acall`: Provide a native async version of the `_call` method.
+        If not provided, will delegate to the synchronous version using
+        `run_in_executor`. (Used by `ainvoke`).
+    - `_stream`: Stream the LLM on the given prompt and input.
+        `stream` will use `_stream` if provided; otherwise it will use `_call`
+        and the output will arrive in one chunk.
+    - `_astream`: Override to provide a native async version of the `_stream`
+        method. `astream` will use `_astream` if provided; otherwise it falls
+        back to `_stream` if that is implemented, and to `_acall` if it is not.
+
+    Please see the following guide for more information on how to
+    implement a custom LLM:
+
+    https://python.langchain.com/docs/how_to/custom_llm/
+    """
+
+    @abstractmethod
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[list[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> str:
+        """Run the LLM on the given input.
+
+        Override this method to implement the LLM logic.
+
+        Args:
+            prompt: The prompt to generate from.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of the stop substrings.
+                If stop tokens are not supported, consider raising NotImplementedError.
+            run_manager: Callback manager for the run.
+            **kwargs: Arbitrary additional keyword arguments. These are usually passed
+                to the model provider API call.
+
+        Returns:
+            The model output as a string. SHOULD NOT include the prompt.
+        """
+
+    async def _acall(
+        self,
+        prompt: str,
+        stop: Optional[list[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> str:
+        """Async version of the _call method.
+
+        The default implementation delegates to the synchronous _call method using
+        `run_in_executor`. Subclasses that need to provide a true async implementation
+        should override this method to reduce the overhead of using `run_in_executor`.
+
+        Args:
+            prompt: The prompt to generate from.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of the stop substrings.
+                If stop tokens are not supported, consider raising NotImplementedError.
+            run_manager: Callback manager for the run.
+            **kwargs: Arbitrary additional keyword arguments. These are usually passed
+                to the model provider API call.
+
+        Returns:
+            The model output as a string. SHOULD NOT include the prompt.
+        """
+        return await run_in_executor(
+            None,
+            self._call,
+            prompt,
+            stop,
+            run_manager.get_sync() if run_manager else None,
+            **kwargs,
+        )
+
+    def _generate(
+        self,
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> LLMResult:
+        """Run the LLM on the given prompt and input."""
+        # TODO: add caching here.
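+        # Illustrative note: a cache-aware version would mirror the flow of
+        # BaseLLM.generate above -- build an llm_string from the invocation
+        # params, look up each prompt in the configured cache, call the model
+        # only for the misses, and write the new generations back to the cache.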
+        generations = []
+        new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
+        for prompt in prompts:
+            text = (
+                self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
+                if new_arg_supported
+                else self._call(prompt, stop=stop, **kwargs)
+            )
+            generations.append([Generation(text=text)])
+        return LLMResult(generations=generations)
+
+    async def _agenerate(
+        self,
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> LLMResult:
+        """Async run the LLM on the given prompt and input."""
+        generations = []
+        new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
+        for prompt in prompts:
+            text = (
+                await self._acall(prompt, stop=stop, run_manager=run_manager, **kwargs)
+                if new_arg_supported
+                else await self._acall(prompt, stop=stop, **kwargs)
+            )
+            generations.append([Generation(text=text)])
+        return LLMResult(generations=generations)
diff --git a/venv/Lib/site-packages/langchain_core/load/__init__.py b/venv/Lib/site-packages/langchain_core/load/__init__.py
new file mode 100644
index 00000000..a87a380f
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/load/__init__.py
@@ -0,0 +1,36 @@
+"""**Load** module helps with serialization and deserialization."""
+
+from typing import TYPE_CHECKING
+
+from langchain_core._import_utils import import_attr
+
+if TYPE_CHECKING:
+    from langchain_core.load.dump import dumpd, dumps
+    from langchain_core.load.load import loads
+    from langchain_core.load.serializable import Serializable
+
+# Unfortunately, we have to import load from langchain_core/load/load.py eagerly
+# to avoid a namespace conflict. We want users to still be able to use
+# `from langchain_core.load import load` to get the load function, but
+# the `from langchain_core.load.load import load` absolute import should also work.
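+#
+# That is, both of these forms resolve to the same function:
+#
+#     from langchain_core.load import load
+#     from langchain_core.load.load import load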
+from langchain_core.load.load import load + +__all__ = ("dumpd", "dumps", "load", "loads", "Serializable") + +_dynamic_imports = { + "dumpd": "dump", + "dumps": "dump", + "loads": "load", + "Serializable": "serializable", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/load/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/load/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..a7e51fa9 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/load/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/load/__pycache__/dump.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/load/__pycache__/dump.cpython-312.pyc new file mode 100644 index 00000000..e2a3c8da Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/load/__pycache__/dump.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/load/__pycache__/load.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/load/__pycache__/load.cpython-312.pyc new file mode 100644 index 00000000..cc7bf2a6 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/load/__pycache__/load.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/load/__pycache__/mapping.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/load/__pycache__/mapping.cpython-312.pyc new file mode 100644 index 00000000..b185836a Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/load/__pycache__/mapping.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/load/__pycache__/serializable.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/load/__pycache__/serializable.cpython-312.pyc new file mode 100644 index 00000000..cef217ff Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/load/__pycache__/serializable.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/load/dump.py b/venv/Lib/site-packages/langchain_core/load/dump.py new file mode 100644 index 00000000..b1993c38 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/load/dump.py @@ -0,0 +1,87 @@ +"""Dump objects to json.""" + +import json +from typing import Any + +from pydantic import BaseModel + +from langchain_core.load.serializable import Serializable, to_json_not_implemented + + +def default(obj: Any) -> Any: + """Return a default value for an object. + + Args: + obj: The object to serialize to json if it is a Serializable object. + + Returns: + A json serializable object or a SerializedNotImplemented object. + """ + if isinstance(obj, Serializable): + return obj.to_json() + return to_json_not_implemented(obj) + + +def _dump_pydantic_models(obj: Any) -> Any: + from langchain_core.messages import AIMessage + from langchain_core.outputs import ChatGeneration + + if ( + isinstance(obj, ChatGeneration) + and isinstance(obj.message, AIMessage) + and (parsed := obj.message.additional_kwargs.get("parsed")) + and isinstance(parsed, BaseModel) + ): + obj_copy = obj.model_copy(deep=True) + obj_copy.message.additional_kwargs["parsed"] = parsed.model_dump() + return obj_copy + return obj + + +def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str: + """Return a json string representation of an object. 
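+
+    Example (an illustrative sketch; assumes ``msg`` is a serializable
+    LangChain object such as an ``AIMessage``):
+
+    .. code-block:: python
+
+        from langchain_core.load import dumps
+
+        json_str = dumps(msg, pretty=True)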
+ + Args: + obj: The object to dump. + pretty: Whether to pretty print the json. If true, the json will be + indented with 2 spaces (if no indent is provided as part of kwargs). + Default is False. + kwargs: Additional arguments to pass to json.dumps + + Returns: + A json string representation of the object. + + Raises: + ValueError: If `default` is passed as a kwarg. + """ + if "default" in kwargs: + msg = "`default` should not be passed to dumps" + raise ValueError(msg) + try: + obj = _dump_pydantic_models(obj) + if pretty: + indent = kwargs.pop("indent", 2) + return json.dumps(obj, default=default, indent=indent, **kwargs) + return json.dumps(obj, default=default, **kwargs) + except TypeError: + if pretty: + indent = kwargs.pop("indent", 2) + return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs) + return json.dumps(to_json_not_implemented(obj), **kwargs) + + +def dumpd(obj: Any) -> Any: + """Return a dict representation of an object. + + Note: + Unfortunately this function is not as efficient as it could be + because it first dumps the object to a json string and then loads it + back into a dictionary. + + Args: + obj: The object to dump. + + Returns: + dictionary that can be serialized to json using json.dumps + """ + return json.loads(dumps(obj)) diff --git a/venv/Lib/site-packages/langchain_core/load/load.py b/venv/Lib/site-packages/langchain_core/load/load.py new file mode 100644 index 00000000..e7c4cadb --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/load/load.py @@ -0,0 +1,245 @@ +"""Load LangChain objects from JSON strings or objects.""" + +import importlib +import json +import os +from typing import Any, Optional + +from langchain_core._api import beta +from langchain_core.load.mapping import ( + _JS_SERIALIZABLE_MAPPING, + _OG_SERIALIZABLE_MAPPING, + OLD_CORE_NAMESPACES_MAPPING, + SERIALIZABLE_MAPPING, +) +from langchain_core.load.serializable import Serializable + +DEFAULT_NAMESPACES = [ + "langchain", + "langchain_core", + "langchain_community", + "langchain_anthropic", + "langchain_groq", + "langchain_google_genai", + "langchain_aws", + "langchain_openai", + "langchain_google_vertexai", + "langchain_mistralai", + "langchain_fireworks", + "langchain_xai", + "langchain_sambanova", + "langchain_perplexity", +] +# Namespaces for which only deserializing via the SERIALIZABLE_MAPPING is allowed. +# Load by path is not allowed. +DISALLOW_LOAD_FROM_PATH = [ + "langchain_community", + "langchain", +] + +ALL_SERIALIZABLE_MAPPINGS = { + **SERIALIZABLE_MAPPING, + **OLD_CORE_NAMESPACES_MAPPING, + **_OG_SERIALIZABLE_MAPPING, + **_JS_SERIALIZABLE_MAPPING, +} + + +class Reviver: + """Reviver for JSON objects.""" + + def __init__( + self, + secrets_map: Optional[dict[str, str]] = None, + valid_namespaces: Optional[list[str]] = None, + secrets_from_env: bool = True, # noqa: FBT001,FBT002 + additional_import_mappings: Optional[ + dict[tuple[str, ...], tuple[str, ...]] + ] = None, + ) -> None: + """Initialize the reviver. + + Args: + secrets_map: A map of secrets to load. If a secret is not found in + the map, it will be loaded from the environment if `secrets_from_env` + is True. Defaults to None. + valid_namespaces: A list of additional namespaces (modules) + to allow to be deserialized. Defaults to None. + secrets_from_env: Whether to load secrets from the environment. + Defaults to True. + additional_import_mappings: A dictionary of additional namespace mappings + You can use this to override default mappings or add new mappings. + Defaults to None. 
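+
+        Example (illustrative; ``Reviver`` is normally used indirectly via
+        ``load``/``loads`` rather than constructed by hand, and the key value
+        shown here is a placeholder):
+
+        .. code-block:: python
+
+            reviver = Reviver(secrets_map={"MY_API_KEY": "sk-placeholder"})
+            obj = json.loads(text, object_hook=reviver)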
+ """ + self.secrets_from_env = secrets_from_env + self.secrets_map = secrets_map or {} + # By default, only support langchain, but user can pass in additional namespaces + self.valid_namespaces = ( + [*DEFAULT_NAMESPACES, *valid_namespaces] + if valid_namespaces + else DEFAULT_NAMESPACES + ) + self.additional_import_mappings = additional_import_mappings or {} + self.import_mappings = ( + { + **ALL_SERIALIZABLE_MAPPINGS, + **self.additional_import_mappings, + } + if self.additional_import_mappings + else ALL_SERIALIZABLE_MAPPINGS + ) + + def __call__(self, value: dict[str, Any]) -> Any: + """Revive the value.""" + if ( + value.get("lc") == 1 + and value.get("type") == "secret" + and value.get("id") is not None + ): + [key] = value["id"] + if key in self.secrets_map: + return self.secrets_map[key] + if self.secrets_from_env and key in os.environ and os.environ[key]: + return os.environ[key] + return None + + if ( + value.get("lc") == 1 + and value.get("type") == "not_implemented" + and value.get("id") is not None + ): + msg = ( + "Trying to load an object that doesn't implement " + f"serialization: {value}" + ) + raise NotImplementedError(msg) + + if ( + value.get("lc") == 1 + and value.get("type") == "constructor" + and value.get("id") is not None + ): + [*namespace, name] = value["id"] + mapping_key = tuple(value["id"]) + + if ( + namespace[0] not in self.valid_namespaces + # The root namespace ["langchain"] is not a valid identifier. + or namespace == ["langchain"] + ): + msg = f"Invalid namespace: {value}" + raise ValueError(msg) + # Has explicit import path. + if mapping_key in self.import_mappings: + import_path = self.import_mappings[mapping_key] + # Split into module and name + import_dir, name = import_path[:-1], import_path[-1] + # Import module + mod = importlib.import_module(".".join(import_dir)) + elif namespace[0] in DISALLOW_LOAD_FROM_PATH: + msg = ( + "Trying to deserialize something that cannot " + "be deserialized in current version of langchain-core: " + f"{mapping_key}." + ) + raise ValueError(msg) + # Otherwise, treat namespace as path. + else: + mod = importlib.import_module(".".join(namespace)) + + cls = getattr(mod, name) + + # The class must be a subclass of Serializable. + if not issubclass(cls, Serializable): + msg = f"Invalid namespace: {value}" + raise ValueError(msg) + + # We don't need to recurse on kwargs + # as json.loads will do that for us. + kwargs = value.get("kwargs", {}) + return cls(**kwargs) + + return value + + +@beta() +def loads( + text: str, + *, + secrets_map: Optional[dict[str, str]] = None, + valid_namespaces: Optional[list[str]] = None, + secrets_from_env: bool = True, + additional_import_mappings: Optional[dict[tuple[str, ...], tuple[str, ...]]] = None, +) -> Any: + """Revive a LangChain class from a JSON string. + + Equivalent to `load(json.loads(text))`. + + Args: + text: The string to load. + secrets_map: A map of secrets to load. If a secret is not found in + the map, it will be loaded from the environment if `secrets_from_env` + is True. Defaults to None. + valid_namespaces: A list of additional namespaces (modules) + to allow to be deserialized. Defaults to None. + secrets_from_env: Whether to load secrets from the environment. + Defaults to True. + additional_import_mappings: A dictionary of additional namespace mappings + You can use this to override default mappings or add new mappings. + Defaults to None. + + Returns: + Revived LangChain objects. 
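+
+    Example (a minimal round-trip sketch; assumes ``msg`` is a serializable
+    LangChain object such as an ``AIMessage``):
+
+    .. code-block:: python
+
+        from langchain_core.load import dumps, loads
+
+        revived = loads(dumps(msg))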
+    """
+    return json.loads(
+        text,
+        object_hook=Reviver(
+            secrets_map, valid_namespaces, secrets_from_env, additional_import_mappings
+        ),
+    )
+
+
+@beta()
+def load(
+    obj: Any,
+    *,
+    secrets_map: Optional[dict[str, str]] = None,
+    valid_namespaces: Optional[list[str]] = None,
+    secrets_from_env: bool = True,
+    additional_import_mappings: Optional[dict[tuple[str, ...], tuple[str, ...]]] = None,
+) -> Any:
+    """Revive a LangChain class from a JSON object.
+
+    Use this if you already have a parsed JSON object,
+    e.g. from `json.load` or `orjson.loads`.
+
+    Args:
+        obj: The object to load.
+        secrets_map: A map of secrets to load. If a secret is not found in
+            the map, it will be loaded from the environment if `secrets_from_env`
+            is True. Defaults to None.
+        valid_namespaces: A list of additional namespaces (modules)
+            to allow to be deserialized. Defaults to None.
+        secrets_from_env: Whether to load secrets from the environment.
+            Defaults to True.
+        additional_import_mappings: A dictionary of additional namespace mappings.
+            You can use this to override default mappings or add new mappings.
+            Defaults to None.
+
+    Returns:
+        Revived LangChain objects.
+    """
+    reviver = Reviver(
+        secrets_map, valid_namespaces, secrets_from_env, additional_import_mappings
+    )
+
+    def _load(obj: Any) -> Any:
+        if isinstance(obj, dict):
+            # Need to revive leaf nodes before reviving this node
+            loaded_obj = {k: _load(v) for k, v in obj.items()}
+            return reviver(loaded_obj)
+        if isinstance(obj, list):
+            return [_load(o) for o in obj]
+        return obj
+
+    return _load(obj)
diff --git a/venv/Lib/site-packages/langchain_core/load/mapping.py b/venv/Lib/site-packages/langchain_core/load/mapping.py
new file mode 100644
index 00000000..aee837b5
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/load/mapping.py
@@ -0,0 +1,1066 @@
+"""Serialization mapping.
+
+This file contains a mapping between the lc_namespace path for a given
+subclass that inherits from Serializable and the namespace
+where that class is actually located.
+
+This mapping helps maintain the ability to serialize and deserialize
+well-known LangChain objects even if they are moved around in the codebase
+across different LangChain versions.
+
+For example, the code for the AIMessage class lives in
+langchain_core.messages.ai.AIMessage, but the class is associated with the
+lc_namespace ["langchain", "schema", "messages", "AIMessage"] because the
+code was originally in langchain.schema.messages.AIMessage.
+
+The mapping allows us to deserialize an AIMessage created with an older
+version of LangChain where the code was in a different location.
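+
+Concretely, the first entry in SERIALIZABLE_MAPPING below maps the serialized
+id ("langchain", "schema", "messages", "AIMessage") to the import path
+("langchain_core", "messages", "ai", "AIMessage").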
+""" + +# First value is the value that it is serialized as +# Second value is the path to load it from +SERIALIZABLE_MAPPING: dict[tuple[str, ...], tuple[str, ...]] = { + ("langchain", "schema", "messages", "AIMessage"): ( + "langchain_core", + "messages", + "ai", + "AIMessage", + ), + ("langchain", "schema", "messages", "AIMessageChunk"): ( + "langchain_core", + "messages", + "ai", + "AIMessageChunk", + ), + ("langchain", "schema", "messages", "BaseMessage"): ( + "langchain_core", + "messages", + "base", + "BaseMessage", + ), + ("langchain", "schema", "messages", "BaseMessageChunk"): ( + "langchain_core", + "messages", + "base", + "BaseMessageChunk", + ), + ("langchain", "schema", "messages", "ChatMessage"): ( + "langchain_core", + "messages", + "chat", + "ChatMessage", + ), + ("langchain", "schema", "messages", "FunctionMessage"): ( + "langchain_core", + "messages", + "function", + "FunctionMessage", + ), + ("langchain", "schema", "messages", "HumanMessage"): ( + "langchain_core", + "messages", + "human", + "HumanMessage", + ), + ("langchain", "schema", "messages", "SystemMessage"): ( + "langchain_core", + "messages", + "system", + "SystemMessage", + ), + ("langchain", "schema", "messages", "ToolMessage"): ( + "langchain_core", + "messages", + "tool", + "ToolMessage", + ), + ("langchain", "schema", "messages", "RemoveMessage"): ( + "langchain_core", + "messages", + "modifier", + "RemoveMessage", + ), + ("langchain", "schema", "agent", "AgentAction"): ( + "langchain_core", + "agents", + "AgentAction", + ), + ("langchain", "schema", "agent", "AgentFinish"): ( + "langchain_core", + "agents", + "AgentFinish", + ), + ("langchain", "schema", "prompt_template", "BasePromptTemplate"): ( + "langchain_core", + "prompts", + "base", + "BasePromptTemplate", + ), + ("langchain", "chains", "llm", "LLMChain"): ( + "langchain", + "chains", + "llm", + "LLMChain", + ), + ("langchain", "prompts", "prompt", "PromptTemplate"): ( + "langchain_core", + "prompts", + "prompt", + "PromptTemplate", + ), + ("langchain", "prompts", "chat", "MessagesPlaceholder"): ( + "langchain_core", + "prompts", + "chat", + "MessagesPlaceholder", + ), + ("langchain", "llms", "openai", "OpenAI"): ( + "langchain_openai", + "llms", + "base", + "OpenAI", + ), + ("langchain", "prompts", "chat", "ChatPromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "ChatPromptTemplate", + ), + ("langchain", "prompts", "chat", "HumanMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "HumanMessagePromptTemplate", + ), + ("langchain", "prompts", "chat", "SystemMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "SystemMessagePromptTemplate", + ), + ("langchain", "prompts", "image", "ImagePromptTemplate"): ( + "langchain_core", + "prompts", + "image", + "ImagePromptTemplate", + ), + ("langchain", "schema", "agent", "AgentActionMessageLog"): ( + "langchain_core", + "agents", + "AgentActionMessageLog", + ), + ("langchain", "schema", "agent", "ToolAgentAction"): ( + "langchain", + "agents", + "output_parsers", + "tools", + "ToolAgentAction", + ), + ("langchain", "prompts", "chat", "BaseMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "BaseMessagePromptTemplate", + ), + ("langchain", "schema", "output", "ChatGeneration"): ( + "langchain_core", + "outputs", + "chat_generation", + "ChatGeneration", + ), + ("langchain", "schema", "output", "Generation"): ( + "langchain_core", + "outputs", + "generation", + "Generation", + ), + ("langchain", "schema", "document", "Document"): ( + 
"langchain_core", + "documents", + "base", + "Document", + ), + ("langchain", "output_parsers", "fix", "OutputFixingParser"): ( + "langchain", + "output_parsers", + "fix", + "OutputFixingParser", + ), + ("langchain", "prompts", "chat", "AIMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "AIMessagePromptTemplate", + ), + ("langchain", "output_parsers", "regex", "RegexParser"): ( + "langchain", + "output_parsers", + "regex", + "RegexParser", + ), + ("langchain", "schema", "runnable", "DynamicRunnable"): ( + "langchain_core", + "runnables", + "configurable", + "DynamicRunnable", + ), + ("langchain", "schema", "prompt", "PromptValue"): ( + "langchain_core", + "prompt_values", + "PromptValue", + ), + ("langchain", "schema", "runnable", "RunnableBinding"): ( + "langchain_core", + "runnables", + "base", + "RunnableBinding", + ), + ("langchain", "schema", "runnable", "RunnableBranch"): ( + "langchain_core", + "runnables", + "branch", + "RunnableBranch", + ), + ("langchain", "schema", "runnable", "RunnableWithFallbacks"): ( + "langchain_core", + "runnables", + "fallbacks", + "RunnableWithFallbacks", + ), + ("langchain", "schema", "output_parser", "StrOutputParser"): ( + "langchain_core", + "output_parsers", + "string", + "StrOutputParser", + ), + ("langchain", "chat_models", "openai", "ChatOpenAI"): ( + "langchain_openai", + "chat_models", + "base", + "ChatOpenAI", + ), + ("langchain", "output_parsers", "list", "CommaSeparatedListOutputParser"): ( + "langchain_core", + "output_parsers", + "list", + "CommaSeparatedListOutputParser", + ), + ("langchain", "schema", "runnable", "RunnableParallel"): ( + "langchain_core", + "runnables", + "base", + "RunnableParallel", + ), + ("langchain", "chat_models", "azure_openai", "AzureChatOpenAI"): ( + "langchain_openai", + "chat_models", + "azure", + "AzureChatOpenAI", + ), + ("langchain", "chat_models", "bedrock", "BedrockChat"): ( + "langchain_aws", + "chat_models", + "bedrock", + "ChatBedrock", + ), + ("langchain", "chat_models", "anthropic", "ChatAnthropic"): ( + "langchain_anthropic", + "chat_models", + "ChatAnthropic", + ), + ("langchain_groq", "chat_models", "ChatGroq"): ( + "langchain_groq", + "chat_models", + "ChatGroq", + ), + ("langchain", "chat_models", "fireworks", "ChatFireworks"): ( + "langchain_fireworks", + "chat_models", + "ChatFireworks", + ), + ("langchain", "chat_models", "google_palm", "ChatGooglePalm"): ( + "langchain", + "chat_models", + "google_palm", + "ChatGooglePalm", + ), + ("langchain", "chat_models", "vertexai", "ChatVertexAI"): ( + "langchain_google_vertexai", + "chat_models", + "ChatVertexAI", + ), + ("langchain", "chat_models", "mistralai", "ChatMistralAI"): ( + "langchain_mistralai", + "chat_models", + "ChatMistralAI", + ), + ("langchain", "chat_models", "bedrock", "ChatBedrock"): ( + "langchain_aws", + "chat_models", + "bedrock", + "ChatBedrock", + ), + ("langchain_google_genai", "chat_models", "ChatGoogleGenerativeAI"): ( + "langchain_google_genai", + "chat_models", + "ChatGoogleGenerativeAI", + ), + ("langchain", "schema", "output", "ChatGenerationChunk"): ( + "langchain_core", + "outputs", + "chat_generation", + "ChatGenerationChunk", + ), + ("langchain", "schema", "messages", "ChatMessageChunk"): ( + "langchain_core", + "messages", + "chat", + "ChatMessageChunk", + ), + ("langchain", "schema", "messages", "HumanMessageChunk"): ( + "langchain_core", + "messages", + "human", + "HumanMessageChunk", + ), + ("langchain", "schema", "messages", "FunctionMessageChunk"): ( + "langchain_core", + "messages", + 
"function", + "FunctionMessageChunk", + ), + ("langchain", "schema", "messages", "SystemMessageChunk"): ( + "langchain_core", + "messages", + "system", + "SystemMessageChunk", + ), + ("langchain", "schema", "messages", "ToolMessageChunk"): ( + "langchain_core", + "messages", + "tool", + "ToolMessageChunk", + ), + ("langchain", "schema", "output", "GenerationChunk"): ( + "langchain_core", + "outputs", + "generation", + "GenerationChunk", + ), + ("langchain", "llms", "openai", "BaseOpenAI"): ( + "langchain", + "llms", + "openai", + "BaseOpenAI", + ), + ("langchain", "llms", "bedrock", "Bedrock"): ( + "langchain_aws", + "llms", + "bedrock", + "BedrockLLM", + ), + ("langchain", "llms", "fireworks", "Fireworks"): ( + "langchain_fireworks", + "llms", + "Fireworks", + ), + ("langchain", "llms", "google_palm", "GooglePalm"): ( + "langchain", + "llms", + "google_palm", + "GooglePalm", + ), + ("langchain", "llms", "openai", "AzureOpenAI"): ( + "langchain_openai", + "llms", + "azure", + "AzureOpenAI", + ), + ("langchain", "llms", "replicate", "Replicate"): ( + "langchain", + "llms", + "replicate", + "Replicate", + ), + ("langchain", "llms", "vertexai", "VertexAI"): ( + "langchain_vertexai", + "llms", + "VertexAI", + ), + ("langchain", "output_parsers", "combining", "CombiningOutputParser"): ( + "langchain", + "output_parsers", + "combining", + "CombiningOutputParser", + ), + ("langchain", "schema", "prompt_template", "BaseChatPromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "BaseChatPromptTemplate", + ), + ("langchain", "prompts", "chat", "ChatMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "ChatMessagePromptTemplate", + ), + ("langchain", "prompts", "few_shot_with_templates", "FewShotPromptWithTemplates"): ( + "langchain_core", + "prompts", + "few_shot_with_templates", + "FewShotPromptWithTemplates", + ), + ("langchain", "prompts", "pipeline", "PipelinePromptTemplate"): ( + "langchain_core", + "prompts", + "pipeline", + "PipelinePromptTemplate", + ), + ("langchain", "prompts", "base", "StringPromptTemplate"): ( + "langchain_core", + "prompts", + "string", + "StringPromptTemplate", + ), + ("langchain", "prompts", "base", "StringPromptValue"): ( + "langchain_core", + "prompt_values", + "StringPromptValue", + ), + ("langchain", "prompts", "chat", "BaseStringMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "BaseStringMessagePromptTemplate", + ), + ("langchain", "prompts", "chat", "ChatPromptValue"): ( + "langchain_core", + "prompt_values", + "ChatPromptValue", + ), + ("langchain", "prompts", "chat", "ChatPromptValueConcrete"): ( + "langchain_core", + "prompt_values", + "ChatPromptValueConcrete", + ), + ("langchain", "schema", "runnable", "HubRunnable"): ( + "langchain", + "runnables", + "hub", + "HubRunnable", + ), + ("langchain", "schema", "runnable", "RunnableBindingBase"): ( + "langchain_core", + "runnables", + "base", + "RunnableBindingBase", + ), + ("langchain", "schema", "runnable", "OpenAIFunctionsRouter"): ( + "langchain", + "runnables", + "openai_functions", + "OpenAIFunctionsRouter", + ), + ("langchain", "schema", "runnable", "RouterRunnable"): ( + "langchain_core", + "runnables", + "router", + "RouterRunnable", + ), + ("langchain", "schema", "runnable", "RunnablePassthrough"): ( + "langchain_core", + "runnables", + "passthrough", + "RunnablePassthrough", + ), + ("langchain", "schema", "runnable", "RunnableSequence"): ( + "langchain_core", + "runnables", + "base", + "RunnableSequence", + ), + ("langchain", "schema", "runnable", 
"RunnableEach"): ( + "langchain_core", + "runnables", + "base", + "RunnableEach", + ), + ("langchain", "schema", "runnable", "RunnableEachBase"): ( + "langchain_core", + "runnables", + "base", + "RunnableEachBase", + ), + ("langchain", "schema", "runnable", "RunnableConfigurableAlternatives"): ( + "langchain_core", + "runnables", + "configurable", + "RunnableConfigurableAlternatives", + ), + ("langchain", "schema", "runnable", "RunnableConfigurableFields"): ( + "langchain_core", + "runnables", + "configurable", + "RunnableConfigurableFields", + ), + ("langchain", "schema", "runnable", "RunnableWithMessageHistory"): ( + "langchain_core", + "runnables", + "history", + "RunnableWithMessageHistory", + ), + ("langchain", "schema", "runnable", "RunnableAssign"): ( + "langchain_core", + "runnables", + "passthrough", + "RunnableAssign", + ), + ("langchain", "schema", "runnable", "RunnableRetry"): ( + "langchain_core", + "runnables", + "retry", + "RunnableRetry", + ), + ("langchain_core", "prompts", "structured", "StructuredPrompt"): ( + "langchain_core", + "prompts", + "structured", + "StructuredPrompt", + ), + ("langchain_sambanova", "chat_models", "ChatSambaNovaCloud"): ( + "langchain_sambanova", + "chat_models", + "ChatSambaNovaCloud", + ), + ("langchain_sambanova", "chat_models", "ChatSambaStudio"): ( + "langchain_sambanova", + "chat_models", + "ChatSambaStudio", + ), + ("langchain_core", "prompts", "message", "_DictMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "dict", + "DictPromptTemplate", + ), +} + +# Needed for backwards compatibility for old versions of LangChain where things +# Were in different place +_OG_SERIALIZABLE_MAPPING: dict[tuple[str, ...], tuple[str, ...]] = { + ("langchain", "schema", "AIMessage"): ( + "langchain_core", + "messages", + "ai", + "AIMessage", + ), + ("langchain", "schema", "ChatMessage"): ( + "langchain_core", + "messages", + "chat", + "ChatMessage", + ), + ("langchain", "schema", "FunctionMessage"): ( + "langchain_core", + "messages", + "function", + "FunctionMessage", + ), + ("langchain", "schema", "HumanMessage"): ( + "langchain_core", + "messages", + "human", + "HumanMessage", + ), + ("langchain", "schema", "SystemMessage"): ( + "langchain_core", + "messages", + "system", + "SystemMessage", + ), + ("langchain", "schema", "prompt_template", "ImagePromptTemplate"): ( + "langchain_core", + "prompts", + "image", + "ImagePromptTemplate", + ), + ("langchain", "schema", "agent", "OpenAIToolAgentAction"): ( + "langchain", + "agents", + "output_parsers", + "openai_tools", + "OpenAIToolAgentAction", + ), +} + +# Needed for backwards compatibility for a few versions where we serialized +# with langchain_core paths. 
+OLD_CORE_NAMESPACES_MAPPING: dict[tuple[str, ...], tuple[str, ...]] = { + ("langchain_core", "messages", "ai", "AIMessage"): ( + "langchain_core", + "messages", + "ai", + "AIMessage", + ), + ("langchain_core", "messages", "ai", "AIMessageChunk"): ( + "langchain_core", + "messages", + "ai", + "AIMessageChunk", + ), + ("langchain_core", "messages", "base", "BaseMessage"): ( + "langchain_core", + "messages", + "base", + "BaseMessage", + ), + ("langchain_core", "messages", "base", "BaseMessageChunk"): ( + "langchain_core", + "messages", + "base", + "BaseMessageChunk", + ), + ("langchain_core", "messages", "chat", "ChatMessage"): ( + "langchain_core", + "messages", + "chat", + "ChatMessage", + ), + ("langchain_core", "messages", "function", "FunctionMessage"): ( + "langchain_core", + "messages", + "function", + "FunctionMessage", + ), + ("langchain_core", "messages", "human", "HumanMessage"): ( + "langchain_core", + "messages", + "human", + "HumanMessage", + ), + ("langchain_core", "messages", "system", "SystemMessage"): ( + "langchain_core", + "messages", + "system", + "SystemMessage", + ), + ("langchain_core", "messages", "tool", "ToolMessage"): ( + "langchain_core", + "messages", + "tool", + "ToolMessage", + ), + ("langchain_core", "agents", "AgentAction"): ( + "langchain_core", + "agents", + "AgentAction", + ), + ("langchain_core", "agents", "AgentFinish"): ( + "langchain_core", + "agents", + "AgentFinish", + ), + ("langchain_core", "prompts", "base", "BasePromptTemplate"): ( + "langchain_core", + "prompts", + "base", + "BasePromptTemplate", + ), + ("langchain_core", "prompts", "prompt", "PromptTemplate"): ( + "langchain_core", + "prompts", + "prompt", + "PromptTemplate", + ), + ("langchain_core", "prompts", "chat", "MessagesPlaceholder"): ( + "langchain_core", + "prompts", + "chat", + "MessagesPlaceholder", + ), + ("langchain_core", "prompts", "chat", "ChatPromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "ChatPromptTemplate", + ), + ("langchain_core", "prompts", "chat", "HumanMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "HumanMessagePromptTemplate", + ), + ("langchain_core", "prompts", "chat", "SystemMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "SystemMessagePromptTemplate", + ), + ("langchain_core", "agents", "AgentActionMessageLog"): ( + "langchain_core", + "agents", + "AgentActionMessageLog", + ), + ("langchain_core", "prompts", "chat", "BaseMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "BaseMessagePromptTemplate", + ), + ("langchain_core", "outputs", "chat_generation", "ChatGeneration"): ( + "langchain_core", + "outputs", + "chat_generation", + "ChatGeneration", + ), + ("langchain_core", "outputs", "generation", "Generation"): ( + "langchain_core", + "outputs", + "generation", + "Generation", + ), + ("langchain_core", "documents", "base", "Document"): ( + "langchain_core", + "documents", + "base", + "Document", + ), + ("langchain_core", "prompts", "chat", "AIMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "AIMessagePromptTemplate", + ), + ("langchain_core", "runnables", "configurable", "DynamicRunnable"): ( + "langchain_core", + "runnables", + "configurable", + "DynamicRunnable", + ), + ("langchain_core", "prompt_values", "PromptValue"): ( + "langchain_core", + "prompt_values", + "PromptValue", + ), + ("langchain_core", "runnables", "base", "RunnableBinding"): ( + "langchain_core", + "runnables", + "base", + "RunnableBinding", + ), + ("langchain_core", "runnables", 
"branch", "RunnableBranch"): ( + "langchain_core", + "runnables", + "branch", + "RunnableBranch", + ), + ("langchain_core", "runnables", "fallbacks", "RunnableWithFallbacks"): ( + "langchain_core", + "runnables", + "fallbacks", + "RunnableWithFallbacks", + ), + ("langchain_core", "output_parsers", "string", "StrOutputParser"): ( + "langchain_core", + "output_parsers", + "string", + "StrOutputParser", + ), + ("langchain_core", "output_parsers", "list", "CommaSeparatedListOutputParser"): ( + "langchain_core", + "output_parsers", + "list", + "CommaSeparatedListOutputParser", + ), + ("langchain_core", "runnables", "base", "RunnableParallel"): ( + "langchain_core", + "runnables", + "base", + "RunnableParallel", + ), + ("langchain_core", "outputs", "chat_generation", "ChatGenerationChunk"): ( + "langchain_core", + "outputs", + "chat_generation", + "ChatGenerationChunk", + ), + ("langchain_core", "messages", "chat", "ChatMessageChunk"): ( + "langchain_core", + "messages", + "chat", + "ChatMessageChunk", + ), + ("langchain_core", "messages", "human", "HumanMessageChunk"): ( + "langchain_core", + "messages", + "human", + "HumanMessageChunk", + ), + ("langchain_core", "messages", "function", "FunctionMessageChunk"): ( + "langchain_core", + "messages", + "function", + "FunctionMessageChunk", + ), + ("langchain_core", "messages", "system", "SystemMessageChunk"): ( + "langchain_core", + "messages", + "system", + "SystemMessageChunk", + ), + ("langchain_core", "messages", "tool", "ToolMessageChunk"): ( + "langchain_core", + "messages", + "tool", + "ToolMessageChunk", + ), + ("langchain_core", "outputs", "generation", "GenerationChunk"): ( + "langchain_core", + "outputs", + "generation", + "GenerationChunk", + ), + ("langchain_core", "prompts", "chat", "BaseChatPromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "BaseChatPromptTemplate", + ), + ("langchain_core", "prompts", "chat", "ChatMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "ChatMessagePromptTemplate", + ), + ( + "langchain_core", + "prompts", + "few_shot_with_templates", + "FewShotPromptWithTemplates", + ): ( + "langchain_core", + "prompts", + "few_shot_with_templates", + "FewShotPromptWithTemplates", + ), + ("langchain_core", "prompts", "pipeline", "PipelinePromptTemplate"): ( + "langchain_core", + "prompts", + "pipeline", + "PipelinePromptTemplate", + ), + ("langchain_core", "prompts", "string", "StringPromptTemplate"): ( + "langchain_core", + "prompts", + "string", + "StringPromptTemplate", + ), + ("langchain_core", "prompt_values", "StringPromptValue"): ( + "langchain_core", + "prompt_values", + "StringPromptValue", + ), + ("langchain_core", "prompts", "chat", "BaseStringMessagePromptTemplate"): ( + "langchain_core", + "prompts", + "chat", + "BaseStringMessagePromptTemplate", + ), + ("langchain_core", "prompt_values", "ChatPromptValue"): ( + "langchain_core", + "prompt_values", + "ChatPromptValue", + ), + ("langchain_core", "prompt_values", "ChatPromptValueConcrete"): ( + "langchain_core", + "prompt_values", + "ChatPromptValueConcrete", + ), + ("langchain_core", "runnables", "base", "RunnableBindingBase"): ( + "langchain_core", + "runnables", + "base", + "RunnableBindingBase", + ), + ("langchain_core", "runnables", "router", "RouterRunnable"): ( + "langchain_core", + "runnables", + "router", + "RouterRunnable", + ), + ("langchain_core", "runnables", "passthrough", "RunnablePassthrough"): ( + "langchain_core", + "runnables", + "passthrough", + "RunnablePassthrough", + ), + ("langchain_core", "runnables", 
"base", "RunnableSequence"): ( + "langchain_core", + "runnables", + "base", + "RunnableSequence", + ), + ("langchain_core", "runnables", "base", "RunnableEach"): ( + "langchain_core", + "runnables", + "base", + "RunnableEach", + ), + ("langchain_core", "runnables", "base", "RunnableEachBase"): ( + "langchain_core", + "runnables", + "base", + "RunnableEachBase", + ), + ( + "langchain_core", + "runnables", + "configurable", + "RunnableConfigurableAlternatives", + ): ( + "langchain_core", + "runnables", + "configurable", + "RunnableConfigurableAlternatives", + ), + ("langchain_core", "runnables", "configurable", "RunnableConfigurableFields"): ( + "langchain_core", + "runnables", + "configurable", + "RunnableConfigurableFields", + ), + ("langchain_core", "runnables", "history", "RunnableWithMessageHistory"): ( + "langchain_core", + "runnables", + "history", + "RunnableWithMessageHistory", + ), + ("langchain_core", "runnables", "passthrough", "RunnableAssign"): ( + "langchain_core", + "runnables", + "passthrough", + "RunnableAssign", + ), + ("langchain_core", "runnables", "retry", "RunnableRetry"): ( + "langchain_core", + "runnables", + "retry", + "RunnableRetry", + ), +} + +_JS_SERIALIZABLE_MAPPING: dict[tuple[str, ...], tuple[str, ...]] = { + ("langchain_core", "messages", "AIMessage"): ( + "langchain_core", + "messages", + "ai", + "AIMessage", + ), + ("langchain_core", "messages", "AIMessageChunk"): ( + "langchain_core", + "messages", + "ai", + "AIMessageChunk", + ), + ("langchain_core", "messages", "BaseMessage"): ( + "langchain_core", + "messages", + "base", + "BaseMessage", + ), + ("langchain_core", "messages", "BaseMessageChunk"): ( + "langchain_core", + "messages", + "base", + "BaseMessageChunk", + ), + ("langchain_core", "messages", "ChatMessage"): ( + "langchain_core", + "messages", + "chat", + "ChatMessage", + ), + ("langchain_core", "messages", "ChatMessageChunk"): ( + "langchain_core", + "messages", + "chat", + "ChatMessageChunk", + ), + ("langchain_core", "messages", "FunctionMessage"): ( + "langchain_core", + "messages", + "function", + "FunctionMessage", + ), + ("langchain_core", "messages", "FunctionMessageChunk"): ( + "langchain_core", + "messages", + "function", + "FunctionMessageChunk", + ), + ("langchain_core", "messages", "HumanMessage"): ( + "langchain_core", + "messages", + "human", + "HumanMessage", + ), + ("langchain_core", "messages", "HumanMessageChunk"): ( + "langchain_core", + "messages", + "human", + "HumanMessageChunk", + ), + ("langchain_core", "messages", "SystemMessage"): ( + "langchain_core", + "messages", + "system", + "SystemMessage", + ), + ("langchain_core", "messages", "SystemMessageChunk"): ( + "langchain_core", + "messages", + "system", + "SystemMessageChunk", + ), + ("langchain_core", "messages", "ToolMessage"): ( + "langchain_core", + "messages", + "tool", + "ToolMessage", + ), + ("langchain_core", "messages", "ToolMessageChunk"): ( + "langchain_core", + "messages", + "tool", + "ToolMessageChunk", + ), + ("langchain_core", "prompts", "image", "ImagePromptTemplate"): ( + "langchain_core", + "prompts", + "image", + "ImagePromptTemplate", + ), + ("langchain", "chat_models", "bedrock", "ChatBedrock"): ( + "langchain_aws", + "chat_models", + "ChatBedrock", + ), + ("langchain", "chat_models", "google_genai", "ChatGoogleGenerativeAI"): ( + "langchain_google_genai", + "chat_models", + "ChatGoogleGenerativeAI", + ), + ("langchain", "chat_models", "groq", "ChatGroq"): ( + "langchain_groq", + "chat_models", + "ChatGroq", + ), + ("langchain", "chat_models", 
"bedrock", "BedrockChat"): ( + "langchain_aws", + "chat_models", + "ChatBedrock", + ), +} diff --git a/venv/Lib/site-packages/langchain_core/load/serializable.py b/venv/Lib/site-packages/langchain_core/load/serializable.py new file mode 100644 index 00000000..9da23152 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/load/serializable.py @@ -0,0 +1,375 @@ +"""Serializable base class.""" + +import contextlib +import logging +from abc import ABC +from typing import ( + Any, + Literal, + Optional, + TypedDict, + Union, + cast, +) + +from pydantic import BaseModel, ConfigDict +from pydantic.fields import FieldInfo +from typing_extensions import NotRequired, override + +logger = logging.getLogger(__name__) + + +class BaseSerialized(TypedDict): + """Base class for serialized objects. + + Parameters: + lc: The version of the serialization format. + id: The unique identifier of the object. + name: The name of the object. Optional. + graph: The graph of the object. Optional. + """ + + lc: int + id: list[str] + name: NotRequired[str] + graph: NotRequired[dict[str, Any]] + + +class SerializedConstructor(BaseSerialized): + """Serialized constructor. + + Parameters: + type: The type of the object. Must be "constructor". + kwargs: The constructor arguments. + """ + + type: Literal["constructor"] + kwargs: dict[str, Any] + + +class SerializedSecret(BaseSerialized): + """Serialized secret. + + Parameters: + type: The type of the object. Must be "secret". + """ + + type: Literal["secret"] + + +class SerializedNotImplemented(BaseSerialized): + """Serialized not implemented. + + Parameters: + type: The type of the object. Must be "not_implemented". + repr: The representation of the object. Optional. + """ + + type: Literal["not_implemented"] + repr: Optional[str] + + +def try_neq_default(value: Any, key: str, model: BaseModel) -> bool: + """Try to determine if a value is different from the default. + + Args: + value: The value. + key: The key. + model: The pydantic model. + + Returns: + Whether the value is different from the default. + + Raises: + Exception: If the key is not in the model. + """ + field = type(model).model_fields[key] + return _try_neq_default(value, field) + + +def _try_neq_default(value: Any, field: FieldInfo) -> bool: + # Handle edge case: inequality of two objects does not evaluate to a bool (e.g. two + # Pandas DataFrames). + try: + return bool(field.get_default() != value) + except Exception as _: + try: + return all(field.get_default() != value) + except Exception as _: + try: + return value is not field.default + except Exception as _: + return False + + +class Serializable(BaseModel, ABC): + """Serializable base class. + + This class is used to serialize objects to JSON. + + It relies on the following methods and properties: + + - `is_lc_serializable`: Is this class serializable? + By design, even if a class inherits from Serializable, it is not serializable by + default. This is to prevent accidental serialization of objects that should not + be serialized. + - `get_lc_namespace`: Get the namespace of the langchain object. + During deserialization, this namespace is used to identify + the correct class to instantiate. + Please see the `Reviver` class in `langchain_core.load.load` for more details. + During deserialization an additional mapping is handle + classes that have moved or been renamed across package versions. + - `lc_secrets`: A map of constructor argument names to secret ids. 
+ - `lc_attributes`: List of additional attribute names that should be included + as part of the serialized representation. + """ + + # Remove default BaseModel init docstring. + def __init__(self, *args: Any, **kwargs: Any) -> None: + """""" # noqa: D419 + super().__init__(*args, **kwargs) + + @classmethod + def is_lc_serializable(cls) -> bool: + """Is this class serializable? + + By design, even if a class inherits from Serializable, it is not serializable by + default. This is to prevent accidental serialization of objects that should not + be serialized. + + Returns: + Whether the class is serializable. Default is False. + """ + return False + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + For example, if the class is `langchain.llms.openai.OpenAI`, then the + namespace is ["langchain", "llms", "openai"] + """ + return cls.__module__.split(".") + + @property + def lc_secrets(self) -> dict[str, str]: + """A map of constructor argument names to secret ids. + + For example, + {"openai_api_key": "OPENAI_API_KEY"} + """ + return {} + + @property + def lc_attributes(self) -> dict: + """List of attribute names that should be included in the serialized kwargs. + + These attributes must be accepted by the constructor. + Default is an empty dictionary. + """ + return {} + + @classmethod + def lc_id(cls) -> list[str]: + """A unique identifier for this class for serialization purposes. + + The unique identifier is a list of strings that describes the path + to the object. + For example, for the class `langchain.llms.openai.OpenAI`, the id is + ["langchain", "llms", "openai", "OpenAI"]. + """ + # Pydantic generics change the class name. So we need to do the following + if ( + "origin" in cls.__pydantic_generic_metadata__ + and cls.__pydantic_generic_metadata__["origin"] is not None + ): + original_name = cls.__pydantic_generic_metadata__["origin"].__name__ + else: + original_name = cls.__name__ + return [*cls.get_lc_namespace(), original_name] + + model_config = ConfigDict( + extra="ignore", + ) + + @override + def __repr_args__(self) -> Any: + return [ + (k, v) + for k, v in super().__repr_args__() + if (k not in type(self).model_fields or try_neq_default(v, k, self)) + ] + + def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]: + """Serialize the object to JSON. + + Returns: + A json serializable object or a SerializedNotImplemented object. + """ + if not self.is_lc_serializable(): + return self.to_json_not_implemented() + + model_fields = type(self).model_fields + secrets = {} + # Get latest values for kwargs if there is an attribute with same name + lc_kwargs = {} + for k, v in self: + if not _is_field_useful(self, k, v): + continue + # Do nothing if the field is excluded + if k in model_fields and model_fields[k].exclude: + continue + + lc_kwargs[k] = getattr(self, k, v) + + # Merge the lc_secrets and lc_attributes from every class in the MRO + for cls in [None, *self.__class__.mro()]: + # Once we get to Serializable, we're done + if cls is Serializable: + break + + if cls: + deprecated_attributes = [ + "lc_namespace", + "lc_serializable", + ] + + for attr in deprecated_attributes: + if hasattr(cls, attr): + msg = ( + f"Class {self.__class__} has a deprecated " + f"attribute {attr}. Please use the corresponding " + f"classmethod instead." 
+ ) + raise ValueError(msg) + + # Get a reference to self bound to each class in the MRO + this = cast("Serializable", self if cls is None else super(cls, self)) + + secrets.update(this.lc_secrets) + # Now also add the aliases for the secrets + # This ensures known secret aliases are hidden. + # Note: this does NOT hide any other extra kwargs + # that are not present in the fields. + for key in list(secrets): + value = secrets[key] + if (key in model_fields) and ( + alias := model_fields[key].alias + ) is not None: + secrets[alias] = value + lc_kwargs.update(this.lc_attributes) + + # include all secrets, even if not specified in kwargs + # as these secrets may be passed as an environment variable instead + for key in secrets: + secret_value = getattr(self, key, None) or lc_kwargs.get(key) + if secret_value is not None: + lc_kwargs.update({key: secret_value}) + + return { + "lc": 1, + "type": "constructor", + "id": self.lc_id(), + "kwargs": lc_kwargs + if not secrets + else _replace_secrets(lc_kwargs, secrets), + } + + def to_json_not_implemented(self) -> SerializedNotImplemented: + """Serialize a "not implemented" object.""" + return to_json_not_implemented(self) + + +def _is_field_useful(inst: Serializable, key: str, value: Any) -> bool: + """Check if a field is useful as a constructor argument. + + Args: + inst: The instance. + key: The key. + value: The value. + + Returns: + Whether the field is useful. If the field is required, it is useful. + If the field is not required, it is useful if the value is not None. + If the field is not required and the value is None, it is useful if the + default value is different from the value. + """ + field = type(inst).model_fields.get(key) + if not field: + return False + + if field.is_required(): + return True + + # Handle edge case: a value cannot be converted to a boolean (e.g. a + # Pandas DataFrame). + try: + value_is_truthy = bool(value) + except Exception as _: + value_is_truthy = False + + if value_is_truthy: + return True + + # Value is still falsy here! + if field.default_factory is dict and isinstance(value, dict): + return False + + # Value is still falsy here! + if field.default_factory is list and isinstance(value, list): + return False + + value_neq_default = _try_neq_default(value, field) + + # If value is falsy and does not match the default + return value_is_truthy or value_neq_default + + +def _replace_secrets( + root: dict[Any, Any], secrets_map: dict[str, str] +) -> dict[Any, Any]: + result = root.copy() + for path, secret_id in secrets_map.items(): + [*parts, last] = path.split(".") + current = result + for part in parts: + if part not in current: + break + current[part] = current[part].copy() + current = current[part] + if last in current: + current[last] = { + "lc": 1, + "type": "secret", + "id": [secret_id], + } + return result + + +def to_json_not_implemented(obj: object) -> SerializedNotImplemented: + """Serialize a "not implemented" object. + + Args: + obj: object to serialize. 
+ + Returns: + SerializedNotImplemented + """ + _id: list[str] = [] + try: + if hasattr(obj, "__name__"): + _id = [*obj.__module__.split("."), obj.__name__] + elif hasattr(obj, "__class__"): + _id = [*obj.__class__.__module__.split("."), obj.__class__.__name__] + except Exception: + logger.debug("Failed to serialize object", exc_info=True) + + result: SerializedNotImplemented = { + "lc": 1, + "type": "not_implemented", + "id": _id, + "repr": None, + } + with contextlib.suppress(Exception): + result["repr"] = repr(obj) + return result diff --git a/venv/Lib/site-packages/langchain_core/memory.py b/venv/Lib/site-packages/langchain_core/memory.py new file mode 100644 index 00000000..35e8d9cb --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/memory.py @@ -0,0 +1,115 @@ +"""**Memory** maintains Chain state, incorporating context from past runs. + +This module contains memory abstractions from LangChain v0.0.x. + +These abstractions are now deprecated and will be removed in LangChain v1.0.0. +""" # noqa: E501 + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Any + +from pydantic import ConfigDict + +from langchain_core._api import deprecated +from langchain_core.load.serializable import Serializable +from langchain_core.runnables import run_in_executor + + +@deprecated( + since="0.3.3", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class BaseMemory(Serializable, ABC): + """Abstract base class for memory in Chains. + + Memory refers to state in Chains. Memory can be used to store information about + past executions of a Chain and inject that information into the inputs of + future executions of the Chain. For example, for conversational Chains Memory + can be used to store conversations and automatically add them to future model + prompts so that the model has the necessary context to respond coherently to + the latest input. + + Example: + .. code-block:: python + + class SimpleMemory(BaseMemory): + memories: dict[str, Any] = dict() + + @property + def memory_variables(self) -> list[str]: + return list(self.memories.keys()) + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: + return self.memories + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + pass + + def clear(self) -> None: + pass + """ # noqa: E501 + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @property + @abstractmethod + def memory_variables(self) -> list[str]: + """The string keys this memory class will add to chain inputs.""" + + @abstractmethod + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return key-value pairs given the text input to the chain. + + Args: + inputs: The inputs to the chain. + + Returns: + A dictionary of key-value pairs. + """ + + async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Async return key-value pairs given the text input to the chain. + + Args: + inputs: The inputs to the chain. + + Returns: + A dictionary of key-value pairs. + """ + return await run_in_executor(None, self.load_memory_variables, inputs) + + @abstractmethod + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save the context of this chain run to memory. + + Args: + inputs: The inputs to the chain. + outputs: The outputs of the chain. 
+ """ + + async def asave_context( + self, inputs: dict[str, Any], outputs: dict[str, str] + ) -> None: + """Async save the context of this chain run to memory. + + Args: + inputs: The inputs to the chain. + outputs: The outputs of the chain. + """ + await run_in_executor(None, self.save_context, inputs, outputs) + + @abstractmethod + def clear(self) -> None: + """Clear memory contents.""" + + async def aclear(self) -> None: + """Async clear memory contents.""" + await run_in_executor(None, self.clear) diff --git a/venv/Lib/site-packages/langchain_core/messages/__init__.py b/venv/Lib/site-packages/langchain_core/messages/__init__.py new file mode 100644 index 00000000..a36042ba --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/__init__.py @@ -0,0 +1,150 @@ +"""**Messages** are objects used in prompts and chat conversations. + +**Class hierarchy:** + +.. code-block:: + + BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage + --> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk + +**Main helpers:** + +.. code-block:: + + ChatPromptTemplate + +""" # noqa: E501 + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.messages.ai import ( + AIMessage, + AIMessageChunk, + ) + from langchain_core.messages.base import ( + BaseMessage, + BaseMessageChunk, + merge_content, + message_to_dict, + messages_to_dict, + ) + from langchain_core.messages.chat import ChatMessage, ChatMessageChunk + from langchain_core.messages.content_blocks import ( + convert_to_openai_data_block, + convert_to_openai_image_block, + is_data_content_block, + ) + from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk + from langchain_core.messages.human import HumanMessage, HumanMessageChunk + from langchain_core.messages.modifier import RemoveMessage + from langchain_core.messages.system import SystemMessage, SystemMessageChunk + from langchain_core.messages.tool import ( + InvalidToolCall, + ToolCall, + ToolCallChunk, + ToolMessage, + ToolMessageChunk, + ) + from langchain_core.messages.utils import ( + AnyMessage, + MessageLikeRepresentation, + _message_from_dict, + convert_to_messages, + convert_to_openai_messages, + filter_messages, + get_buffer_string, + merge_message_runs, + message_chunk_to_message, + messages_from_dict, + trim_messages, + ) + +__all__ = ( + "AIMessage", + "AIMessageChunk", + "AnyMessage", + "BaseMessage", + "BaseMessageChunk", + "ChatMessage", + "ChatMessageChunk", + "FunctionMessage", + "FunctionMessageChunk", + "HumanMessage", + "HumanMessageChunk", + "InvalidToolCall", + "MessageLikeRepresentation", + "SystemMessage", + "SystemMessageChunk", + "ToolCall", + "ToolCallChunk", + "ToolMessage", + "ToolMessageChunk", + "RemoveMessage", + "_message_from_dict", + "convert_to_openai_data_block", + "convert_to_openai_image_block", + "convert_to_messages", + "get_buffer_string", + "is_data_content_block", + "merge_content", + "message_chunk_to_message", + "message_to_dict", + "messages_from_dict", + "messages_to_dict", + "filter_messages", + "merge_message_runs", + "trim_messages", + "convert_to_openai_messages", +) + +_dynamic_imports = { + "AIMessage": "ai", + "AIMessageChunk": "ai", + "BaseMessage": "base", + "BaseMessageChunk": "base", + "merge_content": "base", + "message_to_dict": "base", + "messages_to_dict": "base", + "ChatMessage": "chat", + 
"ChatMessageChunk": "chat", + "FunctionMessage": "function", + "FunctionMessageChunk": "function", + "HumanMessage": "human", + "HumanMessageChunk": "human", + "RemoveMessage": "modifier", + "SystemMessage": "system", + "SystemMessageChunk": "system", + "InvalidToolCall": "tool", + "ToolCall": "tool", + "ToolCallChunk": "tool", + "ToolMessage": "tool", + "ToolMessageChunk": "tool", + "AnyMessage": "utils", + "MessageLikeRepresentation": "utils", + "_message_from_dict": "utils", + "convert_to_messages": "utils", + "convert_to_openai_data_block": "content_blocks", + "convert_to_openai_image_block": "content_blocks", + "convert_to_openai_messages": "utils", + "filter_messages": "utils", + "get_buffer_string": "utils", + "is_data_content_block": "content_blocks", + "merge_message_runs": "utils", + "message_chunk_to_message": "utils", + "messages_from_dict": "utils", + "trim_messages": "utils", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..7b9bc0ee Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/messages/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/ai.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/ai.cpython-312.pyc new file mode 100644 index 00000000..bba3422b Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/messages/__pycache__/ai.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..6ae337f8 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/messages/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/chat.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/chat.cpython-312.pyc new file mode 100644 index 00000000..e15e08b4 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/messages/__pycache__/chat.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/content_blocks.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/content_blocks.cpython-312.pyc new file mode 100644 index 00000000..b7c8060b Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/messages/__pycache__/content_blocks.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/function.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/function.cpython-312.pyc new file mode 100644 index 00000000..5803c421 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/messages/__pycache__/function.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/human.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/human.cpython-312.pyc new file mode 100644 index 00000000..76170081 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain_core/messages/__pycache__/human.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/modifier.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/modifier.cpython-312.pyc new file mode 100644 index 00000000..95d9c32c Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/messages/__pycache__/modifier.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/system.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/system.cpython-312.pyc new file mode 100644 index 00000000..99f8476b Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/messages/__pycache__/system.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/tool.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/tool.cpython-312.pyc new file mode 100644 index 00000000..2db899b2 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/messages/__pycache__/tool.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/messages/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..c01d27c0 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/messages/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/messages/ai.py b/venv/Lib/site-packages/langchain_core/messages/ai.py new file mode 100644 index 00000000..6b067864 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/ai.py @@ -0,0 +1,557 @@ +"""AI message.""" + +import json +import logging +import operator +from typing import Any, Literal, Optional, Union, cast + +from pydantic import model_validator +from typing_extensions import NotRequired, Self, TypedDict, override + +from langchain_core.messages.base import ( + BaseMessage, + BaseMessageChunk, + merge_content, +) +from langchain_core.messages.tool import ( + InvalidToolCall, + ToolCall, + ToolCallChunk, + default_tool_chunk_parser, + default_tool_parser, +) +from langchain_core.messages.tool import ( + invalid_tool_call as create_invalid_tool_call, +) +from langchain_core.messages.tool import ( + tool_call as create_tool_call, +) +from langchain_core.messages.tool import ( + tool_call_chunk as create_tool_call_chunk, +) +from langchain_core.utils._merge import merge_dicts, merge_lists +from langchain_core.utils.json import parse_partial_json +from langchain_core.utils.usage import _dict_int_op + +logger = logging.getLogger(__name__) + + +_LC_ID_PREFIX = "run-" + + +class InputTokenDetails(TypedDict, total=False): + """Breakdown of input token counts. + + Does *not* need to sum to full input token count. Does *not* need to have all keys. + + Example: + + .. code-block:: python + + { + "audio": 10, + "cache_creation": 200, + "cache_read": 100, + } + + .. versionadded:: 0.3.9 + """ + + audio: int + """Audio input tokens.""" + cache_creation: int + """Input tokens that were cached and there was a cache miss. + + Since there was a cache miss, the cache was created from these tokens. + """ + cache_read: int + """Input tokens that were cached and there was a cache hit. + + Since there was a cache hit, the tokens were read from the cache. More precisely, + the model state given these tokens was read from the cache. 
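+
+    For example, if 900 of 1,000 prompt tokens were served from a provider's
+    prompt cache, the provider might report ``cache_read: 900`` here while the
+    parent ``UsageMetadata`` still counts all 1,000 as ``input_tokens``.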
+ """ + + +class OutputTokenDetails(TypedDict, total=False): + """Breakdown of output token counts. + + Does *not* need to sum to full output token count. Does *not* need to have all keys. + + Example: + + .. code-block:: python + + { + "audio": 10, + "reasoning": 200, + } + + .. versionadded:: 0.3.9 + """ + + audio: int + """Audio output tokens.""" + reasoning: int + """Reasoning output tokens. + + Tokens generated by the model in a chain of thought process (i.e. by OpenAI's o1 + models) that are not returned as part of model output. + """ + + +class UsageMetadata(TypedDict): + """Usage metadata for a message, such as token counts. + + This is a standard representation of token usage that is consistent across models. + + Example: + + .. code-block:: python + + { + "input_tokens": 350, + "output_tokens": 240, + "total_tokens": 590, + "input_token_details": { + "audio": 10, + "cache_creation": 200, + "cache_read": 100, + }, + "output_token_details": { + "audio": 10, + "reasoning": 200, + } + } + + .. versionchanged:: 0.3.9 + + Added ``input_token_details`` and ``output_token_details``. + """ + + input_tokens: int + """Count of input (or prompt) tokens. Sum of all input token types.""" + output_tokens: int + """Count of output (or completion) tokens. Sum of all output token types.""" + total_tokens: int + """Total token count. Sum of input_tokens + output_tokens.""" + input_token_details: NotRequired[InputTokenDetails] + """Breakdown of input token counts. + + Does *not* need to sum to full input token count. Does *not* need to have all keys. + """ + output_token_details: NotRequired[OutputTokenDetails] + """Breakdown of output token counts. + + Does *not* need to sum to full output token count. Does *not* need to have all keys. + """ + + +class AIMessage(BaseMessage): + """Message from an AI. + + AIMessage is returned from a chat model as a response to a prompt. + + This message represents the output of the model and consists of both + the raw output as returned by the model together standardized fields + (e.g., tool calls, usage metadata) added by the LangChain framework. + """ + + example: bool = False + """Use to denote that a message is part of an example conversation. + + At the moment, this is ignored by most models. Usage is discouraged. + """ + + tool_calls: list[ToolCall] = [] + """If provided, tool calls associated with the message.""" + invalid_tool_calls: list[InvalidToolCall] = [] + """If provided, tool calls with parsing errors associated with the message.""" + usage_metadata: Optional[UsageMetadata] = None + """If provided, usage metadata for a message, such as token counts. + + This is a standard representation of token usage that is consistent across models. + """ + + type: Literal["ai"] = "ai" + """The type of the message (used for deserialization). Defaults to "ai".""" + + def __init__( + self, content: Union[str, list[Union[str, dict]]], **kwargs: Any + ) -> None: + """Pass in content as positional arg. + + Args: + content: The content of the message. + kwargs: Additional arguments to pass to the parent class. 
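+
+        Example (a minimal sketch; the token counts are illustrative):
+
+        .. code-block:: python
+
+            from langchain_core.messages import AIMessage
+
+            msg = AIMessage(
+                content="The capital of France is Paris.",
+                usage_metadata={
+                    "input_tokens": 10,
+                    "output_tokens": 8,
+                    "total_tokens": 18,
+                },
+            )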
+ """ + super().__init__(content=content, **kwargs) + + @property + def lc_attributes(self) -> dict: + """Attrs to be serialized even if they are derived from other init args.""" + return { + "tool_calls": self.tool_calls, + "invalid_tool_calls": self.invalid_tool_calls, + } + + @model_validator(mode="before") + @classmethod + def _backwards_compat_tool_calls(cls, values: dict) -> Any: + check_additional_kwargs = not any( + values.get(k) + for k in ("tool_calls", "invalid_tool_calls", "tool_call_chunks") + ) + if check_additional_kwargs and ( + raw_tool_calls := values.get("additional_kwargs", {}).get("tool_calls") + ): + try: + if issubclass(cls, AIMessageChunk): + values["tool_call_chunks"] = default_tool_chunk_parser( + raw_tool_calls + ) + else: + parsed_tool_calls, parsed_invalid_tool_calls = default_tool_parser( + raw_tool_calls + ) + values["tool_calls"] = parsed_tool_calls + values["invalid_tool_calls"] = parsed_invalid_tool_calls + except Exception: + logger.debug("Failed to parse tool calls", exc_info=True) + + # Ensure "type" is properly set on all tool call-like dicts. + if tool_calls := values.get("tool_calls"): + values["tool_calls"] = [ + create_tool_call(**{k: v for k, v in tc.items() if k != "type"}) + for tc in tool_calls + ] + if invalid_tool_calls := values.get("invalid_tool_calls"): + values["invalid_tool_calls"] = [ + create_invalid_tool_call(**{k: v for k, v in tc.items() if k != "type"}) + for tc in invalid_tool_calls + ] + + if tool_call_chunks := values.get("tool_call_chunks"): + values["tool_call_chunks"] = [ + create_tool_call_chunk(**{k: v for k, v in tc.items() if k != "type"}) + for tc in tool_call_chunks + ] + + return values + + @override + def pretty_repr(self, html: bool = False) -> str: + """Return a pretty representation of the message. + + Args: + html: Whether to return an HTML-formatted string. + Defaults to False. + + Returns: + A pretty representation of the message. + """ + base = super().pretty_repr(html=html) + lines = [] + + def _format_tool_args(tc: Union[ToolCall, InvalidToolCall]) -> list[str]: + lines = [ + f" {tc.get('name', 'Tool')} ({tc.get('id')})", + f" Call ID: {tc.get('id')}", + ] + if tc.get("error"): + lines.append(f" Error: {tc.get('error')}") + lines.append(" Args:") + args = tc.get("args") + if isinstance(args, str): + lines.append(f" {args}") + elif isinstance(args, dict): + for arg, value in args.items(): + lines.append(f" {arg}: {value}") + return lines + + if self.tool_calls: + lines.append("Tool Calls:") + for tc in self.tool_calls: + lines.extend(_format_tool_args(tc)) + if self.invalid_tool_calls: + lines.append("Invalid Tool Calls:") + for itc in self.invalid_tool_calls: + lines.extend(_format_tool_args(itc)) + return (base.strip() + "\n" + "\n".join(lines)).strip() + + +class AIMessageChunk(AIMessage, BaseMessageChunk): + """Message chunk from an AI.""" + + # Ignoring mypy re-assignment here since we're overriding the value + # to make sure that the chunk variant can be discriminated from the + # non-chunk variant. + type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment] + """The type of the message (used for deserialization). 
+ Defaults to "AIMessageChunk".""" + + tool_call_chunks: list[ToolCallChunk] = [] + """If provided, tool call chunks associated with the message.""" + + @property + def lc_attributes(self) -> dict: + """Attrs to be serialized even if they are derived from other init args.""" + return { + "tool_calls": self.tool_calls, + "invalid_tool_calls": self.invalid_tool_calls, + } + + @model_validator(mode="after") + def init_tool_calls(self) -> Self: + """Initialize tool calls from tool call chunks. + + Args: + values: The values to validate. + + Returns: + The values with tool calls initialized. + + Raises: + ValueError: If the tool call chunks are malformed. + """ + if not self.tool_call_chunks: + if self.tool_calls: + self.tool_call_chunks = [ + create_tool_call_chunk( + name=tc["name"], + args=json.dumps(tc["args"]), + id=tc["id"], + index=None, + ) + for tc in self.tool_calls + ] + if self.invalid_tool_calls: + tool_call_chunks = self.tool_call_chunks + tool_call_chunks.extend( + [ + create_tool_call_chunk( + name=tc["name"], args=tc["args"], id=tc["id"], index=None + ) + for tc in self.invalid_tool_calls + ] + ) + self.tool_call_chunks = tool_call_chunks + + return self + tool_calls = [] + invalid_tool_calls = [] + + def add_chunk_to_invalid_tool_calls(chunk: ToolCallChunk) -> None: + invalid_tool_calls.append( + create_invalid_tool_call( + name=chunk["name"], + args=chunk["args"], + id=chunk["id"], + error=None, + ) + ) + + for chunk in self.tool_call_chunks: + try: + args_ = parse_partial_json(chunk["args"]) if chunk["args"] != "" else {} # type: ignore[arg-type] + if isinstance(args_, dict): + tool_calls.append( + create_tool_call( + name=chunk["name"] or "", + args=args_, + id=chunk["id"], + ) + ) + else: + add_chunk_to_invalid_tool_calls(chunk) + except Exception: + add_chunk_to_invalid_tool_calls(chunk) + self.tool_calls = tool_calls + self.invalid_tool_calls = invalid_tool_calls + return self + + @override + def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override] + if isinstance(other, AIMessageChunk): + return add_ai_message_chunks(self, other) + if isinstance(other, (list, tuple)) and all( + isinstance(o, AIMessageChunk) for o in other + ): + return add_ai_message_chunks(self, *other) + return super().__add__(other) + + +def add_ai_message_chunks( + left: AIMessageChunk, *others: AIMessageChunk +) -> AIMessageChunk: + """Add multiple AIMessageChunks together.""" + if any(left.example != o.example for o in others): + msg = "Cannot concatenate AIMessageChunks with different example values." 
+ raise ValueError(msg) + + content = merge_content(left.content, *(o.content for o in others)) + additional_kwargs = merge_dicts( + left.additional_kwargs, *(o.additional_kwargs for o in others) + ) + response_metadata = merge_dicts( + left.response_metadata, *(o.response_metadata for o in others) + ) + + # Merge tool call chunks + if raw_tool_calls := merge_lists( + left.tool_call_chunks, *(o.tool_call_chunks for o in others) + ): + tool_call_chunks = [ + create_tool_call_chunk( + name=rtc.get("name"), + args=rtc.get("args"), + index=rtc.get("index"), + id=rtc.get("id"), + ) + for rtc in raw_tool_calls + ] + else: + tool_call_chunks = [] + + # Token usage + if left.usage_metadata or any(o.usage_metadata is not None for o in others): + usage_metadata: Optional[UsageMetadata] = left.usage_metadata + for other in others: + usage_metadata = add_usage(usage_metadata, other.usage_metadata) + else: + usage_metadata = None + + id = None + candidates = [left.id] + [o.id for o in others] + # first pass: pick the first non‐run-* id + for id_ in candidates: + if id_ and not id_.startswith(_LC_ID_PREFIX): + id = id_ + break + else: + # second pass: no provider-assigned id found, just take the first non‐null + for id_ in candidates: + if id_: + id = id_ + break + + return left.__class__( + example=left.example, + content=content, + additional_kwargs=additional_kwargs, + tool_call_chunks=tool_call_chunks, + response_metadata=response_metadata, + usage_metadata=usage_metadata, + id=id, + ) + + +def add_usage( + left: Optional[UsageMetadata], right: Optional[UsageMetadata] +) -> UsageMetadata: + """Recursively add two UsageMetadata objects. + + Example: + .. code-block:: python + + from langchain_core.messages.ai import add_usage + + left = UsageMetadata( + input_tokens=5, + output_tokens=0, + total_tokens=5, + input_token_details=InputTokenDetails(cache_read=3) + ) + right = UsageMetadata( + input_tokens=0, + output_tokens=10, + total_tokens=10, + output_token_details=OutputTokenDetails(reasoning=4) + ) + + add_usage(left, right) + + results in + + .. code-block:: python + + UsageMetadata( + input_tokens=5, + output_tokens=10, + total_tokens=15, + input_token_details=InputTokenDetails(cache_read=3), + output_token_details=OutputTokenDetails(reasoning=4) + ) + + """ + if not (left or right): + return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0) + if not (left and right): + return cast("UsageMetadata", left or right) + + return UsageMetadata( + **cast( + "UsageMetadata", + _dict_int_op( + cast("dict", left), + cast("dict", right), + operator.add, + ), + ) + ) + + +def subtract_usage( + left: Optional[UsageMetadata], right: Optional[UsageMetadata] +) -> UsageMetadata: + """Recursively subtract two UsageMetadata objects. + + Token counts cannot be negative so the actual operation is max(left - right, 0). + + Example: + .. code-block:: python + + from langchain_core.messages.ai import subtract_usage + + left = UsageMetadata( + input_tokens=5, + output_tokens=10, + total_tokens=15, + input_token_details=InputTokenDetails(cache_read=4) + ) + right = UsageMetadata( + input_tokens=3, + output_tokens=8, + total_tokens=11, + output_token_details=OutputTokenDetails(reasoning=4) + ) + + subtract_usage(left, right) + + results in + + .. 
code-block:: python + + UsageMetadata( + input_tokens=2, + output_tokens=2, + total_tokens=4, + input_token_details=InputTokenDetails(cache_read=4), + output_token_details=OutputTokenDetails(reasoning=0) + ) + + """ + if not (left or right): + return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0) + if not (left and right): + return cast("UsageMetadata", left or right) + + return UsageMetadata( + **cast( + "UsageMetadata", + _dict_int_op( + cast("dict", left), + cast("dict", right), + (lambda le, ri: max(le - ri, 0)), + ), + ) + ) diff --git a/venv/Lib/site-packages/langchain_core/messages/base.py b/venv/Lib/site-packages/langchain_core/messages/base.py new file mode 100644 index 00000000..58049d28 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/base.py @@ -0,0 +1,294 @@ +"""Base message.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Optional, Union, cast + +from pydantic import ConfigDict, Field, field_validator + +from langchain_core.load.serializable import Serializable +from langchain_core.utils import get_bolded_text +from langchain_core.utils._merge import merge_dicts, merge_lists +from langchain_core.utils.interactive_env import is_interactive_env + +if TYPE_CHECKING: + from collections.abc import Sequence + + from langchain_core.prompts.chat import ChatPromptTemplate + + +class BaseMessage(Serializable): + """Base abstract message class. + + Messages are the inputs and outputs of ChatModels. + """ + + content: Union[str, list[Union[str, dict]]] + """The string contents of the message.""" + + additional_kwargs: dict = Field(default_factory=dict) + """Reserved for additional payload data associated with the message. + + For example, for a message from an AI, this could include tool calls as + encoded by the model provider. + """ + + response_metadata: dict = Field(default_factory=dict) + """Response metadata. For example: response headers, logprobs, token counts.""" + + type: str + """The type of the message. Must be a string that is unique to the message type. + + The purpose of this field is to allow for easy identification of the message type + when deserializing messages. + """ + + name: Optional[str] = None + """An optional name for the message. + + This can be used to provide a human-readable name for the message. + + Usage of this field is optional, and whether it's used or not is up to the + model implementation. + """ + + id: Optional[str] = None + """An optional unique identifier for the message. This should ideally be + provided by the provider/model which created the message.""" + + model_config = ConfigDict( + extra="allow", + ) + + @field_validator("id", mode="before") + def cast_id_to_str(cls, id_value: Any) -> Optional[str]: + """Coerce the id field to a string.""" + if id_value is not None: + return str(id_value) + return id_value + + def __init__( + self, content: Union[str, list[Union[str, dict]]], **kwargs: Any + ) -> None: + """Pass in content as positional arg. + + Args: + content: The string contents of the message. + """ + super().__init__(content=content, **kwargs) + + @classmethod + def is_lc_serializable(cls) -> bool: + """BaseMessage is serializable. + + Returns: + True + """ + return True + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Default is ["langchain", "schema", "messages"]. + """ + return ["langchain", "schema", "messages"] + + def text(self) -> str: + """Get the text content of the message. 
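+
+        For list content, only plain strings and ``{"type": "text"}`` blocks
+        contribute to the result; other block types (e.g. image blocks) are
+        skipped.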
+ + Returns: + The text content of the message. + """ + if isinstance(self.content, str): + return self.content + + # must be a list + blocks = [ + block + for block in self.content + if isinstance(block, str) + or block.get("type") == "text" + and isinstance(block.get("text"), str) + ] + return "".join( + block if isinstance(block, str) else block["text"] for block in blocks + ) + + def __add__(self, other: Any) -> ChatPromptTemplate: + """Concatenate this message with another message.""" + from langchain_core.prompts.chat import ChatPromptTemplate + + prompt = ChatPromptTemplate(messages=[self]) + return prompt + other + + def pretty_repr( + self, + html: bool = False, # noqa: FBT001,FBT002 + ) -> str: + """Get a pretty representation of the message. + + Args: + html: Whether to format the message as HTML. If True, the message will be + formatted with HTML tags. Default is False. + + Returns: + A pretty representation of the message. + """ + title = get_msg_title_repr(self.type.title() + " Message", bold=html) + # TODO: handle non-string content. + if self.name is not None: + title += f"\nName: {self.name}" + return f"{title}\n\n{self.content}" + + def pretty_print(self) -> None: + """Print a pretty representation of the message.""" + print(self.pretty_repr(html=is_interactive_env())) # noqa: T201 + + +def merge_content( + first_content: Union[str, list[Union[str, dict]]], + *contents: Union[str, list[Union[str, dict]]], +) -> Union[str, list[Union[str, dict]]]: + """Merge multiple message contents. + + Args: + first_content: The first content. Can be a string or a list. + contents: The other contents. Can be a string or a list. + + Returns: + The merged content. + """ + merged = first_content + for content in contents: + # If current is a string + if isinstance(merged, str): + # If the next chunk is also a string, then merge them naively + if isinstance(content, str): + merged += content + # If the next chunk is a list, add the current to the start of the list + else: + merged = [merged] + content # type: ignore[assignment,operator] + elif isinstance(content, list): + # If both are lists + merged = merge_lists(cast("list", merged), content) # type: ignore[assignment] + # If the first content is a list, and the second content is a string + # If the last element of the first content is a string + # Add the second content to the last element + elif merged and isinstance(merged[-1], str): + merged[-1] += content + # If second content is an empty string, treat as a no-op + elif content == "": + pass + else: + # Otherwise, add the second content as a new element of the list + merged.append(content) + return merged + + +class BaseMessageChunk(BaseMessage): + """Message chunk, which can be concatenated with other Message chunks.""" + + def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override] + """Message chunks support concatenation with other message chunks. + + This functionality is useful to combine message chunks yielded from + a streaming model into a complete message. + + Args: + other: Another message chunk to concatenate with this one. + + Returns: + A new message chunk that is the concatenation of this message chunk + and the other message chunk. + + Raises: + TypeError: If the other object is not a message chunk. 
+ + For example, + + `AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")` + + will give `AIMessageChunk(content="Hello World")` + """ + if isinstance(other, BaseMessageChunk): + # If both are (subclasses of) BaseMessageChunk, + # concat into a single BaseMessageChunk + + return self.__class__( + id=self.id, + type=self.type, + content=merge_content(self.content, other.content), + additional_kwargs=merge_dicts( + self.additional_kwargs, other.additional_kwargs + ), + response_metadata=merge_dicts( + self.response_metadata, other.response_metadata + ), + ) + if isinstance(other, list) and all( + isinstance(o, BaseMessageChunk) for o in other + ): + content = merge_content(self.content, *(o.content for o in other)) + additional_kwargs = merge_dicts( + self.additional_kwargs, *(o.additional_kwargs for o in other) + ) + response_metadata = merge_dicts( + self.response_metadata, *(o.response_metadata for o in other) + ) + return self.__class__( # type: ignore[call-arg] + id=self.id, + content=content, + additional_kwargs=additional_kwargs, + response_metadata=response_metadata, + ) + msg = ( + 'unsupported operand type(s) for +: "' + f"{self.__class__.__name__}" + f'" and "{other.__class__.__name__}"' + ) + raise TypeError(msg) + + +def message_to_dict(message: BaseMessage) -> dict: + """Convert a Message to a dictionary. + + Args: + message: Message to convert. + + Returns: + Message as a dict. The dict will have a "type" key with the message type + and a "data" key with the message data as a dict. + """ + return {"type": message.type, "data": message.model_dump()} + + +def messages_to_dict(messages: Sequence[BaseMessage]) -> list[dict]: + """Convert a sequence of Messages to a list of dictionaries. + + Args: + messages: Sequence of messages (as BaseMessages) to convert. + + Returns: + List of messages as dicts. + """ + return [message_to_dict(m) for m in messages] + + +def get_msg_title_repr(title: str, *, bold: bool = False) -> str: + """Get a title representation for a message. + + Args: + title: The title. + bold: Whether to bold the title. Default is False. + + Returns: + The title representation. + """ + padded = " " + title + " " + sep_len = (80 - len(padded)) // 2 + sep = "=" * sep_len + second_sep = sep + "=" if len(padded) % 2 else sep + if bold: + padded = get_bolded_text(padded) + return f"{sep}{padded}{second_sep}" diff --git a/venv/Lib/site-packages/langchain_core/messages/chat.py b/venv/Lib/site-packages/langchain_core/messages/chat.py new file mode 100644 index 00000000..a4791423 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/chat.py @@ -0,0 +1,65 @@ +"""Chat Message.""" + +from typing import Any, Literal + +from typing_extensions import override + +from langchain_core.messages.base import ( + BaseMessage, + BaseMessageChunk, + merge_content, +) +from langchain_core.utils._merge import merge_dicts + + +class ChatMessage(BaseMessage): + """Message that can be assigned an arbitrary speaker (i.e. role).""" + + role: str + """The speaker / role of the Message.""" + + type: Literal["chat"] = "chat" + """The type of the message (used during serialization). Defaults to "chat".""" + + +class ChatMessageChunk(ChatMessage, BaseMessageChunk): + """Chat Message chunk.""" + + # Ignoring mypy re-assignment here since we're overriding the value + # to make sure that the chunk variant can be discriminated from the + # non-chunk variant. 
+ type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment] + """The type of the message (used during serialization). + Defaults to "ChatMessageChunk".""" + + @override + def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override] + if isinstance(other, ChatMessageChunk): + if self.role != other.role: + msg = "Cannot concatenate ChatMessageChunks with different roles." + raise ValueError(msg) + + return self.__class__( + role=self.role, + content=merge_content(self.content, other.content), + additional_kwargs=merge_dicts( + self.additional_kwargs, other.additional_kwargs + ), + response_metadata=merge_dicts( + self.response_metadata, other.response_metadata + ), + id=self.id, + ) + if isinstance(other, BaseMessageChunk): + return self.__class__( + role=self.role, + content=merge_content(self.content, other.content), + additional_kwargs=merge_dicts( + self.additional_kwargs, other.additional_kwargs + ), + response_metadata=merge_dicts( + self.response_metadata, other.response_metadata + ), + id=self.id, + ) + return super().__add__(other) diff --git a/venv/Lib/site-packages/langchain_core/messages/content_blocks.py b/venv/Lib/site-packages/langchain_core/messages/content_blocks.py new file mode 100644 index 00000000..cbd9bfa2 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/content_blocks.py @@ -0,0 +1,155 @@ +"""Types for content blocks.""" + +import warnings +from typing import Any, Literal, Union + +from pydantic import TypeAdapter, ValidationError +from typing_extensions import NotRequired, TypedDict + + +class BaseDataContentBlock(TypedDict, total=False): + """Base class for data content blocks.""" + + mime_type: NotRequired[str] + """MIME type of the content block (if needed).""" + + +class URLContentBlock(BaseDataContentBlock): + """Content block for data from a URL.""" + + type: Literal["image", "audio", "file"] + """Type of the content block.""" + source_type: Literal["url"] + """Source type (url).""" + url: str + """URL for data.""" + + +class Base64ContentBlock(BaseDataContentBlock): + """Content block for inline data from a base64 string.""" + + type: Literal["image", "audio", "file"] + """Type of the content block.""" + source_type: Literal["base64"] + """Source type (base64).""" + data: str + """Data as a base64 string.""" + + +class PlainTextContentBlock(BaseDataContentBlock): + """Content block for plain text data (e.g., from a document).""" + + type: Literal["file"] + """Type of the content block.""" + source_type: Literal["text"] + """Source type (text).""" + text: str + """Text data.""" + + +class IDContentBlock(TypedDict): + """Content block for data specified by an identifier.""" + + type: Literal["image", "audio", "file"] + """Type of the content block.""" + source_type: Literal["id"] + """Source type (id).""" + id: str + """Identifier for data source.""" + + +DataContentBlock = Union[ + URLContentBlock, + Base64ContentBlock, + PlainTextContentBlock, + IDContentBlock, +] + +_DataContentBlockAdapter: TypeAdapter[DataContentBlock] = TypeAdapter(DataContentBlock) + + +def is_data_content_block( + content_block: dict, +) -> bool: + """Check if the content block is a standard data content block. + + Args: + content_block: The content block to check. + + Returns: + True if the content block is a data content block, False otherwise. 
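+
+    Example (a small sketch; the URL is a placeholder):
+
+    .. code-block:: python
+
+        is_data_content_block(
+            {"type": "image", "source_type": "url", "url": "https://..."}
+        )  # -> True
+
+        is_data_content_block({"type": "text", "text": "hello"})  # -> False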
+ """ + try: + _ = _DataContentBlockAdapter.validate_python(content_block) + except ValidationError: + return False + else: + return True + + +def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict: + """Convert image content block to format expected by OpenAI Chat Completions API.""" + if content_block["source_type"] == "url": + return { + "type": "image_url", + "image_url": { + "url": content_block["url"], + }, + } + if content_block["source_type"] == "base64": + if "mime_type" not in content_block: + error_message = "mime_type key is required for base64 data." + raise ValueError(error_message) + mime_type = content_block["mime_type"] + return { + "type": "image_url", + "image_url": { + "url": f"data:{mime_type};base64,{content_block['data']}", + }, + } + error_message = "Unsupported source type. Only 'url' and 'base64' are supported." + raise ValueError(error_message) + + +def convert_to_openai_data_block(block: dict) -> dict: + """Format standard data content block to format expected by OpenAI.""" + if block["type"] == "image": + formatted_block = convert_to_openai_image_block(block) + + elif block["type"] == "file": + if block["source_type"] == "base64": + file = {"file_data": f"data:{block['mime_type']};base64,{block['data']}"} + if filename := block.get("filename"): + file["filename"] = filename + elif (metadata := block.get("metadata")) and ("filename" in metadata): + file["filename"] = metadata["filename"] + else: + warnings.warn( + "OpenAI may require a filename for file inputs. Specify a filename " + "in the content block: {'type': 'file', 'source_type': 'base64', " + "'mime_type': 'application/pdf', 'data': '...', " + "'filename': 'my-pdf'}", + stacklevel=1, + ) + formatted_block = {"type": "file", "file": file} + elif block["source_type"] == "id": + formatted_block = {"type": "file", "file": {"file_id": block["id"]}} + else: + error_msg = "source_type base64 or id is required for file blocks." + raise ValueError(error_msg) + + elif block["type"] == "audio": + if block["source_type"] == "base64": + format = block["mime_type"].split("/")[-1] + formatted_block = { + "type": "input_audio", + "input_audio": {"data": block["data"], "format": format}, + } + else: + error_msg = "source_type base64 is required for audio blocks." + raise ValueError(error_msg) + else: + error_msg = f"Block of type {block['type']} is not supported." + raise ValueError(error_msg) + + return formatted_block diff --git a/venv/Lib/site-packages/langchain_core/messages/function.py b/venv/Lib/site-packages/langchain_core/messages/function.py new file mode 100644 index 00000000..fc101877 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/function.py @@ -0,0 +1,62 @@ +"""Function Message.""" + +from typing import Any, Literal + +from typing_extensions import override + +from langchain_core.messages.base import ( + BaseMessage, + BaseMessageChunk, + merge_content, +) +from langchain_core.utils._merge import merge_dicts + + +class FunctionMessage(BaseMessage): + """Message for passing the result of executing a tool back to a model. + + FunctionMessage are an older version of the ToolMessage schema, and + do not contain the tool_call_id field. + + The tool_call_id field is used to associate the tool call request with the + tool call response. This is useful in situations where a chat model is able + to request multiple tool calls in parallel. 
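+
+    Example (a minimal sketch; prefer ToolMessage in new code, since it
+    carries a tool_call_id):
+
+    .. code-block:: python
+
+        from langchain_core.messages import FunctionMessage
+
+        FunctionMessage(name="get_weather", content='{"temp_c": 21}')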
+ """ + + name: str + """The name of the function that was executed.""" + + type: Literal["function"] = "function" + """The type of the message (used for serialization). Defaults to "function".""" + + +class FunctionMessageChunk(FunctionMessage, BaseMessageChunk): + """Function Message chunk.""" + + # Ignoring mypy re-assignment here since we're overriding the value + # to make sure that the chunk variant can be discriminated from the + # non-chunk variant. + type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment] + """The type of the message (used for serialization). + Defaults to "FunctionMessageChunk".""" + + @override + def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override] + if isinstance(other, FunctionMessageChunk): + if self.name != other.name: + msg = "Cannot concatenate FunctionMessageChunks with different names." + raise ValueError(msg) + + return self.__class__( + name=self.name, + content=merge_content(self.content, other.content), + additional_kwargs=merge_dicts( + self.additional_kwargs, other.additional_kwargs + ), + response_metadata=merge_dicts( + self.response_metadata, other.response_metadata + ), + id=self.id, + ) + + return super().__add__(other) diff --git a/venv/Lib/site-packages/langchain_core/messages/human.py b/venv/Lib/site-packages/langchain_core/messages/human.py new file mode 100644 index 00000000..4e19904a --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/human.py @@ -0,0 +1,63 @@ +"""Human message.""" + +from typing import Any, Literal, Union + +from langchain_core.messages.base import BaseMessage, BaseMessageChunk + + +class HumanMessage(BaseMessage): + """Message from a human. + + HumanMessages are messages that are passed in from a human to the model. + + Example: + + .. code-block:: python + + from langchain_core.messages import HumanMessage, SystemMessage + + messages = [ + SystemMessage( + content="You are a helpful assistant! Your name is Bob." + ), + HumanMessage( + content="What is your name?" + ) + ] + + # Instantiate a chat model and invoke it with the messages + model = ... + print(model.invoke(messages)) + """ + + example: bool = False + """Use to denote that a message is part of an example conversation. + + At the moment, this is ignored by most models. Usage is discouraged. + Defaults to False. + """ + + type: Literal["human"] = "human" + """The type of the message (used for serialization). Defaults to "human".""" + + def __init__( + self, content: Union[str, list[Union[str, dict]]], **kwargs: Any + ) -> None: + """Pass in content as positional arg. + + Args: + content: The string contents of the message. + kwargs: Additional fields to pass to the message. + """ + super().__init__(content=content, **kwargs) + + +class HumanMessageChunk(HumanMessage, BaseMessageChunk): + """Human Message chunk.""" + + # Ignoring mypy re-assignment here since we're overriding the value + # to make sure that the chunk variant can be discriminated from the + # non-chunk variant. + type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment] + """The type of the message (used for serialization). 
+ Defaults to "HumanMessageChunk".""" diff --git a/venv/Lib/site-packages/langchain_core/messages/modifier.py b/venv/Lib/site-packages/langchain_core/messages/modifier.py new file mode 100644 index 00000000..9cfc89c5 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/modifier.py @@ -0,0 +1,28 @@ +"""Message responsible for deleting other messages.""" + +from typing import Any, Literal + +from langchain_core.messages.base import BaseMessage + + +class RemoveMessage(BaseMessage): + """Message responsible for deleting other messages.""" + + type: Literal["remove"] = "remove" + """The type of the message (used for serialization). Defaults to "remove".""" + + def __init__(self, id: str, **kwargs: Any) -> None: + """Create a RemoveMessage. + + Args: + id: The ID of the message to remove. + kwargs: Additional fields to pass to the message. + + Raises: + ValueError: If the 'content' field is passed in kwargs. + """ + if kwargs.pop("content", None): + msg = "RemoveMessage does not support 'content' field." + raise ValueError(msg) + + super().__init__("", id=id, **kwargs) diff --git a/venv/Lib/site-packages/langchain_core/messages/system.py b/venv/Lib/site-packages/langchain_core/messages/system.py new file mode 100644 index 00000000..d63bd53a --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/system.py @@ -0,0 +1,57 @@ +"""System message.""" + +from typing import Any, Literal, Union + +from langchain_core.messages.base import BaseMessage, BaseMessageChunk + + +class SystemMessage(BaseMessage): + """Message for priming AI behavior. + + The system message is usually passed in as the first of a sequence + of input messages. + + Example: + + .. code-block:: python + + from langchain_core.messages import HumanMessage, SystemMessage + + messages = [ + SystemMessage( + content="You are a helpful assistant! Your name is Bob." + ), + HumanMessage( + content="What is your name?" + ) + ] + + # Define a chat model and invoke it with the messages + print(model.invoke(messages)) + + """ + + type: Literal["system"] = "system" + """The type of the message (used for serialization). Defaults to "system".""" + + def __init__( + self, content: Union[str, list[Union[str, dict]]], **kwargs: Any + ) -> None: + """Pass in content as positional arg. + + Args: + content: The string contents of the message. + kwargs: Additional fields to pass to the message. + """ + super().__init__(content=content, **kwargs) + + +class SystemMessageChunk(SystemMessage, BaseMessageChunk): + """System Message chunk.""" + + # Ignoring mypy re-assignment here since we're overriding the value + # to make sure that the chunk variant can be discriminated from the + # non-chunk variant. + type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment] + """The type of the message (used for serialization). 
+ Defaults to "SystemMessageChunk".""" diff --git a/venv/Lib/site-packages/langchain_core/messages/tool.py b/venv/Lib/site-packages/langchain_core/messages/tool.py new file mode 100644 index 00000000..42c9018e --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/tool.py @@ -0,0 +1,364 @@ +"""Messages for tools.""" + +import json +from typing import Any, Literal, Optional, Union +from uuid import UUID + +from pydantic import Field, model_validator +from typing_extensions import NotRequired, TypedDict, override + +from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content +from langchain_core.utils._merge import merge_dicts, merge_obj + + +class ToolOutputMixin: + """Mixin for objects that tools can return directly. + + If a custom BaseTool is invoked with a ToolCall and the output of custom code is + not an instance of ToolOutputMixin, the output will automatically be coerced to a + string and wrapped in a ToolMessage. + """ + + +class ToolMessage(BaseMessage, ToolOutputMixin): + """Message for passing the result of executing a tool back to a model. + + ToolMessages contain the result of a tool invocation. Typically, the result + is encoded inside the `content` field. + + Example: A ToolMessage representing a result of 42 from a tool call with id + + .. code-block:: python + + from langchain_core.messages import ToolMessage + + ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + + + Example: A ToolMessage where only part of the tool output is sent to the model + and the full output is passed in to artifact. + + .. versionadded:: 0.2.17 + + .. code-block:: python + + from langchain_core.messages import ToolMessage + + tool_output = { + "stdout": "From the graph we can see that the correlation between x and y is ...", + "stderr": None, + "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, + } + + ToolMessage( + content=tool_output["stdout"], + artifact=tool_output, + tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + ) + + The tool_call_id field is used to associate the tool call request with the + tool call response. This is useful in situations where a chat model is able + to request multiple tool calls in parallel. + """ # noqa: E501 + + tool_call_id: str + """Tool call that this message is responding to.""" + + type: Literal["tool"] = "tool" + """The type of the message (used for serialization). Defaults to "tool".""" + + artifact: Any = None + """Artifact of the Tool execution which is not meant to be sent to the model. + + Should only be specified if it is different from the message content, e.g. if only + a subset of the full tool output is being passed as message content but the full + output is needed in other parts of the code. + + .. versionadded:: 0.2.17 + """ + + status: Literal["success", "error"] = "success" + """Status of the tool invocation. + + .. versionadded:: 0.2.24 + """ + + additional_kwargs: dict = Field(default_factory=dict, repr=False) + """Currently inherited from BaseMessage, but not used.""" + response_metadata: dict = Field(default_factory=dict, repr=False) + """Currently inherited from BaseMessage, but not used.""" + + @model_validator(mode="before") + @classmethod + def coerce_args(cls, values: dict) -> dict: + """Coerce the model arguments to the correct types. + + Args: + values: The model arguments. 
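+
+        Returns:
+            The validated values: tuple content is converted to a list,
+            non-string/list content is coerced to ``str``, and
+            ``UUID``/``int``/``float`` tool_call_ids are converted to strings.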
+ """ + content = values["content"] + if isinstance(content, tuple): + content = list(content) + + if not isinstance(content, (str, list)): + try: + values["content"] = str(content) + except ValueError as e: + msg = ( + "ToolMessage content should be a string or a list of string/dicts. " + f"Received:\n\n{content=}\n\n which could not be coerced into a " + "string." + ) + raise ValueError(msg) from e + elif isinstance(content, list): + values["content"] = [] + for i, x in enumerate(content): + if not isinstance(x, (str, dict)): + try: + values["content"].append(str(x)) + except ValueError as e: + msg = ( + "ToolMessage content should be a string or a list of " + "string/dicts. Received a list but " + f"element ToolMessage.content[{i}] is not a dict and could " + f"not be coerced to a string.:\n\n{x}" + ) + raise ValueError(msg) from e + else: + values["content"].append(x) + else: + pass + + tool_call_id = values["tool_call_id"] + if isinstance(tool_call_id, (UUID, int, float)): + values["tool_call_id"] = str(tool_call_id) + return values + + def __init__( + self, content: Union[str, list[Union[str, dict]]], **kwargs: Any + ) -> None: + """Create a ToolMessage. + + Args: + content: The string contents of the message. + **kwargs: Additional fields. + """ + super().__init__(content=content, **kwargs) + + +class ToolMessageChunk(ToolMessage, BaseMessageChunk): + """Tool Message chunk.""" + + # Ignoring mypy re-assignment here since we're overriding the value + # to make sure that the chunk variant can be discriminated from the + # non-chunk variant. + type: Literal["ToolMessageChunk"] = "ToolMessageChunk" # type: ignore[assignment] + + @override + def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override] + if isinstance(other, ToolMessageChunk): + if self.tool_call_id != other.tool_call_id: + msg = "Cannot concatenate ToolMessageChunks with different names." + raise ValueError(msg) + + return self.__class__( + tool_call_id=self.tool_call_id, + content=merge_content(self.content, other.content), + artifact=merge_obj(self.artifact, other.artifact), + additional_kwargs=merge_dicts( + self.additional_kwargs, other.additional_kwargs + ), + response_metadata=merge_dicts( + self.response_metadata, other.response_metadata + ), + id=self.id, + status=_merge_status(self.status, other.status), + ) + + return super().__add__(other) + + +class ToolCall(TypedDict): + """Represents a request to call a tool. + + Example: + + .. code-block:: python + + { + "name": "foo", + "args": {"a": 1}, + "id": "123" + } + + This represents a request to call the tool named "foo" with arguments {"a": 1} + and an identifier of "123". + """ + + name: str + """The name of the tool to be called.""" + args: dict[str, Any] + """The arguments to the tool call.""" + id: Optional[str] + """An identifier associated with the tool call. + + An identifier is needed to associate a tool call request with a tool + call result in events when multiple concurrent tool calls are made. + """ + type: NotRequired[Literal["tool_call"]] + + +def tool_call(*, name: str, args: dict[str, Any], id: Optional[str]) -> ToolCall: + """Create a tool call. + + Args: + name: The name of the tool to be called. + args: The arguments to the tool call. + id: An identifier associated with the tool call. + """ + return ToolCall(name=name, args=args, id=id, type="tool_call") + + +class ToolCallChunk(TypedDict): + """A chunk of a tool call (e.g., as part of a stream). 
+ + When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + all string attributes are concatenated. Chunks are only merged if their + values of `index` are equal and not None. + + Example: + + .. code-block:: python + + left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] + right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + + ( + AIMessageChunk(content="", tool_call_chunks=left_chunks) + + AIMessageChunk(content="", tool_call_chunks=right_chunks) + ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + """ + + name: Optional[str] + """The name of the tool to be called.""" + args: Optional[str] + """The arguments to the tool call.""" + id: Optional[str] + """An identifier associated with the tool call.""" + index: Optional[int] + """The index of the tool call in a sequence.""" + type: NotRequired[Literal["tool_call_chunk"]] + + +def tool_call_chunk( + *, + name: Optional[str] = None, + args: Optional[str] = None, + id: Optional[str] = None, + index: Optional[int] = None, +) -> ToolCallChunk: + """Create a tool call chunk. + + Args: + name: The name of the tool to be called. + args: The arguments to the tool call. + id: An identifier associated with the tool call. + index: The index of the tool call in a sequence. + """ + return ToolCallChunk( + name=name, args=args, id=id, index=index, type="tool_call_chunk" + ) + + +class InvalidToolCall(TypedDict): + """Allowance for errors made by LLM. + + Here we add an `error` key to surface errors made during generation + (e.g., invalid JSON arguments.) + """ + + name: Optional[str] + """The name of the tool to be called.""" + args: Optional[str] + """The arguments to the tool call.""" + id: Optional[str] + """An identifier associated with the tool call.""" + error: Optional[str] + """An error message associated with the tool call.""" + type: NotRequired[Literal["invalid_tool_call"]] + + +def invalid_tool_call( + *, + name: Optional[str] = None, + args: Optional[str] = None, + id: Optional[str] = None, + error: Optional[str] = None, +) -> InvalidToolCall: + """Create an invalid tool call. + + Args: + name: The name of the tool to be called. + args: The arguments to the tool call. + id: An identifier associated with the tool call. + error: An error message associated with the tool call. 
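+
+    Returns:
+        An ``InvalidToolCall`` dict with ``type`` set to ``"invalid_tool_call"``.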
+ """ + return InvalidToolCall( + name=name, args=args, id=id, error=error, type="invalid_tool_call" + ) + + +def default_tool_parser( + raw_tool_calls: list[dict], +) -> tuple[list[ToolCall], list[InvalidToolCall]]: + """Best-effort parsing of tools.""" + tool_calls = [] + invalid_tool_calls = [] + for raw_tool_call in raw_tool_calls: + if "function" not in raw_tool_call: + continue + function_name = raw_tool_call["function"]["name"] + try: + function_args = json.loads(raw_tool_call["function"]["arguments"]) + parsed = tool_call( + name=function_name or "", + args=function_args or {}, + id=raw_tool_call.get("id"), + ) + tool_calls.append(parsed) + except json.JSONDecodeError: + invalid_tool_calls.append( + invalid_tool_call( + name=function_name, + args=raw_tool_call["function"]["arguments"], + id=raw_tool_call.get("id"), + error=None, + ) + ) + return tool_calls, invalid_tool_calls + + +def default_tool_chunk_parser(raw_tool_calls: list[dict]) -> list[ToolCallChunk]: + """Best-effort parsing of tool chunks.""" + tool_call_chunks = [] + for tool_call in raw_tool_calls: + if "function" not in tool_call: + function_args = None + function_name = None + else: + function_args = tool_call["function"]["arguments"] + function_name = tool_call["function"]["name"] + parsed = tool_call_chunk( + name=function_name, + args=function_args, + id=tool_call.get("id"), + index=tool_call.get("index"), + ) + tool_call_chunks.append(parsed) + return tool_call_chunks + + +def _merge_status( + left: Literal["success", "error"], right: Literal["success", "error"] +) -> Literal["success", "error"]: + return "error" if "error" in (left, right) else "success" diff --git a/venv/Lib/site-packages/langchain_core/messages/utils.py b/venv/Lib/site-packages/langchain_core/messages/utils.py new file mode 100644 index 00000000..5b51f63c --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/messages/utils.py @@ -0,0 +1,1640 @@ +"""Module contains utility functions for working with messages. + +Some examples of what you can do with these functions include: + +* Convert messages to strings (serialization) +* Convert messages from dicts to Message objects (deserialization) +* Filter messages from a list of messages based on name, type or id etc. 
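+* Merge consecutive runs of messages of the same type (merge_message_runs)
+* Trim a message history to fit a token budget (trim_messages)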
+""" + +from __future__ import annotations + +import base64 +import inspect +import json +import logging +import math +from collections.abc import Iterable, Sequence +from functools import partial +from typing import ( + TYPE_CHECKING, + Annotated, + Any, + Callable, + Literal, + Optional, + Union, + cast, + overload, +) + +from pydantic import Discriminator, Field, Tag + +from langchain_core.exceptions import ErrorCode, create_message +from langchain_core.messages import convert_to_openai_data_block, is_data_content_block +from langchain_core.messages.ai import AIMessage, AIMessageChunk +from langchain_core.messages.base import BaseMessage, BaseMessageChunk +from langchain_core.messages.chat import ChatMessage, ChatMessageChunk +from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk +from langchain_core.messages.human import HumanMessage, HumanMessageChunk +from langchain_core.messages.modifier import RemoveMessage +from langchain_core.messages.system import SystemMessage, SystemMessageChunk +from langchain_core.messages.tool import ToolCall, ToolMessage, ToolMessageChunk + +if TYPE_CHECKING: + from langchain_text_splitters import TextSplitter + + from langchain_core.language_models import BaseLanguageModel + from langchain_core.prompt_values import PromptValue + from langchain_core.runnables.base import Runnable + +logger = logging.getLogger(__name__) + + +def _get_type(v: Any) -> str: + """Get the type associated with the object for serialization purposes.""" + if isinstance(v, dict) and "type" in v: + return v["type"] + if hasattr(v, "type"): + return v.type + msg = ( + f"Expected either a dictionary with a 'type' key or an object " + f"with a 'type' attribute. Instead got type {type(v)}." + ) + raise TypeError(msg) + + +AnyMessage = Annotated[ + Union[ + Annotated[AIMessage, Tag(tag="ai")], + Annotated[HumanMessage, Tag(tag="human")], + Annotated[ChatMessage, Tag(tag="chat")], + Annotated[SystemMessage, Tag(tag="system")], + Annotated[FunctionMessage, Tag(tag="function")], + Annotated[ToolMessage, Tag(tag="tool")], + Annotated[AIMessageChunk, Tag(tag="AIMessageChunk")], + Annotated[HumanMessageChunk, Tag(tag="HumanMessageChunk")], + Annotated[ChatMessageChunk, Tag(tag="ChatMessageChunk")], + Annotated[SystemMessageChunk, Tag(tag="SystemMessageChunk")], + Annotated[FunctionMessageChunk, Tag(tag="FunctionMessageChunk")], + Annotated[ToolMessageChunk, Tag(tag="ToolMessageChunk")], + ], + Field(discriminator=Discriminator(_get_type)), +] + + +def get_buffer_string( + messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI" +) -> str: + r"""Convert a sequence of Messages to strings and concatenate them into one string. + + Args: + messages: Messages to be converted to strings. + human_prefix: The prefix to prepend to contents of HumanMessages. + Default is "Human". + ai_prefix: THe prefix to prepend to contents of AIMessages. Default is "AI". + + Returns: + A single string concatenation of all input messages. + + Raises: + ValueError: If an unsupported message type is encountered. + + Example: + .. code-block:: python + + from langchain_core import AIMessage, HumanMessage + + messages = [ + HumanMessage(content="Hi, how are you?"), + AIMessage(content="Good, how are you?"), + ] + get_buffer_string(messages) + # -> "Human: Hi, how are you?\nAI: Good, how are you?" 
+ """ + string_messages = [] + for m in messages: + if isinstance(m, HumanMessage): + role = human_prefix + elif isinstance(m, AIMessage): + role = ai_prefix + elif isinstance(m, SystemMessage): + role = "System" + elif isinstance(m, FunctionMessage): + role = "Function" + elif isinstance(m, ToolMessage): + role = "Tool" + elif isinstance(m, ChatMessage): + role = m.role + else: + msg = f"Got unsupported message type: {m}" + raise ValueError(msg) # noqa: TRY004 + message = f"{role}: {m.content}" + if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs: + message += f"{m.additional_kwargs['function_call']}" + string_messages.append(message) + + return "\n".join(string_messages) + + +def _message_from_dict(message: dict) -> BaseMessage: + _type = message["type"] + if _type == "human": + return HumanMessage(**message["data"]) + if _type == "ai": + return AIMessage(**message["data"]) + if _type == "system": + return SystemMessage(**message["data"]) + if _type == "chat": + return ChatMessage(**message["data"]) + if _type == "function": + return FunctionMessage(**message["data"]) + if _type == "tool": + return ToolMessage(**message["data"]) + if _type == "remove": + return RemoveMessage(**message["data"]) + if _type == "AIMessageChunk": + return AIMessageChunk(**message["data"]) + if _type == "HumanMessageChunk": + return HumanMessageChunk(**message["data"]) + if _type == "FunctionMessageChunk": + return FunctionMessageChunk(**message["data"]) + if _type == "ToolMessageChunk": + return ToolMessageChunk(**message["data"]) + if _type == "SystemMessageChunk": + return SystemMessageChunk(**message["data"]) + if _type == "ChatMessageChunk": + return ChatMessageChunk(**message["data"]) + msg = f"Got unexpected message type: {_type}" + raise ValueError(msg) + + +def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]: + """Convert a sequence of messages from dicts to Message objects. + + Args: + messages: Sequence of messages (as dicts) to convert. + + Returns: + list of messages (BaseMessages). + """ + return [_message_from_dict(m) for m in messages] + + +def message_chunk_to_message(chunk: BaseMessageChunk) -> BaseMessage: + """Convert a message chunk to a message. + + Args: + chunk: Message chunk to convert. + + Returns: + Message. + """ + if not isinstance(chunk, BaseMessageChunk): + return chunk + # chunk classes always have the equivalent non-chunk class as their first parent + ignore_keys = ["type"] + if isinstance(chunk, AIMessageChunk): + ignore_keys.append("tool_call_chunks") + return chunk.__class__.__mro__[1]( + **{k: v for k, v in chunk.__dict__.items() if k not in ignore_keys} + ) + + +MessageLikeRepresentation = Union[ + BaseMessage, list[str], tuple[str, str], str, dict[str, Any] +] + + +def _create_message_from_message_type( + message_type: str, + content: str, + name: Optional[str] = None, + tool_call_id: Optional[str] = None, + tool_calls: Optional[list[dict[str, Any]]] = None, + id: Optional[str] = None, + **additional_kwargs: Any, +) -> BaseMessage: + """Create a message from a message type and content string. + + Args: + message_type: (str) the type of the message (e.g., "human", "ai", etc.). + content: (str) the content string. + name: (str) the name of the message. Default is None. + tool_call_id: (str) the tool call id. Default is None. + tool_calls: (list[dict[str, Any]]) the tool calls. Default is None. + id: (str) the id of the message. Default is None. + additional_kwargs: (dict[str, Any]) additional keyword arguments. 
+ + Returns: + a message of the appropriate type. + + Raises: + ValueError: if the message type is not one of "human", "user", "ai", + "assistant", "function", "tool", "system", or "developer". + """ + kwargs: dict[str, Any] = {} + if name is not None: + kwargs["name"] = name + if tool_call_id is not None: + kwargs["tool_call_id"] = tool_call_id + if additional_kwargs: + if response_metadata := additional_kwargs.pop("response_metadata", None): + kwargs["response_metadata"] = response_metadata + kwargs["additional_kwargs"] = additional_kwargs + additional_kwargs.update(additional_kwargs.pop("additional_kwargs", {})) + if id is not None: + kwargs["id"] = id + if tool_calls is not None: + kwargs["tool_calls"] = [] + for tool_call in tool_calls: + # Convert OpenAI-format tool call to LangChain format. + if "function" in tool_call: + args = tool_call["function"]["arguments"] + if isinstance(args, str): + args = json.loads(args, strict=False) + kwargs["tool_calls"].append( + { + "name": tool_call["function"]["name"], + "args": args, + "id": tool_call["id"], + "type": "tool_call", + } + ) + else: + kwargs["tool_calls"].append(tool_call) + if message_type in ("human", "user"): + if example := kwargs.get("additional_kwargs", {}).pop("example", False): + kwargs["example"] = example + message: BaseMessage = HumanMessage(content=content, **kwargs) + elif message_type in ("ai", "assistant"): + if example := kwargs.get("additional_kwargs", {}).pop("example", False): + kwargs["example"] = example + message = AIMessage(content=content, **kwargs) + elif message_type in ("system", "developer"): + if message_type == "developer": + kwargs["additional_kwargs"] = kwargs.get("additional_kwargs") or {} + kwargs["additional_kwargs"]["__openai_role__"] = "developer" + message = SystemMessage(content=content, **kwargs) + elif message_type == "function": + message = FunctionMessage(content=content, **kwargs) + elif message_type == "tool": + artifact = kwargs.get("additional_kwargs", {}).pop("artifact", None) + message = ToolMessage(content=content, artifact=artifact, **kwargs) + elif message_type == "remove": + message = RemoveMessage(**kwargs) + else: + msg = ( + f"Unexpected message type: '{message_type}'. Use one of 'human'," + f" 'user', 'ai', 'assistant', 'function', 'tool', 'system', or 'developer'." + ) + msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE) + raise ValueError(msg) + return message + + +def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage: + """Instantiate a message from a variety of message formats. + + The message format can be one of the following: + + - BaseMessagePromptTemplate + - BaseMessage + - 2-tuple of (role string, template); e.g., ("human", "{user_input}") + - dict: a message dict with role and content keys + - string: shorthand for ("human", template); e.g., "{user_input}" + + Args: + message: a representation of a message in one of the supported formats. + + Returns: + an instance of a message or a message template. + + Raises: + NotImplementedError: if the message type is not supported. + ValueError: if the message dict does not contain the required keys. 
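+
+    Example (for illustration; each of these yields an equivalent HumanMessage):
+
+    .. code-block:: python
+
+        _convert_to_message("hello")
+        _convert_to_message(("human", "hello"))
+        _convert_to_message({"role": "user", "content": "hello"})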
+ """ + if isinstance(message, BaseMessage): + _message = message + elif isinstance(message, str): + _message = _create_message_from_message_type("human", message) + elif isinstance(message, Sequence) and len(message) == 2: + # mypy doesn't realise this can't be a string given the previous branch + message_type_str, template = message # type: ignore[misc] + _message = _create_message_from_message_type(message_type_str, template) + elif isinstance(message, dict): + msg_kwargs = message.copy() + try: + try: + msg_type = msg_kwargs.pop("role") + except KeyError: + msg_type = msg_kwargs.pop("type") + # None msg content is not allowed + msg_content = msg_kwargs.pop("content") or "" + except KeyError as e: + msg = f"Message dict must contain 'role' and 'content' keys, got {message}" + msg = create_message( + message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE + ) + raise ValueError(msg) from e + _message = _create_message_from_message_type( + msg_type, msg_content, **msg_kwargs + ) + else: + msg = f"Unsupported message type: {type(message)}" + msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE) + raise NotImplementedError(msg) + + return _message + + +def convert_to_messages( + messages: Union[Iterable[MessageLikeRepresentation], PromptValue], +) -> list[BaseMessage]: + """Convert a sequence of messages to a list of messages. + + Args: + messages: Sequence of messages to convert. + + Returns: + list of messages (BaseMessages). + """ + # Import here to avoid circular imports + from langchain_core.prompt_values import PromptValue + + if isinstance(messages, PromptValue): + return messages.to_messages() + return [_convert_to_message(m) for m in messages] + + +def _runnable_support(func: Callable) -> Callable: + @overload + def wrapped( + messages: Literal[None] = None, **kwargs: Any + ) -> Runnable[Sequence[MessageLikeRepresentation], list[BaseMessage]]: ... + + @overload + def wrapped( + messages: Sequence[MessageLikeRepresentation], **kwargs: Any + ) -> list[BaseMessage]: ... + + def wrapped( + messages: Union[Sequence[MessageLikeRepresentation], None] = None, + **kwargs: Any, + ) -> Union[ + list[BaseMessage], + Runnable[Sequence[MessageLikeRepresentation], list[BaseMessage]], + ]: + from langchain_core.runnables.base import RunnableLambda + + if messages is not None: + return func(messages, **kwargs) + return RunnableLambda(partial(func, **kwargs), name=func.__name__) + + wrapped.__doc__ = func.__doc__ + return wrapped + + +@_runnable_support +def filter_messages( + messages: Union[Iterable[MessageLikeRepresentation], PromptValue], + *, + include_names: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[Union[str, type[BaseMessage]]]] = None, + exclude_types: Optional[Sequence[Union[str, type[BaseMessage]]]] = None, + include_ids: Optional[Sequence[str]] = None, + exclude_ids: Optional[Sequence[str]] = None, + exclude_tool_calls: Optional[Sequence[str] | bool] = None, +) -> list[BaseMessage]: + """Filter messages based on name, type or id. + + Args: + messages: Sequence Message-like objects to filter. + include_names: Message names to include. Default is None. + exclude_names: Messages names to exclude. Default is None. + include_types: Message types to include. Can be specified as string names (e.g. + "system", "human", "ai", ...) or as BaseMessage classes (e.g. + SystemMessage, HumanMessage, AIMessage, ...). Default is None. + exclude_types: Message types to exclude. 
Can be specified as string names (e.g.
+            "system", "human", "ai", ...) or as BaseMessage classes (e.g.
+            SystemMessage, HumanMessage, AIMessage, ...). Default is None.
+        include_ids: Message IDs to include. Default is None.
+        exclude_ids: Message IDs to exclude. Default is None.
+        exclude_tool_calls: Tool call IDs to exclude. Default is None.
+            Can be one of the following:
+
+            - `True`: all AIMessages with tool calls and all ToolMessages will be excluded.
+            - a sequence of tool call IDs to exclude:
+
+              - ToolMessages with the corresponding tool call ID will be excluded.
+              - The `tool_calls` in the AIMessage will be updated to exclude matching
+                tool calls. If all tool_calls are filtered from an AIMessage, the whole
+                message is excluded.
+
+    Returns:
+        A list of Messages that meet at least one of the incl_* conditions and none
+        of the excl_* conditions. If no incl_* conditions are specified then
+        anything that is not explicitly excluded will be included.
+
+    Raises:
+        ValueError: If two incompatible arguments are provided.
+
+    Example:
+        .. code-block:: python
+
+            from langchain_core.messages import filter_messages, AIMessage, HumanMessage, SystemMessage
+
+            messages = [
+                SystemMessage("you're a good assistant."),
+                HumanMessage("what's your name", id="foo", name="example_user"),
+                AIMessage("steve-o", id="bar", name="example_assistant"),
+                HumanMessage("what's your favorite color", id="baz",),
+                AIMessage("silicon blue", id="blah",),
+            ]
+
+            filter_messages(
+                messages,
+                include_names=("example_user", "example_assistant"),
+                include_types=("system",),
+                exclude_ids=("bar",),
+            )
+
+        .. code-block:: python
+
+            [
+                SystemMessage("you're a good assistant."),
+                HumanMessage("what's your name", id="foo", name="example_user"),
+            ]
+    """  # noqa: E501
+    messages = convert_to_messages(messages)
+    filtered: list[BaseMessage] = []
+    for msg in messages:
+        if (
+            (exclude_names and msg.name in exclude_names)
+            or (exclude_types and _is_message_type(msg, exclude_types))
+            or (exclude_ids and msg.id in exclude_ids)
+        ):
+            continue
+
+        if exclude_tool_calls is True and (
+            (isinstance(msg, AIMessage) and msg.tool_calls)
+            or isinstance(msg, ToolMessage)
+        ):
+            continue
+
+        if isinstance(exclude_tool_calls, (list, tuple, set)):
+            if isinstance(msg, AIMessage) and msg.tool_calls:
+                tool_calls = [
+                    tool_call
+                    for tool_call in msg.tool_calls
+                    if tool_call["id"] not in exclude_tool_calls
+                ]
+                if not tool_calls:
+                    continue
+
+                content = msg.content
+                # handle Anthropic content blocks
+                if isinstance(msg.content, list):
+                    content = [
+                        content_block
+                        for content_block in msg.content
+                        if (
+                            not isinstance(content_block, dict)
+                            or content_block.get("type") != "tool_use"
+                            or content_block.get("id") not in exclude_tool_calls
+                        )
+                    ]
+
+                msg = msg.model_copy(  # noqa: PLW2901
+                    update={"tool_calls": tool_calls, "content": content}
+                )
+            elif (
+                isinstance(msg, ToolMessage) and msg.tool_call_id in exclude_tool_calls
+            ):
+                continue
+
+        # default to inclusion when no inclusion criteria given.
+        if (
+            not (include_types or include_ids or include_names)
+            or (include_names and msg.name in include_names)
+            or (include_types and _is_message_type(msg, include_types))
+            or (include_ids and msg.id in include_ids)
+        ):
+            filtered.append(msg)
+
+    return filtered
+
+
+@_runnable_support
+def merge_message_runs(
+    messages: Union[Iterable[MessageLikeRepresentation], PromptValue],
+    *,
+    chunk_separator: str = "\n",
+) -> list[BaseMessage]:
+    r"""Merge consecutive Messages of the same type.
+ + **NOTE**: ToolMessages are not merged, as each has a distinct tool call id that + can't be merged. + + Args: + messages: Sequence Message-like objects to merge. + chunk_separator: Specify the string to be inserted between message chunks. + Default is "\n". + + Returns: + list of BaseMessages with consecutive runs of message types merged into single + messages. By default, if two messages being merged both have string contents, + the merged content is a concatenation of the two strings with a new-line separator. + The separator inserted between message chunks can be controlled by specifying + any string with ``chunk_separator``. If at least one of the messages has a list of + content blocks, the merged content is a list of content blocks. + + Example: + .. code-block:: python + + from langchain_core.messages import ( + merge_message_runs, + AIMessage, + HumanMessage, + SystemMessage, + ToolCall, + ) + + messages = [ + SystemMessage("you're a good assistant."), + HumanMessage("what's your favorite color", id="foo",), + HumanMessage("wait your favorite food", id="bar",), + AIMessage( + "my favorite colo", + tool_calls=[ToolCall(name="blah_tool", args={"x": 2}, id="123", type="tool_call")], + id="baz", + ), + AIMessage( + [{"type": "text", "text": "my favorite dish is lasagna"}], + tool_calls=[ToolCall(name="blah_tool", args={"x": -10}, id="456", type="tool_call")], + id="blur", + ), + ] + + merge_message_runs(messages) + + .. code-block:: python + + [ + SystemMessage("you're a good assistant."), + HumanMessage("what's your favorite color\\nwait your favorite food", id="foo",), + AIMessage( + [ + "my favorite colo", + {"type": "text", "text": "my favorite dish is lasagna"} + ], + tool_calls=[ + ToolCall({"name": "blah_tool", "args": {"x": 2}, "id": "123", "type": "tool_call"}), + ToolCall({"name": "blah_tool", "args": {"x": -10}, "id": "456", "type": "tool_call"}) + ] + id="baz" + ), + ] + + """ # noqa: E501 + if not messages: + return [] + messages = convert_to_messages(messages) + merged: list[BaseMessage] = [] + for msg in messages: + last = merged.pop() if merged else None + if not last: + merged.append(msg) + elif isinstance(msg, ToolMessage) or not isinstance(msg, last.__class__): + merged.extend([last, msg]) + else: + last_chunk = _msg_to_chunk(last) + curr_chunk = _msg_to_chunk(msg) + if curr_chunk.response_metadata: + curr_chunk.response_metadata.clear() + if ( + isinstance(last_chunk.content, str) + and isinstance(curr_chunk.content, str) + and last_chunk.content + and curr_chunk.content + ): + last_chunk.content += chunk_separator + merged.append(_chunk_to_msg(last_chunk + curr_chunk)) + return merged + + +# TODO: Update so validation errors (for token_counter, for example) are raised on +# init not at runtime. +@_runnable_support +def trim_messages( + messages: Union[Iterable[MessageLikeRepresentation], PromptValue], + *, + max_tokens: int, + token_counter: Union[ + Callable[[list[BaseMessage]], int], + Callable[[BaseMessage], int], + BaseLanguageModel, + ], + strategy: Literal["first", "last"] = "last", + allow_partial: bool = False, + end_on: Optional[ + Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]] + ] = None, + start_on: Optional[ + Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]] + ] = None, + include_system: bool = False, + text_splitter: Optional[Union[Callable[[str], list[str]], TextSplitter]] = None, +) -> list[BaseMessage]: + r"""Trim messages to be below a token count. 
+ + trim_messages can be used to reduce the size of a chat history to a specified token + count or specified message count. + + In either case, if passing the trimmed chat history back into a chat model + directly, the resulting chat history should usually satisfy the following + properties: + + 1. The resulting chat history should be valid. Most chat models expect that chat + history starts with either (1) a `HumanMessage` or (2) a `SystemMessage` followed + by a `HumanMessage`. To achieve this, set `start_on="human"`. + In addition, generally a `ToolMessage` can only appear after an `AIMessage` + that involved a tool call. + Please see the following link for more information about messages: + https://python.langchain.com/docs/concepts/#messages + 2. It includes recent messages and drops old messages in the chat history. + To achieve this set the `strategy="last"`. + 3. Usually, the new chat history should include the `SystemMessage` if it + was present in the original chat history since the `SystemMessage` includes + special instructions to the chat model. The `SystemMessage` is almost always + the first message in the history if present. To achieve this set the + `include_system=True`. + + **Note** The examples below show how to configure `trim_messages` to achieve + a behavior consistent with the above properties. + + Args: + messages: Sequence of Message-like objects to trim. + max_tokens: Max token count of trimmed messages. + token_counter: Function or llm for counting tokens in a BaseMessage or a list of + BaseMessage. If a BaseLanguageModel is passed in then + BaseLanguageModel.get_num_tokens_from_messages() will be used. + Set to `len` to count the number of **messages** in the chat history. + + .. note:: + Use `count_tokens_approximately` to get fast, approximate token counts. + This is recommended for using `trim_messages` on the hot path, where + exact token counting is not necessary. + + strategy: Strategy for trimming. + - "first": Keep the first <= n_count tokens of the messages. + - "last": Keep the last <= n_count tokens of the messages. + Default is "last". + allow_partial: Whether to split a message if only part of the message can be + included. If ``strategy="last"`` then the last partial contents of a message + are included. If ``strategy="first"`` then the first partial contents of a + message are included. + Default is False. + end_on: The message type to end on. If specified then every message after the + last occurrence of this type is ignored. If ``strategy=="last"`` then this + is done before we attempt to get the last ``max_tokens``. If + ``strategy=="first"`` then this is done after we get the first + ``max_tokens``. Can be specified as string names (e.g. "system", "human", + "ai", ...) or as BaseMessage classes (e.g. SystemMessage, HumanMessage, + AIMessage, ...). Can be a single type or a list of types. + Default is None. + start_on: The message type to start on. Should only be specified if + ``strategy="last"``. If specified then every message before + the first occurrence of this type is ignored. This is done after we trim + the initial messages to the last ``max_tokens``. Does not + apply to a SystemMessage at index 0 if ``include_system=True``. Can be + specified as string names (e.g. "system", "human", "ai", ...) or as + BaseMessage classes (e.g. SystemMessage, HumanMessage, AIMessage, ...). Can + be a single type or a list of types. + Default is None. + include_system: Whether to keep the SystemMessage if there is one at index 0. 
+            Should only be specified if ``strategy="last"``.
+            Default is False.
+        text_splitter: Function or ``langchain_text_splitters.TextSplitter`` for
+            splitting the string contents of a message. Only used if
+            ``allow_partial=True``. If ``strategy="last"`` then the last split tokens
+            from a partial message will be included. If ``strategy="first"`` then the
+            first split tokens from a partial message will be included. Token splitter
+            assumes that separators are kept, so that split contents can be directly
+            concatenated to recreate the original text. Defaults to splitting on
+            newlines.
+
+    Returns:
+        list of trimmed BaseMessages.
+
+    Raises:
+        ValueError: If two incompatible arguments are specified or an unrecognized
+            ``strategy`` is specified.
+
+    Example:
+        Trim chat history based on token count, keeping the SystemMessage if
+        present, and ensuring that the chat history starts with a HumanMessage
+        (or a SystemMessage followed by a HumanMessage).
+
+        .. code-block:: python
+
+            from langchain_core.messages import (
+                AIMessage,
+                HumanMessage,
+                BaseMessage,
+                SystemMessage,
+                trim_messages,
+            )
+
+            messages = [
+                SystemMessage("you're a good assistant, you always respond with a joke."),
+                HumanMessage("i wonder why it's called langchain"),
+                AIMessage(
+                    'Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!'
+                ),
+                HumanMessage("and who is harrison chasing anyways"),
+                AIMessage(
+                    "Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"
+                ),
+                HumanMessage("what do you call a speechless parrot"),
+            ]
+
+            trim_messages(
+                messages,
+                max_tokens=45,
+                strategy="last",
+                token_counter=ChatOpenAI(model="gpt-4o"),
+                # Most chat models expect that chat history starts with either:
+                # (1) a HumanMessage or
+                # (2) a SystemMessage followed by a HumanMessage
+                start_on="human",
+                # Usually, we want to keep the SystemMessage
+                # if it's present in the original history.
+                # The SystemMessage has special instructions for the model.
+                include_system=True,
+                allow_partial=False,
+            )
+
+        .. code-block:: python
+
+            [
+                SystemMessage(content="you're a good assistant, you always respond with a joke."),
+                HumanMessage(content='what do you call a speechless parrot'),
+            ]
+
+        Trim chat history based on the message count, keeping the SystemMessage if
+        present, and ensuring that the chat history starts with a HumanMessage
+        (or a SystemMessage followed by a HumanMessage).
+
+        .. code-block:: python
+
+            trim_messages(
+                messages,
+                # When `len` is passed in as the token counter function,
+                # max_tokens will count the number of messages in the chat history.
+                max_tokens=4,
+                strategy="last",
+                # Passing in `len` as a token counter function will
+                # count the number of messages in the chat history.
+                token_counter=len,
+                # Most chat models expect that chat history starts with either:
+                # (1) a HumanMessage or
+                # (2) a SystemMessage followed by a HumanMessage
+                start_on="human",
+                # Usually, we want to keep the SystemMessage
+                # if it's present in the original history.
+                # The SystemMessage has special instructions for the model.
+                include_system=True,
+                allow_partial=False,
+            )
+
+        ..
code-block:: python + + [ + SystemMessage(content="you're a good assistant, you always respond with a joke."), + HumanMessage(content='and who is harrison chasing anyways'), + AIMessage(content="Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"), + HumanMessage(content='what do you call a speechless parrot'), + ] + + + Trim chat history using a custom token counter function that counts the + number of tokens in each message. + + .. code-block:: python + + messages = [ + SystemMessage("This is a 4 token text. The full message is 10 tokens."), + HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"), + AIMessage( + [ + {"type": "text", "text": "This is the FIRST 4 token block."}, + {"type": "text", "text": "This is the SECOND 4 token block."}, + ], + id="second", + ), + HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"), + AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"), + ] + + def dummy_token_counter(messages: list[BaseMessage]) -> int: + # treat each message like it adds 3 default tokens at the beginning + # of the message and at the end of the message. 3 + 4 + 3 = 10 tokens + # per message. + + default_content_len = 4 + default_msg_prefix_len = 3 + default_msg_suffix_len = 3 + + count = 0 + for msg in messages: + if isinstance(msg.content, str): + count += default_msg_prefix_len + default_content_len + default_msg_suffix_len + if isinstance(msg.content, list): + count += default_msg_prefix_len + len(msg.content) * default_content_len + default_msg_suffix_len + return count + + First 30 tokens, allowing partial messages: + .. code-block:: python + + trim_messages( + messages, + max_tokens=30, + token_counter=dummy_token_counter, + strategy="first", + allow_partial=True, + ) + + .. code-block:: python + + [ + SystemMessage("This is a 4 token text. The full message is 10 tokens."), + HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"), + AIMessage( [{"type": "text", "text": "This is the FIRST 4 token block."}], id="second"), + ] + """ # noqa: E501 + # Validate arguments + if start_on and strategy == "first": + msg = "start_on parameter is only valid with strategy='last'" + raise ValueError(msg) + if include_system and strategy == "first": + msg = "include_system parameter is only valid with strategy='last'" + raise ValueError(msg) + + messages = convert_to_messages(messages) + if hasattr(token_counter, "get_num_tokens_from_messages"): + list_token_counter = token_counter.get_num_tokens_from_messages + elif callable(token_counter): + if ( + list(inspect.signature(token_counter).parameters.values())[0].annotation + is BaseMessage + ): + + def list_token_counter(messages: Sequence[BaseMessage]) -> int: + return sum(token_counter(msg) for msg in messages) # type: ignore[arg-type, misc] + + else: + list_token_counter = token_counter + else: + msg = ( + f"'token_counter' expected to be a model that implements " + f"'get_num_tokens_from_messages()' or a function. Received object of type " + f"{type(token_counter)}." 
+ ) + raise ValueError(msg) + + try: + from langchain_text_splitters import TextSplitter + except ImportError: + text_splitter_fn: Optional[Callable] = cast("Optional[Callable]", text_splitter) + else: + if isinstance(text_splitter, TextSplitter): + text_splitter_fn = text_splitter.split_text + else: + text_splitter_fn = text_splitter + + text_splitter_fn = text_splitter_fn or _default_text_splitter + + if strategy == "first": + return _first_max_tokens( + messages, + max_tokens=max_tokens, + token_counter=list_token_counter, + text_splitter=text_splitter_fn, + partial_strategy="first" if allow_partial else None, + end_on=end_on, + ) + if strategy == "last": + return _last_max_tokens( + messages, + max_tokens=max_tokens, + token_counter=list_token_counter, + allow_partial=allow_partial, + include_system=include_system, + start_on=start_on, + end_on=end_on, + text_splitter=text_splitter_fn, + ) + msg = f"Unrecognized {strategy=}. Supported strategies are 'last' and 'first'." + raise ValueError(msg) + + +def convert_to_openai_messages( + messages: Union[MessageLikeRepresentation, Sequence[MessageLikeRepresentation]], + *, + text_format: Literal["string", "block"] = "string", +) -> Union[dict, list[dict]]: + """Convert LangChain messages into OpenAI message dicts. + + Args: + messages: Message-like object or iterable of objects whose contents are + in OpenAI, Anthropic, Bedrock Converse, or VertexAI formats. + text_format: How to format string or text block contents: + + - "string": + If a message has a string content, this is left as a string. If + a message has content blocks that are all of type 'text', these are + joined with a newline to make a single string. If a message has + content blocks and at least one isn't of type 'text', then + all blocks are left as dicts. + - "block": + If a message has a string content, this is turned into a list + with a single content block of type 'text'. If a message has content + blocks these are left as is. + + Returns: + The return type depends on the input type: + - dict: + If a single message-like object is passed in, a single OpenAI message + dict is returned. + - list[dict]: + If a sequence of message-like objects are passed in, a list of OpenAI + message dicts is returned. + + Example: + + .. code-block:: python + + from langchain_core.messages import ( + convert_to_openai_messages, + AIMessage, + SystemMessage, + ToolMessage, + ) + + messages = [ + SystemMessage([{"type": "text", "text": "foo"}]), + {"role": "user", "content": [{"type": "text", "text": "whats in this"}, {"type": "image_url", "image_url": {"url": "data:image/png;base64,'/9j/4AAQSk'"}}]}, + AIMessage("", tool_calls=[{"name": "analyze", "args": {"baz": "buz"}, "id": "1", "type": "tool_call"}]), + ToolMessage("foobar", tool_call_id="1", name="bar"), + {"role": "assistant", "content": "thats nice"}, + ] + oai_messages = convert_to_openai_messages(messages) + # -> [ + # {'role': 'system', 'content': 'foo'}, + # {'role': 'user', 'content': [{'type': 'text', 'text': 'whats in this'}, {'type': 'image_url', 'image_url': {'url': "data:image/png;base64,'/9j/4AAQSk'"}}]}, + # {'role': 'assistant', 'tool_calls': [{'type': 'function', 'id': '1','function': {'name': 'analyze', 'arguments': '{"baz": "buz"}'}}], 'content': ''}, + # {'role': 'tool', 'name': 'bar', 'content': 'foobar'}, + # {'role': 'assistant', 'content': 'thats nice'} + # ] + + .. 
versionadded:: 0.3.11 + + """ # noqa: E501 + if text_format not in ("string", "block"): + err = f"Unrecognized {text_format=}, expected one of 'string' or 'block'." + raise ValueError(err) + + oai_messages: list = [] + + if is_single := isinstance(messages, (BaseMessage, dict, str)): + messages = [messages] + + messages = convert_to_messages(messages) + + for i, message in enumerate(messages): + oai_msg: dict = {"role": _get_message_openai_role(message)} + tool_messages: list = [] + content: Union[str, list[dict]] + + if message.name: + oai_msg["name"] = message.name + if isinstance(message, AIMessage) and message.tool_calls: + oai_msg["tool_calls"] = _convert_to_openai_tool_calls(message.tool_calls) + if message.additional_kwargs.get("refusal"): + oai_msg["refusal"] = message.additional_kwargs["refusal"] + if isinstance(message, ToolMessage): + oai_msg["tool_call_id"] = message.tool_call_id + + if not message.content: + content = "" if text_format == "string" else [] + elif isinstance(message.content, str): + if text_format == "string": + content = message.content + else: + content = [{"type": "text", "text": message.content}] + elif text_format == "string" and all( + isinstance(block, str) or block.get("type") == "text" + for block in message.content + ): + content = "\n".join( + block if isinstance(block, str) else block["text"] + for block in message.content + ) + else: + content = [] + for j, block in enumerate(message.content): + # OpenAI format + if isinstance(block, str): + content.append({"type": "text", "text": block}) + elif block.get("type") == "text": + if missing := [k for k in ("text",) if k not in block]: + err = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] has 'type': 'text' " + f"but is missing expected key(s) " + f"{missing}. Full content block:\n\n{block}" + ) + raise ValueError(err) + content.append({"type": block["type"], "text": block["text"]}) + elif block.get("type") == "image_url": + if missing := [k for k in ("image_url",) if k not in block]: + err = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] has 'type': 'image_url' " + f"but is missing expected key(s) " + f"{missing}. Full content block:\n\n{block}" + ) + raise ValueError(err) + content.append( + { + "type": "image_url", + "image_url": block["image_url"], + } + ) + # Standard multi-modal content block + elif is_data_content_block(block): + formatted_block = convert_to_openai_data_block(block) + if ( + formatted_block.get("type") == "file" + and "file" in formatted_block + and "filename" not in formatted_block["file"] + ): + logger.info("Generating a fallback filename.") + formatted_block["file"]["filename"] = "LC_AUTOGENERATED" + content.append(formatted_block) + # Anthropic and Bedrock converse format + elif (block.get("type") == "image") or "image" in block: + # Anthropic + if source := block.get("source"): + if missing := [ + k for k in ("media_type", "type", "data") if k not in source + ]: + err = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] has 'type': 'image' " + f"but 'source' is missing expected key(s) " + f"{missing}. 
Full content block:\n\n{block}" + ) + raise ValueError(err) + content.append( + { + "type": "image_url", + "image_url": { + "url": ( + f"data:{source['media_type']};" + f"{source['type']},{source['data']}" + ) + }, + } + ) + # Bedrock converse + elif image := block.get("image"): + if missing := [ + k for k in ("source", "format") if k not in image + ]: + err = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] has key 'image', " + f"but 'image' is missing expected key(s) " + f"{missing}. Full content block:\n\n{block}" + ) + raise ValueError(err) + b64_image = _bytes_to_b64_str(image["source"]["bytes"]) + content.append( + { + "type": "image_url", + "image_url": { + "url": ( + f"data:image/{image['format']};" + f"base64,{b64_image}" + ) + }, + } + ) + else: + err = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] has 'type': 'image' " + f"but does not have a 'source' or 'image' key. Full " + f"content block:\n\n{block}" + ) + raise ValueError(err) + # OpenAI file format + elif ( + block.get("type") == "file" + and isinstance(block.get("file"), dict) + and isinstance(block.get("file", {}).get("file_data"), str) + ): + if block.get("file", {}).get("filename") is None: + logger.info("Generating a fallback filename.") + block["file"]["filename"] = "LC_AUTOGENERATED" + content.append(block) + # OpenAI audio format + elif ( + block.get("type") == "input_audio" + and isinstance(block.get("input_audio"), dict) + and isinstance(block.get("input_audio", {}).get("data"), str) + and isinstance(block.get("input_audio", {}).get("format"), str) + ): + content.append(block) + elif block.get("type") == "tool_use": + if missing := [ + k for k in ("id", "name", "input") if k not in block + ]: + err = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] has 'type': " + f"'tool_use', but is missing expected key(s) " + f"{missing}. Full content block:\n\n{block}" + ) + raise ValueError(err) + if not any( + tool_call["id"] == block["id"] + for tool_call in cast("AIMessage", message).tool_calls + ): + oai_msg["tool_calls"] = oai_msg.get("tool_calls", []) + oai_msg["tool_calls"].append( + { + "type": "function", + "id": block["id"], + "function": { + "name": block["name"], + "arguments": json.dumps(block["input"]), + }, + } + ) + elif block.get("type") == "tool_result": + if missing := [ + k for k in ("content", "tool_use_id") if k not in block + ]: + msg = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] has 'type': " + f"'tool_result', but is missing expected key(s) " + f"{missing}. Full content block:\n\n{block}" + ) + raise ValueError(msg) + tool_message = ToolMessage( + block["content"], + tool_call_id=block["tool_use_id"], + status="error" if block.get("is_error") else "success", + ) + # Recurse to make sure tool message contents are OpenAI format. + tool_messages.extend( + convert_to_openai_messages( + [tool_message], text_format=text_format + ) + ) + elif (block.get("type") == "json") or "json" in block: + if "json" not in block: + msg = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] has 'type': 'json' " + f"but does not have a 'json' key. 
Full " + f"content block:\n\n{block}" + ) + raise ValueError(msg) + content.append( + { + "type": "text", + "text": json.dumps(block["json"]), + } + ) + elif (block.get("type") == "guard_content") or "guard_content" in block: + if ( + "guard_content" not in block + or "text" not in block["guard_content"] + ): + msg = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] has 'type': " + f"'guard_content' but does not have a " + f"messages[{i}].content[{j}]['guard_content']['text'] " + f"key. Full content block:\n\n{block}" + ) + raise ValueError(msg) + text = block["guard_content"]["text"] + if isinstance(text, dict): + text = text["text"] + content.append({"type": "text", "text": text}) + # VertexAI format + elif block.get("type") == "media": + if missing := [k for k in ("mime_type", "data") if k not in block]: + err = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] has 'type': " + f"'media' but does not have key(s) {missing}. Full " + f"content block:\n\n{block}" + ) + raise ValueError(err) + if "image" not in block["mime_type"]: + err = ( + f"OpenAI messages can only support text and image data." + f" Received content block with media of type:" + f" {block['mime_type']}" + ) + raise ValueError(err) + b64_image = _bytes_to_b64_str(block["data"]) + content.append( + { + "type": "image_url", + "image_url": { + "url": (f"data:{block['mime_type']};base64,{b64_image}") + }, + } + ) + elif block.get("type") == "thinking": + content.append(block) + else: + err = ( + f"Unrecognized content block at " + f"messages[{i}].content[{j}] does not match OpenAI, " + f"Anthropic, Bedrock Converse, or VertexAI format. Full " + f"content block:\n\n{block}" + ) + raise ValueError(err) + if text_format == "string" and not any( + block["type"] != "text" for block in content + ): + content = "\n".join(block["text"] for block in content) + oai_msg["content"] = content + if message.content and not oai_msg["content"] and tool_messages: + oai_messages.extend(tool_messages) + else: + oai_messages.extend([oai_msg, *tool_messages]) + + if is_single: + return oai_messages[0] + return oai_messages + + +def _first_max_tokens( + messages: Sequence[BaseMessage], + *, + max_tokens: int, + token_counter: Callable[[list[BaseMessage]], int], + text_splitter: Callable[[str], list[str]], + partial_strategy: Optional[Literal["first", "last"]] = None, + end_on: Optional[ + Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]] + ] = None, +) -> list[BaseMessage]: + messages = list(messages) + if not messages: + return messages + + # Check if all messages already fit within token limit + if token_counter(messages) <= max_tokens: + # When all messages fit, only apply end_on filtering if needed + if end_on: + for _ in range(len(messages)): + if not _is_message_type(messages[-1], end_on): + messages.pop() + else: + break + return messages + + # Use binary search to find the maximum number of messages within token limit + left, right = 0, len(messages) + max_iterations = len(messages).bit_length() + for _ in range(max_iterations): + if left >= right: + break + mid = (left + right + 1) // 2 + if token_counter(messages[:mid]) <= max_tokens: + left = mid + idx = mid + else: + right = mid - 1 + + # idx now contains the maximum number of complete messages we can include + idx = left + + if partial_strategy and idx < len(messages): + included_partial = False + copied = False + if isinstance(messages[idx].content, list): + excluded = messages[idx].model_copy(deep=True) + copied = True + 
num_block = len(excluded.content) + if partial_strategy == "last": + excluded.content = list(reversed(excluded.content)) + for _ in range(1, num_block): + excluded.content = excluded.content[:-1] + if token_counter(messages[:idx] + [excluded]) <= max_tokens: + messages = messages[:idx] + [excluded] + idx += 1 + included_partial = True + break + if included_partial and partial_strategy == "last": + excluded.content = list(reversed(excluded.content)) + if not included_partial: + if not copied: + excluded = messages[idx].model_copy(deep=True) + copied = True + + # Extract text content efficiently + text = None + if isinstance(excluded.content, str): + text = excluded.content + elif isinstance(excluded.content, list) and excluded.content: + for block in excluded.content: + if isinstance(block, str): + text = block + break + if isinstance(block, dict) and block.get("type") == "text": + text = block.get("text") + break + + if text: + if not copied: + excluded = excluded.model_copy(deep=True) + + split_texts = text_splitter(text) + base_message_count = token_counter(messages[:idx]) + if partial_strategy == "last": + split_texts = list(reversed(split_texts)) + + # Binary search for the maximum number of splits we can include + left, right = 0, len(split_texts) + max_iterations = len(split_texts).bit_length() + for _ in range(max_iterations): + if left >= right: + break + mid = (left + right + 1) // 2 + excluded.content = "".join(split_texts[:mid]) + if base_message_count + token_counter([excluded]) <= max_tokens: + left = mid + else: + right = mid - 1 + + if left > 0: + content_splits = split_texts[:left] + if partial_strategy == "last": + content_splits = list(reversed(content_splits)) + excluded.content = "".join(content_splits) + messages = messages[:idx] + [excluded] + idx += 1 + + if end_on: + for _ in range(idx): + if idx > 0 and not _is_message_type(messages[idx - 1], end_on): + idx -= 1 + else: + break + + return messages[:idx] + + +def _last_max_tokens( + messages: Sequence[BaseMessage], + *, + max_tokens: int, + token_counter: Callable[[list[BaseMessage]], int], + text_splitter: Callable[[str], list[str]], + allow_partial: bool = False, + include_system: bool = False, + start_on: Optional[ + Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]] + ] = None, + end_on: Optional[ + Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]] + ] = None, +) -> list[BaseMessage]: + messages = list(messages) + if len(messages) == 0: + return [] + + # Filter out messages after end_on type + if end_on: + for _ in range(len(messages)): + if not _is_message_type(messages[-1], end_on): + messages.pop() + else: + break + + # Handle system message preservation + system_message = None + if include_system and len(messages) > 0 and isinstance(messages[0], SystemMessage): + system_message = messages[0] + messages = messages[1:] + + # Reverse messages to use _first_max_tokens with reversed logic + reversed_messages = messages[::-1] + + # Calculate remaining tokens after accounting for system message if present + remaining_tokens = max_tokens + if system_message: + system_tokens = token_counter([system_message]) + remaining_tokens = max(0, max_tokens - system_tokens) + + reversed_result = _first_max_tokens( + reversed_messages, + max_tokens=remaining_tokens, + token_counter=token_counter, + text_splitter=text_splitter, + partial_strategy="last" if allow_partial else None, + end_on=start_on, + ) + + # Re-reverse the messages and add back the system message if needed + result = 
reversed_result[::-1] + if system_message: + result = [system_message] + result + + return result + + +_MSG_CHUNK_MAP: dict[type[BaseMessage], type[BaseMessageChunk]] = { + HumanMessage: HumanMessageChunk, + AIMessage: AIMessageChunk, + SystemMessage: SystemMessageChunk, + ToolMessage: ToolMessageChunk, + FunctionMessage: FunctionMessageChunk, + ChatMessage: ChatMessageChunk, +} +_CHUNK_MSG_MAP = {v: k for k, v in _MSG_CHUNK_MAP.items()} + + +def _msg_to_chunk(message: BaseMessage) -> BaseMessageChunk: + if message.__class__ in _MSG_CHUNK_MAP: + return _MSG_CHUNK_MAP[message.__class__](**message.model_dump(exclude={"type"})) + + for msg_cls, chunk_cls in _MSG_CHUNK_MAP.items(): + if isinstance(message, msg_cls): + return chunk_cls(**message.model_dump(exclude={"type"})) + + msg = ( + f"Unrecognized message class {message.__class__}. Supported classes are " + f"{list(_MSG_CHUNK_MAP.keys())}" + ) + msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE) + raise ValueError(msg) + + +def _chunk_to_msg(chunk: BaseMessageChunk) -> BaseMessage: + if chunk.__class__ in _CHUNK_MSG_MAP: + return _CHUNK_MSG_MAP[chunk.__class__]( + **chunk.model_dump(exclude={"type", "tool_call_chunks"}) + ) + for chunk_cls, msg_cls in _CHUNK_MSG_MAP.items(): + if isinstance(chunk, chunk_cls): + return msg_cls(**chunk.model_dump(exclude={"type", "tool_call_chunks"})) + + msg = ( + f"Unrecognized message chunk class {chunk.__class__}. Supported classes are " + f"{list(_CHUNK_MSG_MAP.keys())}" + ) + msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE) + raise ValueError(msg) + + +def _default_text_splitter(text: str) -> list[str]: + splits = text.split("\n") + return [s + "\n" for s in splits[:-1]] + splits[-1:] + + +def _is_message_type( + message: BaseMessage, + type_: Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]], +) -> bool: + types = [type_] if isinstance(type_, (str, type)) else type_ + types_str = [t for t in types if isinstance(t, str)] + types_types = tuple(t for t in types if isinstance(t, type)) + + return message.type in types_str or isinstance(message, types_types) + + +def _bytes_to_b64_str(bytes_: bytes) -> str: + return base64.b64encode(bytes_).decode("utf-8") + + +def _get_message_openai_role(message: BaseMessage) -> str: + if isinstance(message, AIMessage): + return "assistant" + if isinstance(message, HumanMessage): + return "user" + if isinstance(message, ToolMessage): + return "tool" + if isinstance(message, SystemMessage): + return message.additional_kwargs.get("__openai_role__", "system") + if isinstance(message, FunctionMessage): + return "function" + if isinstance(message, ChatMessage): + return message.role + msg = f"Unknown BaseMessage type {message.__class__}." + raise ValueError(msg) # noqa: TRY004 + + +def _convert_to_openai_tool_calls(tool_calls: list[ToolCall]) -> list[dict]: + return [ + { + "type": "function", + "id": tool_call["id"], + "function": { + "name": tool_call["name"], + "arguments": json.dumps(tool_call["args"]), + }, + } + for tool_call in tool_calls + ] + + +def count_tokens_approximately( + messages: Iterable[MessageLikeRepresentation], + *, + chars_per_token: float = 4.0, + extra_tokens_per_message: float = 3.0, + count_name: bool = True, +) -> int: + """Approximate the total number of tokens in messages. + + The token count includes stringified message content, role, and (optionally) name. + - For AI messages, the token count also includes stringified tool calls. 
+ - For tool messages, the token count also includes the tool call ID. + + Args: + messages: List of messages to count tokens for. + chars_per_token: Number of characters per token to use for the approximation. + Default is 4 (one token corresponds to ~4 chars for common English text). + You can also specify float values for more fine-grained control. + See more here: https://platform.openai.com/tokenizer + extra_tokens_per_message: Number of extra tokens to add per message. + Default is 3 (special tokens, including beginning/end of message). + You can also specify float values for more fine-grained control. + See more here: + https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb + count_name: Whether to include message names in the count. + Enabled by default. + + Returns: + Approximate number of tokens in the messages. + + Note: + This is a simple approximation that may not match the exact token count + used by specific models. For accurate counts, use model-specific tokenizers. + + Warning: + This function does not currently support counting image tokens. + + .. versionadded:: 0.3.46 + """ + token_count = 0.0 + for message in convert_to_messages(messages): + message_chars = 0 + if isinstance(message.content, str): + message_chars += len(message.content) + + # TODO: add support for approximate counting for image blocks + else: + content = repr(message.content) + message_chars += len(content) + + if ( + isinstance(message, AIMessage) + # exclude Anthropic format as tool calls are already included in the content + and not isinstance(message.content, list) + and message.tool_calls + ): + tool_calls_content = repr(message.tool_calls) + message_chars += len(tool_calls_content) + + if isinstance(message, ToolMessage): + message_chars += len(message.tool_call_id) + + role = _get_message_openai_role(message) + message_chars += len(role) + + if message.name and count_name: + message_chars += len(message.name) + + # NOTE: we're rounding up per message to ensure that + # individual message token counts add up to the total count + # for a list of messages + token_count += math.ceil(message_chars / chars_per_token) + + # add extra tokens per message + token_count += extra_tokens_per_message + + # round up once more time in case extra_tokens_per_message is a float + return math.ceil(token_count) diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__init__.py b/venv/Lib/site-packages/langchain_core/output_parsers/__init__.py new file mode 100644 index 00000000..1c81be2a --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/__init__.py @@ -0,0 +1,98 @@ +"""**OutputParser** classes parse the output of an LLM call. + +**Class hierarchy:** + +.. code-block:: + + BaseLLMOutputParser --> BaseOutputParser --> OutputParser # ListOutputParser, PydanticOutputParser + +**Main helpers:** + +.. 
code-block:: + + Serializable, Generation, PromptValue +""" # noqa: E501 + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.output_parsers.base import ( + BaseGenerationOutputParser, + BaseLLMOutputParser, + BaseOutputParser, + ) + from langchain_core.output_parsers.json import ( + JsonOutputParser, + SimpleJsonOutputParser, + ) + from langchain_core.output_parsers.list import ( + CommaSeparatedListOutputParser, + ListOutputParser, + MarkdownListOutputParser, + NumberedListOutputParser, + ) + from langchain_core.output_parsers.openai_tools import ( + JsonOutputKeyToolsParser, + JsonOutputToolsParser, + PydanticToolsParser, + ) + from langchain_core.output_parsers.pydantic import PydanticOutputParser + from langchain_core.output_parsers.string import StrOutputParser + from langchain_core.output_parsers.transform import ( + BaseCumulativeTransformOutputParser, + BaseTransformOutputParser, + ) + from langchain_core.output_parsers.xml import XMLOutputParser + +__all__ = [ + "BaseLLMOutputParser", + "BaseGenerationOutputParser", + "BaseOutputParser", + "ListOutputParser", + "CommaSeparatedListOutputParser", + "NumberedListOutputParser", + "MarkdownListOutputParser", + "StrOutputParser", + "BaseTransformOutputParser", + "BaseCumulativeTransformOutputParser", + "SimpleJsonOutputParser", + "XMLOutputParser", + "JsonOutputParser", + "PydanticOutputParser", + "JsonOutputToolsParser", + "JsonOutputKeyToolsParser", + "PydanticToolsParser", +] + +_dynamic_imports = { + "BaseLLMOutputParser": "base", + "BaseGenerationOutputParser": "base", + "BaseOutputParser": "base", + "JsonOutputParser": "json", + "SimpleJsonOutputParser": "json", + "ListOutputParser": "list", + "CommaSeparatedListOutputParser": "list", + "MarkdownListOutputParser": "list", + "NumberedListOutputParser": "list", + "JsonOutputKeyToolsParser": "openai_tools", + "JsonOutputToolsParser": "openai_tools", + "PydanticToolsParser": "openai_tools", + "PydanticOutputParser": "pydantic", + "StrOutputParser": "string", + "BaseTransformOutputParser": "transform", + "BaseCumulativeTransformOutputParser": "transform", + "XMLOutputParser": "xml", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return __all__ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..5faf66c4 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..90b3b929 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/format_instructions.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/format_instructions.cpython-312.pyc new file mode 100644 index 00000000..3ffecfe4 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/format_instructions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/json.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/json.cpython-312.pyc new file mode 100644 index 00000000..76cee1d0 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/json.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/list.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/list.cpython-312.pyc new file mode 100644 index 00000000..4281bf7e Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/list.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/openai_functions.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/openai_functions.cpython-312.pyc new file mode 100644 index 00000000..316561f9 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/openai_functions.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/openai_tools.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/openai_tools.cpython-312.pyc new file mode 100644 index 00000000..ca63f7af Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/openai_tools.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/pydantic.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/pydantic.cpython-312.pyc new file mode 100644 index 00000000..2d4f5e17 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/pydantic.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/string.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/string.cpython-312.pyc new file mode 100644 index 00000000..ebedd471 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/string.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/transform.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/transform.cpython-312.pyc new file mode 100644 index 00000000..dac52add Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/transform.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/xml.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/xml.cpython-312.pyc new file mode 100644 index 00000000..f025fc9b Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/output_parsers/__pycache__/xml.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/base.py b/venv/Lib/site-packages/langchain_core/output_parsers/base.py new file mode 100644 index 00000000..a14a1082 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/base.py @@ -0,0 +1,332 @@ +"""Base parser for language model outputs.""" + +from __future__ import annotations + +import contextlib +from abc import ABC, abstractmethod +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Optional, + TypeVar, + Union, 
+) + +from typing_extensions import override + +from langchain_core.language_models import LanguageModelOutput +from langchain_core.messages import AnyMessage, BaseMessage +from langchain_core.outputs import ChatGeneration, Generation +from langchain_core.runnables import Runnable, RunnableConfig, RunnableSerializable +from langchain_core.runnables.config import run_in_executor + +if TYPE_CHECKING: + from langchain_core.prompt_values import PromptValue + +T = TypeVar("T") +OutputParserLike = Runnable[LanguageModelOutput, T] + + +class BaseLLMOutputParser(Generic[T], ABC): + """Abstract base class for parsing the outputs of a model.""" + + @abstractmethod + def parse_result(self, result: list[Generation], *, partial: bool = False) -> T: + """Parse a list of candidate model Generations into a specific format. + + Args: + result: A list of Generations to be parsed. The Generations are assumed + to be different candidate outputs for a single model input. + partial: Whether to parse the output as a partial result. This is useful + for parsers that can parse partial results. Default is False. + + Returns: + Structured output. + """ + + async def aparse_result( + self, result: list[Generation], *, partial: bool = False + ) -> T: + """Async parse a list of candidate model Generations into a specific format. + + Args: + result: A list of Generations to be parsed. The Generations are assumed + to be different candidate outputs for a single model input. + partial: Whether to parse the output as a partial result. This is useful + for parsers that can parse partial results. Default is False. + + Returns: + Structured output. + """ + return await run_in_executor(None, self.parse_result, result, partial=partial) + + +class BaseGenerationOutputParser( + BaseLLMOutputParser, RunnableSerializable[LanguageModelOutput, T] +): + """Base class to parse the output of an LLM call.""" + + @property + @override + def InputType(self) -> Any: + """Return the input type for the parser.""" + return Union[str, AnyMessage] + + @property + @override + def OutputType(self) -> type[T]: + """Return the output type for the parser.""" + # even though mypy complains this isn't valid, + # it is good enough for pydantic to build the schema from + return T # type: ignore[misc] + + @override + def invoke( + self, + input: Union[str, BaseMessage], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> T: + if isinstance(input, BaseMessage): + return self._call_with_config( + lambda inner_input: self.parse_result( + [ChatGeneration(message=inner_input)] + ), + input, + config, + run_type="parser", + ) + return self._call_with_config( + lambda inner_input: self.parse_result([Generation(text=inner_input)]), + input, + config, + run_type="parser", + ) + + @override + async def ainvoke( + self, + input: Union[str, BaseMessage], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> T: + if isinstance(input, BaseMessage): + return await self._acall_with_config( + lambda inner_input: self.aparse_result( + [ChatGeneration(message=inner_input)] + ), + input, + config, + run_type="parser", + ) + return await self._acall_with_config( + lambda inner_input: self.aparse_result([Generation(text=inner_input)]), + input, + config, + run_type="parser", + ) + + +class BaseOutputParser( + BaseLLMOutputParser, RunnableSerializable[LanguageModelOutput, T] +): + """Base class to parse the output of an LLM call. + + Output parsers help structure language model responses. + + Example: + .. 
code-block:: python + + class BooleanOutputParser(BaseOutputParser[bool]): + true_val: str = "YES" + false_val: str = "NO" + + def parse(self, text: str) -> bool: + cleaned_text = text.strip().upper() + if cleaned_text not in (self.true_val.upper(), self.false_val.upper()): + raise OutputParserException( + f"BooleanOutputParser expected output value to either be " + f"{self.true_val} or {self.false_val} (case-insensitive). " + f"Received {cleaned_text}." + ) + return cleaned_text == self.true_val.upper() + + @property + def _type(self) -> str: + return "boolean_output_parser" + """ # noqa: E501 + + @property + @override + def InputType(self) -> Any: + """Return the input type for the parser.""" + return Union[str, AnyMessage] + + @property + @override + def OutputType(self) -> type[T]: + """Return the output type for the parser. + + This property is inferred from the first type argument of the class. + + Raises: + TypeError: If the class doesn't have an inferable OutputType. + """ + for base in self.__class__.mro(): + if hasattr(base, "__pydantic_generic_metadata__"): + metadata = base.__pydantic_generic_metadata__ + if "args" in metadata and len(metadata["args"]) > 0: + return metadata["args"][0] + + msg = ( + f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. " + "Override the OutputType property to specify the output type." + ) + raise TypeError(msg) + + @override + def invoke( + self, + input: Union[str, BaseMessage], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> T: + if isinstance(input, BaseMessage): + return self._call_with_config( + lambda inner_input: self.parse_result( + [ChatGeneration(message=inner_input)] + ), + input, + config, + run_type="parser", + ) + return self._call_with_config( + lambda inner_input: self.parse_result([Generation(text=inner_input)]), + input, + config, + run_type="parser", + ) + + @override + async def ainvoke( + self, + input: Union[str, BaseMessage], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> T: + if isinstance(input, BaseMessage): + return await self._acall_with_config( + lambda inner_input: self.aparse_result( + [ChatGeneration(message=inner_input)] + ), + input, + config, + run_type="parser", + ) + return await self._acall_with_config( + lambda inner_input: self.aparse_result([Generation(text=inner_input)]), + input, + config, + run_type="parser", + ) + + @override + def parse_result(self, result: list[Generation], *, partial: bool = False) -> T: + """Parse a list of candidate model Generations into a specific format. + + The return value is parsed from only the first Generation in the result, which + is assumed to be the highest-likelihood Generation. + + Args: + result: A list of Generations to be parsed. The Generations are assumed + to be different candidate outputs for a single model input. + partial: Whether to parse the output as a partial result. This is useful + for parsers that can parse partial results. Default is False. + + Returns: + Structured output. + """ + return self.parse(result[0].text) + + @abstractmethod + def parse(self, text: str) -> T: + """Parse a single string model output into some structure. + + Args: + text: String output of a language model. + + Returns: + Structured output. + """ + + async def aparse_result( + self, result: list[Generation], *, partial: bool = False + ) -> T: + """Async parse a list of candidate model Generations into a specific format. 
+ + The return value is parsed from only the first Generation in the result, which + is assumed to be the highest-likelihood Generation. + + Args: + result: A list of Generations to be parsed. The Generations are assumed + to be different candidate outputs for a single model input. + partial: Whether to parse the output as a partial result. This is useful + for parsers that can parse partial results. Default is False. + + Returns: + Structured output. + """ + return await run_in_executor(None, self.parse_result, result, partial=partial) + + async def aparse(self, text: str) -> T: + """Async parse a single string model output into some structure. + + Args: + text: String output of a language model. + + Returns: + Structured output. + """ + return await run_in_executor(None, self.parse, text) + + # TODO: rename 'completion' -> 'text'. + def parse_with_prompt( + self, + completion: str, + prompt: PromptValue, # noqa: ARG002 + ) -> Any: + """Parse the output of an LLM call with the input prompt for context. + + The prompt is largely provided in the event the OutputParser wants + to retry or fix the output in some way, and needs information from + the prompt to do so. + + Args: + completion: String output of a language model. + prompt: Input PromptValue. + + Returns: + Structured output. + """ + return self.parse(completion) + + def get_format_instructions(self) -> str: + """Instructions on how the LLM output should be formatted.""" + raise NotImplementedError + + @property + def _type(self) -> str: + """Return the output parser type for serialization.""" + msg = ( + f"_type property is not implemented in class {self.__class__.__name__}." + " This is required for serialization." + ) + raise NotImplementedError(msg) + + def dict(self, **kwargs: Any) -> dict: + """Return dictionary representation of output parser.""" + output_parser_dict = super().dict(**kwargs) + with contextlib.suppress(NotImplementedError): + output_parser_dict["_type"] = self._type + return output_parser_dict diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/format_instructions.py b/venv/Lib/site-packages/langchain_core/output_parsers/format_instructions.py new file mode 100644 index 00000000..8ad789bc --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/format_instructions.py @@ -0,0 +1,11 @@ +"""Format instructions.""" + +JSON_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below. + +As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}} +the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted. 
+ +Here is the output schema: +``` +{schema} +```""" # noqa: E501 diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/json.py b/venv/Lib/site-packages/langchain_core/output_parsers/json.py new file mode 100644 index 00000000..84f510f3 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/json.py @@ -0,0 +1,137 @@ +"""Parser for JSON output.""" + +from __future__ import annotations + +import json +from json import JSONDecodeError +from typing import Annotated, Any, Optional, TypeVar, Union + +import jsonpatch # type: ignore[import-untyped] +import pydantic +from pydantic import SkipValidation + +from langchain_core.exceptions import OutputParserException +from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS +from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser +from langchain_core.outputs import Generation +from langchain_core.utils.json import ( + parse_and_check_json_markdown, + parse_json_markdown, + parse_partial_json, +) +from langchain_core.utils.pydantic import IS_PYDANTIC_V1 + +if IS_PYDANTIC_V1: + PydanticBaseModel = pydantic.BaseModel + +else: + from pydantic.v1 import BaseModel + + # Union type needs to be last assignment to PydanticBaseModel to make mypy happy. + PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore[assignment,misc] + +TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel) + + +class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]): + """Parse the output of an LLM call to a JSON object. + + When used in streaming mode, it will yield partial JSON objects containing + all the keys that have been returned so far. + + In streaming, if `diff` is set to `True`, yields JSONPatch operations + describing the difference between the previous and the current object. + """ + + pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore[valid-type] + """The Pydantic object to use for validation. + If None, no validation is performed.""" + + def _diff(self, prev: Optional[Any], next: Any) -> Any: + return jsonpatch.make_patch(prev, next).patch + + def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]: + if issubclass(pydantic_object, pydantic.BaseModel): + return pydantic_object.model_json_schema() + if issubclass(pydantic_object, pydantic.v1.BaseModel): + return pydantic_object.schema() + return None + + def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + """Parse the result of an LLM call to a JSON object. + + Args: + result: The result of the LLM call. + partial: Whether to parse partial JSON objects. + If True, the output will be a JSON object containing + all the keys that have been returned so far. + If False, the output will be the full JSON object. + Default is False. + + Returns: + The parsed JSON object. + + Raises: + OutputParserException: If the output is not valid JSON. + """ + text = result[0].text + text = text.strip() + if partial: + try: + return parse_json_markdown(text) + except JSONDecodeError: + return None + else: + try: + return parse_json_markdown(text) + except JSONDecodeError as e: + msg = f"Invalid json output: {text}" + raise OutputParserException(msg, llm_output=text) from e + + def parse(self, text: str) -> Any: + """Parse the output of an LLM call to a JSON object. + + Args: + text: The output of the LLM call. + + Returns: + The parsed JSON object. 
+ """ + return self.parse_result([Generation(text=text)]) + + def get_format_instructions(self) -> str: + """Return the format instructions for the JSON output. + + Returns: + The format instructions for the JSON output. + """ + if self.pydantic_object is None: + return "Return a JSON object." + # Copy schema to avoid altering original Pydantic schema. + schema = dict(self._get_schema(self.pydantic_object).items()) + + # Remove extraneous fields. + reduced_schema = schema + if "title" in reduced_schema: + del reduced_schema["title"] + if "type" in reduced_schema: + del reduced_schema["type"] + # Ensure json in context is well-formed with double quotes. + schema_str = json.dumps(reduced_schema, ensure_ascii=False) + return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str) + + @property + def _type(self) -> str: + return "simple_json_output_parser" + + +# For backwards compatibility +SimpleJsonOutputParser = JsonOutputParser + + +__all__ = [ + "JsonOutputParser", + "SimpleJsonOutputParser", # For backwards compatibility + "parse_partial_json", # For backwards compatibility + "parse_and_check_json_markdown", # For backwards compatibility +] diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/list.py b/venv/Lib/site-packages/langchain_core/output_parsers/list.py new file mode 100644 index 00000000..3dba7e3a --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/list.py @@ -0,0 +1,261 @@ +"""Parsers for list output.""" + +from __future__ import annotations + +import csv +import re +from abc import abstractmethod +from collections import deque +from io import StringIO +from typing import TYPE_CHECKING, TypeVar, Union + +from typing_extensions import override + +from langchain_core.messages import BaseMessage +from langchain_core.output_parsers.transform import BaseTransformOutputParser + +if TYPE_CHECKING: + from collections.abc import AsyncIterator, Iterator + +T = TypeVar("T") + + +def droplastn(iter: Iterator[T], n: int) -> Iterator[T]: + """Drop the last n elements of an iterator. + + Args: + iter: The iterator to drop elements from. + n: The number of elements to drop. + + Yields: + The elements of the iterator, except the last n elements. + """ + buffer: deque[T] = deque() + for item in iter: + buffer.append(item) + if len(buffer) > n: + yield buffer.popleft() + + +class ListOutputParser(BaseTransformOutputParser[list[str]]): + """Parse the output of an LLM call to a list.""" + + @property + def _type(self) -> str: + return "list" + + @abstractmethod + def parse(self, text: str) -> list[str]: + """Parse the output of an LLM call. + + Args: + text: The output of an LLM call. + + Returns: + A list of strings. + """ + + def parse_iter(self, text: str) -> Iterator[re.Match]: + """Parse the output of an LLM call. + + Args: + text: The output of an LLM call. + + Yields: + A match object for each part of the output. 
+ """ + raise NotImplementedError + + def _transform( + self, input: Iterator[Union[str, BaseMessage]] + ) -> Iterator[list[str]]: + buffer = "" + for chunk in input: + if isinstance(chunk, BaseMessage): + # extract text + chunk_content = chunk.content + if not isinstance(chunk_content, str): + continue + buffer += chunk_content + else: + # add current chunk to buffer + buffer += chunk + # parse buffer into a list of parts + try: + done_idx = 0 + # yield only complete parts + for m in droplastn(self.parse_iter(buffer), 1): + done_idx = m.end() + yield [m.group(1)] + buffer = buffer[done_idx:] + except NotImplementedError: + parts = self.parse(buffer) + # yield only complete parts + if len(parts) > 1: + for part in parts[:-1]: + yield [part] + buffer = parts[-1] + # yield the last part + for part in self.parse(buffer): + yield [part] + + async def _atransform( + self, input: AsyncIterator[Union[str, BaseMessage]] + ) -> AsyncIterator[list[str]]: + buffer = "" + async for chunk in input: + if isinstance(chunk, BaseMessage): + # extract text + chunk_content = chunk.content + if not isinstance(chunk_content, str): + continue + buffer += chunk_content + else: + # add current chunk to buffer + buffer += chunk + # parse buffer into a list of parts + try: + done_idx = 0 + # yield only complete parts + for m in droplastn(self.parse_iter(buffer), 1): + done_idx = m.end() + yield [m.group(1)] + buffer = buffer[done_idx:] + except NotImplementedError: + parts = self.parse(buffer) + # yield only complete parts + if len(parts) > 1: + for part in parts[:-1]: + yield [part] + buffer = parts[-1] + # yield the last part + for part in self.parse(buffer): + yield [part] + + +class CommaSeparatedListOutputParser(ListOutputParser): + """Parse the output of an LLM call to a comma-separated list.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + """Check if the langchain object is serializable. + + Returns True. + """ + return True + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Returns: + A list of strings. + Default is ["langchain", "output_parsers", "list"]. + """ + return ["langchain", "output_parsers", "list"] + + def get_format_instructions(self) -> str: + """Return the format instructions for the comma-separated list output.""" + return ( + "Your response should be a list of comma separated values, " + "eg: `foo, bar, baz` or `foo,bar,baz`" + ) + + def parse(self, text: str) -> list[str]: + """Parse the output of an LLM call. + + Args: + text: The output of an LLM call. + + Returns: + A list of strings. + """ + try: + reader = csv.reader( + StringIO(text), quotechar='"', delimiter=",", skipinitialspace=True + ) + return [item for sublist in reader for item in sublist] + except csv.Error: + # keep old logic for backup + return [part.strip() for part in text.split(",")] + + @property + def _type(self) -> str: + return "comma-separated-list" + + +class NumberedListOutputParser(ListOutputParser): + """Parse a numbered list.""" + + pattern: str = r"\d+\.\s([^\n]+)" + """The pattern to match a numbered list item.""" + + @override + def get_format_instructions(self) -> str: + return ( + "Your response should be a numbered list with each item on a new line. " + "For example: \n\n1. foo\n\n2. bar\n\n3. baz" + ) + + def parse(self, text: str) -> list[str]: + """Parse the output of an LLM call. + + Args: + text: The output of an LLM call. + + Returns: + A list of strings. 
+ """ + return re.findall(self.pattern, text) + + def parse_iter(self, text: str) -> Iterator[re.Match]: + """Parse the output of an LLM call. + + Args: + text: The output of an LLM call. + + Yields: + A match object for each part of the output. + """ + return re.finditer(self.pattern, text) + + @property + def _type(self) -> str: + return "numbered-list" + + +class MarkdownListOutputParser(ListOutputParser): + """Parse a Markdown list.""" + + pattern: str = r"^\s*[-*]\s([^\n]+)$" + """The pattern to match a Markdown list item.""" + + def get_format_instructions(self) -> str: + """Return the format instructions for the Markdown list output.""" + return "Your response should be a markdown list, eg: `- foo\n- bar\n- baz`" + + def parse(self, text: str) -> list[str]: + """Parse the output of an LLM call. + + Args: + text: The output of an LLM call. + + Returns: + A list of strings. + """ + return re.findall(self.pattern, text, re.MULTILINE) + + def parse_iter(self, text: str) -> Iterator[re.Match]: + """Parse the output of an LLM call. + + Args: + text: The output of an LLM call. + + Yields: + A match object for each part of the output. + """ + return re.finditer(self.pattern, text, re.MULTILINE) + + @property + def _type(self) -> str: + return "markdown-list" diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/openai_functions.py b/venv/Lib/site-packages/langchain_core/output_parsers/openai_functions.py new file mode 100644 index 00000000..f71b2c80 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/openai_functions.py @@ -0,0 +1,302 @@ +"""Parsers for OpenAI functions output.""" + +import copy +import json +from types import GenericAlias +from typing import Any, Optional, Union + +import jsonpatch # type: ignore[import-untyped] +from pydantic import BaseModel, model_validator +from typing_extensions import override + +from langchain_core.exceptions import OutputParserException +from langchain_core.output_parsers import ( + BaseCumulativeTransformOutputParser, + BaseGenerationOutputParser, +) +from langchain_core.output_parsers.json import parse_partial_json +from langchain_core.outputs import ChatGeneration, Generation + + +class OutputFunctionsParser(BaseGenerationOutputParser[Any]): + """Parse an output that is one of sets of values.""" + + args_only: bool = True + """Whether to only return the arguments to the function call.""" + + @override + def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + """Parse the result of an LLM call to a JSON object. + + Args: + result: The result of the LLM call. + partial: Whether to parse partial JSON objects. Default is False. + + Returns: + The parsed JSON object. + + Raises: + OutputParserException: If the output is not valid JSON. + """ + generation = result[0] + if not isinstance(generation, ChatGeneration): + msg = "This output parser can only be used with a chat generation." + raise OutputParserException(msg) + message = generation.message + try: + func_call = copy.deepcopy(message.additional_kwargs["function_call"]) + except KeyError as exc: + msg = f"Could not parse function call: {exc}" + raise OutputParserException(msg) from exc + + if self.args_only: + return func_call["arguments"] + return func_call + + +class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]): + """Parse an output as the Json object.""" + + strict: bool = False + """Whether to allow non-JSON-compliant strings. 
+ + See: https://docs.python.org/3/library/json.html#encoders-and-decoders + + Useful when the parsed output may include unicode characters or new lines. + """ + + args_only: bool = True + """Whether to only return the arguments to the function call.""" + + @property + def _type(self) -> str: + return "json_functions" + + def _diff(self, prev: Optional[Any], next: Any) -> Any: + return jsonpatch.make_patch(prev, next).patch + + def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + """Parse the result of an LLM call to a JSON object. + + Args: + result: The result of the LLM call. + partial: Whether to parse partial JSON objects. Default is False. + + Returns: + The parsed JSON object. + + Raises: + OutputParserException: If the output is not valid JSON. + """ + if len(result) != 1: + msg = f"Expected exactly one result, but got {len(result)}" + raise OutputParserException(msg) + generation = result[0] + if not isinstance(generation, ChatGeneration): + msg = "This output parser can only be used with a chat generation." + raise OutputParserException(msg) + message = generation.message + try: + function_call = message.additional_kwargs["function_call"] + except KeyError as exc: + if partial: + return None + msg = f"Could not parse function call: {exc}" + raise OutputParserException(msg) from exc + try: + if partial: + try: + if self.args_only: + return parse_partial_json( + function_call["arguments"], strict=self.strict + ) + return { + **function_call, + "arguments": parse_partial_json( + function_call["arguments"], strict=self.strict + ), + } + except json.JSONDecodeError: + return None + elif self.args_only: + try: + return json.loads(function_call["arguments"], strict=self.strict) + except (json.JSONDecodeError, TypeError) as exc: + msg = f"Could not parse function call data: {exc}" + raise OutputParserException(msg) from exc + else: + try: + return { + **function_call, + "arguments": json.loads( + function_call["arguments"], strict=self.strict + ), + } + except (json.JSONDecodeError, TypeError) as exc: + msg = f"Could not parse function call data: {exc}" + raise OutputParserException(msg) from exc + except KeyError: + return None + + # This method would be called by the default implementation of `parse_result` + # but we're overriding that method so it's not needed. + def parse(self, text: str) -> Any: + """Parse the output of an LLM call to a JSON object. + + Args: + text: The output of the LLM call. + + Returns: + The parsed JSON object. + """ + raise NotImplementedError + + +class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser): + """Parse an output as the element of the Json object.""" + + key_name: str + """The name of the key to return.""" + + def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + """Parse the result of an LLM call to a JSON object. + + Args: + result: The result of the LLM call. + partial: Whether to parse partial JSON objects. Default is False. + + Returns: + The parsed JSON object. + """ + res = super().parse_result(result, partial=partial) + if partial and res is None: + return None + return res.get(self.key_name) if partial else res[self.key_name] + + +class PydanticOutputFunctionsParser(OutputFunctionsParser): + """Parse an output as a pydantic object. + + This parser is used to parse the output of a ChatModel that uses + OpenAI function format to invoke functions. + + The parser extracts the function call invocation and matches + them to the pydantic schema provided. 
+ + An exception will be raised if the function call does not match + the provided schema. + + Example: + ... code-block:: python + + message = AIMessage( + content="This is a test message", + additional_kwargs={ + "function_call": { + "name": "cookie", + "arguments": json.dumps({"name": "value", "age": 10}), + } + }, + ) + chat_generation = ChatGeneration(message=message) + + class Cookie(BaseModel): + name: str + age: int + + class Dog(BaseModel): + species: str + + # Full output + parser = PydanticOutputFunctionsParser( + pydantic_schema={"cookie": Cookie, "dog": Dog} + ) + result = parser.parse_result([chat_generation]) + """ + + pydantic_schema: Union[type[BaseModel], dict[str, type[BaseModel]]] + """The pydantic schema to parse the output with. + + If multiple schemas are provided, then the function name will be used to + determine which schema to use. + """ + + @model_validator(mode="before") + @classmethod + def validate_schema(cls, values: dict) -> Any: + """Validate the pydantic schema. + + Args: + values: The values to validate. + + Returns: + The validated values. + + Raises: + ValueError: If the schema is not a pydantic schema. + """ + schema = values["pydantic_schema"] + if "args_only" not in values: + values["args_only"] = ( + isinstance(schema, type) + and not isinstance(schema, GenericAlias) + and issubclass(schema, BaseModel) + ) + elif values["args_only"] and isinstance(schema, dict): + msg = ( + "If multiple pydantic schemas are provided then args_only should be" + " False." + ) + raise ValueError(msg) + return values + + @override + def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + """Parse the result of an LLM call to a JSON object. + + Args: + result: The result of the LLM call. + partial: Whether to parse partial JSON objects. Default is False. + + Returns: + The parsed JSON object. + """ + _result = super().parse_result(result) + if self.args_only: + if hasattr(self.pydantic_schema, "model_validate_json"): + pydantic_args = self.pydantic_schema.model_validate_json(_result) + else: + pydantic_args = self.pydantic_schema.parse_raw(_result) # type: ignore[attr-defined] + else: + fn_name = _result["name"] + _args = _result["arguments"] + if isinstance(self.pydantic_schema, dict): + pydantic_schema = self.pydantic_schema[fn_name] + else: + pydantic_schema = self.pydantic_schema + if hasattr(pydantic_schema, "model_validate_json"): + pydantic_args = pydantic_schema.model_validate_json(_args) + else: + pydantic_args = pydantic_schema.parse_raw(_args) + return pydantic_args + + +class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser): + """Parse an output as an attribute of a pydantic object.""" + + attr_name: str + """The name of the attribute to return.""" + + @override + def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + """Parse the result of an LLM call to a JSON object. + + Args: + result: The result of the LLM call. + partial: Whether to parse partial JSON objects. Default is False. + + Returns: + The parsed JSON object. 
+ """ + result = super().parse_result(result) + return getattr(result, self.attr_name) diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/openai_tools.py b/venv/Lib/site-packages/langchain_core/output_parsers/openai_tools.py new file mode 100644 index 00000000..b4f845bd --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/openai_tools.py @@ -0,0 +1,321 @@ +"""Parse tools for OpenAI tools output.""" + +import copy +import json +import logging +from json import JSONDecodeError +from typing import Annotated, Any, Optional + +from pydantic import SkipValidation, ValidationError + +from langchain_core.exceptions import OutputParserException +from langchain_core.messages import AIMessage, InvalidToolCall +from langchain_core.messages.tool import invalid_tool_call +from langchain_core.messages.tool import tool_call as create_tool_call +from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser +from langchain_core.outputs import ChatGeneration, Generation +from langchain_core.utils.json import parse_partial_json +from langchain_core.utils.pydantic import TypeBaseModel + +logger = logging.getLogger(__name__) + + +def parse_tool_call( + raw_tool_call: dict[str, Any], + *, + partial: bool = False, + strict: bool = False, + return_id: bool = True, +) -> Optional[dict[str, Any]]: + """Parse a single tool call. + + Args: + raw_tool_call: The raw tool call to parse. + partial: Whether to parse partial JSON. Default is False. + strict: Whether to allow non-JSON-compliant strings. + Default is False. + return_id: Whether to return the tool call id. Default is True. + + Returns: + The parsed tool call. + + Raises: + OutputParserException: If the tool call is not valid JSON. + """ + if "function" not in raw_tool_call: + return None + if partial: + try: + function_args = parse_partial_json( + raw_tool_call["function"]["arguments"], strict=strict + ) + except (JSONDecodeError, TypeError): # None args raise TypeError + return None + else: + try: + function_args = json.loads( + raw_tool_call["function"]["arguments"], strict=strict + ) + except JSONDecodeError as e: + msg = ( + f"Function {raw_tool_call['function']['name']} arguments:\n\n" + f"{raw_tool_call['function']['arguments']}\n\nare not valid JSON. " + f"Received JSONDecodeError {e}" + ) + raise OutputParserException(msg) from e + parsed = { + "name": raw_tool_call["function"]["name"] or "", + "args": function_args or {}, + } + if return_id: + parsed["id"] = raw_tool_call.get("id") + parsed = create_tool_call(**parsed) # type: ignore[assignment,arg-type] + return parsed + + +def make_invalid_tool_call( + raw_tool_call: dict[str, Any], + error_msg: Optional[str], +) -> InvalidToolCall: + """Create an InvalidToolCall from a raw tool call. + + Args: + raw_tool_call: The raw tool call. + error_msg: The error message. + + Returns: + An InvalidToolCall instance with the error message. + """ + return invalid_tool_call( + name=raw_tool_call["function"]["name"], + args=raw_tool_call["function"]["arguments"], + id=raw_tool_call.get("id"), + error=error_msg, + ) + + +def parse_tool_calls( + raw_tool_calls: list[dict], + *, + partial: bool = False, + strict: bool = False, + return_id: bool = True, +) -> list[dict[str, Any]]: + """Parse a list of tool calls. + + Args: + raw_tool_calls: The raw tool calls to parse. + partial: Whether to parse partial JSON. Default is False. + strict: Whether to allow non-JSON-compliant strings. + Default is False. + return_id: Whether to return the tool call id. 
Default is True. + + Returns: + The parsed tool calls. + + Raises: + OutputParserException: If any of the tool calls are not valid JSON. + """ + final_tools: list[dict[str, Any]] = [] + exceptions = [] + for tool_call in raw_tool_calls: + try: + parsed = parse_tool_call( + tool_call, partial=partial, strict=strict, return_id=return_id + ) + if parsed: + final_tools.append(parsed) + except OutputParserException as e: + exceptions.append(str(e)) + continue + if exceptions: + raise OutputParserException("\n\n".join(exceptions)) + return final_tools + + +class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]): + """Parse tools from OpenAI response.""" + + strict: bool = False + """Whether to allow non-JSON-compliant strings. + + See: https://docs.python.org/3/library/json.html#encoders-and-decoders + + Useful when the parsed output may include unicode characters or new lines. + """ + return_id: bool = False + """Whether to return the tool call id.""" + first_tool_only: bool = False + """Whether to return only the first tool call. + + If False, the result will be a list of tool calls, or an empty list + if no tool calls are found. + + If true, and multiple tool calls are found, only the first one will be returned, + and the other tool calls will be ignored. + If no tool calls are found, None will be returned. + """ + + def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + """Parse the result of an LLM call to a list of tool calls. + + Args: + result: The result of the LLM call. + partial: Whether to parse partial JSON. + If True, the output will be a JSON object containing + all the keys that have been returned so far. + If False, the output will be the full JSON object. + Default is False. + + Returns: + The parsed tool calls. + + Raises: + OutputParserException: If the output is not valid JSON. + """ + generation = result[0] + if not isinstance(generation, ChatGeneration): + msg = "This output parser can only be used with a chat generation." + raise OutputParserException(msg) + message = generation.message + if isinstance(message, AIMessage) and message.tool_calls: + tool_calls = [dict(tc) for tc in message.tool_calls] + for tool_call in tool_calls: + if not self.return_id: + _ = tool_call.pop("id") + else: + try: + raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"]) + except KeyError: + return [] + tool_calls = parse_tool_calls( + raw_tool_calls, + partial=partial, + strict=self.strict, + return_id=self.return_id, + ) + # for backwards compatibility + for tc in tool_calls: + tc["type"] = tc.pop("name") + + if self.first_tool_only: + return tool_calls[0] if tool_calls else None + return tool_calls + + def parse(self, text: str) -> Any: + """Parse the output of an LLM call to a list of tool calls. + + Args: + text: The output of the LLM call. + + Returns: + The parsed tool calls. + """ + raise NotImplementedError + + +class JsonOutputKeyToolsParser(JsonOutputToolsParser): + """Parse tools from OpenAI response.""" + + key_name: str + """The type of tools to return.""" + + def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + """Parse the result of an LLM call to a list of tool calls. + + Args: + result: The result of the LLM call. + partial: Whether to parse partial JSON. + If True, the output will be a JSON object containing + all the keys that have been returned so far. + If False, the output will be the full JSON object. + Default is False. + + Returns: + The parsed tool calls. 
+ """ + parsed_result = super().parse_result(result, partial=partial) + + if self.first_tool_only: + single_result = ( + parsed_result + if parsed_result and parsed_result["type"] == self.key_name + else None + ) + if self.return_id: + return single_result + if single_result: + return single_result["args"] + return None + parsed_result = [res for res in parsed_result if res["type"] == self.key_name] + if not self.return_id: + parsed_result = [res["args"] for res in parsed_result] + return parsed_result + + +# Common cause of ValidationError is truncated output due to max_tokens. +_MAX_TOKENS_ERROR = ( + "Output parser received a `max_tokens` stop reason. " + "The output is likely incomplete—please increase `max_tokens` " + "or shorten your prompt." +) + + +class PydanticToolsParser(JsonOutputToolsParser): + """Parse tools from OpenAI response.""" + + tools: Annotated[list[TypeBaseModel], SkipValidation()] + """The tools to parse.""" + + # TODO: Support more granular streaming of objects. Currently only streams once all + # Pydantic object fields are present. + def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + """Parse the result of an LLM call to a list of Pydantic objects. + + Args: + result: The result of the LLM call. + partial: Whether to parse partial JSON. + If True, the output will be a JSON object containing + all the keys that have been returned so far. + If False, the output will be the full JSON object. + Default is False. + + Returns: + The parsed Pydantic objects. + + Raises: + OutputParserException: If the output is not valid JSON. + """ + json_results = super().parse_result(result, partial=partial) + if not json_results: + return None if self.first_tool_only else [] + + json_results = [json_results] if self.first_tool_only else json_results + name_dict = {tool.__name__: tool for tool in self.tools} + pydantic_objects = [] + for res in json_results: + if not isinstance(res["args"], dict): + if partial: + continue + msg = ( + f"Tool arguments must be specified as a dict, received: " + f"{res['args']}" + ) + raise ValueError(msg) + try: + pydantic_objects.append(name_dict[res["type"]](**res["args"])) + except (ValidationError, ValueError): + if partial: + continue + has_max_tokens_stop_reason = any( + generation.message.response_metadata.get("stop_reason") + == "max_tokens" + for generation in result + if isinstance(generation, ChatGeneration) + ) + if has_max_tokens_stop_reason: + logger.exception(_MAX_TOKENS_ERROR) + raise + if self.first_tool_only: + return pydantic_objects[0] if pydantic_objects else None + return pydantic_objects diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/pydantic.py b/venv/Lib/site-packages/langchain_core/output_parsers/pydantic.py new file mode 100644 index 00000000..194cc3a2 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/pydantic.py @@ -0,0 +1,132 @@ +"""Output parsers using Pydantic.""" + +import json +from typing import Annotated, Generic, Optional + +import pydantic +from pydantic import SkipValidation +from typing_extensions import override + +from langchain_core.exceptions import OutputParserException +from langchain_core.output_parsers import JsonOutputParser +from langchain_core.outputs import Generation +from langchain_core.utils.pydantic import ( + IS_PYDANTIC_V2, + PydanticBaseModel, + TBaseModel, +) + + +class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]): + """Parse an output using a pydantic model.""" + + pydantic_object: 
Annotated[type[TBaseModel], SkipValidation()] + """The pydantic model to parse.""" + + def _parse_obj(self, obj: dict) -> TBaseModel: + if IS_PYDANTIC_V2: + try: + if issubclass(self.pydantic_object, pydantic.BaseModel): + return self.pydantic_object.model_validate(obj) + if issubclass(self.pydantic_object, pydantic.v1.BaseModel): + return self.pydantic_object.parse_obj(obj) + msg = f"Unsupported model version for PydanticOutputParser: \ + {self.pydantic_object.__class__}" + raise OutputParserException(msg) + except (pydantic.ValidationError, pydantic.v1.ValidationError) as e: + raise self._parser_exception(e, obj) from e + else: # pydantic v1 + try: + return self.pydantic_object.parse_obj(obj) + except pydantic.ValidationError as e: + raise self._parser_exception(e, obj) from e + + def _parser_exception( + self, e: Exception, json_object: dict + ) -> OutputParserException: + json_string = json.dumps(json_object) + name = self.pydantic_object.__name__ + msg = f"Failed to parse {name} from completion {json_string}. Got: {e}" + return OutputParserException(msg, llm_output=json_string) + + def parse_result( + self, result: list[Generation], *, partial: bool = False + ) -> Optional[TBaseModel]: + """Parse the result of an LLM call to a pydantic object. + + Args: + result: The result of the LLM call. + partial: Whether to parse partial JSON objects. + If True, the output will be a JSON object containing + all the keys that have been returned so far. + Defaults to False. + + Returns: + The parsed pydantic object. + """ + try: + json_object = super().parse_result(result) + return self._parse_obj(json_object) + except OutputParserException: + if partial: + return None + raise + + def parse(self, text: str) -> TBaseModel: + """Parse the output of an LLM call to a pydantic object. + + Args: + text: The output of the LLM call. + + Returns: + The parsed pydantic object. + """ + return super().parse(text) + + def get_format_instructions(self) -> str: + """Return the format instructions for the JSON output. + + Returns: + The format instructions for the JSON output. + """ + # Copy schema to avoid altering original Pydantic schema. + schema = dict(self.pydantic_object.model_json_schema().items()) + + # Remove extraneous fields. + reduced_schema = schema + if "title" in reduced_schema: + del reduced_schema["title"] + if "type" in reduced_schema: + del reduced_schema["type"] + # Ensure json in context is well-formed with double quotes. + schema_str = json.dumps(reduced_schema, ensure_ascii=False) + + return _PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str) + + @property + def _type(self) -> str: + return "pydantic" + + @property + @override + def OutputType(self) -> type[TBaseModel]: + """Return the pydantic model.""" + return self.pydantic_object + + +_PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below. + +As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}} +the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted. 
+ +Here is the output schema: +``` +{schema} +```""" # noqa: E501 + +# Re-exporting types for backwards compatibility +__all__ = [ + "PydanticBaseModel", + "PydanticOutputParser", + "TBaseModel", +] diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/string.py b/venv/Lib/site-packages/langchain_core/output_parsers/string.py new file mode 100644 index 00000000..4f952c68 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/string.py @@ -0,0 +1,33 @@ +"""String output parser.""" + +from langchain_core.output_parsers.transform import BaseTransformOutputParser + + +class StrOutputParser(BaseTransformOutputParser[str]): + """OutputParser that parses LLMResult into the top likely string.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + """StrOutputParser is serializable. + + Returns: + True + """ + return True + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Default is ["langchain", "schema", "output_parser"]. + """ + return ["langchain", "schema", "output_parser"] + + @property + def _type(self) -> str: + """Return the output parser type for serialization.""" + return "default" + + def parse(self, text: str) -> str: + """Returns the input text with no changes.""" + return text diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/transform.py b/venv/Lib/site-packages/langchain_core/output_parsers/transform.py new file mode 100644 index 00000000..783abedf --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/transform.py @@ -0,0 +1,167 @@ +"""Base classes for output parsers that can handle streaming input.""" + +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Union, +) + +from typing_extensions import override + +from langchain_core.messages import BaseMessage, BaseMessageChunk +from langchain_core.output_parsers.base import BaseOutputParser, T +from langchain_core.outputs import ( + ChatGeneration, + ChatGenerationChunk, + Generation, + GenerationChunk, +) +from langchain_core.runnables.config import run_in_executor + +if TYPE_CHECKING: + from collections.abc import AsyncIterator, Iterator + + from langchain_core.runnables import RunnableConfig + + +class BaseTransformOutputParser(BaseOutputParser[T]): + """Base class for an output parser that can handle streaming input.""" + + def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[T]: + for chunk in input: + if isinstance(chunk, BaseMessage): + yield self.parse_result([ChatGeneration(message=chunk)]) + else: + yield self.parse_result([Generation(text=chunk)]) + + async def _atransform( + self, input: AsyncIterator[Union[str, BaseMessage]] + ) -> AsyncIterator[T]: + async for chunk in input: + if isinstance(chunk, BaseMessage): + yield await run_in_executor( + None, self.parse_result, [ChatGeneration(message=chunk)] + ) + else: + yield await run_in_executor( + None, self.parse_result, [Generation(text=chunk)] + ) + + @override + def transform( + self, + input: Iterator[Union[str, BaseMessage]], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[T]: + """Transform the input into the output format. + + Args: + input: The input to transform. + config: The configuration to use for the transformation. + kwargs: Additional keyword arguments. + + Yields: + The transformed output. 
+ """ + yield from self._transform_stream_with_config( + input, self._transform, config, run_type="parser" + ) + + @override + async def atransform( + self, + input: AsyncIterator[Union[str, BaseMessage]], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[T]: + """Async transform the input into the output format. + + Args: + input: The input to transform. + config: The configuration to use for the transformation. + kwargs: Additional keyword arguments. + + Yields: + The transformed output. + """ + async for chunk in self._atransform_stream_with_config( + input, self._atransform, config, run_type="parser" + ): + yield chunk + + +class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]): + """Base class for an output parser that can handle streaming input.""" + + diff: bool = False + """In streaming mode, whether to yield diffs between the previous and current + parsed output, or just the current parsed output. + """ + + def _diff(self, prev: Optional[T], next: T) -> T: + """Convert parsed outputs into a diff format. + + The semantics of this are up to the output parser. + + Args: + prev: The previous parsed output. + next: The current parsed output. + + Returns: + The diff between the previous and current parsed output. + """ + raise NotImplementedError + + def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[Any]: + prev_parsed = None + acc_gen: Union[GenerationChunk, ChatGenerationChunk, None] = None + for chunk in input: + chunk_gen: Union[GenerationChunk, ChatGenerationChunk] + if isinstance(chunk, BaseMessageChunk): + chunk_gen = ChatGenerationChunk(message=chunk) + elif isinstance(chunk, BaseMessage): + chunk_gen = ChatGenerationChunk( + message=BaseMessageChunk(**chunk.dict()) + ) + else: + chunk_gen = GenerationChunk(text=chunk) + + acc_gen = chunk_gen if acc_gen is None else acc_gen + chunk_gen # type: ignore[operator] + + parsed = self.parse_result([acc_gen], partial=True) + if parsed is not None and parsed != prev_parsed: + if self.diff: + yield self._diff(prev_parsed, parsed) + else: + yield parsed + prev_parsed = parsed + + async def _atransform( + self, input: AsyncIterator[Union[str, BaseMessage]] + ) -> AsyncIterator[T]: + prev_parsed = None + acc_gen: Union[GenerationChunk, ChatGenerationChunk, None] = None + async for chunk in input: + chunk_gen: Union[GenerationChunk, ChatGenerationChunk] + if isinstance(chunk, BaseMessageChunk): + chunk_gen = ChatGenerationChunk(message=chunk) + elif isinstance(chunk, BaseMessage): + chunk_gen = ChatGenerationChunk( + message=BaseMessageChunk(**chunk.dict()) + ) + else: + chunk_gen = GenerationChunk(text=chunk) + + acc_gen = chunk_gen if acc_gen is None else acc_gen + chunk_gen # type: ignore[operator] + + parsed = await self.aparse_result([acc_gen], partial=True) + if parsed is not None and parsed != prev_parsed: + if self.diff: + yield await run_in_executor(None, self._diff, prev_parsed, parsed) + else: + yield parsed + prev_parsed = parsed diff --git a/venv/Lib/site-packages/langchain_core/output_parsers/xml.py b/venv/Lib/site-packages/langchain_core/output_parsers/xml.py new file mode 100644 index 00000000..e7b8278c --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/output_parsers/xml.py @@ -0,0 +1,285 @@ +"""Output parser for XML format.""" + +import contextlib +import re +import xml +import xml.etree.ElementTree as ET # noqa: N817 +from collections.abc import AsyncIterator, Iterator +from typing import Any, Literal, Optional, Union +from xml.etree.ElementTree 
import TreeBuilder + +from langchain_core.exceptions import OutputParserException +from langchain_core.messages import BaseMessage +from langchain_core.output_parsers.transform import BaseTransformOutputParser +from langchain_core.runnables.utils import AddableDict + +XML_FORMAT_INSTRUCTIONS = """The output should be formatted as a XML file. +1. Output should conform to the tags below. +2. If tags are not given, make them on your own. +3. Remember to always open and close all the tags. + +As an example, for the tags ["foo", "bar", "baz"]: +1. String "\n \n \n \n" is a well-formatted instance of the schema. +2. String "\n \n " is a badly-formatted instance. +3. String "\n \n \n" is a badly-formatted instance. + +Here are the output tags: +``` +{tags} +```""" # noqa: E501 + + +class _StreamingParser: + """Streaming parser for XML. + + This implementation is pulled into a class to avoid implementation + drift between transform and atransform of the XMLOutputParser. + """ + + def __init__(self, parser: Literal["defusedxml", "xml"]) -> None: + """Initialize the streaming parser. + + Args: + parser: Parser to use for XML parsing. Can be either 'defusedxml' or 'xml'. + See documentation in XMLOutputParser for more information. + + Raises: + ImportError: If defusedxml is not installed and the defusedxml + parser is requested. + """ + if parser == "defusedxml": + try: + import defusedxml # type: ignore[import-untyped] + except ImportError as e: + msg = ( + "defusedxml is not installed. " + "Please install it to use the defusedxml parser." + "You can install it with `pip install defusedxml` " + ) + raise ImportError(msg) from e + _parser = defusedxml.ElementTree.DefusedXMLParser(target=TreeBuilder()) + else: + _parser = None + self.pull_parser = ET.XMLPullParser(["start", "end"], _parser=_parser) + self.xml_start_re = re.compile(r"<[a-zA-Z:_]") + self.current_path: list[str] = [] + self.current_path_has_children = False + self.buffer = "" + self.xml_started = False + + def parse(self, chunk: Union[str, BaseMessage]) -> Iterator[AddableDict]: + """Parse a chunk of text. + + Args: + chunk: A chunk of text to parse. This can be a string or a BaseMessage. + + Yields: + AddableDict: A dictionary representing the parsed XML element. + + Raises: + xml.etree.ElementTree.ParseError: If the XML is not well-formed. 
+ """ + if isinstance(chunk, BaseMessage): + # extract text + chunk_content = chunk.content + if not isinstance(chunk_content, str): + # ignore non-string messages (e.g., function calls) + return + chunk = chunk_content + # add chunk to buffer of unprocessed text + self.buffer += chunk + # if xml string hasn't started yet, continue to next chunk + if not self.xml_started: + if match := self.xml_start_re.search(self.buffer): + # if xml string has started, remove all text before it + self.buffer = self.buffer[match.start() :] + self.xml_started = True + else: + return + # feed buffer to parser + self.pull_parser.feed(self.buffer) + self.buffer = "" + # yield all events + try: + for event, elem in self.pull_parser.read_events(): + if event == "start": + # update current path + self.current_path.append(elem.tag) + self.current_path_has_children = False + elif event == "end": + # remove last element from current path + # + self.current_path.pop() + # yield element + if not self.current_path_has_children: + yield nested_element(self.current_path, elem) + # prevent yielding of parent element + if self.current_path: + self.current_path_has_children = True + else: + self.xml_started = False + except xml.etree.ElementTree.ParseError: + # This might be junk at the end of the XML input. + # Let's check whether the current path is empty. + if not self.current_path: + # If it is empty, we can ignore this error. + return + else: + raise + + def close(self) -> None: + """Close the parser. + + This should be called after all chunks have been parsed. + + Raises: + xml.etree.ElementTree.ParseError: If the XML is not well-formed. + """ + # Ignore ParseError. This will ignore any incomplete XML at the end of the input + with contextlib.suppress(xml.etree.ElementTree.ParseError): + self.pull_parser.close() + + +class XMLOutputParser(BaseTransformOutputParser): + """Parse an output using xml format.""" + + tags: Optional[list[str]] = None + """Tags to tell the LLM to expect in the XML output. + + Note this may not be perfect depending on the LLM implementation. + + For example, with tags=["foo", "bar", "baz"]: + 1. A well-formatted XML instance: + "\n \n \n \n" + + 2. A badly-formatted XML instance (missing closing tag for 'bar'): + "\n \n " + + 3. A badly-formatted XML instance (unexpected 'tag' element): + "\n \n \n" + """ + encoding_matcher: re.Pattern = re.compile( + r"<([^>]*encoding[^>]*)>\n(.*)", re.MULTILINE | re.DOTALL + ) + parser: Literal["defusedxml", "xml"] = "defusedxml" + """Parser to use for XML parsing. Can be either 'defusedxml' or 'xml'. + + * 'defusedxml' is the default parser and is used to prevent XML vulnerabilities + present in some distributions of Python's standard library xml. + `defusedxml` is a wrapper around the standard library parser that + sets up the parser with secure defaults. + * 'xml' is the standard library parser. + + Use `xml` only if you are sure that your distribution of the standard library + is not vulnerable to XML vulnerabilities. + + Please review the following resources for more information: + + * https://docs.python.org/3/library/xml.html#xml-vulnerabilities + * https://github.com/tiran/defusedxml + + The standard library relies on libexpat for parsing XML: + https://github.com/libexpat/libexpat + """ + + def get_format_instructions(self) -> str: + """Return the format instructions for the XML output.""" + return XML_FORMAT_INSTRUCTIONS.format(tags=self.tags) + + def parse(self, text: str) -> dict[str, Union[str, list[Any]]]: + """Parse the output of an LLM call. 
+ + Args: + text: The output of an LLM call. + + Returns: + A dictionary representing the parsed XML. + + Raises: + OutputParserException: If the XML is not well-formed. + ImportError: If defusedxml is not installed and the defusedxml + parser is requested. + """ + # Try to find XML string within triple backticks + # Imports are temporarily placed here to avoid issue with caching on CI + # likely if you're reading this you can move them to the top of the file + if self.parser == "defusedxml": + try: + from defusedxml import ElementTree + except ImportError as e: + msg = ( + "defusedxml is not installed. " + "Please install it to use the defusedxml parser." + "You can install it with `pip install defusedxml`" + "See https://github.com/tiran/defusedxml for more details" + ) + raise ImportError(msg) from e + _et = ElementTree # Use the defusedxml parser + else: + _et = ET # Use the standard library parser + + match = re.search(r"```(xml)?(.*)```", text, re.DOTALL) + if match is not None: + # If match found, use the content within the backticks + text = match.group(2) + encoding_match = self.encoding_matcher.search(text) + if encoding_match: + text = encoding_match.group(2) + + text = text.strip() + try: + root = _et.fromstring(text) + return self._root_to_dict(root) + except _et.ParseError as e: + msg = f"Failed to parse XML format from completion {text}. Got: {e}" + raise OutputParserException(msg, llm_output=text) from e + + def _transform( + self, input: Iterator[Union[str, BaseMessage]] + ) -> Iterator[AddableDict]: + streaming_parser = _StreamingParser(self.parser) + for chunk in input: + yield from streaming_parser.parse(chunk) + streaming_parser.close() + + async def _atransform( + self, input: AsyncIterator[Union[str, BaseMessage]] + ) -> AsyncIterator[AddableDict]: + streaming_parser = _StreamingParser(self.parser) + async for chunk in input: + for output in streaming_parser.parse(chunk): + yield output + streaming_parser.close() + + def _root_to_dict(self, root: ET.Element) -> dict[str, Union[str, list[Any]]]: + """Converts xml tree to python dictionary.""" + if root.text and bool(re.search(r"\S", root.text)): + # If root text contains any non-whitespace character it + # returns {root.tag: root.text} + return {root.tag: root.text} + result: dict = {root.tag: []} + for child in root: + if len(child) == 0: + result[root.tag].append({child.tag: child.text}) + else: + result[root.tag].append(self._root_to_dict(child)) + return result + + @property + def _type(self) -> str: + return "xml" + + +def nested_element(path: list[str], elem: ET.Element) -> Any: + """Get nested element from path. + + Args: + path: The path to the element. + elem: The element to extract. + + Returns: + The nested element. + """ + if len(path) == 0: + return AddableDict({elem.tag: elem.text}) + return AddableDict({path[0]: [nested_element(path[1:], elem)]}) diff --git a/venv/Lib/site-packages/langchain_core/outputs/__init__.py b/venv/Lib/site-packages/langchain_core/outputs/__init__.py new file mode 100644 index 00000000..b9072b9c --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/outputs/__init__.py @@ -0,0 +1,67 @@ +"""Output classes. + +**Output** classes are used to represent the output of a language model call +and the output of a chat. + +The top container for information is the `LLMResult` object. `LLMResult` is used by +both chat models and LLMs. This object contains the output of the language +model and any additional information that the model provider wants to return. 
+ +When invoking models via the standard runnable methods (e.g. invoke, batch, etc.): +- Chat models will return `AIMessage` objects. +- LLMs will return regular text strings. + +In addition, users can access the raw output of either LLMs or chat models via +callbacks. The on_chat_model_end and on_llm_end callbacks will return an +LLMResult object containing the generated outputs and any additional information +returned by the model provider. + +In general, if information is already available +in the AIMessage object, it is recommended to access it from there rather than +from the `LLMResult` object. +""" + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.outputs.chat_generation import ( + ChatGeneration, + ChatGenerationChunk, + ) + from langchain_core.outputs.chat_result import ChatResult + from langchain_core.outputs.generation import Generation, GenerationChunk + from langchain_core.outputs.llm_result import LLMResult + from langchain_core.outputs.run_info import RunInfo + +__all__ = ( + "ChatGeneration", + "ChatGenerationChunk", + "ChatResult", + "Generation", + "GenerationChunk", + "LLMResult", + "RunInfo", +) + +_dynamic_imports = { + "ChatGeneration": "chat_generation", + "ChatGenerationChunk": "chat_generation", + "ChatResult": "chat_result", + "Generation": "generation", + "GenerationChunk": "generation", + "LLMResult": "llm_result", + "RunInfo": "run_info", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/outputs/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..2d0d77c7 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/outputs/__pycache__/chat_generation.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/chat_generation.cpython-312.pyc new file mode 100644 index 00000000..71aefa1f Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/chat_generation.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/outputs/__pycache__/chat_result.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/chat_result.cpython-312.pyc new file mode 100644 index 00000000..c8b95d0f Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/chat_result.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/outputs/__pycache__/generation.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/generation.cpython-312.pyc new file mode 100644 index 00000000..c8b39b8e Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/generation.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/outputs/__pycache__/llm_result.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/llm_result.cpython-312.pyc new file mode 100644 index 00000000..c2360f41 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/llm_result.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain_core/outputs/__pycache__/run_info.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/run_info.cpython-312.pyc new file mode 100644 index 00000000..8ae13412 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/outputs/__pycache__/run_info.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/outputs/chat_generation.py b/venv/Lib/site-packages/langchain_core/outputs/chat_generation.py new file mode 100644 index 00000000..d01ae516 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/outputs/chat_generation.py @@ -0,0 +1,117 @@ +"""Chat generation output classes.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal, Union + +from pydantic import model_validator + +from langchain_core.messages import BaseMessage, BaseMessageChunk +from langchain_core.outputs.generation import Generation +from langchain_core.utils._merge import merge_dicts + +if TYPE_CHECKING: + from typing_extensions import Self + + +class ChatGeneration(Generation): + """A single chat generation output. + + A subclass of Generation that represents the response from a chat model + that generates chat messages. + + The `message` attribute is a structured representation of the chat message. + Most of the time, the message will be of type `AIMessage`. + + Users working with chat models will usually access information via either + `AIMessage` (returned from runnable interfaces) or `LLMResult` (available + via callbacks). + """ + + text: str = "" + """*SHOULD NOT BE SET DIRECTLY* The text contents of the output message.""" + message: BaseMessage + """The message output by the chat model.""" + # Override type to be ChatGeneration, ignore mypy error as this is intentional + type: Literal["ChatGeneration"] = "ChatGeneration" # type: ignore[assignment] + """Type is used exclusively for serialization purposes.""" + + @model_validator(mode="after") + def set_text(self) -> Self: + """Set the text attribute to be the contents of the message. + + Args: + values: The values of the object. + + Returns: + The values of the object with the text attribute set. + + Raises: + ValueError: If the message is not a string or a list. + """ + try: + text = "" + if isinstance(self.message.content, str): + text = self.message.content + # Assumes text in content blocks in OpenAI format. + # Uses first text block. + elif isinstance(self.message.content, list): + for block in self.message.content: + if isinstance(block, str): + text = block + break + if isinstance(block, dict) and "text" in block: + text = block["text"] + break + else: + pass + self.text = text + except (KeyError, AttributeError) as e: + msg = "Error while initializing ChatGeneration" + raise ValueError(msg) from e + return self + + +class ChatGenerationChunk(ChatGeneration): + """ChatGeneration chunk. + + ChatGeneration chunks can be concatenated with other ChatGeneration chunks. + """ + + message: BaseMessageChunk + """The message chunk output by the chat model.""" + # Override type to be ChatGeneration, ignore mypy error as this is intentional + type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment] + """Type is used exclusively for serialization purposes.""" + + def __add__( + self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]] + ) -> ChatGenerationChunk: + """Concatenate two ChatGenerationChunks. + + Args: + other: The other ChatGenerationChunk or list of ChatGenerationChunks to + concatenate. 
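+
+        Returns:
+            A new ChatGenerationChunk whose message is the concatenation of the
+            two message chunks and whose ``generation_info`` dicts are merged.
+
+        Example:
+            A minimal sketch; ``chunk1`` and ``chunk2`` stand for chunks
+            collected from a streaming chat model run:
+
+            .. code-block:: python
+
+                combined = chunk1 + chunk2
+                print(combined.text)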
+ """ + if isinstance(other, ChatGenerationChunk): + generation_info = merge_dicts( + self.generation_info or {}, + other.generation_info or {}, + ) + return ChatGenerationChunk( + message=self.message + other.message, + generation_info=generation_info or None, + ) + if isinstance(other, list) and all( + isinstance(x, ChatGenerationChunk) for x in other + ): + generation_info = merge_dicts( + self.generation_info or {}, + *[chunk.generation_info for chunk in other if chunk.generation_info], + ) + return ChatGenerationChunk( + message=self.message + [chunk.message for chunk in other], + generation_info=generation_info or None, + ) + msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'" + raise TypeError(msg) diff --git a/venv/Lib/site-packages/langchain_core/outputs/chat_result.py b/venv/Lib/site-packages/langchain_core/outputs/chat_result.py new file mode 100644 index 00000000..3e6a5076 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/outputs/chat_result.py @@ -0,0 +1,38 @@ +"""Chat result schema.""" + +from typing import Optional + +from pydantic import BaseModel + +from langchain_core.outputs.chat_generation import ChatGeneration + + +class ChatResult(BaseModel): + """Use to represent the result of a chat model call with a single prompt. + + This container is used internally by some implementations of chat model, + it will eventually be mapped to a more general `LLMResult` object, and + then projected into an `AIMessage` object. + + LangChain users working with chat models will usually access information via + `AIMessage` (returned from runnable interfaces) or `LLMResult` (available + via callbacks). Please refer the `AIMessage` and `LLMResult` schema documentation + for more information. + """ + + generations: list[ChatGeneration] + """List of the chat generations. + + Generations is a list to allow for multiple candidate generations for a single + input prompt. + """ + llm_output: Optional[dict] = None + """For arbitrary LLM provider specific output. + + This dictionary is a free-form dictionary that can contain any information that the + provider wants to return. It is not standardized and is provider-specific. + + Users should generally avoid relying on this field and instead rely on + accessing relevant information from standardized fields present in + AIMessage. + """ diff --git a/venv/Lib/site-packages/langchain_core/outputs/generation.py b/venv/Lib/site-packages/langchain_core/outputs/generation.py new file mode 100644 index 00000000..8f3bbe5a --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/outputs/generation.py @@ -0,0 +1,68 @@ +"""Generation output schema.""" + +from __future__ import annotations + +from typing import Any, Literal, Optional + +from langchain_core.load import Serializable +from langchain_core.utils._merge import merge_dicts + + +class Generation(Serializable): + """A single text generation output. + + Generation represents the response from an "old-fashioned" LLM that + generates regular text (not chat messages). + + This model is used internally by chat model and will eventually + be mapped to a more general `LLMResult` object, and then projected into + an `AIMessage` object. + + LangChain users working with chat models will usually access information via + `AIMessage` (returned from runnable interfaces) or `LLMResult` (available + via callbacks). Please refer the `AIMessage` and `LLMResult` schema documentation + for more information. 
+ """ + + text: str + """Generated text output.""" + + generation_info: Optional[dict[str, Any]] = None + """Raw response from the provider. + + May include things like the reason for finishing or token log probabilities. + """ + type: Literal["Generation"] = "Generation" + """Type is used exclusively for serialization purposes. + Set to "Generation" for this class.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + """Return whether this class is serializable.""" + return True + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Default namespace is ["langchain", "schema", "output"]. + """ + return ["langchain", "schema", "output"] + + +class GenerationChunk(Generation): + """Generation chunk, which can be concatenated with other Generation chunks.""" + + def __add__(self, other: GenerationChunk) -> GenerationChunk: + """Concatenate two GenerationChunks.""" + if isinstance(other, GenerationChunk): + generation_info = merge_dicts( + self.generation_info or {}, + other.generation_info or {}, + ) + return GenerationChunk( + text=self.text + other.text, + generation_info=generation_info or None, + ) + msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'" + raise TypeError(msg) diff --git a/venv/Lib/site-packages/langchain_core/outputs/llm_result.py b/venv/Lib/site-packages/langchain_core/outputs/llm_result.py new file mode 100644 index 00000000..fb72f640 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/outputs/llm_result.py @@ -0,0 +1,99 @@ +"""LLMResult class.""" + +from __future__ import annotations + +from copy import deepcopy +from typing import Literal, Optional, Union + +from pydantic import BaseModel + +from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk +from langchain_core.outputs.generation import Generation, GenerationChunk +from langchain_core.outputs.run_info import RunInfo + + +class LLMResult(BaseModel): + """A container for results of an LLM call. + + Both chat models and LLMs generate an LLMResult object. This object contains + the generated outputs and any additional information that the model provider + wants to return. + """ + + generations: list[ + list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]] + ] + """Generated outputs. + + The first dimension of the list represents completions for different input + prompts. + + The second dimension of the list represents different candidate generations + for a given prompt. + + When returned from an LLM the type is list[list[Generation]]. + When returned from a chat model the type is list[list[ChatGeneration]]. + + ChatGeneration is a subclass of Generation that has a field for a structured + chat message. + """ + llm_output: Optional[dict] = None + """For arbitrary LLM provider specific output. + + This dictionary is a free-form dictionary that can contain any information that the + provider wants to return. It is not standardized and is provider-specific. + + Users should generally avoid relying on this field and instead rely on + accessing relevant information from standardized fields present in + AIMessage. + """ + run: Optional[list[RunInfo]] = None + """List of metadata info for model call for each input.""" + + type: Literal["LLMResult"] = "LLMResult" + """Type is used exclusively for serialization purposes.""" + + def flatten(self) -> list[LLMResult]: + """Flatten generations into a single list. 
+ + Unpack list[list[Generation]] -> list[LLMResult] where each returned LLMResult + contains only a single Generation. If token usage information is available, + it is kept only for the LLMResult corresponding to the top-choice + Generation, to avoid over-counting of token usage downstream. + + Returns: + List of LLMResults where each returned LLMResult contains a single + Generation. + """ + llm_results = [] + for i, gen_list in enumerate(self.generations): + # Avoid double counting tokens in OpenAICallback + if i == 0: + llm_results.append( + LLMResult( + generations=[gen_list], + llm_output=self.llm_output, + ) + ) + else: + if self.llm_output is not None: + llm_output = deepcopy(self.llm_output) + llm_output["token_usage"] = {} + else: + llm_output = None + llm_results.append( + LLMResult( + generations=[gen_list], + llm_output=llm_output, + ) + ) + return llm_results + + def __eq__(self, other: object) -> bool: + """Check for LLMResult equality by ignoring any metadata related to runs.""" + if not isinstance(other, LLMResult): + return NotImplemented + return ( + self.generations == other.generations + and self.llm_output == other.llm_output + ) diff --git a/venv/Lib/site-packages/langchain_core/outputs/run_info.py b/venv/Lib/site-packages/langchain_core/outputs/run_info.py new file mode 100644 index 00000000..c1255097 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/outputs/run_info.py @@ -0,0 +1,22 @@ +"""RunInfo class.""" + +from __future__ import annotations + +from uuid import UUID + +from pydantic import BaseModel + + +class RunInfo(BaseModel): + """Class that contains metadata for a single execution of a Chain or model. + + Defined for backwards compatibility with older versions of langchain_core. + + This model will likely be deprecated in the future. + + Users can acquire the run_id information from callbacks or via run_id + information present in the astream_event API (depending on the use case). + """ + + run_id: UUID + """A unique identifier for the model or chain run.""" diff --git a/venv/Lib/site-packages/langchain_core/prompt_values.py b/venv/Lib/site-packages/langchain_core/prompt_values.py new file mode 100644 index 00000000..7652bd76 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompt_values.py @@ -0,0 +1,142 @@ +"""**Prompt values** for language model prompts. + +Prompt values are used to represent different pieces of prompts. +They can be used to represent text, images, or chat message pieces. +""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from collections.abc import Sequence +from typing import Literal, cast + +from typing_extensions import TypedDict + +from langchain_core.load.serializable import Serializable +from langchain_core.messages import ( + AnyMessage, + BaseMessage, + HumanMessage, + get_buffer_string, +) + + +class PromptValue(Serializable, ABC): + """Base abstract class for inputs to any language model. + + PromptValues can be converted to both LLM (pure text-generation) inputs and + ChatModel inputs. + """ + + @classmethod + def is_lc_serializable(cls) -> bool: + """Return whether this class is serializable. Defaults to True.""" + return True + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + This is used to determine the namespace of the object when serializing. + Defaults to ["langchain", "schema", "prompt"]. 
+ """ + return ["langchain", "schema", "prompt"] + + @abstractmethod + def to_string(self) -> str: + """Return prompt value as string.""" + + @abstractmethod + def to_messages(self) -> list[BaseMessage]: + """Return prompt as a list of Messages.""" + + +class StringPromptValue(PromptValue): + """String prompt value.""" + + text: str + """Prompt text.""" + type: Literal["StringPromptValue"] = "StringPromptValue" + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + This is used to determine the namespace of the object when serializing. + Defaults to ["langchain", "prompts", "base"]. + """ + return ["langchain", "prompts", "base"] + + def to_string(self) -> str: + """Return prompt as string.""" + return self.text + + def to_messages(self) -> list[BaseMessage]: + """Return prompt as messages.""" + return [HumanMessage(content=self.text)] + + +class ChatPromptValue(PromptValue): + """Chat prompt value. + + A type of a prompt value that is built from messages. + """ + + messages: Sequence[BaseMessage] + """List of messages.""" + + def to_string(self) -> str: + """Return prompt as string.""" + return get_buffer_string(self.messages) + + def to_messages(self) -> list[BaseMessage]: + """Return prompt as a list of messages.""" + return list(self.messages) + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + This is used to determine the namespace of the object when serializing. + Defaults to ["langchain", "prompts", "chat"]. + """ + return ["langchain", "prompts", "chat"] + + +class ImageURL(TypedDict, total=False): + """Image URL.""" + + detail: Literal["auto", "low", "high"] + """Specifies the detail level of the image. Defaults to "auto". + Can be "auto", "low", or "high".""" + + url: str + """Either a URL of the image or the base64 encoded image data.""" + + +class ImagePromptValue(PromptValue): + """Image prompt value.""" + + image_url: ImageURL + """Image URL.""" + type: Literal["ImagePromptValue"] = "ImagePromptValue" + + def to_string(self) -> str: + """Return prompt (image URL) as string.""" + return self.image_url["url"] + + def to_messages(self) -> list[BaseMessage]: + """Return prompt (image URL) as messages.""" + return [HumanMessage(content=[cast("dict", self.image_url)])] + + +class ChatPromptValueConcrete(ChatPromptValue): + """Chat prompt value which explicitly lists out the message types it accepts. + + For use in external schemas. + """ + + messages: Sequence[AnyMessage] + """Sequence of messages.""" + + type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete" diff --git a/venv/Lib/site-packages/langchain_core/prompts/__init__.py b/venv/Lib/site-packages/langchain_core/prompts/__init__.py new file mode 100644 index 00000000..61f1ceb7 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/__init__.py @@ -0,0 +1,125 @@ +"""**Prompt** is the input to the model. + +Prompt is often constructed +from multiple components and prompt values. Prompt classes and functions make constructing + and working with prompts easy. + +**Class hierarchy:** + +.. 
code-block:: + + BasePromptTemplate --> PipelinePromptTemplate + StringPromptTemplate --> PromptTemplate + FewShotPromptTemplate + FewShotPromptWithTemplates + BaseChatPromptTemplate --> AutoGPTPrompt + ChatPromptTemplate --> AgentScratchPadChatPromptTemplate + + + + BaseMessagePromptTemplate --> MessagesPlaceholder + BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate + HumanMessagePromptTemplate + AIMessagePromptTemplate + SystemMessagePromptTemplate + +""" # noqa: E501 + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.prompts.base import ( + BasePromptTemplate, + aformat_document, + format_document, + ) + from langchain_core.prompts.chat import ( + AIMessagePromptTemplate, + BaseChatPromptTemplate, + ChatMessagePromptTemplate, + ChatPromptTemplate, + HumanMessagePromptTemplate, + MessagesPlaceholder, + SystemMessagePromptTemplate, + ) + from langchain_core.prompts.dict import DictPromptTemplate + from langchain_core.prompts.few_shot import ( + FewShotChatMessagePromptTemplate, + FewShotPromptTemplate, + ) + from langchain_core.prompts.few_shot_with_templates import ( + FewShotPromptWithTemplates, + ) + from langchain_core.prompts.loading import load_prompt + from langchain_core.prompts.pipeline import PipelinePromptTemplate + from langchain_core.prompts.prompt import PromptTemplate + from langchain_core.prompts.string import ( + StringPromptTemplate, + check_valid_template, + get_template_variables, + jinja2_formatter, + validate_jinja2, + ) + +__all__ = ( + "AIMessagePromptTemplate", + "BaseChatPromptTemplate", + "BasePromptTemplate", + "ChatMessagePromptTemplate", + "ChatPromptTemplate", + "DictPromptTemplate", + "FewShotPromptTemplate", + "FewShotPromptWithTemplates", + "FewShotChatMessagePromptTemplate", + "HumanMessagePromptTemplate", + "MessagesPlaceholder", + "PipelinePromptTemplate", + "PromptTemplate", + "StringPromptTemplate", + "SystemMessagePromptTemplate", + "load_prompt", + "format_document", + "aformat_document", + "check_valid_template", + "get_template_variables", + "jinja2_formatter", + "validate_jinja2", +) + +_dynamic_imports = { + "BasePromptTemplate": "base", + "format_document": "base", + "aformat_document": "base", + "AIMessagePromptTemplate": "chat", + "BaseChatPromptTemplate": "chat", + "ChatMessagePromptTemplate": "chat", + "ChatPromptTemplate": "chat", + "DictPromptTemplate": "dict", + "HumanMessagePromptTemplate": "chat", + "MessagesPlaceholder": "chat", + "SystemMessagePromptTemplate": "chat", + "FewShotChatMessagePromptTemplate": "few_shot", + "FewShotPromptTemplate": "few_shot", + "FewShotPromptWithTemplates": "few_shot_with_templates", + "load_prompt": "loading", + "PipelinePromptTemplate": "pipeline", + "PromptTemplate": "prompt", + "StringPromptTemplate": "string", + "check_valid_template": "string", + "get_template_variables": "string", + "jinja2_formatter": "string", + "validate_jinja2": "string", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..2b00fb4f Binary files /dev/null and 
b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..65fb8919 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/chat.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/chat.cpython-312.pyc new file mode 100644 index 00000000..b5c3d4a9 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/chat.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/dict.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/dict.cpython-312.pyc new file mode 100644 index 00000000..b3601277 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/dict.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/few_shot.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/few_shot.cpython-312.pyc new file mode 100644 index 00000000..a9919d6d Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/few_shot.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/few_shot_with_templates.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/few_shot_with_templates.cpython-312.pyc new file mode 100644 index 00000000..0b9077a2 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/few_shot_with_templates.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/image.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/image.cpython-312.pyc new file mode 100644 index 00000000..1abbcb02 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/image.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/loading.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/loading.cpython-312.pyc new file mode 100644 index 00000000..ee3c0122 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/loading.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/message.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/message.cpython-312.pyc new file mode 100644 index 00000000..bd92f1f1 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/message.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/pipeline.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/pipeline.cpython-312.pyc new file mode 100644 index 00000000..12d1d240 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/pipeline.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/prompt.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/prompt.cpython-312.pyc new file mode 100644 index 00000000..410d853c Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/prompt.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/string.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/string.cpython-312.pyc new file mode 100644 index 00000000..779b277a Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/string.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/__pycache__/structured.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/structured.cpython-312.pyc new file mode 100644 index 00000000..6f514a8e Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/prompts/__pycache__/structured.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/prompts/base.py b/venv/Lib/site-packages/langchain_core/prompts/base.py new file mode 100644 index 00000000..d8809717 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/base.py @@ -0,0 +1,472 @@ +"""Base class for prompt templates.""" + +from __future__ import annotations + +import contextlib +import json +import typing +from abc import ABC, abstractmethod +from collections.abc import Mapping +from functools import cached_property +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + Optional, + TypeVar, + Union, +) + +import yaml +from pydantic import BaseModel, ConfigDict, Field, model_validator +from typing_extensions import Self, override + +from langchain_core.exceptions import ErrorCode, create_message +from langchain_core.load import dumpd +from langchain_core.output_parsers.base import BaseOutputParser +from langchain_core.prompt_values import ( + ChatPromptValueConcrete, + PromptValue, + StringPromptValue, +) +from langchain_core.runnables import RunnableConfig, RunnableSerializable +from langchain_core.runnables.config import ensure_config +from langchain_core.utils.pydantic import create_model_v2 + +if TYPE_CHECKING: + from langchain_core.documents import Document + + +FormatOutputType = TypeVar("FormatOutputType") + + +class BasePromptTemplate( + RunnableSerializable[dict, PromptValue], Generic[FormatOutputType], ABC +): + """Base class for all prompt templates, returning a prompt.""" + + input_variables: list[str] + """A list of the names of the variables whose values are required as inputs to the + prompt.""" + optional_variables: list[str] = Field(default=[]) + """optional_variables: A list of the names of the variables for placeholder + or MessagePlaceholder that are optional. These variables are auto inferred + from the prompt and user need not provide them.""" + input_types: typing.Dict[str, Any] = Field(default_factory=dict, exclude=True) # noqa: UP006 + """A dictionary of the types of the variables the prompt template expects. + If not provided, all variables are assumed to be strings.""" + output_parser: Optional[BaseOutputParser] = None + """How to parse the output of calling an LLM on this formatted prompt.""" + partial_variables: Mapping[str, Any] = Field(default_factory=dict) + """A dictionary of the partial variables the prompt template carries. 
+ + Partial variables populate the template so that you don't need to + pass them in every time you call the prompt.""" + metadata: Optional[typing.Dict[str, Any]] = None # noqa: UP006 + """Metadata to be used for tracing.""" + tags: Optional[list[str]] = None + """Tags to be used for tracing.""" + + @model_validator(mode="after") + def validate_variable_names(self) -> Self: + """Validate variable names do not include restricted names.""" + if "stop" in self.input_variables: + msg = ( + "Cannot have an input variable named 'stop', as it is used internally," + " please rename." + ) + raise ValueError( + create_message(message=msg, error_code=ErrorCode.INVALID_PROMPT_INPUT) + ) + if "stop" in self.partial_variables: + msg = ( + "Cannot have an partial variable named 'stop', as it is used " + "internally, please rename." + ) + raise ValueError( + create_message(message=msg, error_code=ErrorCode.INVALID_PROMPT_INPUT) + ) + + overall = set(self.input_variables).intersection(self.partial_variables) + if overall: + msg = f"Found overlapping input and partial variables: {overall}" + raise ValueError( + create_message(message=msg, error_code=ErrorCode.INVALID_PROMPT_INPUT) + ) + return self + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Returns ["langchain", "schema", "prompt_template"]. + """ + return ["langchain", "schema", "prompt_template"] + + @classmethod + def is_lc_serializable(cls) -> bool: + """Return whether this class is serializable. + + Returns True. + """ + return True + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @cached_property + def _serialized(self) -> dict[str, Any]: + return dumpd(self) + + @property + @override + def OutputType(self) -> Any: + """Return the output type of the prompt.""" + return Union[StringPromptValue, ChatPromptValueConcrete] + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + """Get the input schema for the prompt. + + Args: + config: RunnableConfig, configuration for the prompt. + + Returns: + Type[BaseModel]: The input schema for the prompt. + """ + # This is correct, but pydantic typings/mypy don't think so. + required_input_variables = { + k: (self.input_types.get(k, str), ...) for k in self.input_variables + } + optional_input_variables = { + k: (self.input_types.get(k, str), None) for k in self.optional_variables + } + return create_model_v2( + "PromptInput", + field_definitions={**required_input_variables, **optional_input_variables}, + ) + + def _validate_input(self, inner_input: Any) -> dict: + if not isinstance(inner_input, dict): + if len(self.input_variables) == 1: + var_name = self.input_variables[0] + inner_input = {var_name: inner_input} + + else: + msg = ( + f"Expected mapping type as input to {self.__class__.__name__}. " + f"Received {type(inner_input)}." + ) + raise TypeError( + create_message( + message=msg, error_code=ErrorCode.INVALID_PROMPT_INPUT + ) + ) + missing = set(self.input_variables).difference(inner_input) + if missing: + msg = ( + f"Input to {self.__class__.__name__} is missing variables {missing}. " + f" Expected: {self.input_variables}" + f" Received: {list(inner_input.keys())}" + ) + example_key = missing.pop() + msg += ( + f"\nNote: if you intended {{{example_key}}} to be part of the string" + " and not a variable, please escape it with double curly braces like: " + f"'{{{{{example_key}}}}}'." 
+ ) + raise KeyError( + create_message(message=msg, error_code=ErrorCode.INVALID_PROMPT_INPUT) + ) + return inner_input + + def _format_prompt_with_error_handling(self, inner_input: dict) -> PromptValue: + _inner_input = self._validate_input(inner_input) + return self.format_prompt(**_inner_input) + + async def _aformat_prompt_with_error_handling( + self, inner_input: dict + ) -> PromptValue: + _inner_input = self._validate_input(inner_input) + return await self.aformat_prompt(**_inner_input) + + @override + def invoke( + self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> PromptValue: + """Invoke the prompt. + + Args: + input: Dict, input to the prompt. + config: RunnableConfig, configuration for the prompt. + + Returns: + PromptValue: The output of the prompt. + """ + config = ensure_config(config) + if self.metadata: + config["metadata"] = {**config["metadata"], **self.metadata} + if self.tags: + config["tags"] = config["tags"] + self.tags + return self._call_with_config( + self._format_prompt_with_error_handling, + input, + config, + run_type="prompt", + serialized=self._serialized, + ) + + @override + async def ainvoke( + self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> PromptValue: + """Async invoke the prompt. + + Args: + input: Dict, input to the prompt. + config: RunnableConfig, configuration for the prompt. + + Returns: + PromptValue: The output of the prompt. + """ + config = ensure_config(config) + if self.metadata: + config["metadata"].update(self.metadata) + if self.tags: + config["tags"].extend(self.tags) + return await self._acall_with_config( + self._aformat_prompt_with_error_handling, + input, + config, + run_type="prompt", + serialized=self._serialized, + ) + + @abstractmethod + def format_prompt(self, **kwargs: Any) -> PromptValue: + """Create Prompt Value. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + PromptValue: The output of the prompt. + """ + + async def aformat_prompt(self, **kwargs: Any) -> PromptValue: + """Async create Prompt Value. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + PromptValue: The output of the prompt. + """ + return self.format_prompt(**kwargs) + + def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate: + """Return a partial of the prompt template. + + Args: + kwargs: Union[str, Callable[[], str]], partial variables to set. + + Returns: + BasePromptTemplate: A partial of the prompt template. + """ + prompt_dict = self.__dict__.copy() + prompt_dict["input_variables"] = list( + set(self.input_variables).difference(kwargs) + ) + prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs} + return type(self)(**prompt_dict) + + def _merge_partial_and_user_variables(self, **kwargs: Any) -> dict[str, Any]: + # Get partial params: + partial_kwargs = { + k: v if not callable(v) else v() for k, v in self.partial_variables.items() + } + return {**partial_kwargs, **kwargs} + + @abstractmethod + def format(self, **kwargs: Any) -> FormatOutputType: + """Format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + + Example: + + .. code-block:: python + + prompt.format(variable1="foo") + """ + + async def aformat(self, **kwargs: Any) -> FormatOutputType: + """Async format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. 
+ + Example: + + .. code-block:: python + + await prompt.aformat(variable1="foo") + """ + return self.format(**kwargs) + + @property + def _prompt_type(self) -> str: + """Return the prompt type key.""" + raise NotImplementedError + + def dict(self, **kwargs: Any) -> dict: + """Return dictionary representation of prompt. + + Args: + kwargs: Any additional arguments to pass to the dictionary. + + Returns: + Dict: Dictionary representation of the prompt. + + Raises: + NotImplementedError: If the prompt type is not implemented. + """ + prompt_dict = super().model_dump(**kwargs) + with contextlib.suppress(NotImplementedError): + prompt_dict["_type"] = self._prompt_type + return prompt_dict + + def save(self, file_path: Union[Path, str]) -> None: + """Save the prompt. + + Args: + file_path: Path to directory to save prompt to. + + Raises: + ValueError: If the prompt has partial variables. + ValueError: If the file path is not json or yaml. + NotImplementedError: If the prompt type is not implemented. + + Example: + .. code-block:: python + + prompt.save(file_path="path/prompt.yaml") + """ + if self.partial_variables: + msg = "Cannot save prompt with partial variables." + raise ValueError(msg) + + # Fetch dictionary to save + prompt_dict = self.dict() + if "_type" not in prompt_dict: + msg = f"Prompt {self} does not support saving." + raise NotImplementedError(msg) + + # Convert file to Path object. + save_path = Path(file_path) + + directory_path = save_path.parent + directory_path.mkdir(parents=True, exist_ok=True) + + if save_path.suffix == ".json": + with save_path.open("w") as f: + json.dump(prompt_dict, f, indent=4) + elif save_path.suffix.endswith((".yaml", ".yml")): + with save_path.open("w") as f: + yaml.dump(prompt_dict, f, default_flow_style=False) + else: + msg = f"{save_path} must be json or yaml" + raise ValueError(msg) + + +def _get_document_info(doc: Document, prompt: BasePromptTemplate[str]) -> dict: + base_info = {"page_content": doc.page_content, **doc.metadata} + missing_metadata = set(prompt.input_variables).difference(base_info) + if len(missing_metadata) > 0: + required_metadata = [ + iv for iv in prompt.input_variables if iv != "page_content" + ] + msg = ( + f"Document prompt requires documents to have metadata variables: " + f"{required_metadata}. Received document with missing metadata: " + f"{list(missing_metadata)}." + ) + raise ValueError( + create_message(message=msg, error_code=ErrorCode.INVALID_PROMPT_INPUT) + ) + return {k: base_info[k] for k in prompt.input_variables} + + +def format_document(doc: Document, prompt: BasePromptTemplate[str]) -> str: + """Format a document into a string based on a prompt template. + + First, this pulls information from the document from two sources: + + 1. page_content: + This takes the information from the `document.page_content` + and assigns it to a variable named `page_content`. + 2. metadata: + This takes information from `document.metadata` and assigns + it to variables of the same name. + + Those variables are then passed into the `prompt` to produce a formatted string. + + Args: + doc: Document, the page_content and metadata will be used to create + the final string. + prompt: BasePromptTemplate, will be used to format the page_content + and metadata into the final string. + + Returns: + string of the document formatted. + + Example: + .. 
code-block:: python + + from langchain_core.documents import Document + from langchain_core.prompts import PromptTemplate + + doc = Document(page_content="This is a joke", metadata={"page": "1"}) + prompt = PromptTemplate.from_template("Page {page}: {page_content}") + format_document(doc, prompt) + >>> "Page 1: This is a joke" + """ + return prompt.format(**_get_document_info(doc, prompt)) + + +async def aformat_document(doc: Document, prompt: BasePromptTemplate[str]) -> str: + """Async format a document into a string based on a prompt template. + + First, this pulls information from the document from two sources: + + 1. page_content: + This takes the information from the `document.page_content` + and assigns it to a variable named `page_content`. + 2. metadata: + This takes information from `document.metadata` and assigns + it to variables of the same name. + + Those variables are then passed into the `prompt` to produce a formatted string. + + Args: + doc: Document, the page_content and metadata will be used to create + the final string. + prompt: BasePromptTemplate, will be used to format the page_content + and metadata into the final string. + + Returns: + string of the document formatted. + """ + return await prompt.aformat(**_get_document_info(doc, prompt)) diff --git a/venv/Lib/site-packages/langchain_core/prompts/chat.py b/venv/Lib/site-packages/langchain_core/prompts/chat.py new file mode 100644 index 00000000..b8095433 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/chat.py @@ -0,0 +1,1459 @@ +"""Chat prompt template.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Annotated, + Any, + Optional, + TypedDict, + TypeVar, + Union, + cast, + overload, +) + +from pydantic import ( + Field, + PositiveInt, + SkipValidation, + model_validator, +) +from typing_extensions import Self, override + +from langchain_core._api import deprecated +from langchain_core.messages import ( + AIMessage, + AnyMessage, + BaseMessage, + ChatMessage, + HumanMessage, + SystemMessage, + convert_to_messages, +) +from langchain_core.messages.base import get_msg_title_repr +from langchain_core.prompt_values import ChatPromptValue, ImageURL, PromptValue +from langchain_core.prompts.base import BasePromptTemplate +from langchain_core.prompts.dict import DictPromptTemplate +from langchain_core.prompts.image import ImagePromptTemplate +from langchain_core.prompts.message import ( + BaseMessagePromptTemplate, +) +from langchain_core.prompts.prompt import PromptTemplate +from langchain_core.prompts.string import ( + PromptTemplateFormat, + StringPromptTemplate, + get_template_variables, +) +from langchain_core.utils import get_colored_text +from langchain_core.utils.interactive_env import is_interactive_env + +if TYPE_CHECKING: + from collections.abc import Sequence + + +class MessagesPlaceholder(BaseMessagePromptTemplate): + """Prompt template that assumes variable is already list of messages. + + A placeholder which can be used to pass in a list of messages. + + Direct usage: + + .. 
code-block:: python + + from langchain_core.prompts import MessagesPlaceholder + + prompt = MessagesPlaceholder("history") + prompt.format_messages() # raises KeyError + + prompt = MessagesPlaceholder("history", optional=True) + prompt.format_messages() # returns empty list [] + + prompt.format_messages( + history=[ + ("system", "You are an AI assistant."), + ("human", "Hello!"), + ] + ) + # -> [ + # SystemMessage(content="You are an AI assistant."), + # HumanMessage(content="Hello!"), + # ] + + Building a prompt with chat history: + + .. code-block:: python + + from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant."), + MessagesPlaceholder("history"), + ("human", "{question}") + ] + ) + prompt.invoke( + { + "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")], + "question": "now multiply that by 4" + } + ) + # -> ChatPromptValue(messages=[ + # SystemMessage(content="You are a helpful assistant."), + # HumanMessage(content="what's 5 + 2"), + # AIMessage(content="5 + 2 is 7"), + # HumanMessage(content="now multiply that by 4"), + # ]) + + Limiting the number of messages: + + .. code-block:: python + + from langchain_core.prompts import MessagesPlaceholder + + prompt = MessagesPlaceholder("history", n_messages=1) + + prompt.format_messages( + history=[ + ("system", "You are an AI assistant."), + ("human", "Hello!"), + ] + ) + # -> [ + # HumanMessage(content="Hello!"), + # ] + """ + + variable_name: str + """Name of variable to use as messages.""" + + optional: bool = False + """If True format_messages can be called with no arguments and will return an empty + list. If False then a named argument with name `variable_name` must be passed + in, even if the value is an empty list.""" + + n_messages: Optional[PositiveInt] = None + """Maximum number of messages to include. If None, then will include all. + Defaults to None.""" + + def __init__( + self, variable_name: str, *, optional: bool = False, **kwargs: Any + ) -> None: + """Create a messages placeholder. + + Args: + variable_name: Name of variable to use as messages. + optional: If True format_messages can be called with no arguments and will + return an empty list. If False then a named argument with name + `variable_name` must be passed in, even if the value is an empty list. + Defaults to False.] + """ + # mypy can't detect the init which is defined in the parent class + # b/c these are BaseModel classes. + super().__init__( # type: ignore[call-arg] + variable_name=variable_name, optional=optional, **kwargs + ) + + def format_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Format messages from kwargs. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + List of BaseMessage. + + Raises: + ValueError: If variable is not a list of messages. + """ + value = ( + kwargs.get(self.variable_name, []) + if self.optional + else kwargs[self.variable_name] + ) + if not isinstance(value, list): + msg = ( + f"variable {self.variable_name} should be a list of base messages, " + f"got {value} of type {type(value)}" + ) + raise ValueError(msg) # noqa: TRY004 + value = convert_to_messages(value) + if self.n_messages: + value = value[-self.n_messages :] + return value + + @property + def input_variables(self) -> list[str]: + """Input variables for this prompt template. + + Returns: + List of input variable names. 
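+
+        Example:
+            A minimal illustration of how the ``optional`` flag affects this:
+
+            .. code-block:: python
+
+                MessagesPlaceholder("history").input_variables
+                # -> ["history"]
+                MessagesPlaceholder("history", optional=True).input_variables
+                # -> []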
+ """ + return [self.variable_name] if not self.optional else [] + + @override + def pretty_repr(self, html: bool = False) -> str: + """Human-readable representation. + + Args: + html: Whether to format as HTML. Defaults to False. + + Returns: + Human-readable representation. + """ + var = "{" + self.variable_name + "}" + if html: + title = get_msg_title_repr("Messages Placeholder", bold=True) + var = get_colored_text(var, "yellow") + else: + title = get_msg_title_repr("Messages Placeholder") + return f"{title}\n\n{var}" + + +MessagePromptTemplateT = TypeVar( + "MessagePromptTemplateT", bound="BaseStringMessagePromptTemplate" +) +"""Type variable for message prompt templates.""" + + +class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC): + """Base class for message prompt templates that use a string prompt template.""" + + prompt: StringPromptTemplate + """String prompt template.""" + additional_kwargs: dict = Field(default_factory=dict) + """Additional keyword arguments to pass to the prompt template.""" + + @classmethod + def from_template( + cls, + template: str, + template_format: PromptTemplateFormat = "f-string", + partial_variables: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Self: + """Create a class from a string template. + + Args: + template: a template. + template_format: format of the template. Defaults to "f-string". + partial_variables: A dictionary of variables that can be used to partially + fill in the template. For example, if the template is + `"{variable1} {variable2}"`, and `partial_variables` is + `{"variable1": "foo"}`, then the final prompt will be + `"foo {variable2}"`. + Defaults to None. + **kwargs: keyword arguments to pass to the constructor. + + Returns: + A new instance of this class. + """ + prompt = PromptTemplate.from_template( + template, + template_format=template_format, + partial_variables=partial_variables, + ) + return cls(prompt=prompt, **kwargs) + + @classmethod + def from_template_file( + cls, + template_file: Union[str, Path], + input_variables: list[str], + **kwargs: Any, + ) -> Self: + """Create a class from a template file. + + Args: + template_file: path to a template file. String or Path. + input_variables: list of input variables. + **kwargs: keyword arguments to pass to the constructor. + + Returns: + A new instance of this class. + """ + prompt = PromptTemplate.from_file(template_file, input_variables) + return cls(prompt=prompt, **kwargs) + + @abstractmethod + def format(self, **kwargs: Any) -> BaseMessage: + """Format the prompt template. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + Formatted message. + """ + + async def aformat(self, **kwargs: Any) -> BaseMessage: + """Async format the prompt template. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + Formatted message. + """ + return self.format(**kwargs) + + def format_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Format messages from kwargs. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + List of BaseMessages. + """ + return [self.format(**kwargs)] + + async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Async format messages from kwargs. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + List of BaseMessages. + """ + return [await self.aformat(**kwargs)] + + @property + def input_variables(self) -> list[str]: + """Input variables for this prompt template. + + Returns: + List of input variable names. 
+ """ + return self.prompt.input_variables + + @override + def pretty_repr(self, html: bool = False) -> str: + """Human-readable representation. + + Args: + html: Whether to format as HTML. Defaults to False. + + Returns: + Human-readable representation. + """ + # TODO: Handle partials + title = self.__class__.__name__.replace("MessagePromptTemplate", " Message") + title = get_msg_title_repr(title, bold=html) + return f"{title}\n\n{self.prompt.pretty_repr(html=html)}" + + +class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate): + """Chat message prompt template.""" + + role: str + """Role of the message.""" + + def format(self, **kwargs: Any) -> BaseMessage: + """Format the prompt template. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + Formatted message. + """ + text = self.prompt.format(**kwargs) + return ChatMessage( + content=text, role=self.role, additional_kwargs=self.additional_kwargs + ) + + async def aformat(self, **kwargs: Any) -> BaseMessage: + """Async format the prompt template. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + Formatted message. + """ + text = await self.prompt.aformat(**kwargs) + return ChatMessage( + content=text, role=self.role, additional_kwargs=self.additional_kwargs + ) + + +class _TextTemplateParam(TypedDict, total=False): + text: Union[str, dict] + + +class _ImageTemplateParam(TypedDict, total=False): + image_url: Union[str, dict] + + +class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate): + """Human message prompt template. This is a message sent from the user.""" + + prompt: Union[ + StringPromptTemplate, + list[Union[StringPromptTemplate, ImagePromptTemplate, DictPromptTemplate]], + ] + """Prompt template.""" + additional_kwargs: dict = Field(default_factory=dict) + """Additional keyword arguments to pass to the prompt template.""" + + _msg_class: type[BaseMessage] + + @classmethod + def from_template( + cls: type[Self], + template: Union[ + str, + list[Union[str, _TextTemplateParam, _ImageTemplateParam, dict[str, Any]]], + ], + template_format: PromptTemplateFormat = "f-string", + *, + partial_variables: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Self: + """Create a class from a string template. + + Args: + template: a template. + template_format: format of the template. + Options are: 'f-string', 'mustache', 'jinja2'. Defaults to "f-string". + partial_variables: A dictionary of variables that can be used too partially. + Defaults to None. + **kwargs: keyword arguments to pass to the constructor. + + Returns: + A new instance of this class. + + Raises: + ValueError: If the template is not a string or list of strings. + """ + if isinstance(template, str): + prompt: Union[StringPromptTemplate, list] = PromptTemplate.from_template( + template, + template_format=template_format, + partial_variables=partial_variables, + ) + return cls(prompt=prompt, **kwargs) + if isinstance(template, list): + if (partial_variables is not None) and len(partial_variables) > 0: + msg = "Partial variables are not supported for list of templates." 
+ raise ValueError(msg) + prompt = [] + for tmpl in template: + if ( + isinstance(tmpl, str) + or isinstance(tmpl, dict) + and "text" in tmpl + and set(tmpl.keys()) <= {"type", "text"} + ): + if isinstance(tmpl, str): + text: str = tmpl + else: + text = cast("_TextTemplateParam", tmpl)["text"] # type: ignore[assignment] + prompt.append( + PromptTemplate.from_template( + text, template_format=template_format + ) + ) + elif ( + isinstance(tmpl, dict) + and "image_url" in tmpl + and set(tmpl.keys()) + <= { + "type", + "image_url", + } + ): + img_template = cast("_ImageTemplateParam", tmpl)["image_url"] + input_variables = [] + if isinstance(img_template, str): + vars = get_template_variables(img_template, template_format) + if vars: + if len(vars) > 1: + msg = ( + "Only one format variable allowed per image" + f" template.\nGot: {vars}" + f"\nFrom: {tmpl}" + ) + raise ValueError(msg) + input_variables = [vars[0]] + img_template = {"url": img_template} + img_template_obj = ImagePromptTemplate( + input_variables=input_variables, + template=img_template, + template_format=template_format, + ) + elif isinstance(img_template, dict): + img_template = dict(img_template) + for key in ["url", "path", "detail"]: + if key in img_template: + input_variables.extend( + get_template_variables( + img_template[key], template_format + ) + ) + img_template_obj = ImagePromptTemplate( + input_variables=input_variables, + template=img_template, + template_format=template_format, + ) + else: + msg = f"Invalid image template: {tmpl}" + raise ValueError(msg) + prompt.append(img_template_obj) + elif isinstance(tmpl, dict): + if template_format == "jinja2": + msg = ( + "jinja2 is unsafe and is not supported for templates " + "expressed as dicts. Please use 'f-string' or 'mustache' " + "format." + ) + raise ValueError(msg) + data_template_obj = DictPromptTemplate( + template=cast("dict[str, Any]", tmpl), + template_format=template_format, + ) + prompt.append(data_template_obj) + else: + msg = f"Invalid template: {tmpl}" + raise ValueError(msg) + return cls(prompt=prompt, **kwargs) + msg = f"Invalid template: {template}" + raise ValueError(msg) # noqa: TRY004 + + @classmethod + def from_template_file( + cls: type[Self], + template_file: Union[str, Path], + input_variables: list[str], + **kwargs: Any, + ) -> Self: + """Create a class from a template file. + + Args: + template_file: path to a template file. String or Path. + input_variables: list of input variables. + **kwargs: keyword arguments to pass to the constructor. + + Returns: + A new instance of this class. + """ + template = Path(template_file).read_text() + return cls.from_template(template, input_variables=input_variables, **kwargs) + + def format_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Format messages from kwargs. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + List of BaseMessages. + """ + return [self.format(**kwargs)] + + async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Async format messages from kwargs. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + List of BaseMessages. + """ + return [await self.aformat(**kwargs)] + + @property + def input_variables(self) -> list[str]: + """Input variables for this prompt template. + + Returns: + List of input variable names. 
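+
+        For multi-part templates this is the concatenation of every
+        sub-prompt's input variables (duplicates are not removed).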
+ """ + prompts = self.prompt if isinstance(self.prompt, list) else [self.prompt] + return [iv for prompt in prompts for iv in prompt.input_variables] + + def format(self, **kwargs: Any) -> BaseMessage: + """Format the prompt template. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + Formatted message. + """ + if isinstance(self.prompt, StringPromptTemplate): + text = self.prompt.format(**kwargs) + return self._msg_class( + content=text, additional_kwargs=self.additional_kwargs + ) + content: list = [] + for prompt in self.prompt: + inputs = {var: kwargs[var] for var in prompt.input_variables} + if isinstance(prompt, StringPromptTemplate): + formatted: Union[str, ImageURL, dict[str, Any]] = prompt.format( + **inputs + ) + content.append({"type": "text", "text": formatted}) + elif isinstance(prompt, ImagePromptTemplate): + formatted = prompt.format(**inputs) + content.append({"type": "image_url", "image_url": formatted}) + elif isinstance(prompt, DictPromptTemplate): + formatted = prompt.format(**inputs) + content.append(formatted) + return self._msg_class( + content=content, additional_kwargs=self.additional_kwargs + ) + + async def aformat(self, **kwargs: Any) -> BaseMessage: + """Async format the prompt template. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + Formatted message. + """ + if isinstance(self.prompt, StringPromptTemplate): + text = await self.prompt.aformat(**kwargs) + return self._msg_class( + content=text, additional_kwargs=self.additional_kwargs + ) + content: list = [] + for prompt in self.prompt: + inputs = {var: kwargs[var] for var in prompt.input_variables} + if isinstance(prompt, StringPromptTemplate): + formatted: Union[str, ImageURL, dict[str, Any]] = await prompt.aformat( + **inputs + ) + content.append({"type": "text", "text": formatted}) + elif isinstance(prompt, ImagePromptTemplate): + formatted = await prompt.aformat(**inputs) + content.append({"type": "image_url", "image_url": formatted}) + elif isinstance(prompt, DictPromptTemplate): + formatted = prompt.format(**inputs) + content.append(formatted) + return self._msg_class( + content=content, additional_kwargs=self.additional_kwargs + ) + + @override + def pretty_repr(self, html: bool = False) -> str: + """Human-readable representation. + + Args: + html: Whether to format as HTML. Defaults to False. + + Returns: + Human-readable representation. + """ + # TODO: Handle partials + title = self.__class__.__name__.replace("MessagePromptTemplate", " Message") + title = get_msg_title_repr(title, bold=html) + prompts = self.prompt if isinstance(self.prompt, list) else [self.prompt] + prompt_reprs = "\n\n".join(prompt.pretty_repr(html=html) for prompt in prompts) + return f"{title}\n\n{prompt_reprs}" + + +class HumanMessagePromptTemplate(_StringImageMessagePromptTemplate): + """Human message prompt template. This is a message sent from the user.""" + + _msg_class: type[BaseMessage] = HumanMessage + + +class AIMessagePromptTemplate(_StringImageMessagePromptTemplate): + """AI message prompt template. This is a message sent from the AI.""" + + _msg_class: type[BaseMessage] = AIMessage + + +class SystemMessagePromptTemplate(_StringImageMessagePromptTemplate): + """System message prompt template. + + This is a message that is not sent to the user. 
+ """ + + _msg_class: type[BaseMessage] = SystemMessage + + +class BaseChatPromptTemplate(BasePromptTemplate, ABC): + """Base class for chat prompt templates.""" + + @property + @override + def lc_attributes(self) -> dict: + return {"input_variables": self.input_variables} + + def format(self, **kwargs: Any) -> str: + """Format the chat template into a string. + + Args: + **kwargs: keyword arguments to use for filling in template variables + in all the template messages in this chat template. + + Returns: + formatted string. + """ + return self.format_prompt(**kwargs).to_string() + + async def aformat(self, **kwargs: Any) -> str: + """Async format the chat template into a string. + + Args: + **kwargs: keyword arguments to use for filling in template variables + in all the template messages in this chat template. + + Returns: + formatted string. + """ + return (await self.aformat_prompt(**kwargs)).to_string() + + def format_prompt(self, **kwargs: Any) -> PromptValue: + """Format prompt. Should return a PromptValue. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + PromptValue. + """ + messages = self.format_messages(**kwargs) + return ChatPromptValue(messages=messages) + + async def aformat_prompt(self, **kwargs: Any) -> PromptValue: + """Async format prompt. Should return a PromptValue. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + PromptValue. + """ + messages = await self.aformat_messages(**kwargs) + return ChatPromptValue(messages=messages) + + @abstractmethod + def format_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Format kwargs into a list of messages.""" + + async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Async format kwargs into a list of messages.""" + return self.format_messages(**kwargs) + + def pretty_repr( + self, + html: bool = False, # noqa: FBT001,FBT002 + ) -> str: + """Human-readable representation. + + Args: + html: Whether to format as HTML. Defaults to False. + + Returns: + Human-readable representation. + """ + raise NotImplementedError + + def pretty_print(self) -> None: + """Print a human-readable representation.""" + print(self.pretty_repr(html=is_interactive_env())) # noqa: T201 + + +MessageLike = Union[BaseMessagePromptTemplate, BaseMessage, BaseChatPromptTemplate] + +MessageLikeRepresentation = Union[ + MessageLike, + tuple[ + Union[str, type], + Union[str, list[dict], list[object]], + ], + str, + dict[str, Any], +] + + +class ChatPromptTemplate(BaseChatPromptTemplate): + """Prompt template for chat models. + + Use to create flexible templated prompts for chat models. + + Examples: + + .. versionchanged:: 0.2.24 + + You can pass any Message-like formats supported by + ``ChatPromptTemplate.from_messages()`` directly to ``ChatPromptTemplate()`` + init. + + .. code-block:: python + + from langchain_core.prompts import ChatPromptTemplate + + template = ChatPromptTemplate([ + ("system", "You are a helpful AI bot. Your name is {name}."), + ("human", "Hello, how are you doing?"), + ("ai", "I'm doing well, thanks!"), + ("human", "{user_input}"), + ]) + + prompt_value = template.invoke( + { + "name": "Bob", + "user_input": "What is your name?" + } + ) + # Output: + # ChatPromptValue( + # messages=[ + # SystemMessage(content='You are a helpful AI bot. Your name is Bob.'), + # HumanMessage(content='Hello, how are you doing?'), + # AIMessage(content="I'm doing well, thanks!"), + # HumanMessage(content='What is your name?') + # ] + #) + + Messages Placeholder: + + .. 
code-block:: python
+
+            # In addition to Human/AI/Tool/Function messages,
+            # you can initialize the template with a MessagesPlaceholder
+            # either using the class directly or with the shorthand tuple syntax:
+
+            template = ChatPromptTemplate([
+                ("system", "You are a helpful AI bot."),
+                # Means the template will receive an optional list of messages under
+                # the "conversation" key
+                ("placeholder", "{conversation}")
+                # Equivalently:
+                # MessagesPlaceholder(variable_name="conversation", optional=True)
+            ])
+
+            prompt_value = template.invoke(
+                {
+                    "conversation": [
+                        ("human", "Hi!"),
+                        ("ai", "How can I assist you today?"),
+                        ("human", "Can you make me an ice cream sundae?"),
+                        ("ai", "No.")
+                    ]
+                }
+            )
+
+            # Output:
+            # ChatPromptValue(
+            #     messages=[
+            #         SystemMessage(content='You are a helpful AI bot.'),
+            #         HumanMessage(content='Hi!'),
+            #         AIMessage(content='How can I assist you today?'),
+            #         HumanMessage(content='Can you make me an ice cream sundae?'),
+            #         AIMessage(content='No.'),
+            #     ]
+            # )
+
+    Single-variable template:
+
+        If your prompt has only a single input variable (i.e., 1 instance of
+        "{variable_name}"), and you invoke the template with a non-dict object,
+        the prompt template will inject the provided argument into that variable
+        location.
+
+        .. code-block:: python
+
+            from langchain_core.prompts import ChatPromptTemplate
+
+            template = ChatPromptTemplate([
+                ("system", "You are a helpful AI bot. Your name is Carl."),
+                ("human", "{user_input}"),
+            ])
+
+            prompt_value = template.invoke("Hello, there!")
+            # Equivalent to
+            # prompt_value = template.invoke({"user_input": "Hello, there!"})
+
+            # Output:
+            # ChatPromptValue(
+            #     messages=[
+            #         SystemMessage(content='You are a helpful AI bot. Your name is Carl.'),
+            #         HumanMessage(content='Hello, there!'),
+            #     ]
+            # )
+
+    """  # noqa: E501
+
+    messages: Annotated[list[MessageLike], SkipValidation()]
+    """List of messages consisting of either message prompt templates or messages."""
+    validate_template: bool = False
+    """Whether or not to try validating the template."""
+
+    def __init__(
+        self,
+        messages: Sequence[MessageLikeRepresentation],
+        *,
+        template_format: PromptTemplateFormat = "f-string",
+        **kwargs: Any,
+    ) -> None:
+        """Create a chat prompt template from a variety of message formats.
+
+        Args:
+            messages: sequence of message representations.
+                A message can be represented using the following formats:
+                (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
+                (message type, template); e.g., ("human", "{user_input}"),
+                (4) 2-tuple of (message class, template), (5) a string which is
+                shorthand for ("human", template); e.g., "{user_input}".
+            template_format: format of the template. Defaults to "f-string".
+            input_variables: A list of the names of the variables whose values are
+                required as inputs to the prompt.
+            optional_variables: A list of the names of the variables for placeholder
+                or MessagesPlaceholder that are optional. These variables are
+                auto-inferred from the prompt, and the user need not provide them.
+            partial_variables: A dictionary of the partial variables the prompt
+                template carries. Partial variables populate the template so that you
+                don't need to pass them in every time you call the prompt.
+            validate_template: Whether to validate the template.
+            input_types: A dictionary of the types of the variables the prompt template
+                expects. If not provided, all variables are assumed to be strings.
+
+        Returns:
+            A chat prompt template.
+ + Examples: + Instantiation from a list of message templates: + + .. code-block:: python + + template = ChatPromptTemplate([ + ("human", "Hello, how are you?"), + ("ai", "I'm doing well, thanks!"), + ("human", "That's good to hear."), + ]) + + Instantiation from mixed message formats: + + .. code-block:: python + + template = ChatPromptTemplate([ + SystemMessage(content="hello"), + ("human", "Hello, how are you?"), + ]) + + """ + _messages = [ + _convert_to_message_template(message, template_format) + for message in messages + ] + + # Automatically infer input variables from messages + input_vars: set[str] = set() + optional_variables: set[str] = set() + partial_vars: dict[str, Any] = {} + for _message in _messages: + if isinstance(_message, MessagesPlaceholder) and _message.optional: + partial_vars[_message.variable_name] = [] + optional_variables.add(_message.variable_name) + elif isinstance( + _message, (BaseChatPromptTemplate, BaseMessagePromptTemplate) + ): + input_vars.update(_message.input_variables) + + kwargs = { + "input_variables": sorted(input_vars), + "optional_variables": sorted(optional_variables), + "partial_variables": partial_vars, + **kwargs, + } + cast("type[ChatPromptTemplate]", super()).__init__(messages=_messages, **kwargs) + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object.""" + return ["langchain", "prompts", "chat"] + + def __add__(self, other: Any) -> ChatPromptTemplate: + """Combine two prompt templates. + + Args: + other: Another prompt template. + + Returns: + Combined prompt template. + """ + partials = {**self.partial_variables} + + # Need to check that other has partial variables since it may not be + # a ChatPromptTemplate. + if hasattr(other, "partial_variables") and other.partial_variables: + partials.update(other.partial_variables) + + # Allow for easy combining + if isinstance(other, ChatPromptTemplate): + return ChatPromptTemplate(messages=self.messages + other.messages).partial( + **partials + ) + if isinstance( + other, (BaseMessagePromptTemplate, BaseMessage, BaseChatPromptTemplate) + ): + return ChatPromptTemplate(messages=self.messages + [other]).partial( + **partials + ) + if isinstance(other, (list, tuple)): + _other = ChatPromptTemplate.from_messages(other) + return ChatPromptTemplate(messages=self.messages + _other.messages).partial( + **partials + ) + if isinstance(other, str): + prompt = HumanMessagePromptTemplate.from_template(other) + return ChatPromptTemplate(messages=self.messages + [prompt]).partial( + **partials + ) + msg = f"Unsupported operand type for +: {type(other)}" + raise NotImplementedError(msg) + + @model_validator(mode="before") + @classmethod + def validate_input_variables(cls, values: dict) -> Any: + """Validate input variables. + + If input_variables is not set, it will be set to the union of + all input variables in the messages. + + Args: + values: values to validate. + + Returns: + Validated values. + + Raises: + ValueError: If input variables do not match. 
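+
+        Example (illustrative; the check is only enforced when
+        ``validate_template=True``):
+
+            .. code-block:: python
+
+                ChatPromptTemplate(
+                    [("human", "{question}")],
+                    input_variables=["answer"],  # mismatch -> ValueError
+                    validate_template=True,
+                )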
+ """ + messages = values["messages"] + input_vars: set = set() + optional_variables = set() + input_types: dict[str, Any] = values.get("input_types", {}) + for message in messages: + if isinstance(message, (BaseMessagePromptTemplate, BaseChatPromptTemplate)): + input_vars.update(message.input_variables) + if isinstance(message, MessagesPlaceholder): + if "partial_variables" not in values: + values["partial_variables"] = {} + if ( + message.optional + and message.variable_name not in values["partial_variables"] + ): + values["partial_variables"][message.variable_name] = [] + optional_variables.add(message.variable_name) + if message.variable_name not in input_types: + input_types[message.variable_name] = list[AnyMessage] + if "partial_variables" in values: + input_vars = input_vars - set(values["partial_variables"]) + if optional_variables: + input_vars = input_vars - optional_variables + if "input_variables" in values and values.get("validate_template"): + if input_vars != set(values["input_variables"]): + msg = ( + "Got mismatched input_variables. " + f"Expected: {input_vars}. " + f"Got: {values['input_variables']}" + ) + raise ValueError(msg) + else: + values["input_variables"] = sorted(input_vars) + if optional_variables: + values["optional_variables"] = sorted(optional_variables) + values["input_types"] = input_types + return values + + @classmethod + def from_template(cls, template: str, **kwargs: Any) -> ChatPromptTemplate: + """Create a chat prompt template from a template string. + + Creates a chat template consisting of a single message assumed to be from + the human. + + Args: + template: template string + **kwargs: keyword arguments to pass to the constructor. + + Returns: + A new instance of this class. + """ + prompt_template = PromptTemplate.from_template(template, **kwargs) + message = HumanMessagePromptTemplate(prompt=prompt_template) + return cls.from_messages([message]) + + @classmethod + @deprecated("0.0.1", alternative="from_messages", pending=True) + def from_role_strings( + cls, string_messages: list[tuple[str, str]] + ) -> ChatPromptTemplate: + """Create a chat prompt template from a list of (role, template) tuples. + + Args: + string_messages: list of (role, template) tuples. + + Returns: + a chat prompt template. + """ + return cls( + messages=[ + ChatMessagePromptTemplate.from_template(template, role=role) + for role, template in string_messages + ] + ) + + @classmethod + @deprecated("0.0.1", alternative="from_messages", pending=True) + def from_strings( + cls, string_messages: list[tuple[type[BaseMessagePromptTemplate], str]] + ) -> ChatPromptTemplate: + """Create a chat prompt template from a list of (role class, template) tuples. + + Args: + string_messages: list of (role class, template) tuples. + + Returns: + a chat prompt template. + """ + return cls.from_messages(string_messages) + + @classmethod + def from_messages( + cls, + messages: Sequence[MessageLikeRepresentation], + template_format: PromptTemplateFormat = "f-string", + ) -> ChatPromptTemplate: + """Create a chat prompt template from a variety of message formats. + + Examples: + Instantiation from a list of message templates: + + .. code-block:: python + + template = ChatPromptTemplate.from_messages([ + ("human", "Hello, how are you?"), + ("ai", "I'm doing well, thanks!"), + ("human", "That's good to hear."), + ]) + + Instantiation from mixed message formats: + + .. 
code-block:: python + + template = ChatPromptTemplate.from_messages([ + SystemMessage(content="hello"), + ("human", "Hello, how are you?"), + ]) + + Args: + messages: sequence of message representations. + A message can be represented using the following formats: + (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of + (message type, template); e.g., ("human", "{user_input}"), + (4) 2-tuple of (message class, template), (5) a string which is + shorthand for ("human", template); e.g., "{user_input}". + template_format: format of the template. Defaults to "f-string". + + Returns: + a chat prompt template. + """ + return cls(messages, template_format=template_format) + + def format_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Format the chat template into a list of finalized messages. + + Args: + **kwargs: keyword arguments to use for filling in template variables + in all the template messages in this chat template. + + Returns: + list of formatted messages. + """ + kwargs = self._merge_partial_and_user_variables(**kwargs) + result = [] + for message_template in self.messages: + if isinstance(message_template, BaseMessage): + result.extend([message_template]) + elif isinstance( + message_template, (BaseMessagePromptTemplate, BaseChatPromptTemplate) + ): + message = message_template.format_messages(**kwargs) + result.extend(message) + else: + msg = f"Unexpected input: {message_template}" + raise ValueError(msg) # noqa: TRY004 + return result + + async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Async format the chat template into a list of finalized messages. + + Args: + **kwargs: keyword arguments to use for filling in template variables + in all the template messages in this chat template. + + Returns: + list of formatted messages. + + Raises: + ValueError: If unexpected input. + """ + kwargs = self._merge_partial_and_user_variables(**kwargs) + result = [] + for message_template in self.messages: + if isinstance(message_template, BaseMessage): + result.extend([message_template]) + elif isinstance( + message_template, (BaseMessagePromptTemplate, BaseChatPromptTemplate) + ): + message = await message_template.aformat_messages(**kwargs) + result.extend(message) + else: + msg = f"Unexpected input: {message_template}" + raise ValueError(msg) # noqa:TRY004 + return result + + def partial(self, **kwargs: Any) -> ChatPromptTemplate: + """Get a new ChatPromptTemplate with some input variables already filled in. + + Args: + **kwargs: keyword arguments to use for filling in template variables. Ought + to be a subset of the input variables. + + Returns: + A new ChatPromptTemplate. + + + Example: + + .. code-block:: python + + from langchain_core.prompts import ChatPromptTemplate + + template = ChatPromptTemplate.from_messages( + [ + ("system", "You are an AI assistant named {name}."), + ("human", "Hi I'm {user}"), + ("ai", "Hi there, {user}, I'm {name}."), + ("human", "{input}"), + ] + ) + template2 = template.partial(user="Lucy", name="R2D2") + + template2.format_messages(input="hello") + """ + prompt_dict = self.__dict__.copy() + prompt_dict["input_variables"] = list( + set(self.input_variables).difference(kwargs) + ) + prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs} + return type(self)(**prompt_dict) + + def append(self, message: MessageLikeRepresentation) -> None: + """Append a message to the end of the chat template. + + Args: + message: representation of a message to append. 
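+
+        Example (illustrative; accepts the same message formats as
+        ``from_messages``):
+
+            .. code-block:: python
+
+                template = ChatPromptTemplate([("system", "You are helpful.")])
+                template.append(("human", "{question}"))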
+ """ + self.messages.append(_convert_to_message_template(message)) + + def extend(self, messages: Sequence[MessageLikeRepresentation]) -> None: + """Extend the chat template with a sequence of messages. + + Args: + messages: sequence of message representations to append. + """ + self.messages.extend( + [_convert_to_message_template(message) for message in messages] + ) + + @overload + def __getitem__(self, index: int) -> MessageLike: ... + + @overload + def __getitem__(self, index: slice) -> ChatPromptTemplate: ... + + def __getitem__( + self, index: Union[int, slice] + ) -> Union[MessageLike, ChatPromptTemplate]: + """Use to index into the chat template.""" + if isinstance(index, slice): + start, stop, step = index.indices(len(self.messages)) + messages = self.messages[start:stop:step] + return ChatPromptTemplate.from_messages(messages) + return self.messages[index] + + def __len__(self) -> int: + """Get the length of the chat template.""" + return len(self.messages) + + @property + def _prompt_type(self) -> str: + """Name of prompt type. Used for serialization.""" + return "chat" + + def save(self, file_path: Union[Path, str]) -> None: + """Save prompt to file. + + Args: + file_path: path to file. + """ + raise NotImplementedError + + @override + def pretty_repr(self, html: bool = False) -> str: + """Human-readable representation. + + Args: + html: Whether to format as HTML. Defaults to False. + + Returns: + Human-readable representation. + """ + # TODO: handle partials + return "\n\n".join(msg.pretty_repr(html=html) for msg in self.messages) + + +def _create_template_from_message_type( + message_type: str, + template: Union[str, list], + template_format: PromptTemplateFormat = "f-string", +) -> BaseMessagePromptTemplate: + """Create a message prompt template from a message type and template string. + + Args: + message_type: str the type of the message template (e.g., "human", "ai", etc.) + template: str the template string. + template_format: format of the template. Defaults to "f-string". + + Returns: + a message prompt template of the appropriate type. + + Raises: + ValueError: If unexpected message type. + """ + if message_type in ("human", "user"): + message: BaseMessagePromptTemplate = HumanMessagePromptTemplate.from_template( + template, template_format=template_format + ) + elif message_type in ("ai", "assistant"): + message = AIMessagePromptTemplate.from_template( + cast("str", template), template_format=template_format + ) + elif message_type == "system": + message = SystemMessagePromptTemplate.from_template( + cast("str", template), template_format=template_format + ) + elif message_type == "placeholder": + if isinstance(template, str): + if template[0] != "{" or template[-1] != "}": + msg = ( + f"Invalid placeholder template: {template}." + " Expected a variable name surrounded by curly braces." + ) + raise ValueError(msg) + var_name = template[1:-1] + message = MessagesPlaceholder(variable_name=var_name, optional=True) + elif len(template) == 2 and isinstance(template[1], bool): + var_name_wrapped, is_optional = template + if not isinstance(var_name_wrapped, str): + msg = f"Expected variable name to be a string. Got: {var_name_wrapped}" + raise ValueError(msg) # noqa:TRY004 + if var_name_wrapped[0] != "{" or var_name_wrapped[-1] != "}": + msg = ( + f"Invalid placeholder template: {var_name_wrapped}." + " Expected a variable name surrounded by curly braces." 
+                )
+                raise ValueError(msg)
+            var_name = var_name_wrapped[1:-1]
+
+            message = MessagesPlaceholder(variable_name=var_name, optional=is_optional)
+        else:
+            msg = (
+                "Unexpected arguments for placeholder message type."
+                " Expected either a single string variable name"
+                " or a list of [variable_name: str, is_optional: bool]."
+                f" Got: {template}"
+            )
+            raise ValueError(msg)
+    else:
+        msg = (
+            f"Unexpected message type: {message_type}. Use one of 'human',"
+            " 'user', 'ai', 'assistant', 'system', or 'placeholder'."
+        )
+        raise ValueError(msg)
+    return message
+
+
+def _convert_to_message_template(
+    message: MessageLikeRepresentation,
+    template_format: PromptTemplateFormat = "f-string",
+) -> Union[BaseMessage, BaseMessagePromptTemplate, BaseChatPromptTemplate]:
+    """Instantiate a message from a variety of message formats.
+
+    The message format can be one of the following:
+
+    - BaseMessagePromptTemplate
+    - BaseMessage
+    - 2-tuple of (role string, template); e.g., ("human", "{user_input}")
+    - 2-tuple of (message class, template)
+    - string: shorthand for ("human", template); e.g., "{user_input}"
+
+    Args:
+        message: a representation of a message in one of the supported formats.
+        template_format: format of the template. Defaults to "f-string".
+
+    Returns:
+        an instance of a message or a message template.
+
+    Raises:
+        ValueError: If unexpected message type.
+        ValueError: If 2-tuple does not have 2 elements.
+    """
+    if isinstance(message, (BaseMessagePromptTemplate, BaseChatPromptTemplate)):
+        _message: Union[
+            BaseMessage, BaseMessagePromptTemplate, BaseChatPromptTemplate
+        ] = message
+    elif isinstance(message, BaseMessage):
+        _message = message
+    elif isinstance(message, str):
+        _message = _create_template_from_message_type(
+            "human", message, template_format=template_format
+        )
+    elif isinstance(message, (tuple, dict)):
+        if isinstance(message, dict):
+            if set(message.keys()) != {"content", "role"}:
+                msg = (
+                    "Expected dict to have exact keys 'role' and 'content'."
+                    f" Got: {message}"
+                )
+                raise ValueError(msg)
+            message = (message["role"], message["content"])
+        if len(message) != 2:
+            msg = f"Expected 2-tuple of (role, template), got {message}"
+            raise ValueError(msg)
+        message_type_str, template = message
+        if isinstance(message_type_str, str):
+            _message = _create_template_from_message_type(
+                message_type_str, template, template_format=template_format
+            )
+        else:
+            _message = message_type_str(
+                prompt=PromptTemplate.from_template(
+                    cast("str", template), template_format=template_format
+                )
+            )
+    else:
+        msg = f"Unsupported message type: {type(message)}"
+        raise NotImplementedError(msg)
+
+    return _message
+
+
+# For backwards compat:
+_convert_to_message = _convert_to_message_template
diff --git a/venv/Lib/site-packages/langchain_core/prompts/dict.py b/venv/Lib/site-packages/langchain_core/prompts/dict.py
new file mode 100644
index 00000000..0ccdf7a6
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/prompts/dict.py
@@ -0,0 +1,137 @@
+"""Dict prompt template."""
+
+import warnings
+from functools import cached_property
+from typing import Any, Literal, Optional
+
+from langchain_core.load import dumpd
+from langchain_core.prompts.string import (
+    DEFAULT_FORMATTER_MAPPING,
+    get_template_variables,
+)
+from langchain_core.runnables import RunnableConfig, RunnableSerializable
+from langchain_core.runnables.config import ensure_config
+
+
+class DictPromptTemplate(RunnableSerializable[dict, dict]):
+    """Template represented by a dict.
+ + Recognizes variables in f-string or mustache formatted string dict values. Does NOT + recognize variables in dict keys. Applies recursively. + """ + + template: dict[str, Any] + template_format: Literal["f-string", "mustache"] + + @property + def input_variables(self) -> list[str]: + """Template input variables.""" + return _get_input_variables(self.template, self.template_format) + + def format(self, **kwargs: Any) -> dict[str, Any]: + """Format the prompt with the inputs.""" + return _insert_input_variables(self.template, kwargs, self.template_format) + + async def aformat(self, **kwargs: Any) -> dict[str, Any]: + """Format the prompt with the inputs.""" + return self.format(**kwargs) + + def invoke( + self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> dict: + """Invoke the prompt.""" + return self._call_with_config( + lambda x: self.format(**x), + input, + ensure_config(config), + run_type="prompt", + serialized=self._serialized, + **kwargs, + ) + + @property + def _prompt_type(self) -> str: + return "dict-prompt" + + @cached_property + def _serialized(self) -> dict[str, Any]: + return dumpd(self) + + @classmethod + def is_lc_serializable(cls) -> bool: + """Return whether or not the class is serializable. + + Returns: True. + """ + return True + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Serialization namespace.""" + return ["langchain_core", "prompts", "dict"] + + def pretty_repr(self, *, html: bool = False) -> str: + """Human-readable representation. + + Args: + html: Whether to format as HTML. Defaults to False. + + Returns: + Human-readable representation. + """ + raise NotImplementedError + + +def _get_input_variables( + template: dict, template_format: Literal["f-string", "mustache"] +) -> list[str]: + input_variables = [] + for v in template.values(): + if isinstance(v, str): + input_variables += get_template_variables(v, template_format) + elif isinstance(v, dict): + input_variables += _get_input_variables(v, template_format) + elif isinstance(v, (list, tuple)): + for x in v: + if isinstance(x, str): + input_variables += get_template_variables(x, template_format) + elif isinstance(x, dict): + input_variables += _get_input_variables(x, template_format) + else: + pass + return list(set(input_variables)) + + +def _insert_input_variables( + template: dict[str, Any], + inputs: dict[str, Any], + template_format: Literal["f-string", "mustache"], +) -> dict[str, Any]: + formatted = {} + formatter = DEFAULT_FORMATTER_MAPPING[template_format] + for k, v in template.items(): + if isinstance(v, str): + formatted[k] = formatter(v, **inputs) + elif isinstance(v, dict): + if k == "image_url" and "path" in v: + msg = ( + "Specifying image inputs via file path in environments with " + "user-input paths is a security vulnerability. Out of an abundance " + "of caution, the utility has been removed to prevent possible " + "misuse." 
+ ) + warnings.warn(msg, stacklevel=2) + formatted[k] = _insert_input_variables(v, inputs, template_format) + elif isinstance(v, (list, tuple)): + formatted_v = [] + for x in v: + if isinstance(x, str): + formatted_v.append(formatter(x, **inputs)) + elif isinstance(x, dict): + formatted_v.append( + _insert_input_variables(x, inputs, template_format) + ) + formatted[k] = type(v)(formatted_v) + else: + formatted[k] = v + return formatted diff --git a/venv/Lib/site-packages/langchain_core/prompts/few_shot.py b/venv/Lib/site-packages/langchain_core/prompts/few_shot.py new file mode 100644 index 00000000..49d6bdf3 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/few_shot.py @@ -0,0 +1,465 @@ +"""Prompt template that contains few shot examples.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Literal, Optional, Union + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + model_validator, +) +from typing_extensions import override + +from langchain_core.example_selectors import BaseExampleSelector +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.prompts.chat import BaseChatPromptTemplate +from langchain_core.prompts.message import BaseMessagePromptTemplate +from langchain_core.prompts.prompt import PromptTemplate +from langchain_core.prompts.string import ( + DEFAULT_FORMATTER_MAPPING, + StringPromptTemplate, + check_valid_template, + get_template_variables, +) + +if TYPE_CHECKING: + from pathlib import Path + + from typing_extensions import Self + + +class _FewShotPromptTemplateMixin(BaseModel): + """Prompt template that contains few shot examples.""" + + examples: Optional[list[dict]] = None + """Examples to format into the prompt. + Either this or example_selector should be provided.""" + + example_selector: Optional[BaseExampleSelector] = None + """ExampleSelector to choose the examples to format into the prompt. + Either this or examples should be provided.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + @model_validator(mode="before") + @classmethod + def check_examples_and_selector(cls, values: dict) -> Any: + """Check that one and only one of examples/example_selector are provided. + + Args: + values: The values to check. + + Returns: + The values if they are valid. + + Raises: + ValueError: If neither or both examples and example_selector are provided. + ValueError: If both examples and example_selector are provided. + """ + examples = values.get("examples") + example_selector = values.get("example_selector") + if examples and example_selector: + msg = "Only one of 'examples' and 'example_selector' should be provided" + raise ValueError(msg) + + if examples is None and example_selector is None: + msg = "One of 'examples' and 'example_selector' should be provided" + raise ValueError(msg) + + return values + + def _get_examples(self, **kwargs: Any) -> list[dict]: + """Get the examples to use for formatting the prompt. + + Args: + **kwargs: Keyword arguments to be passed to the example selector. + + Returns: + List of examples. + + Raises: + ValueError: If neither examples nor example_selector are provided. 
+ """ + if self.examples is not None: + return self.examples + if self.example_selector is not None: + return self.example_selector.select_examples(kwargs) + msg = "One of 'examples' and 'example_selector' should be provided" + raise ValueError(msg) + + async def _aget_examples(self, **kwargs: Any) -> list[dict]: + """Async get the examples to use for formatting the prompt. + + Args: + **kwargs: Keyword arguments to be passed to the example selector. + + Returns: + List of examples. + + Raises: + ValueError: If neither examples nor example_selector are provided. + """ + if self.examples is not None: + return self.examples + if self.example_selector is not None: + return await self.example_selector.aselect_examples(kwargs) + msg = "One of 'examples' and 'example_selector' should be provided" + raise ValueError(msg) + + +class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate): + """Prompt template that contains few shot examples.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + """Return whether or not the class is serializable.""" + return False + + validate_template: bool = False + """Whether or not to try validating the template.""" + + example_prompt: PromptTemplate + """PromptTemplate used to format an individual example.""" + + suffix: str + """A prompt template string to put after the examples.""" + + example_separator: str = "\n\n" + """String separator used to join the prefix, the examples, and suffix.""" + + prefix: str = "" + """A prompt template string to put before the examples.""" + + template_format: Literal["f-string", "jinja2"] = "f-string" + """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" + + def __init__(self, **kwargs: Any) -> None: + """Initialize the few shot prompt template.""" + if "input_variables" not in kwargs and "example_prompt" in kwargs: + kwargs["input_variables"] = kwargs["example_prompt"].input_variables + super().__init__(**kwargs) + + @model_validator(mode="after") + def template_is_valid(self) -> Self: + """Check that prefix, suffix, and input variables are consistent.""" + if self.validate_template: + check_valid_template( + self.prefix + self.suffix, + self.template_format, + self.input_variables + list(self.partial_variables), + ) + elif self.template_format or None: + self.input_variables = [ + var + for var in get_template_variables( + self.prefix + self.suffix, self.template_format + ) + if var not in self.partial_variables + ] + return self + + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra="forbid", + ) + + def format(self, **kwargs: Any) -> str: + """Format the prompt with inputs generating a string. + + Use this method to generate a string representation of a prompt. + + Args: + **kwargs: keyword arguments to use for formatting. + + Returns: + A string representation of the prompt. + """ + kwargs = self._merge_partial_and_user_variables(**kwargs) + # Get the examples to use. + examples = self._get_examples(**kwargs) + examples = [ + {k: e[k] for k in self.example_prompt.input_variables} for e in examples + ] + # Format the examples. + example_strings = [ + self.example_prompt.format(**example) for example in examples + ] + # Create the overall template. + pieces = [self.prefix, *example_strings, self.suffix] + template = self.example_separator.join([piece for piece in pieces if piece]) + + # Format the template with the input variables. 
+        return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
+
+    async def aformat(self, **kwargs: Any) -> str:
+        """Async format the prompt with inputs generating a string.
+
+        Use this method to generate a string representation of a prompt.
+
+        Args:
+            **kwargs: keyword arguments to use for formatting.
+
+        Returns:
+            A string representation of the prompt.
+        """
+        kwargs = self._merge_partial_and_user_variables(**kwargs)
+        # Get the examples to use.
+        examples = await self._aget_examples(**kwargs)
+        examples = [
+            {k: e[k] for k in self.example_prompt.input_variables} for e in examples
+        ]
+        # Format the examples.
+        example_strings = [
+            await self.example_prompt.aformat(**example) for example in examples
+        ]
+        # Create the overall template.
+        pieces = [self.prefix, *example_strings, self.suffix]
+        template = self.example_separator.join([piece for piece in pieces if piece])
+
+        # Format the template with the input variables.
+        return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
+
+    @property
+    def _prompt_type(self) -> str:
+        """Return the prompt type key."""
+        return "few_shot"
+
+    def save(self, file_path: Union[Path, str]) -> None:
+        """Save the prompt template to a file.
+
+        Args:
+            file_path: The path to save the prompt template to.
+
+        Raises:
+            ValueError: If example_selector is provided.
+        """
+        if self.example_selector:
+            msg = "Saving an example selector is not currently supported"
+            raise ValueError(msg)
+        return super().save(file_path)
+
+
+class FewShotChatMessagePromptTemplate(
+    BaseChatPromptTemplate, _FewShotPromptTemplateMixin
+):
+    """Chat prompt template that supports few-shot examples.
+
+    The high-level structure produced by this prompt template is a list of messages
+    consisting of prefix message(s), example message(s), and suffix message(s).
+
+    This structure enables creating a conversation with intermediate examples like:
+
+        System: You are a helpful AI Assistant
+        Human: What is 2+2?
+        AI: 4
+        Human: What is 2+3?
+        AI: 5
+        Human: What is 4+4?
+
+    This prompt template can format a fixed list of examples, or dynamically
+    select examples based on the input.
+
+    Examples:
+        Prompt template with a fixed list of examples (matching the sample
+        conversation above):
+
+        .. code-block:: python
+
+            from langchain_core.prompts import (
+                FewShotChatMessagePromptTemplate,
+                ChatPromptTemplate
+            )
+
+            examples = [
+                {"input": "2+2", "output": "4"},
+                {"input": "2+3", "output": "5"},
+            ]
+
+            example_prompt = ChatPromptTemplate.from_messages(
+                [('human', 'What is {input}?'), ('ai', '{output}')]
+            )
+
+            few_shot_prompt = FewShotChatMessagePromptTemplate(
+                examples=examples,
+                # This is a prompt template used to format each individual example.
+                example_prompt=example_prompt,
+            )
+
+            final_prompt = ChatPromptTemplate.from_messages(
+                [
+                    ('system', 'You are a helpful AI Assistant'),
+                    few_shot_prompt,
+                    ('human', '{input}'),
+                ]
+            )
+            final_prompt.format(input="What is 4+4?")
+
+        Prompt template with dynamically selected examples:
+
+        .. code-block:: python
+
+            from langchain_core.example_selectors import (
+                SemanticSimilarityExampleSelector,
+            )
+            # OpenAIEmbeddings and Chroma live in the langchain-openai and
+            # langchain-chroma partner packages.
+            from langchain_openai import OpenAIEmbeddings
+            from langchain_chroma import Chroma
+
+            examples = [
+                {"input": "2+2", "output": "4"},
+                {"input": "2+3", "output": "5"},
+                {"input": "2+4", "output": "6"},
+                # ...
+            ]
+
+            to_vectorize = [
+                " ".join(example.values())
+                for example in examples
+            ]
+            embeddings = OpenAIEmbeddings()
+            vectorstore = Chroma.from_texts(
+                to_vectorize, embeddings, metadatas=examples
+            )
+            example_selector = SemanticSimilarityExampleSelector(
+                vectorstore=vectorstore
+            )
+
+            from langchain_core.prompts import (
+                AIMessagePromptTemplate,
+                HumanMessagePromptTemplate,
+                SystemMessagePromptTemplate,
+            )
+            from langchain_core.prompts.few_shot import FewShotChatMessagePromptTemplate
+
+            few_shot_prompt = FewShotChatMessagePromptTemplate(
+                # Which variable(s) will be passed to the example selector.
+                input_variables=["input"],
+                example_selector=example_selector,
+                # Define how each example will be formatted.
+                # In this case, each example will become 2 messages:
+                # 1 human, and 1 AI
+                example_prompt=(
+                    HumanMessagePromptTemplate.from_template("{input}")
+                    + AIMessagePromptTemplate.from_template("{output}")
+                ),
+            )
+            # Define the overall prompt.
+            final_prompt = (
+                SystemMessagePromptTemplate.from_template(
+                    "You are a helpful AI Assistant"
+                )
+                + few_shot_prompt
+                + HumanMessagePromptTemplate.from_template("{input}")
+            )
+            # Show the prompt
+            print(final_prompt.format_messages(input="What's 3+3?"))  # noqa: T201
+
+            # Use within a chat model
+            from langchain_anthropic import ChatAnthropic
+            chain = final_prompt | ChatAnthropic(model="claude-3-haiku-20240307")
+            chain.invoke({"input": "What's 3+3?"})
+    """
+
+    input_variables: list[str] = Field(default_factory=list)
+    """A list of the names of the variables the prompt template will use
+    to pass to the example_selector, if provided."""
+
+    example_prompt: Union[BaseMessagePromptTemplate, BaseChatPromptTemplate]
+    """The prompt template used to format each individual example."""
+
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        """Return whether or not the class is serializable."""
+        return False
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra="forbid",
+    )
+
+    def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
+        """Format kwargs into a list of messages.
+
+        Args:
+            **kwargs: keyword arguments to use for filling in templates in messages.
+
+        Returns:
+            A list of formatted messages with all template variables filled in.
+        """
+        # Get the examples to use.
+        examples = self._get_examples(**kwargs)
+        examples = [
+            {k: e[k] for k in self.example_prompt.input_variables} for e in examples
+        ]
+        # Format the examples.
+        return [
+            message
+            for example in examples
+            for message in self.example_prompt.format_messages(**example)
+        ]
+
+    async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
+        """Async format kwargs into a list of messages.
+
+        Args:
+            **kwargs: keyword arguments to use for filling in templates in messages.
+
+        Returns:
+            A list of formatted messages with all template variables filled in.
+        """
+        # Get the examples to use.
+        examples = await self._aget_examples(**kwargs)
+        examples = [
+            {k: e[k] for k in self.example_prompt.input_variables} for e in examples
+        ]
+        # Format the examples.
+        return [
+            message
+            for example in examples
+            for message in await self.example_prompt.aformat_messages(**example)
+        ]
+
+    def format(self, **kwargs: Any) -> str:
+        """Format the prompt with inputs generating a string.
+
+        Use this method to generate a string representation of a prompt consisting
+        of chat messages.
+
+        Useful for feeding into a string-based completion language model or debugging.
+
+        Args:
+            **kwargs: keyword arguments to use for formatting.
+ + Returns: + A string representation of the prompt + """ + messages = self.format_messages(**kwargs) + return get_buffer_string(messages) + + async def aformat(self, **kwargs: Any) -> str: + """Async format the prompt with inputs generating a string. + + Use this method to generate a string representation of a prompt consisting + of chat messages. + + Useful for feeding into a string-based completion language model or debugging. + + Args: + **kwargs: keyword arguments to use for formatting. + + Returns: + A string representation of the prompt + """ + messages = await self.aformat_messages(**kwargs) + return get_buffer_string(messages) + + @override + def pretty_repr(self, html: bool = False) -> str: + """Return a pretty representation of the prompt template. + + Args: + html: Whether or not to return an HTML formatted string. + + Returns: + A pretty representation of the prompt template. + """ + raise NotImplementedError diff --git a/venv/Lib/site-packages/langchain_core/prompts/few_shot_with_templates.py b/venv/Lib/site-packages/langchain_core/prompts/few_shot_with_templates.py new file mode 100644 index 00000000..7a32146f --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/few_shot_with_templates.py @@ -0,0 +1,220 @@ +"""Prompt template that contains few shot examples.""" + +from pathlib import Path +from typing import Any, Optional, Union + +from pydantic import ConfigDict, model_validator +from typing_extensions import Self + +from langchain_core.prompts.prompt import PromptTemplate +from langchain_core.prompts.string import ( + DEFAULT_FORMATTER_MAPPING, + PromptTemplateFormat, + StringPromptTemplate, +) + + +class FewShotPromptWithTemplates(StringPromptTemplate): + """Prompt template that contains few shot examples.""" + + examples: Optional[list[dict]] = None + """Examples to format into the prompt. + Either this or example_selector should be provided.""" + + example_selector: Any = None + """ExampleSelector to choose the examples to format into the prompt. + Either this or examples should be provided.""" + + example_prompt: PromptTemplate + """PromptTemplate used to format an individual example.""" + + suffix: StringPromptTemplate + """A PromptTemplate to put after the examples.""" + + example_separator: str = "\n\n" + """String separator used to join the prefix, the examples, and suffix.""" + + prefix: Optional[StringPromptTemplate] = None + """A PromptTemplate to put before the examples.""" + + template_format: PromptTemplateFormat = "f-string" + """The format of the prompt template. 
+    Options are: 'f-string', 'jinja2', 'mustache'."""
+
+    validate_template: bool = False
+    """Whether or not to try validating the template."""
+
+    @classmethod
+    def get_lc_namespace(cls) -> list[str]:
+        """Get the namespace of the langchain object."""
+        return ["langchain", "prompts", "few_shot_with_templates"]
+
+    @model_validator(mode="before")
+    @classmethod
+    def check_examples_and_selector(cls, values: dict) -> Any:
+        """Check that one and only one of examples/example_selector is provided."""
+        examples = values.get("examples")
+        example_selector = values.get("example_selector")
+        if examples and example_selector:
+            msg = "Only one of 'examples' and 'example_selector' should be provided"
+            raise ValueError(msg)
+
+        if examples is None and example_selector is None:
+            msg = "One of 'examples' and 'example_selector' should be provided"
+            raise ValueError(msg)
+
+        return values
+
+    @model_validator(mode="after")
+    def template_is_valid(self) -> Self:
+        """Check that prefix, suffix, and input variables are consistent."""
+        if self.validate_template:
+            input_variables = self.input_variables
+            expected_input_variables = set(self.suffix.input_variables)
+            expected_input_variables |= set(self.partial_variables)
+            if self.prefix is not None:
+                expected_input_variables |= set(self.prefix.input_variables)
+            missing_vars = expected_input_variables.difference(input_variables)
+            if missing_vars:
+                msg = (
+                    f"Got input_variables={input_variables}, but based on "
+                    f"prefix/suffix expected {expected_input_variables}"
+                )
+                raise ValueError(msg)
+        else:
+            # Parenthesize the union before subtracting partials: `-` binds
+            # tighter than `|`, so without the parentheses the partials would
+            # only be removed from the prefix variables.
+            self.input_variables = sorted(
+                (
+                    set(self.suffix.input_variables)
+                    | set(self.prefix.input_variables if self.prefix else [])
+                )
+                - set(self.partial_variables)
+            )
+        return self
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra="forbid",
+    )
+
+    def _get_examples(self, **kwargs: Any) -> list[dict]:
+        if self.examples is not None:
+            return self.examples
+        if self.example_selector is not None:
+            return self.example_selector.select_examples(kwargs)
+        msg = "One of 'examples' and 'example_selector' should be provided"
+        raise ValueError(msg)
+
+    async def _aget_examples(self, **kwargs: Any) -> list[dict]:
+        if self.examples is not None:
+            return self.examples
+        if self.example_selector is not None:
+            return await self.example_selector.aselect_examples(kwargs)
+        msg = "One of 'examples' and 'example_selector' should be provided"
+        raise ValueError(msg)
+
+    def format(self, **kwargs: Any) -> str:
+        """Format the prompt with the inputs.
+
+        Args:
+            kwargs: Any arguments to be passed to the prompt template.
+
+        Returns:
+            A formatted string.
+
+        Example:
+
+        .. code-block:: python
+
+            prompt.format(variable1="foo")
+        """
+        kwargs = self._merge_partial_and_user_variables(**kwargs)
+        # Get the examples to use.
+        examples = self._get_examples(**kwargs)
+        # Format the examples.
+        example_strings = [
+            self.example_prompt.format(**example) for example in examples
+        ]
+        # Create the overall prefix.
+        if self.prefix is None:
+            prefix = ""
+        else:
+            prefix_kwargs = {
+                k: v for k, v in kwargs.items() if k in self.prefix.input_variables
+            }
+            for k in prefix_kwargs:
+                kwargs.pop(k)
+            prefix = self.prefix.format(**prefix_kwargs)
+
+        # Create the overall suffix
+        suffix_kwargs = {
+            k: v for k, v in kwargs.items() if k in self.suffix.input_variables
+        }
+        for k in suffix_kwargs:
+            kwargs.pop(k)
+        suffix = self.suffix.format(
+            **suffix_kwargs,
+        )
+
+        pieces = [prefix, *example_strings, suffix]
+        template = self.example_separator.join([piece for piece in pieces if piece])
+        # Format the template with the input variables.
+ return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs) + + async def aformat(self, **kwargs: Any) -> str: + """Async format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + """ + kwargs = self._merge_partial_and_user_variables(**kwargs) + # Get the examples to use. + examples = await self._aget_examples(**kwargs) + # Format the examples. + example_strings = [ + # We can use the sync method here as PromptTemplate doesn't block + self.example_prompt.format(**example) + for example in examples + ] + # Create the overall prefix. + if self.prefix is None: + prefix = "" + else: + prefix_kwargs = { + k: v for k, v in kwargs.items() if k in self.prefix.input_variables + } + for k in prefix_kwargs: + kwargs.pop(k) + prefix = await self.prefix.aformat(**prefix_kwargs) + + # Create the overall suffix + suffix_kwargs = { + k: v for k, v in kwargs.items() if k in self.suffix.input_variables + } + for k in suffix_kwargs: + kwargs.pop(k) + suffix = await self.suffix.aformat( + **suffix_kwargs, + ) + + pieces = [prefix, *example_strings, suffix] + template = self.example_separator.join([piece for piece in pieces if piece]) + # Format the template with the input variables. + return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs) + + @property + def _prompt_type(self) -> str: + """Return the prompt type key.""" + return "few_shot_with_templates" + + def save(self, file_path: Union[Path, str]) -> None: + """Save the prompt to a file. + + Args: + file_path: The path to save the prompt to. + + Raises: + ValueError: If example_selector is provided. + """ + if self.example_selector: + msg = "Saving an example selector is not currently supported" + raise ValueError(msg) + return super().save(file_path) diff --git a/venv/Lib/site-packages/langchain_core/prompts/image.py b/venv/Lib/site-packages/langchain_core/prompts/image.py new file mode 100644 index 00000000..525d2941 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/image.py @@ -0,0 +1,148 @@ +"""Image prompt template for a multimodal model.""" + +from typing import Any + +from pydantic import Field + +from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue +from langchain_core.prompts.base import BasePromptTemplate +from langchain_core.prompts.string import ( + DEFAULT_FORMATTER_MAPPING, + PromptTemplateFormat, +) +from langchain_core.runnables import run_in_executor + + +class ImagePromptTemplate(BasePromptTemplate[ImageURL]): + """Image prompt template for a multimodal model.""" + + template: dict = Field(default_factory=dict) + """Template for the prompt.""" + template_format: PromptTemplateFormat = "f-string" + """The format of the prompt template. + Options are: 'f-string', 'mustache', 'jinja2'.""" + + def __init__(self, **kwargs: Any) -> None: + """Create an image prompt template.""" + if "input_variables" not in kwargs: + kwargs["input_variables"] = [] + + overlap = set(kwargs["input_variables"]) & {"url", "path", "detail"} + if overlap: + msg = ( + "input_variables for the image template cannot contain" + " any of 'url', 'path', or 'detail'." 
+ f" Found: {overlap}" + ) + raise ValueError(msg) + super().__init__(**kwargs) + + @property + def _prompt_type(self) -> str: + """Return the prompt type key.""" + return "image-prompt" + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object.""" + return ["langchain", "prompts", "image"] + + def format_prompt(self, **kwargs: Any) -> PromptValue: + """Format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + """ + return ImagePromptValue(image_url=self.format(**kwargs)) + + async def aformat_prompt(self, **kwargs: Any) -> PromptValue: + """Async format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + """ + return ImagePromptValue(image_url=await self.aformat(**kwargs)) + + def format( + self, + **kwargs: Any, + ) -> ImageURL: + """Format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + + Raises: + ValueError: If the url is not provided. + ValueError: If the url is not a string. + + Example: + + .. code-block:: python + + prompt.format(variable1="foo") + """ + formatted = {} + for k, v in self.template.items(): + if isinstance(v, str): + formatted[k] = DEFAULT_FORMATTER_MAPPING[self.template_format]( + v, **kwargs + ) + else: + formatted[k] = v + url = kwargs.get("url") or formatted.get("url") + if kwargs.get("path") or formatted.get("path"): + msg = ( + "Loading images from 'path' has been removed as of 0.3.15 for security " + "reasons. Please specify images by 'url'." + ) + raise ValueError(msg) + detail = kwargs.get("detail") or formatted.get("detail") + if not url: + msg = "Must provide url." + raise ValueError(msg) + if not isinstance(url, str): + msg = "url must be a string." + raise ValueError(msg) # noqa: TRY004 + output: ImageURL = {"url": url} + if detail: + # Don't check literal values here: let the API check them + output["detail"] = detail + return output + + async def aformat(self, **kwargs: Any) -> ImageURL: + """Async format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + + Raises: + ValueError: If the path or url is not a string. + """ + return await run_in_executor(None, self.format, **kwargs) + + def pretty_repr( + self, + html: bool = False, # noqa: FBT001,FBT002 + ) -> str: + """Return a pretty representation of the prompt. + + Args: + html: Whether to return an html formatted string. + + Returns: + A pretty representation of the prompt. 
+ """ + raise NotImplementedError diff --git a/venv/Lib/site-packages/langchain_core/prompts/loading.py b/venv/Lib/site-packages/langchain_core/prompts/loading.py new file mode 100644 index 00000000..3eb24c78 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/loading.py @@ -0,0 +1,203 @@ +"""Load prompts.""" + +import json +import logging +from pathlib import Path +from typing import Callable, Optional, Union + +import yaml + +from langchain_core.output_parsers.string import StrOutputParser +from langchain_core.prompts.base import BasePromptTemplate +from langchain_core.prompts.chat import ChatPromptTemplate +from langchain_core.prompts.few_shot import FewShotPromptTemplate +from langchain_core.prompts.prompt import PromptTemplate + +URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/" +logger = logging.getLogger(__name__) + + +def load_prompt_from_config(config: dict) -> BasePromptTemplate: + """Load prompt from Config Dict. + + Args: + config: Dict containing the prompt configuration. + + Returns: + A PromptTemplate object. + + Raises: + ValueError: If the prompt type is not supported. + """ + if "_type" not in config: + logger.warning("No `_type` key found, defaulting to `prompt`.") + config_type = config.pop("_type", "prompt") + + if config_type not in type_to_loader_dict: + msg = f"Loading {config_type} prompt not supported" + raise ValueError(msg) + + prompt_loader = type_to_loader_dict[config_type] + return prompt_loader(config) + + +def _load_template(var_name: str, config: dict) -> dict: + """Load template from the path if applicable.""" + # Check if template_path exists in config. + if f"{var_name}_path" in config: + # If it does, make sure template variable doesn't also exist. + if var_name in config: + msg = f"Both `{var_name}_path` and `{var_name}` cannot be provided." + raise ValueError(msg) + # Pop the template path from the config. + template_path = Path(config.pop(f"{var_name}_path")) + # Load the template. + if template_path.suffix == ".txt": + template = template_path.read_text() + else: + raise ValueError + # Set the template variable to the extracted variable. + config[var_name] = template + return config + + +def _load_examples(config: dict) -> dict: + """Load examples if necessary.""" + if isinstance(config["examples"], list): + pass + elif isinstance(config["examples"], str): + path = Path(config["examples"]) + with path.open() as f: + if path.suffix == ".json": + examples = json.load(f) + elif path.suffix in {".yaml", ".yml"}: + examples = yaml.safe_load(f) + else: + msg = "Invalid file format. Only json or yaml formats are supported." + raise ValueError(msg) + config["examples"] = examples + else: + msg = "Invalid examples format. Only list or string are supported." + raise ValueError(msg) # noqa:TRY004 + return config + + +def _load_output_parser(config: dict) -> dict: + """Load output parser.""" + if "output_parser" in config and config["output_parser"]: + _config = config.pop("output_parser") + output_parser_type = _config.pop("_type") + if output_parser_type == "default": + output_parser = StrOutputParser(**_config) + else: + msg = f"Unsupported output parser {output_parser_type}" + raise ValueError(msg) + config["output_parser"] = output_parser + return config + + +def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate: + """Load the "few shot" prompt from the config.""" + # Load the suffix and prefix templates. 
+ config = _load_template("suffix", config) + config = _load_template("prefix", config) + # Load the example prompt. + if "example_prompt_path" in config: + if "example_prompt" in config: + msg = ( + "Only one of example_prompt and example_prompt_path should " + "be specified." + ) + raise ValueError(msg) + config["example_prompt"] = load_prompt(config.pop("example_prompt_path")) + else: + config["example_prompt"] = load_prompt_from_config(config["example_prompt"]) + # Load the examples. + config = _load_examples(config) + config = _load_output_parser(config) + return FewShotPromptTemplate(**config) + + +def _load_prompt(config: dict) -> PromptTemplate: + """Load the prompt template from config.""" + # Load the template from disk if necessary. + config = _load_template("template", config) + config = _load_output_parser(config) + + template_format = config.get("template_format", "f-string") + if template_format == "jinja2": + # Disabled due to: + # https://github.com/langchain-ai/langchain/issues/4394 + msg = ( + f"Loading templates with '{template_format}' format is no longer supported " + f"since it can lead to arbitrary code execution. Please migrate to using " + f"the 'f-string' template format, which does not suffer from this issue." + ) + raise ValueError(msg) + + return PromptTemplate(**config) + + +def load_prompt( + path: Union[str, Path], encoding: Optional[str] = None +) -> BasePromptTemplate: + """Unified method for loading a prompt from LangChainHub or local fs. + + Args: + path: Path to the prompt file. + encoding: Encoding of the file. Defaults to None. + + Returns: + A PromptTemplate object. + + Raises: + RuntimeError: If the path is a Lang Chain Hub path. + """ + if isinstance(path, str) and path.startswith("lc://"): + msg = ( + "Loading from the deprecated github-based Hub is no longer supported. " + "Please use the new LangChain Hub at https://smith.langchain.com/hub " + "instead." + ) + raise RuntimeError(msg) + return _load_prompt_from_file(path, encoding) + + +def _load_prompt_from_file( + file: Union[str, Path], encoding: Optional[str] = None +) -> BasePromptTemplate: + """Load prompt from file.""" + # Convert file to a Path object. + file_path = Path(file) + # Load from either json or yaml. + if file_path.suffix == ".json": + with file_path.open(encoding=encoding) as f: + config = json.load(f) + elif file_path.suffix.endswith((".yaml", ".yml")): + with file_path.open(encoding=encoding) as f: + config = yaml.safe_load(f) + else: + msg = f"Got unsupported file type {file_path.suffix}" + raise ValueError(msg) + # Load the prompt from the config now. 
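+    # At this point `config` is a plain dict, e.g. (a minimal sketch):
+    #   {"_type": "prompt", "input_variables": ["foo"], "template": "Say {foo}"}
+    # where "_type" selects the loader from type_to_loader_dict.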
+ return load_prompt_from_config(config) + + +def _load_chat_prompt(config: dict) -> ChatPromptTemplate: + """Load chat prompt from config.""" + messages = config.pop("messages") + template = messages[0]["prompt"].pop("template") if messages else None + config.pop("input_variables") + + if not template: + msg = "Can't load chat prompt without template" + raise ValueError(msg) + + return ChatPromptTemplate.from_template(template=template, **config) + + +type_to_loader_dict: dict[str, Callable[[dict], BasePromptTemplate]] = { + "prompt": _load_prompt, + "few_shot": _load_few_shot_prompt, + "chat": _load_chat_prompt, +} diff --git a/venv/Lib/site-packages/langchain_core/prompts/message.py b/venv/Lib/site-packages/langchain_core/prompts/message.py new file mode 100644 index 00000000..668374a1 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/message.py @@ -0,0 +1,96 @@ +"""Message prompt templates.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any + +from langchain_core.load import Serializable +from langchain_core.messages import BaseMessage +from langchain_core.utils.interactive_env import is_interactive_env + +if TYPE_CHECKING: + from langchain_core.prompts.chat import ChatPromptTemplate + + +class BaseMessagePromptTemplate(Serializable, ABC): + """Base class for message prompt templates.""" + + @classmethod + def is_lc_serializable(cls) -> bool: + """Return whether or not the class is serializable. + + Returns: True. + """ + return True + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Default namespace is ["langchain", "prompts", "chat"]. + """ + return ["langchain", "prompts", "chat"] + + @abstractmethod + def format_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Format messages from kwargs. Should return a list of BaseMessages. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + List of BaseMessages. + """ + + async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]: + """Async format messages from kwargs. + + Args: + **kwargs: Keyword arguments to use for formatting. + + Returns: + List of BaseMessages. + """ + return self.format_messages(**kwargs) + + @property + @abstractmethod + def input_variables(self) -> list[str]: + """Input variables for this prompt template. + + Returns: + List of input variables. + """ + + def pretty_repr( + self, + html: bool = False, # noqa: FBT001,FBT002 + ) -> str: + """Human-readable representation. + + Args: + html: Whether to format as HTML. Defaults to False. + + Returns: + Human-readable representation. + """ + raise NotImplementedError + + def pretty_print(self) -> None: + """Print a human-readable representation.""" + print(self.pretty_repr(html=is_interactive_env())) # noqa: T201 + + def __add__(self, other: Any) -> ChatPromptTemplate: + """Combine two prompt templates. + + Args: + other: Another prompt template. + + Returns: + Combined prompt template. 
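+
+        Example:
+            A minimal sketch using two concrete message templates:
+
+            .. code-block:: python
+
+                from langchain_core.prompts.chat import (
+                    AIMessagePromptTemplate,
+                    HumanMessagePromptTemplate,
+                )
+
+                chat_prompt = HumanMessagePromptTemplate.from_template(
+                    "{question}"
+                ) + AIMessagePromptTemplate.from_template("{answer}")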
+ """ + from langchain_core.prompts.chat import ChatPromptTemplate + + prompt = ChatPromptTemplate(messages=[self]) + return prompt + other diff --git a/venv/Lib/site-packages/langchain_core/prompts/pipeline.py b/venv/Lib/site-packages/langchain_core/prompts/pipeline.py new file mode 100644 index 00000000..3c771b2d --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/pipeline.py @@ -0,0 +1,134 @@ +"""[DEPRECATED] Pipeline prompt template.""" + +from typing import Any + +from pydantic import model_validator + +from langchain_core._api.deprecation import deprecated +from langchain_core.prompt_values import PromptValue +from langchain_core.prompts.base import BasePromptTemplate +from langchain_core.prompts.chat import BaseChatPromptTemplate + + +def _get_inputs(inputs: dict, input_variables: list[str]) -> dict: + return {k: inputs[k] for k in input_variables} + + +@deprecated( + since="0.3.22", + removal="1.0", + message=( + "This class is deprecated. Please see the docstring below or at the link" + " for a replacement option: " + "https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html" + ), +) +class PipelinePromptTemplate(BasePromptTemplate): + """[DEPRECATED] Pipeline prompt template. + + This has been deprecated in favor of chaining individual prompts together in your + code. E.g. using a for loop, you could do: + + .. code-block:: python + + my_input = {"key": "value"} + for name, prompt in pipeline_prompts: + my_input[name] = prompt.invoke(my_input).to_string() + my_output = final_prompt.invoke(my_input) + + Prompt template for composing multiple prompt templates together. + + This can be useful when you want to reuse parts of prompts. + + A PipelinePrompt consists of two main parts: + - final_prompt: This is the final prompt that is returned + - pipeline_prompts: This is a list of tuples, consisting + of a string (`name`) and a Prompt Template. + Each PromptTemplate will be formatted and then passed + to future prompt templates as a variable with + the same name as `name` + """ + + final_prompt: BasePromptTemplate + """The final prompt that is returned.""" + pipeline_prompts: list[tuple[str, BasePromptTemplate]] + """A list of tuples, consisting of a string (`name`) and a Prompt Template.""" + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object.""" + return ["langchain", "prompts", "pipeline"] + + @model_validator(mode="before") + @classmethod + def get_input_variables(cls, values: dict) -> Any: + """Get input variables.""" + created_variables = set() + all_variables = set() + for k, prompt in values["pipeline_prompts"]: + created_variables.add(k) + all_variables.update(prompt.input_variables) + values["input_variables"] = list(all_variables.difference(created_variables)) + return values + + def format_prompt(self, **kwargs: Any) -> PromptValue: + """Format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + """ + for k, prompt in self.pipeline_prompts: + _inputs = _get_inputs(kwargs, prompt.input_variables) + if isinstance(prompt, BaseChatPromptTemplate): + kwargs[k] = prompt.format_messages(**_inputs) + else: + kwargs[k] = prompt.format(**_inputs) + _inputs = _get_inputs(kwargs, self.final_prompt.input_variables) + return self.final_prompt.format_prompt(**_inputs) + + async def aformat_prompt(self, **kwargs: Any) -> PromptValue: + """Async format the prompt with the inputs. 
+ + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + """ + for k, prompt in self.pipeline_prompts: + _inputs = _get_inputs(kwargs, prompt.input_variables) + if isinstance(prompt, BaseChatPromptTemplate): + kwargs[k] = await prompt.aformat_messages(**_inputs) + else: + kwargs[k] = await prompt.aformat(**_inputs) + _inputs = _get_inputs(kwargs, self.final_prompt.input_variables) + return await self.final_prompt.aformat_prompt(**_inputs) + + def format(self, **kwargs: Any) -> str: + """Format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + """ + return self.format_prompt(**kwargs).to_string() + + async def aformat(self, **kwargs: Any) -> str: + """Async format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + """ + return (await self.aformat_prompt(**kwargs)).to_string() + + @property + def _prompt_type(self) -> str: + raise ValueError diff --git a/venv/Lib/site-packages/langchain_core/prompts/prompt.py b/venv/Lib/site-packages/langchain_core/prompts/prompt.py new file mode 100644 index 00000000..190b0aab --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/prompt.py @@ -0,0 +1,303 @@ +"""Prompt schema definition.""" + +from __future__ import annotations + +import warnings +from pathlib import Path +from typing import TYPE_CHECKING, Any, Optional, Union + +from pydantic import BaseModel, model_validator +from typing_extensions import override + +from langchain_core.prompts.string import ( + DEFAULT_FORMATTER_MAPPING, + PromptTemplateFormat, + StringPromptTemplate, + check_valid_template, + get_template_variables, + mustache_schema, +) + +if TYPE_CHECKING: + from langchain_core.runnables.config import RunnableConfig + + +class PromptTemplate(StringPromptTemplate): + """Prompt template for a language model. + + A prompt template consists of a string template. It accepts a set of parameters + from the user that can be used to generate a prompt for a language model. + + The template can be formatted using either f-strings (default), jinja2, + or mustache syntax. + + *Security warning*: + Prefer using `template_format="f-string"` instead of + `template_format="jinja2"`, or make sure to NEVER accept jinja2 templates + from untrusted sources as they may lead to arbitrary Python code execution. + + As of LangChain 0.0.329, Jinja2 templates will be rendered using + Jinja2's SandboxedEnvironment by default. This sand-boxing should + be treated as a best-effort approach rather than a guarantee of security, + as it is an opt-out rather than opt-in approach. + + Despite the sand-boxing, we recommend to never use jinja2 templates + from untrusted sources. + + Example: + + .. code-block:: python + + from langchain_core.prompts import PromptTemplate + + # Instantiation using from_template (recommended) + prompt = PromptTemplate.from_template("Say {foo}") + prompt.format(foo="bar") + + # Instantiation using initializer + prompt = PromptTemplate(template="Say {foo}") + """ + + @property + @override + def lc_attributes(self) -> dict[str, Any]: + return { + "template_format": self.template_format, + } + + @classmethod + @override + def get_lc_namespace(cls) -> list[str]: + return ["langchain", "prompts", "prompt"] + + template: str + """The prompt template.""" + + template_format: PromptTemplateFormat = "f-string" + """The format of the prompt template. 
+ Options are: 'f-string', 'mustache', 'jinja2'.""" + + validate_template: bool = False + """Whether or not to try validating the template.""" + + @model_validator(mode="before") + @classmethod + def pre_init_validation(cls, values: dict) -> Any: + """Check that template and input variables are consistent.""" + if values.get("template") is None: + # Will let pydantic fail with a ValidationError if template + # is not provided. + return values + + # Set some default values based on the field defaults + values.setdefault("template_format", "f-string") + values.setdefault("partial_variables", {}) + + if values.get("validate_template"): + if values["template_format"] == "mustache": + msg = "Mustache templates cannot be validated." + raise ValueError(msg) + + if "input_variables" not in values: + msg = "Input variables must be provided to validate the template." + raise ValueError(msg) + + all_inputs = values["input_variables"] + list(values["partial_variables"]) + check_valid_template( + values["template"], values["template_format"], all_inputs + ) + + if values["template_format"]: + values["input_variables"] = [ + var + for var in get_template_variables( + values["template"], values["template_format"] + ) + if var not in values["partial_variables"] + ] + + return values + + @override + def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]: + """Get the input schema for the prompt. + + Args: + config: The runnable configuration. + + Returns: + The input schema for the prompt. + """ + if self.template_format != "mustache": + return super().get_input_schema(config) + + return mustache_schema(self.template) + + def __add__(self, other: Any) -> PromptTemplate: + """Override the + operator to allow for combining prompt templates.""" + # Allow for easy combining + if isinstance(other, PromptTemplate): + if self.template_format != "f-string": + msg = "Adding prompt templates only supported for f-strings." + raise ValueError(msg) + if other.template_format != "f-string": + msg = "Adding prompt templates only supported for f-strings." + raise ValueError(msg) + input_variables = list( + set(self.input_variables) | set(other.input_variables) + ) + template = self.template + other.template + # If any do not want to validate, then don't + validate_template = self.validate_template and other.validate_template + partial_variables = dict(self.partial_variables.items()) + for k, v in other.partial_variables.items(): + if k in partial_variables: + msg = "Cannot have same variable partialed twice." + raise ValueError(msg) + partial_variables[k] = v + return PromptTemplate( + template=template, + input_variables=input_variables, + partial_variables=partial_variables, + template_format="f-string", + validate_template=validate_template, + ) + if isinstance(other, str): + prompt = PromptTemplate.from_template(other) + return self + prompt + msg = f"Unsupported operand type for +: {type(other)}" + raise NotImplementedError(msg) + + @property + def _prompt_type(self) -> str: + """Return the prompt type key.""" + return "prompt" + + def format(self, **kwargs: Any) -> str: + """Format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. 
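+
+        Example:
+
+            .. code-block:: python
+
+                prompt = PromptTemplate.from_template("Tell me a {adjective} joke")
+                prompt.format(adjective="funny")  # -> "Tell me a funny joke"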
+ """ + kwargs = self._merge_partial_and_user_variables(**kwargs) + return DEFAULT_FORMATTER_MAPPING[self.template_format](self.template, **kwargs) + + @classmethod + def from_examples( + cls, + examples: list[str], + suffix: str, + input_variables: list[str], + example_separator: str = "\n\n", + prefix: str = "", + **kwargs: Any, + ) -> PromptTemplate: + """Take examples in list format with prefix and suffix to create a prompt. + + Intended to be used as a way to dynamically create a prompt from examples. + + Args: + examples: List of examples to use in the prompt. + suffix: String to go after the list of examples. Should generally + set up the user's input. + input_variables: A list of variable names the final prompt template + will expect. + example_separator: The separator to use in between examples. Defaults + to two new line characters. + prefix: String that should go before any examples. Generally includes + examples. Default to an empty string. + + Returns: + The final prompt generated. + """ + template = example_separator.join([prefix, *examples, suffix]) + return cls(input_variables=input_variables, template=template, **kwargs) + + @classmethod + def from_file( + cls, + template_file: Union[str, Path], + input_variables: Optional[list[str]] = None, + encoding: Optional[str] = None, + **kwargs: Any, + ) -> PromptTemplate: + """Load a prompt from a file. + + Args: + template_file: The path to the file containing the prompt template. + input_variables: [DEPRECATED] A list of variable names the final prompt + template will expect. Defaults to None. + encoding: The encoding system for opening the template file. + If not provided, will use the OS default. + + input_variables is ignored as from_file now delegates to from_template(). + + Returns: + The prompt loaded from the file. + """ + template = Path(template_file).read_text(encoding=encoding) + if input_variables: + warnings.warn( + "`input_variables' is deprecated and ignored.", + DeprecationWarning, + stacklevel=2, + ) + return cls.from_template(template=template, **kwargs) + + @classmethod + def from_template( + cls, + template: str, + *, + template_format: PromptTemplateFormat = "f-string", + partial_variables: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> PromptTemplate: + """Load a prompt template from a template. + + *Security warning*: + Prefer using `template_format="f-string"` instead of + `template_format="jinja2"`, or make sure to NEVER accept jinja2 templates + from untrusted sources as they may lead to arbitrary Python code execution. + + As of LangChain 0.0.329, Jinja2 templates will be rendered using + Jinja2's SandboxedEnvironment by default. This sand-boxing should + be treated as a best-effort approach rather than a guarantee of security, + as it is an opt-out rather than opt-in approach. + + Despite the sand-boxing, we recommend never using jinja2 templates + from untrusted sources. + + Args: + template: The template to load. + template_format: The format of the template. Use `jinja2` for jinja2, + `mustache` for mustache, and `f-string` for f-strings. + Defaults to `f-string`. + partial_variables: A dictionary of variables that can be used to partially + fill in the template. For example, if the template is + `"{variable1} {variable2}"`, and `partial_variables` is + `{"variable1": "foo"}`, then the final prompt will be + `"foo {variable2}"`. Defaults to None. + kwargs: Any other arguments to pass to the prompt template. + + Returns: + The prompt template loaded from the template. 
+ """ + input_variables = get_template_variables(template, template_format) + _partial_variables = partial_variables or {} + + if _partial_variables: + input_variables = [ + var for var in input_variables if var not in _partial_variables + ] + + return cls( + input_variables=input_variables, + template=template, + template_format=template_format, + partial_variables=_partial_variables, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain_core/prompts/string.py b/venv/Lib/site-packages/langchain_core/prompts/string.py new file mode 100644 index 00000000..e63d54db --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/string.py @@ -0,0 +1,320 @@ +"""BasePrompt schema definition.""" + +from __future__ import annotations + +import warnings +from abc import ABC +from string import Formatter +from typing import Any, Callable, Literal + +from pydantic import BaseModel, create_model + +from langchain_core.prompt_values import PromptValue, StringPromptValue +from langchain_core.prompts.base import BasePromptTemplate +from langchain_core.utils import get_colored_text, mustache +from langchain_core.utils.formatting import formatter +from langchain_core.utils.interactive_env import is_interactive_env + +PromptTemplateFormat = Literal["f-string", "mustache", "jinja2"] + + +def jinja2_formatter(template: str, /, **kwargs: Any) -> str: + """Format a template using jinja2. + + *Security warning*: + As of LangChain 0.0.329, this method uses Jinja2's + SandboxedEnvironment by default. However, this sand-boxing should + be treated as a best-effort approach rather than a guarantee of security. + Do not accept jinja2 templates from untrusted sources as they may lead + to arbitrary Python code execution. + + https://jinja.palletsprojects.com/en/3.1.x/sandbox/ + + Args: + template: The template string. + **kwargs: The variables to format the template with. + + Returns: + The formatted string. + + Raises: + ImportError: If jinja2 is not installed. + """ + try: + from jinja2.sandbox import SandboxedEnvironment + except ImportError as e: + msg = ( + "jinja2 not installed, which is needed to use the jinja2_formatter. " + "Please install it with `pip install jinja2`." + "Please be cautious when using jinja2 templates. " + "Do not expand jinja2 templates using unverified or user-controlled " + "inputs as that can result in arbitrary Python code execution." + ) + raise ImportError(msg) from e + + # This uses a sandboxed environment to prevent arbitrary code execution. + # Jinja2 uses an opt-out rather than opt-in approach for sand-boxing. + # Please treat this sand-boxing as a best-effort approach rather than + # a guarantee of security. + # We recommend to never use jinja2 templates with untrusted inputs. + # https://jinja.palletsprojects.com/en/3.1.x/sandbox/ + # approach not a guarantee of security. + return SandboxedEnvironment().from_string(template).render(**kwargs) + + +def validate_jinja2(template: str, input_variables: list[str]) -> None: + """Validate that the input variables are valid for the template. + + Issues a warning if missing or extra variables are found. + + Args: + template: The template string. + input_variables: The input variables. 
+ """ + input_variables_set = set(input_variables) + valid_variables = _get_jinja2_variables_from_template(template) + missing_variables = valid_variables - input_variables_set + extra_variables = input_variables_set - valid_variables + + warning_message = "" + if missing_variables: + warning_message += f"Missing variables: {missing_variables} " + + if extra_variables: + warning_message += f"Extra variables: {extra_variables}" + + if warning_message: + warnings.warn(warning_message.strip(), stacklevel=7) + + +def _get_jinja2_variables_from_template(template: str) -> set[str]: + try: + from jinja2 import Environment, meta + except ImportError as e: + msg = ( + "jinja2 not installed, which is needed to use the jinja2_formatter. " + "Please install it with `pip install jinja2`." + ) + raise ImportError(msg) from e + env = Environment() # noqa: S701 + ast = env.parse(template) + return meta.find_undeclared_variables(ast) + + +def mustache_formatter(template: str, /, **kwargs: Any) -> str: + """Format a template using mustache. + + Args: + template: The template string. + **kwargs: The variables to format the template with. + + Returns: + The formatted string. + """ + return mustache.render(template, kwargs) + + +def mustache_template_vars( + template: str, +) -> set[str]: + """Get the variables from a mustache template. + + Args: + template: The template string. + + Returns: + The variables from the template. + """ + vars: set[str] = set() + section_depth = 0 + for type, key in mustache.tokenize(template): + if type == "end": + section_depth -= 1 + elif ( + type in ("variable", "section", "inverted section", "no escape") + and key != "." + and section_depth == 0 + ): + vars.add(key.split(".")[0]) + if type in ("section", "inverted section"): + section_depth += 1 + return vars + + +Defs = dict[str, "Defs"] + + +def mustache_schema( + template: str, +) -> type[BaseModel]: + """Get the variables from a mustache template. + + Args: + template: The template string. + + Returns: + The variables from the template as a Pydantic model. + """ + fields = {} + prefix: tuple[str, ...] = () + section_stack: list[tuple[str, ...]] = [] + for type, key in mustache.tokenize(template): + if key == ".": + continue + if type == "end": + if section_stack: + prefix = section_stack.pop() + elif type in ("section", "inverted section"): + section_stack.append(prefix) + prefix = prefix + tuple(key.split(".")) + fields[prefix] = False + elif type in ("variable", "no escape"): + fields[prefix + tuple(key.split("."))] = True + defs: Defs = {} # None means leaf node + while fields: + field, is_leaf = fields.popitem() + current = defs + for part in field[:-1]: + current = current.setdefault(part, {}) + current.setdefault(field[-1], "" if is_leaf else {}) # type: ignore[arg-type] + return _create_model_recursive("PromptInput", defs) + + +def _create_model_recursive(name: str, defs: Defs) -> type: + return create_model( # type: ignore[call-overload] + name, + **{ + k: (_create_model_recursive(k, v), None) if v else (type(v), None) + for k, v in defs.items() + }, + ) + + +DEFAULT_FORMATTER_MAPPING: dict[str, Callable] = { + "f-string": formatter.format, + "mustache": mustache_formatter, + "jinja2": jinja2_formatter, +} + +DEFAULT_VALIDATOR_MAPPING: dict[str, Callable] = { + "f-string": formatter.validate_input_variables, + "jinja2": validate_jinja2, +} + + +def check_valid_template( + template: str, template_format: str, input_variables: list[str] +) -> None: + """Check that template string is valid. 
+ + Args: + template: The template string. + template_format: The template format. Should be one of "f-string" or "jinja2". + input_variables: The input variables. + + Raises: + ValueError: If the template format is not supported. + ValueError: If the prompt schema is invalid. + """ + try: + validator_func = DEFAULT_VALIDATOR_MAPPING[template_format] + except KeyError as exc: + msg = ( + f"Invalid template format {template_format!r}, should be one of" + f" {list(DEFAULT_FORMATTER_MAPPING)}." + ) + raise ValueError(msg) from exc + try: + validator_func(template, input_variables) + except (KeyError, IndexError) as exc: + msg = ( + "Invalid prompt schema; check for mismatched or missing input parameters" + f" from {input_variables}." + ) + raise ValueError(msg) from exc + + +def get_template_variables(template: str, template_format: str) -> list[str]: + """Get the variables from the template. + + Args: + template: The template string. + template_format: The template format. Should be one of "f-string" or "jinja2". + + Returns: + The variables from the template. + + Raises: + ValueError: If the template format is not supported. + """ + if template_format == "jinja2": + # Get the variables for the template + input_variables = _get_jinja2_variables_from_template(template) + elif template_format == "f-string": + input_variables = { + v for _, v, _, _ in Formatter().parse(template) if v is not None + } + elif template_format == "mustache": + input_variables = mustache_template_vars(template) + else: + msg = f"Unsupported template format: {template_format}" + raise ValueError(msg) + + return sorted(input_variables) + + +class StringPromptTemplate(BasePromptTemplate, ABC): + """String prompt that exposes the format method, returning a prompt.""" + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object.""" + return ["langchain", "prompts", "base"] + + def format_prompt(self, **kwargs: Any) -> PromptValue: + """Format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + """ + return StringPromptValue(text=self.format(**kwargs)) + + async def aformat_prompt(self, **kwargs: Any) -> PromptValue: + """Async format the prompt with the inputs. + + Args: + kwargs: Any arguments to be passed to the prompt template. + + Returns: + A formatted string. + """ + return StringPromptValue(text=await self.aformat(**kwargs)) + + def pretty_repr( + self, + html: bool = False, # noqa: FBT001,FBT002 + ) -> str: + """Get a pretty representation of the prompt. + + Args: + html: Whether to return an HTML-formatted string. + + Returns: + A pretty representation of the prompt. 
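+
+        Example:
+
+            .. code-block:: python
+
+                PromptTemplate.from_template("Hi {name}").pretty_repr()
+                # -> "Hi {name}" (input variables shown as placeholders)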
+ """ + # TODO: handle partials + dummy_vars = { + input_var: "{" + f"{input_var}" + "}" for input_var in self.input_variables + } + if html: + dummy_vars = { + k: get_colored_text(v, "yellow") for k, v in dummy_vars.items() + } + return self.format(**dummy_vars) + + def pretty_print(self) -> None: + """Print a pretty representation of the prompt.""" + print(self.pretty_repr(html=is_interactive_env())) # noqa: T201 diff --git a/venv/Lib/site-packages/langchain_core/prompts/structured.py b/venv/Lib/site-packages/langchain_core/prompts/structured.py new file mode 100644 index 00000000..06407327 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/prompts/structured.py @@ -0,0 +1,170 @@ +"""Structured prompt template for a language model.""" + +from collections.abc import Iterator, Mapping, Sequence +from typing import ( + Any, + Callable, + Optional, + Union, +) + +from pydantic import BaseModel, Field +from typing_extensions import override + +from langchain_core._api.beta_decorator import beta +from langchain_core.language_models.base import BaseLanguageModel +from langchain_core.prompts.chat import ( + ChatPromptTemplate, + MessageLikeRepresentation, +) +from langchain_core.prompts.string import PromptTemplateFormat +from langchain_core.runnables.base import ( + Other, + Runnable, + RunnableSequence, + RunnableSerializable, +) +from langchain_core.utils import get_pydantic_field_names + + +@beta() +class StructuredPrompt(ChatPromptTemplate): + """Structured prompt template for a language model.""" + + schema_: Union[dict, type] + """Schema for the structured prompt.""" + structured_output_kwargs: dict[str, Any] = Field(default_factory=dict) + + def __init__( + self, + messages: Sequence[MessageLikeRepresentation], + schema_: Optional[Union[dict, type[BaseModel]]] = None, + *, + structured_output_kwargs: Optional[dict[str, Any]] = None, + template_format: PromptTemplateFormat = "f-string", + **kwargs: Any, + ) -> None: + """Create a structured prompt template. + + Args: + messages: sequence of messages. + schema_: schema for the structured prompt. + structured_output_kwargs: additional kwargs for structured output. + template_format: template format for the prompt. + """ + schema_ = schema_ or kwargs.pop("schema") + structured_output_kwargs = structured_output_kwargs or {} + for k in set(kwargs).difference(get_pydantic_field_names(self.__class__)): + structured_output_kwargs[k] = kwargs.pop(k) + super().__init__( + messages=messages, + schema_=schema_, + structured_output_kwargs=structured_output_kwargs, + template_format=template_format, + **kwargs, + ) + + @classmethod + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + For example, if the class is `langchain.llms.openai.OpenAI`, then the + namespace is ["langchain", "llms", "openai"] + """ + return cls.__module__.split(".") + + @classmethod + def from_messages_and_schema( + cls, + messages: Sequence[MessageLikeRepresentation], + schema: Union[dict, type], + **kwargs: Any, + ) -> ChatPromptTemplate: + """Create a chat prompt template from a variety of message formats. + + Examples: + Instantiation from a list of message templates: + + .. 
code-block:: python + + from langchain_core.prompts import StructuredPrompt + + class OutputSchema(BaseModel): + name: str + value: int + + template = StructuredPrompt( + [ + ("human", "Hello, how are you?"), + ("ai", "I'm doing well, thanks!"), + ("human", "That's good to hear."), + ], + OutputSchema, + ) + + Args: + messages: sequence of message representations. + A message can be represented using the following formats: + (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of + (message type, template); e.g., ("human", "{user_input}"), + (4) 2-tuple of (message class, template), (5) a string which is + shorthand for ("human", template); e.g., "{user_input}" + schema: a dictionary representation of function call, or a Pydantic model. + kwargs: Any additional kwargs to pass through to + ``ChatModel.with_structured_output(schema, **kwargs)``. + + Returns: + a structured prompt template + """ + return cls(messages, schema, **kwargs) + + @override + def __or__( + self, + other: Union[ + Runnable[Any, Other], + Callable[[Any], Other], + Callable[[Iterator[Any]], Iterator[Other]], + Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]], + ], + ) -> RunnableSerializable[dict, Other]: + return self.pipe(other) + + def pipe( + self, + *others: Union[ + Runnable[Any, Other], + Callable[[Any], Other], + Callable[[Iterator[Any]], Iterator[Other]], + Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]], + ], + name: Optional[str] = None, + ) -> RunnableSerializable[dict, Other]: + """Pipe the structured prompt to a language model. + + Args: + others: The language model to pipe the structured prompt to. + name: The name of the pipeline. Defaults to None. + + Returns: + A RunnableSequence object. + + Raises: + NotImplementedError: If the first element of `others` + is not a language model. + """ + if ( + others + and isinstance(others[0], BaseLanguageModel) + or hasattr(others[0], "with_structured_output") + ): + return RunnableSequence( + self, + others[0].with_structured_output( + self.schema_, **self.structured_output_kwargs + ), + *others[1:], + name=name, + ) + msg = "Structured prompts need to be piped to a language model." + raise NotImplementedError(msg) diff --git a/venv/Lib/site-packages/langchain_core/py.typed b/venv/Lib/site-packages/langchain_core/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/venv/Lib/site-packages/langchain_core/pydantic_v1/__init__.py b/venv/Lib/site-packages/langchain_core/pydantic_v1/__init__.py new file mode 100644 index 00000000..e4dfbbae --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/pydantic_v1/__init__.py @@ -0,0 +1,45 @@ +"""Pydantic v1 compatibility shim.""" + +from importlib import metadata + +from langchain_core._api.deprecation import warn_deprecated + +# Create namespaces for pydantic v1 and v2. +# This code must stay at the top of the file before other modules may +# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules. +# +# This hack is done for the following reasons: +# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since +# both dependencies and dependents may be stuck on either version of v1 or v2. +# * Creating namespaces for pydantic v1 and v2 should allow us to write code that +# unambiguously uses either v1 or v2 API. +# * This change is easier to roll out and roll back. 
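+#
+# In practice: with pydantic v2 installed, the star-import below re-exports
+# the pydantic.v1 namespace; on pydantic v1 it falls back to re-exporting
+# pydantic itself.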
+ +try: + from pydantic.v1 import * # noqa: F403 +except ImportError: + from pydantic import * # type: ignore[assignment,no-redef] # noqa: F403 + + +try: + _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0]) +except metadata.PackageNotFoundError: + _PYDANTIC_MAJOR_VERSION = 0 + +warn_deprecated( + "0.3.0", + removal="1.0.0", + alternative="pydantic.v1 or pydantic", + message=( + "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. " + "The langchain_core.pydantic_v1 module was a " + "compatibility shim for pydantic v1, and should no longer be used. " + "Please update the code to import from Pydantic directly.\n\n" + "For example, replace imports like: " + "`from langchain_core.pydantic_v1 import BaseModel`\n" + "with: `from pydantic import BaseModel`\n" + "or the v1 compatibility namespace if you are working in a code base " + "that has not been fully upgraded to pydantic 2 yet. " + "\tfrom pydantic.v1 import BaseModel\n" + ), +) diff --git a/venv/Lib/site-packages/langchain_core/pydantic_v1/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/pydantic_v1/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..61ae1e3d Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/pydantic_v1/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/pydantic_v1/__pycache__/dataclasses.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/pydantic_v1/__pycache__/dataclasses.cpython-312.pyc new file mode 100644 index 00000000..4708748c Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/pydantic_v1/__pycache__/dataclasses.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/pydantic_v1/__pycache__/main.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/pydantic_v1/__pycache__/main.cpython-312.pyc new file mode 100644 index 00000000..352475cc Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/pydantic_v1/__pycache__/main.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/pydantic_v1/dataclasses.py b/venv/Lib/site-packages/langchain_core/pydantic_v1/dataclasses.py new file mode 100644 index 00000000..81266057 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/pydantic_v1/dataclasses.py @@ -0,0 +1,26 @@ +"""Pydantic v1 compatibility shim.""" + +from langchain_core._api import warn_deprecated + +try: + from pydantic.v1.dataclasses import * # noqa: F403 +except ImportError: + from pydantic.dataclasses import * # type: ignore[no-redef] # noqa: F403 + +warn_deprecated( + "0.3.0", + removal="1.0.0", + alternative="pydantic.v1 or pydantic", + message=( + "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. " + "The langchain_core.pydantic_v1 module was a " + "compatibility shim for pydantic v1, and should no longer be used. " + "Please update the code to import from Pydantic directly.\n\n" + "For example, replace imports like: " + "`from langchain_core.pydantic_v1 import BaseModel`\n" + "with: `from pydantic import BaseModel`\n" + "or the v1 compatibility namespace if you are working in a code base " + "that has not been fully upgraded to pydantic 2 yet. 
" + "\tfrom pydantic.v1 import BaseModel\n" + ), +) diff --git a/venv/Lib/site-packages/langchain_core/pydantic_v1/main.py b/venv/Lib/site-packages/langchain_core/pydantic_v1/main.py new file mode 100644 index 00000000..88492823 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/pydantic_v1/main.py @@ -0,0 +1,26 @@ +"""Pydantic v1 compatibility shim.""" + +from langchain_core._api import warn_deprecated + +try: + from pydantic.v1.main import * # noqa: F403 +except ImportError: + from pydantic.main import * # type: ignore[assignment,no-redef] # noqa: F403 + +warn_deprecated( + "0.3.0", + removal="1.0.0", + alternative="pydantic.v1 or pydantic", + message=( + "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. " + "The langchain_core.pydantic_v1 module was a " + "compatibility shim for pydantic v1, and should no longer be used. " + "Please update the code to import from Pydantic directly.\n\n" + "For example, replace imports like: " + "`from langchain_core.pydantic_v1 import BaseModel`\n" + "with: `from pydantic import BaseModel`\n" + "or the v1 compatibility namespace if you are working in a code base " + "that has not been fully upgraded to pydantic 2 yet. " + "\tfrom pydantic.v1 import BaseModel\n" + ), +) diff --git a/venv/Lib/site-packages/langchain_core/rate_limiters.py b/venv/Lib/site-packages/langchain_core/rate_limiters.py new file mode 100644 index 00000000..952bdaaf --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/rate_limiters.py @@ -0,0 +1,259 @@ +"""Interface for a rate limiter and an in-memory rate limiter.""" + +from __future__ import annotations + +import abc +import asyncio +import threading +import time +from typing import Optional + + +class BaseRateLimiter(abc.ABC): + """Base class for rate limiters. + + Usage of the base limiter is through the acquire and aacquire methods depending + on whether running in a sync or async context. + + Implementations are free to add a timeout parameter to their initialize method + to allow users to specify a timeout for acquiring the necessary tokens when + using a blocking call. + + Current limitations: + + - Rate limiting information is not surfaced in tracing or callbacks. This means + that the total time it takes to invoke a chat model will encompass both + the time spent waiting for tokens and the time spent making the request. + + + .. versionadded:: 0.2.24 + """ + + @abc.abstractmethod + def acquire(self, *, blocking: bool = True) -> bool: + """Attempt to acquire the necessary tokens for the rate limiter. + + This method blocks until the required tokens are available if `blocking` + is set to True. + + If `blocking` is set to False, the method will immediately return the result + of the attempt to acquire the tokens. + + Args: + blocking: If True, the method will block until the tokens are available. + If False, the method will return immediately with the result of + the attempt. Defaults to True. + + Returns: + True if the tokens were successfully acquired, False otherwise. + """ + + @abc.abstractmethod + async def aacquire(self, *, blocking: bool = True) -> bool: + """Attempt to acquire the necessary tokens for the rate limiter. + + This method blocks until the required tokens are available if `blocking` + is set to True. + + If `blocking` is set to False, the method will immediately return the result + of the attempt to acquire the tokens. + + Args: + blocking: If True, the method will block until the tokens are available. 
+ If False, the method will return immediately with the result of + the attempt. Defaults to True. + + Returns: + True if the tokens were successfully acquired, False otherwise. + """ + + +class InMemoryRateLimiter(BaseRateLimiter): + """An in memory rate limiter based on a token bucket algorithm. + + This is an in memory rate limiter, so it cannot rate limit across + different processes. + + The rate limiter only allows time-based rate limiting and does not + take into account any information about the input or the output, so it + cannot be used to rate limit based on the size of the request. + + It is thread safe and can be used in either a sync or async context. + + The in memory rate limiter is based on a token bucket. The bucket is filled + with tokens at a given rate. Each request consumes a token. If there are + not enough tokens in the bucket, the request is blocked until there are + enough tokens. + + These *tokens* have NOTHING to do with LLM tokens. They are just + a way to keep track of how many requests can be made at a given time. + + Current limitations: + + - The rate limiter is not designed to work across different processes. It is + an in-memory rate limiter, but it is thread safe. + - The rate limiter only supports time-based rate limiting. It does not take + into account the size of the request or any other factors. + + Example: + + .. code-block:: python + + import time + + from langchain_core.rate_limiters import InMemoryRateLimiter + + rate_limiter = InMemoryRateLimiter( + requests_per_second=0.1, # <-- Can only make a request once every 10 seconds!! + check_every_n_seconds=0.1, # Wake up every 100 ms to check whether allowed to make a request, + max_bucket_size=10, # Controls the maximum burst size. + ) + + from langchain_anthropic import ChatAnthropic + model = ChatAnthropic( + model_name="claude-3-opus-20240229", + rate_limiter=rate_limiter + ) + + for _ in range(5): + tic = time.time() + model.invoke("hello") + toc = time.time() + print(toc - tic) + + + .. versionadded:: 0.2.24 + """ # noqa: E501 + + def __init__( + self, + *, + requests_per_second: float = 1, + check_every_n_seconds: float = 0.1, + max_bucket_size: float = 1, + ) -> None: + """A rate limiter based on a token bucket. + + These *tokens* have NOTHING to do with LLM tokens. They are just + a way to keep track of how many requests can be made at a given time. + + This rate limiter is designed to work in a threaded environment. + + It works by filling up a bucket with tokens at a given rate. Each + request consumes a given number of tokens. If there are not enough + tokens in the bucket, the request is blocked until there are enough + tokens. + + Args: + requests_per_second: The number of tokens to add per second to the bucket. + Must be at least 1. The tokens represent "credit" that can be used + to make requests. + check_every_n_seconds: check whether the tokens are available + every this many seconds. Can be a float to represent + fractions of a second. + max_bucket_size: The maximum number of tokens that can be in the bucket. + This is used to prevent bursts of requests. + """ + # Number of requests that we can make per second. + self.requests_per_second = requests_per_second + # Number of tokens in the bucket. + self.available_tokens = 0.0 + self.max_bucket_size = max_bucket_size + # A lock to ensure that tokens can only be consumed by one thread + # at a given time. + self._consume_lock = threading.Lock() + # The last time we tried to consume tokens. 
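+        # (None until the first _consume call, which uses it to compute the
+        # elapsed time and hence how many tokens to refill.)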
+ self.last: Optional[float] = None + self.check_every_n_seconds = check_every_n_seconds + + def _consume(self) -> bool: + """Try to consume a token. + + Returns: + True means that the tokens were consumed, and the caller can proceed to + make the request. A False means that the tokens were not consumed, and + the caller should try again later. + """ + with self._consume_lock: + now = time.monotonic() + + # initialize on first call to avoid a burst + if self.last is None: + self.last = now + + elapsed = now - self.last + + if elapsed * self.requests_per_second >= 1: + self.available_tokens += elapsed * self.requests_per_second + self.last = now + + # Make sure that we don't exceed the bucket size. + # This is used to prevent bursts of requests. + self.available_tokens = min(self.available_tokens, self.max_bucket_size) + + # As long as we have at least one token, we can proceed. + if self.available_tokens >= 1: + self.available_tokens -= 1 + return True + + return False + + def acquire(self, *, blocking: bool = True) -> bool: + """Attempt to acquire a token from the rate limiter. + + This method blocks until the required tokens are available if `blocking` + is set to True. + + If `blocking` is set to False, the method will immediately return the result + of the attempt to acquire the tokens. + + Args: + blocking: If True, the method will block until the tokens are available. + If False, the method will return immediately with the result of + the attempt. Defaults to True. + + Returns: + True if the tokens were successfully acquired, False otherwise. + """ + if not blocking: + return self._consume() + + while not self._consume(): + time.sleep(self.check_every_n_seconds) + return True + + async def aacquire(self, *, blocking: bool = True) -> bool: + """Attempt to acquire a token from the rate limiter. Async version. + + This method blocks until the required tokens are available if `blocking` + is set to True. + + If `blocking` is set to False, the method will immediately return the result + of the attempt to acquire the tokens. + + Args: + blocking: If True, the method will block until the tokens are available. + If False, the method will return immediately with the result of + the attempt. Defaults to True. + + Returns: + True if the tokens were successfully acquired, False otherwise. + """ + if not blocking: + return self._consume() + + while not self._consume(): # noqa: ASYNC110 + # This code ignores the ASYNC110 warning which is a false positive in this + # case. + # There is no external actor that can mark that the Event is done + # since the tokens are managed by the rate limiter itself. + # It needs to wake up to re-fill the tokens. + # https://docs.astral.sh/ruff/rules/async-busy-wait/ + await asyncio.sleep(self.check_every_n_seconds) + return True + + +__all__ = [ + "BaseRateLimiter", + "InMemoryRateLimiter", +] diff --git a/venv/Lib/site-packages/langchain_core/retrievers.py b/venv/Lib/site-packages/langchain_core/retrievers.py new file mode 100644 index 00000000..3456cc3e --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/retrievers.py @@ -0,0 +1,453 @@ +"""**Retriever** class returns Documents given a text **query**. + +It is more general than a vector store. A retriever does not need to be able to +store documents, only to return (or retrieve) it. Vector stores can be used as +the backbone of a retriever, but there are other types of retrievers as well. + +**Class hierarchy:** + +.. 
code-block:: + + BaseRetriever --> Retriever # Examples: ArxivRetriever, MergerRetriever + +**Main helpers:** + +.. code-block:: + + RetrieverInput, RetrieverOutput, RetrieverLike, RetrieverOutputLike, + Document, Serializable, Callbacks, + CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun +""" + +from __future__ import annotations + +import warnings +from abc import ABC, abstractmethod +from inspect import signature +from typing import TYPE_CHECKING, Any, Optional + +from pydantic import ConfigDict +from typing_extensions import Self, TypedDict, override + +from langchain_core._api import deprecated +from langchain_core.callbacks import Callbacks +from langchain_core.documents import Document +from langchain_core.runnables import ( + Runnable, + RunnableConfig, + RunnableSerializable, + ensure_config, +) +from langchain_core.runnables.config import run_in_executor + +if TYPE_CHECKING: + from langchain_core.callbacks.manager import ( + AsyncCallbackManagerForRetrieverRun, + CallbackManagerForRetrieverRun, + ) + +RetrieverInput = str +RetrieverOutput = list[Document] +RetrieverLike = Runnable[RetrieverInput, RetrieverOutput] +RetrieverOutputLike = Runnable[Any, RetrieverOutput] + + +class LangSmithRetrieverParams(TypedDict, total=False): + """LangSmith parameters for tracing.""" + + ls_retriever_name: str + """Retriever name.""" + ls_vector_store_provider: Optional[str] + """Vector store provider.""" + ls_embedding_provider: Optional[str] + """Embedding provider.""" + ls_embedding_model: Optional[str] + """Embedding model.""" + + +class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC): + """Abstract base class for a Document retrieval system. + + A retrieval system is defined as something that can take string queries and return + the most 'relevant' Documents from some source. + + Usage: + + A retriever follows the standard Runnable interface, and should be used + via the standard Runnable methods of `invoke`, `ainvoke`, `batch`, `abatch`. + + Implementation: + + When implementing a custom retriever, the class should implement + the `_get_relevant_documents` method to define the logic for retrieving documents. + + Optionally, an async native implementations can be provided by overriding the + `_aget_relevant_documents` method. + + Example: A retriever that returns the first 5 documents from a list of documents + + .. code-block:: python + + from langchain_core.documents import Document + from langchain_core.retrievers import BaseRetriever + + class SimpleRetriever(BaseRetriever): + docs: list[Document] + k: int = 5 + + def _get_relevant_documents(self, query: str) -> list[Document]: + \"\"\"Return the first k documents from the list of documents\"\"\" + return self.docs[:self.k] + + async def _aget_relevant_documents(self, query: str) -> list[Document]: + \"\"\"(Optional) async native implementation.\"\"\" + return self.docs[:self.k] + + Example: A simple retriever based on a scikit-learn vectorizer + + .. 
code-block:: python + + from sklearn.metrics.pairwise import cosine_similarity + + class TFIDFRetriever(BaseRetriever, BaseModel): + vectorizer: Any + docs: list[Document] + tfidf_array: Any + k: int = 4 + + class Config: + arbitrary_types_allowed = True + + def _get_relevant_documents(self, query: str) -> list[Document]: + # Ip -- (n_docs,x), Op -- (n_docs,n_Feats) + query_vec = self.vectorizer.transform([query]) + # Op -- (n_docs,1) -- Cosine Sim with each doc + results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,)) + return [self.docs[i] for i in results.argsort()[-self.k :][::-1]] + """ # noqa: E501 + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + _new_arg_supported: bool = False + _expects_other_args: bool = False + tags: Optional[list[str]] = None + """Optional list of tags associated with the retriever. Defaults to None. + These tags will be associated with each call to this retriever, + and passed as arguments to the handlers defined in `callbacks`. + You can use these to eg identify a specific instance of a retriever with its + use case. + """ + metadata: Optional[dict[str, Any]] = None + """Optional metadata associated with the retriever. Defaults to None. + This metadata will be associated with each call to this retriever, + and passed as arguments to the handlers defined in `callbacks`. + You can use these to eg identify a specific instance of a retriever with its + use case. + """ + + @override + def __init_subclass__(cls, **kwargs: Any) -> None: + super().__init_subclass__(**kwargs) + # Version upgrade for old retrievers that implemented the public + # methods directly. + if cls.get_relevant_documents != BaseRetriever.get_relevant_documents: + warnings.warn( + "Retrievers must implement abstract `_get_relevant_documents` method" + " instead of `get_relevant_documents`", + DeprecationWarning, + stacklevel=4, + ) + swap = cls.get_relevant_documents + cls.get_relevant_documents = ( # type: ignore[method-assign] + BaseRetriever.get_relevant_documents + ) + cls._get_relevant_documents = swap # type: ignore[method-assign] + if ( + hasattr(cls, "aget_relevant_documents") + and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents + ): + warnings.warn( + "Retrievers must implement abstract `_aget_relevant_documents` method" + " instead of `aget_relevant_documents`", + DeprecationWarning, + stacklevel=4, + ) + aswap = cls.aget_relevant_documents + cls.aget_relevant_documents = ( # type: ignore[method-assign] + BaseRetriever.aget_relevant_documents + ) + cls._aget_relevant_documents = aswap # type: ignore[method-assign] + parameters = signature(cls._get_relevant_documents).parameters + cls._new_arg_supported = parameters.get("run_manager") is not None + if ( + not cls._new_arg_supported + and cls._aget_relevant_documents == BaseRetriever._aget_relevant_documents + ): + # we need to tolerate no run_manager in _aget_relevant_documents signature + async def _aget_relevant_documents( + self: Self, query: str + ) -> list[Document]: + return await run_in_executor(None, self._get_relevant_documents, query) # type: ignore[call-arg] + + cls._aget_relevant_documents = _aget_relevant_documents # type: ignore[assignment] + + # If a V1 retriever broke the interface and expects additional arguments + cls._expects_other_args = ( + len(set(parameters.keys()) - {"self", "query", "run_manager"}) > 0 + ) + + def _get_ls_params(self, **_kwargs: Any) -> LangSmithRetrieverParams: + """Get standard params for tracing.""" + default_retriever_name = 
self.get_name() + if default_retriever_name.startswith("Retriever"): + default_retriever_name = default_retriever_name[9:] + elif default_retriever_name.endswith("Retriever"): + default_retriever_name = default_retriever_name[:-9] + default_retriever_name = default_retriever_name.lower() + + return LangSmithRetrieverParams(ls_retriever_name=default_retriever_name) + + def invoke( + self, input: str, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> list[Document]: + """Invoke the retriever to get relevant documents. + + Main entry point for synchronous retriever invocations. + + Args: + input: The query string. + config: Configuration for the retriever. Defaults to None. + kwargs: Additional arguments to pass to the retriever. + + Returns: + List of relevant documents. + + Examples: + + .. code-block:: python + + retriever.invoke("query") + """ + from langchain_core.callbacks.manager import CallbackManager + + config = ensure_config(config) + inheritable_metadata = { + **(config.get("metadata") or {}), + **self._get_ls_params(**kwargs), + } + callback_manager = CallbackManager.configure( + config.get("callbacks"), + None, + verbose=kwargs.get("verbose", False), + inheritable_tags=config.get("tags"), + local_tags=self.tags, + inheritable_metadata=inheritable_metadata, + local_metadata=self.metadata, + ) + run_manager = callback_manager.on_retriever_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=kwargs.pop("run_id", None), + ) + try: + _kwargs = kwargs if self._expects_other_args else {} + if self._new_arg_supported: + result = self._get_relevant_documents( + input, run_manager=run_manager, **_kwargs + ) + else: + result = self._get_relevant_documents(input, **_kwargs) + except Exception as e: + run_manager.on_retriever_error(e) + raise + else: + run_manager.on_retriever_end( + result, + ) + return result + + async def ainvoke( + self, + input: str, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> list[Document]: + """Asynchronously invoke the retriever to get relevant documents. + + Main entry point for asynchronous retriever invocations. + + Args: + input: The query string. + config: Configuration for the retriever. Defaults to None. + kwargs: Additional arguments to pass to the retriever. + + Returns: + List of relevant documents. + + Examples: + + .. 
code-block:: python + + await retriever.ainvoke("query") + """ + from langchain_core.callbacks.manager import AsyncCallbackManager + + config = ensure_config(config) + inheritable_metadata = { + **(config.get("metadata") or {}), + **self._get_ls_params(**kwargs), + } + callback_manager = AsyncCallbackManager.configure( + config.get("callbacks"), + None, + verbose=kwargs.get("verbose", False), + inheritable_tags=config.get("tags"), + local_tags=self.tags, + inheritable_metadata=inheritable_metadata, + local_metadata=self.metadata, + ) + run_manager = await callback_manager.on_retriever_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=kwargs.pop("run_id", None), + ) + try: + _kwargs = kwargs if self._expects_other_args else {} + if self._new_arg_supported: + result = await self._aget_relevant_documents( + input, run_manager=run_manager, **_kwargs + ) + else: + result = await self._aget_relevant_documents(input, **_kwargs) + except Exception as e: + await run_manager.on_retriever_error(e) + raise + else: + await run_manager.on_retriever_end( + result, + ) + return result + + @abstractmethod + def _get_relevant_documents( + self, query: str, *, run_manager: CallbackManagerForRetrieverRun + ) -> list[Document]: + """Get documents relevant to a query. + + Args: + query: String to find relevant documents for. + run_manager: The callback handler to use. + + Returns: + List of relevant documents. + """ + + async def _aget_relevant_documents( + self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun + ) -> list[Document]: + """Asynchronously get documents relevant to a query. + + Args: + query: String to find relevant documents for. + run_manager: The callback handler to use. + + Returns: + List of relevant documents. + """ + return await run_in_executor( + None, + self._get_relevant_documents, + query, + run_manager=run_manager.get_sync(), + ) + + @deprecated(since="0.1.46", alternative="invoke", removal="1.0") + def get_relevant_documents( + self, + query: str, + *, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + run_name: Optional[str] = None, + **kwargs: Any, + ) -> list[Document]: + """Retrieve documents relevant to a query. + + Users should favor using `.invoke` or `.batch` rather than + `get_relevant_documents` directly. + + Args: + query: String to find relevant documents for. + callbacks: Callback manager or list of callbacks. Defaults to None. + tags: Optional list of tags associated with the retriever. + These tags will be associated with each call to this retriever, + and passed as arguments to the handlers defined in `callbacks`. + Defaults to None. + metadata: Optional metadata associated with the retriever. + This metadata will be associated with each call to this retriever, + and passed as arguments to the handlers defined in `callbacks`. + Defaults to None. + run_name: Optional name for the run. Defaults to None. + kwargs: Additional arguments to pass to the retriever. + + Returns: + List of relevant documents.
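+ + Example (migration sketch; assumes an existing `retriever` instance, and + relies on the fact that the keyword arguments are folded into the run's + config just as the method body below does): + + .. code-block:: python + + docs = retriever.get_relevant_documents("query", tags=["qa"]) # deprecated + docs = retriever.invoke("query", config={"tags": ["qa"]}) # preferred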
+ """ + config: RunnableConfig = {} + if callbacks: + config["callbacks"] = callbacks + if tags: + config["tags"] = tags + if metadata: + config["metadata"] = metadata + if run_name: + config["run_name"] = run_name + return self.invoke(query, config, **kwargs) + + @deprecated(since="0.1.46", alternative="ainvoke", removal="1.0") + async def aget_relevant_documents( + self, + query: str, + *, + callbacks: Callbacks = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + run_name: Optional[str] = None, + **kwargs: Any, + ) -> list[Document]: + """Asynchronously get documents relevant to a query. + + Users should favor using `.ainvoke` or `.abatch` rather than + `aget_relevant_documents directly`. + + Args: + query: string to find relevant documents for. + callbacks: Callback manager or list of callbacks. + tags: Optional list of tags associated with the retriever. + These tags will be associated with each call to this retriever, + and passed as arguments to the handlers defined in `callbacks`. + Defaults to None. + metadata: Optional metadata associated with the retriever. + This metadata will be associated with each call to this retriever, + and passed as arguments to the handlers defined in `callbacks`. + Defaults to None. + run_name: Optional name for the run. Defaults to None. + kwargs: Additional arguments to pass to the retriever. + + Returns: + List of relevant documents. + """ + config: RunnableConfig = {} + if callbacks: + config["callbacks"] = callbacks + if tags: + config["tags"] = tags + if metadata: + config["metadata"] = metadata + if run_name: + config["run_name"] = run_name + return await self.ainvoke(query, config, **kwargs) diff --git a/venv/Lib/site-packages/langchain_core/runnables/__init__.py b/venv/Lib/site-packages/langchain_core/runnables/__init__.py new file mode 100644 index 00000000..12825773 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/__init__.py @@ -0,0 +1,135 @@ +"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**. + +The LangChain Expression Language (LCEL) offers a declarative method to build +production-grade programs that harness the power of LLMs. + +Programs created using LCEL and LangChain Runnables inherently support +synchronous, asynchronous, batch, and streaming operations. + +Support for **async** allows servers hosting LCEL based programs to scale better +for higher concurrent loads. + +**Batch** operations allow for processing multiple inputs in parallel. + +**Streaming** of intermediate outputs, as they're being generated, allows for +creating more responsive UX. + +This module contains schema and implementation of LangChain Runnables primitives. 
+""" + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.runnables.base import ( + Runnable, + RunnableBinding, + RunnableGenerator, + RunnableLambda, + RunnableMap, + RunnableParallel, + RunnableSequence, + RunnableSerializable, + chain, + ) + from langchain_core.runnables.branch import RunnableBranch + from langchain_core.runnables.config import ( + RunnableConfig, + ensure_config, + get_config_list, + patch_config, + run_in_executor, + ) + from langchain_core.runnables.fallbacks import RunnableWithFallbacks + from langchain_core.runnables.history import RunnableWithMessageHistory + from langchain_core.runnables.passthrough import ( + RunnableAssign, + RunnablePassthrough, + RunnablePick, + ) + from langchain_core.runnables.router import RouterInput, RouterRunnable + from langchain_core.runnables.utils import ( + AddableDict, + ConfigurableField, + ConfigurableFieldMultiOption, + ConfigurableFieldSingleOption, + ConfigurableFieldSpec, + aadd, + add, + ) + +__all__ = ( + "chain", + "AddableDict", + "ConfigurableField", + "ConfigurableFieldSingleOption", + "ConfigurableFieldMultiOption", + "ConfigurableFieldSpec", + "ensure_config", + "run_in_executor", + "patch_config", + "RouterInput", + "RouterRunnable", + "Runnable", + "RunnableSerializable", + "RunnableBinding", + "RunnableBranch", + "RunnableConfig", + "RunnableGenerator", + "RunnableLambda", + "RunnableMap", + "RunnableParallel", + "RunnablePassthrough", + "RunnableAssign", + "RunnablePick", + "RunnableSequence", + "RunnableWithFallbacks", + "RunnableWithMessageHistory", + "get_config_list", + "aadd", + "add", +) + +_dynamic_imports = { + "chain": "base", + "Runnable": "base", + "RunnableBinding": "base", + "RunnableGenerator": "base", + "RunnableLambda": "base", + "RunnableMap": "base", + "RunnableParallel": "base", + "RunnableSequence": "base", + "RunnableSerializable": "base", + "RunnableBranch": "branch", + "RunnableConfig": "config", + "ensure_config": "config", + "get_config_list": "config", + "patch_config": "config", + "run_in_executor": "config", + "RunnableWithFallbacks": "fallbacks", + "RunnableWithMessageHistory": "history", + "RunnableAssign": "passthrough", + "RunnablePassthrough": "passthrough", + "RunnablePick": "passthrough", + "RouterInput": "router", + "RouterRunnable": "router", + "AddableDict": "utils", + "ConfigurableField": "utils", + "ConfigurableFieldMultiOption": "utils", + "ConfigurableFieldSingleOption": "utils", + "ConfigurableFieldSpec": "utils", + "aadd": "utils", + "add": "utils", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..aaf91c42 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..a2216c14 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/base.cpython-312.pyc 
differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/branch.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/branch.cpython-312.pyc new file mode 100644 index 00000000..e2e2d2a3 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/branch.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/config.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/config.cpython-312.pyc new file mode 100644 index 00000000..363fa3c8 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/config.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/configurable.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/configurable.cpython-312.pyc new file mode 100644 index 00000000..7043b040 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/configurable.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/fallbacks.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/fallbacks.cpython-312.pyc new file mode 100644 index 00000000..75a67181 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/fallbacks.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph.cpython-312.pyc new file mode 100644 index 00000000..72a964d8 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph_ascii.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph_ascii.cpython-312.pyc new file mode 100644 index 00000000..cd8e1380 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph_ascii.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph_mermaid.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph_mermaid.cpython-312.pyc new file mode 100644 index 00000000..f49ff475 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph_mermaid.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph_png.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph_png.cpython-312.pyc new file mode 100644 index 00000000..9da50426 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/graph_png.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/history.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/history.cpython-312.pyc new file mode 100644 index 00000000..208e3595 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/history.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/passthrough.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/passthrough.cpython-312.pyc new file mode 100644 index 00000000..e1f9aa0a Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/passthrough.cpython-312.pyc differ 
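The `runnables/__init__.py` module above defers its heavy submodule imports until a public name is first accessed, using a module-level `__getattr__` (PEP 562) backed by the `_dynamic_imports` table. A minimal sketch of that pattern follows; the package layout and map entries are illustrative, and it uses plain `importlib` rather than the `import_attr` helper the real module calls:

```python
# Sketch of a PEP 562 lazy-import __init__.py; names here are illustrative.
from importlib import import_module
from typing import Any

# Maps each public attribute to the submodule that defines it.
_dynamic_imports = {"Runnable": "base", "RunnableConfig": "config"}


def __getattr__(attr_name: str) -> Any:
    module_name = _dynamic_imports.get(attr_name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {attr_name!r}")
    module = import_module(f".{module_name}", __package__)
    result = getattr(module, attr_name)
    # Cache the resolved attribute so __getattr__ runs at most once per name.
    globals()[attr_name] = result
    return result
```

Deferring the imports keeps `import langchain_core.runnables` cheap, while the `__dir__` override shown above keeps `dir()` and autocompletion consistent with `__all__`.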
diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/retry.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/retry.cpython-312.pyc new file mode 100644 index 00000000..740c90fb Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/retry.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/router.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/router.cpython-312.pyc new file mode 100644 index 00000000..989a6c6d Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/router.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/schema.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/schema.cpython-312.pyc new file mode 100644 index 00000000..1e5e0282 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/schema.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..ab14bcaa Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/runnables/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/runnables/base.py b/venv/Lib/site-packages/langchain_core/runnables/base.py new file mode 100644 index 00000000..e51f1d70 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/base.py @@ -0,0 +1,5978 @@ +"""Base classes and utilities for Runnables.""" + +from __future__ import annotations + +import asyncio +import collections +import contextlib +import functools +import inspect +import threading +from abc import ABC, abstractmethod +from collections.abc import ( + AsyncGenerator, + AsyncIterator, + Awaitable, + Coroutine, + Iterator, + Mapping, + Sequence, +) +from concurrent.futures import FIRST_COMPLETED, wait +from functools import wraps +from itertools import groupby, tee +from operator import itemgetter +from types import GenericAlias +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + Optional, + Protocol, + TypeVar, + Union, + cast, + get_type_hints, + overload, +) + +from pydantic import BaseModel, ConfigDict, Field, RootModel +from typing_extensions import Literal, get_args, override + +from langchain_core._api import beta_decorator +from langchain_core.load.serializable import ( + Serializable, + SerializedConstructor, + SerializedNotImplemented, +) +from langchain_core.runnables.config import ( + RunnableConfig, + acall_func_with_variable_args, + call_func_with_variable_args, + ensure_config, + get_async_callback_manager_for_config, + get_callback_manager_for_config, + get_config_list, + get_executor_for_config, + merge_configs, + patch_config, + run_in_executor, + set_config_context, +) +from langchain_core.runnables.graph import Graph +from langchain_core.runnables.utils import ( + AddableDict, + AnyConfigurableField, + ConfigurableField, + ConfigurableFieldSpec, + Input, + Output, + accepts_config, + accepts_run_manager, + coro_with_context, + gated_coro, + gather_with_concurrency, + get_function_first_arg_dict_keys, + get_function_nonlocals, + get_lambda_source, + get_unique_config_specs, + indent_lines_after_first, + is_async_callable, + is_async_generator, +) +from langchain_core.utils.aiter import aclosing, atee, py_anext +from 
langchain_core.utils.iter import safetee +from langchain_core.utils.pydantic import create_model_v2 + +if TYPE_CHECKING: + from langchain_core.callbacks.manager import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, + ) + from langchain_core.prompts.base import BasePromptTemplate + from langchain_core.runnables.fallbacks import ( + RunnableWithFallbacks as RunnableWithFallbacksT, + ) + from langchain_core.runnables.retry import ExponentialJitterParams + from langchain_core.runnables.schema import StreamEvent + from langchain_core.tools import BaseTool + from langchain_core.tracers.log_stream import ( + RunLog, + RunLogPatch, + ) + from langchain_core.tracers.root_listeners import AsyncListener + from langchain_core.tracers.schemas import Run + + +Other = TypeVar("Other") + + +class Runnable(Generic[Input, Output], ABC): + """A unit of work that can be invoked, batched, streamed, transformed and composed. + + Key Methods + =========== + + - **invoke/ainvoke**: Transforms a single input into an output. + - **batch/abatch**: Efficiently transforms multiple inputs into outputs. + - **stream/astream**: Streams output from a single input as it's produced. + - **astream_log**: Streams output and selected intermediate results from an input. + + Built-in optimizations: + + - **Batch**: By default, batch runs invoke() in parallel using a thread pool executor. + Override to optimize batching. + + - **Async**: Methods with "a" suffix are asynchronous. By default, they execute + the sync counterpart using asyncio's thread pool. + Override for native async. + + All methods accept an optional config argument, which can be used to configure + execution, add tags and metadata for tracing and debugging etc. + + Runnables expose schematic information about their input, output and config via + the input_schema property, the output_schema property and config_schema method. + + LCEL and Composition + ==================== + + The LangChain Expression Language (LCEL) is a declarative way to compose Runnables + into chains. Any chain constructed this way will automatically have sync, async, + batch, and streaming support. + + The main composition primitives are RunnableSequence and RunnableParallel. + + **RunnableSequence** invokes a series of runnables sequentially, with + one Runnable's output serving as the next's input. Construct using + the `|` operator or by passing a list of runnables to RunnableSequence. + + **RunnableParallel** invokes runnables concurrently, providing the same input + to each. Construct it using a dict literal within a sequence or by passing a + dict to RunnableParallel. + + + For example, + + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + + # A RunnableSequence constructed using the `|` operator + sequence = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2) + sequence.invoke(1) # 4 + sequence.batch([1, 2, 3]) # [4, 6, 8] + + + # A sequence that contains a RunnableParallel constructed using a dict literal + sequence = RunnableLambda(lambda x: x + 1) | { + 'mul_2': RunnableLambda(lambda x: x * 2), + 'mul_5': RunnableLambda(lambda x: x * 5) + } + sequence.invoke(1) # {'mul_2': 4, 'mul_5': 10} + + Standard Methods + ================ + + All Runnables expose additional methods that can be used to modify their behavior + (e.g., add a retry policy, add lifecycle listeners, make them configurable, etc.). + + These methods will work on any Runnable, including Runnable chains constructed + by composing other Runnables. 
See the individual methods for details. + + For example, + + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + + import random + + def add_one(x: int) -> int: + return x + 1 + + + def buggy_double(y: int) -> int: + \"\"\"Buggy code that will fail 70% of the time\"\"\" + if random.random() > 0.3: + print('This code failed, and will probably be retried!') # noqa: T201 + raise ValueError('Triggered buggy code') + return y * 2 + + sequence = ( + RunnableLambda(add_one) | + RunnableLambda(buggy_double).with_retry( # Retry on failure + stop_after_attempt=10, + wait_exponential_jitter=False + ) + ) + + print(sequence.input_schema.model_json_schema()) # Show inferred input schema + print(sequence.output_schema.model_json_schema()) # Show inferred output schema + print(sequence.invoke(2)) # Invoke the sequence (note the retry above!) + + Debugging and tracing + ===================== + + As the chains get longer, it can be useful to be able to see intermediate results + to debug and trace the chain. + + You can set the global debug flag to True to enable debug output for all chains: + + .. code-block:: python + + from langchain_core.globals import set_debug + set_debug(True) + + Alternatively, you can pass existing or custom callbacks to any given chain: + + .. code-block:: python + + from langchain_core.tracers import ConsoleCallbackHandler + + chain.invoke( + ..., + config={'callbacks': [ConsoleCallbackHandler()]} + ) + + For a UI (and much more) check out LangSmith: https://docs.smith.langchain.com/ + """ # noqa: E501 + + name: Optional[str] + """The name of the Runnable. Used for debugging and tracing.""" + + def get_name( + self, suffix: Optional[str] = None, *, name: Optional[str] = None + ) -> str: + """Get the name of the Runnable.""" + if name: + name_ = name + elif hasattr(self, "name") and self.name: + name_ = self.name + else: + # Here we handle a case where the runnable subclass is also a pydantic + # model. + cls = self.__class__ + # If it's a pydantic subclass, we have to check whether it's a + # generic, and if so, recover the original name. + if ( + hasattr( + cls, + "__pydantic_generic_metadata__", + ) + and "origin" in cls.__pydantic_generic_metadata__ + and cls.__pydantic_generic_metadata__["origin"] is not None + ): + name_ = cls.__pydantic_generic_metadata__["origin"].__name__ + else: + name_ = cls.__name__ + + if suffix: + if name_[0].isupper(): + return name_ + suffix.title() + return name_ + "_" + suffix.lower() + return name_ + + @property + def InputType(self) -> type[Input]: # noqa: N802 + """The type of input this Runnable accepts specified as a type annotation.""" + # First loop through all parent classes and if any of them is + # a pydantic model, we will pick up the generic parameterization + # from that model via the __pydantic_generic_metadata__ attribute. + for base in self.__class__.mro(): + if hasattr(base, "__pydantic_generic_metadata__"): + metadata = base.__pydantic_generic_metadata__ + if "args" in metadata and len(metadata["args"]) == 2: + return metadata["args"][0] + + # If we didn't find a pydantic model in the parent classes, + # then loop through __orig_bases__. This corresponds to + # Runnables that are not pydantic models. + for cls in self.__class__.__orig_bases__: # type: ignore[attr-defined] + type_args = get_args(cls) + if type_args and len(type_args) == 2: + return type_args[0] + + msg = ( + f"Runnable {self.get_name()} doesn't have an inferable InputType.
" + "Override the InputType property to specify the input type." + ) + raise TypeError(msg) + + @property + def OutputType(self) -> type[Output]: # noqa: N802 + """The type of output this Runnable produces specified as a type annotation.""" + # First loop through bases -- this will help generic + # any pydantic models. + for base in self.__class__.mro(): + if hasattr(base, "__pydantic_generic_metadata__"): + metadata = base.__pydantic_generic_metadata__ + if "args" in metadata and len(metadata["args"]) == 2: + return metadata["args"][1] + + for cls in self.__class__.__orig_bases__: # type: ignore[attr-defined] + type_args = get_args(cls) + if type_args and len(type_args) == 2: + return type_args[1] + + msg = ( + f"Runnable {self.get_name()} doesn't have an inferable OutputType. " + "Override the OutputType property to specify the output type." + ) + raise TypeError(msg) + + @property + def input_schema(self) -> type[BaseModel]: + """The type of input this Runnable accepts specified as a pydantic model.""" + return self.get_input_schema() + + def get_input_schema( + self, + config: Optional[RunnableConfig] = None, # noqa: ARG002 + ) -> type[BaseModel]: + """Get a pydantic model that can be used to validate input to the Runnable. + + Runnables that leverage the configurable_fields and configurable_alternatives + methods will have a dynamic input schema that depends on which + configuration the Runnable is invoked with. + + This method allows to get an input schema for a specific configuration. + + Args: + config: A config to use when generating the schema. + + Returns: + A pydantic model that can be used to validate input. + """ + root_type = self.InputType + + if ( + inspect.isclass(root_type) + and not isinstance(root_type, GenericAlias) + and issubclass(root_type, BaseModel) + ): + return root_type + + return create_model_v2( + self.get_name("Input"), + root=root_type, + # create model needs access to appropriate type annotations to be + # able to construct the pydantic model. + # When we create the model, we pass information about the namespace + # where the model is being created, so the type annotations can + # be resolved correctly as well. + # self.__class__.__module__ handles the case when the Runnable is + # being sub-classed in a different module. + module_name=self.__class__.__module__, + ) + + def get_input_jsonschema( + self, config: Optional[RunnableConfig] = None + ) -> dict[str, Any]: + """Get a JSON schema that represents the input to the Runnable. + + Args: + config: A config to use when generating the schema. + + Returns: + A JSON schema that represents the input to the Runnable. + + Example: + + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + + def add_one(x: int) -> int: + return x + 1 + + runnable = RunnableLambda(add_one) + + print(runnable.get_input_jsonschema()) + + .. versionadded:: 0.3.0 + """ + return self.get_input_schema(config).model_json_schema() + + @property + def output_schema(self) -> type[BaseModel]: + """The type of output this Runnable produces specified as a pydantic model.""" + return self.get_output_schema() + + def get_output_schema( + self, + config: Optional[RunnableConfig] = None, # noqa: ARG002 + ) -> type[BaseModel]: + """Get a pydantic model that can be used to validate output to the Runnable. + + Runnables that leverage the configurable_fields and configurable_alternatives + methods will have a dynamic output schema that depends on which + configuration the Runnable is invoked with. 
+ + This method allows getting an output schema for a specific configuration. + + Args: + config: A config to use when generating the schema. + + Returns: + A pydantic model that can be used to validate output. + """ + root_type = self.OutputType + + if ( + inspect.isclass(root_type) + and not isinstance(root_type, GenericAlias) + and issubclass(root_type, BaseModel) + ): + return root_type + + return create_model_v2( + self.get_name("Output"), + root=root_type, + # create model needs access to appropriate type annotations to be + # able to construct the pydantic model. + # When we create the model, we pass information about the namespace + # where the model is being created, so the type annotations can + # be resolved correctly as well. + # self.__class__.__module__ handles the case when the Runnable is + # being sub-classed in a different module. + module_name=self.__class__.__module__, + ) + + def get_output_jsonschema( + self, config: Optional[RunnableConfig] = None + ) -> dict[str, Any]: + """Get a JSON schema that represents the output of the Runnable. + + Args: + config: A config to use when generating the schema. + + Returns: + A JSON schema that represents the output of the Runnable. + + Example: + + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + + def add_one(x: int) -> int: + return x + 1 + + runnable = RunnableLambda(add_one) + + print(runnable.get_output_jsonschema()) + + .. versionadded:: 0.3.0 + """ + return self.get_output_schema(config).model_json_schema() + + @property + def config_specs(self) -> list[ConfigurableFieldSpec]: + """List configurable fields for this Runnable.""" + return [] + + def config_schema( + self, *, include: Optional[Sequence[str]] = None + ) -> type[BaseModel]: + """The type of config this Runnable accepts specified as a pydantic model. + + To mark a field as configurable, see the `configurable_fields` + and `configurable_alternatives` methods. + + Args: + include: A list of fields to include in the config schema. + + Returns: + A pydantic model that can be used to validate config. + """ + include = include or [] + config_specs = self.config_specs + configurable = ( + create_model_v2( + "Configurable", + field_definitions={ + spec.id: ( + spec.annotation, + Field( + spec.default, title=spec.name, description=spec.description + ), + ) + for spec in config_specs + }, + ) + if config_specs + else None + ) + + # May need to create a typed dict instead to implement NotRequired! + all_fields = { + **({"configurable": (configurable, None)} if configurable else {}), + **{ + field_name: (field_type, None) + for field_name, field_type in get_type_hints(RunnableConfig).items() + if field_name in [i for i in include if i != "configurable"] + }, + } + return create_model_v2(self.get_name("Config"), field_definitions=all_fields) + + def get_config_jsonschema( + self, *, include: Optional[Sequence[str]] = None + ) -> dict[str, Any]: + """Get a JSON schema that represents the config of the Runnable. + + Args: + include: A list of fields to include in the config schema. + + Returns: + A JSON schema that represents the config of the Runnable. + + ..
versionadded:: 0.3.0 + """ + return self.config_schema(include=include).model_json_schema() + + def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph: + """Return a graph representation of this Runnable.""" + graph = Graph() + try: + input_node = graph.add_node(self.get_input_schema(config)) + except TypeError: + input_node = graph.add_node(create_model_v2(self.get_name("Input"))) + runnable_node = graph.add_node( + self, metadata=config.get("metadata") if config else None + ) + try: + output_node = graph.add_node(self.get_output_schema(config)) + except TypeError: + output_node = graph.add_node(create_model_v2(self.get_name("Output"))) + graph.add_edge(input_node, runnable_node) + graph.add_edge(runnable_node, output_node) + return graph + + def get_prompts( + self, config: Optional[RunnableConfig] = None + ) -> list[BasePromptTemplate]: + """Return a list of prompts used by this Runnable.""" + from langchain_core.prompts.base import BasePromptTemplate + + return [ + node.data + for node in self.get_graph(config=config).nodes.values() + if isinstance(node.data, BasePromptTemplate) + ] + + def __or__( + self, + other: Union[ + Runnable[Any, Other], + Callable[[Any], Other], + Callable[[Iterator[Any]], Iterator[Other]], + Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]], + ], + ) -> RunnableSerializable[Input, Other]: + """Compose this Runnable with another object to create a RunnableSequence.""" + return RunnableSequence(self, coerce_to_runnable(other)) + + def __ror__( + self, + other: Union[ + Runnable[Other, Any], + Callable[[Other], Any], + Callable[[Iterator[Other]], Iterator[Any]], + Mapping[str, Union[Runnable[Other, Any], Callable[[Other], Any], Any]], + ], + ) -> RunnableSerializable[Other, Output]: + """Compose this Runnable with another object to create a RunnableSequence.""" + return RunnableSequence(coerce_to_runnable(other), self) + + def pipe( + self, + *others: Union[Runnable[Any, Other], Callable[[Any], Other]], + name: Optional[str] = None, + ) -> RunnableSerializable[Input, Other]: + """Compose this Runnable with Runnable-like objects to make a RunnableSequence. + + Equivalent to `RunnableSequence(self, *others)` or `self | others[0] | ...` + + Example: + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + + def add_one(x: int) -> int: + return x + 1 + + def mul_two(x: int) -> int: + return x * 2 + + runnable_1 = RunnableLambda(add_one) + runnable_2 = RunnableLambda(mul_two) + sequence = runnable_1.pipe(runnable_2) + # Or equivalently: + # sequence = runnable_1 | runnable_2 + # sequence = RunnableSequence(first=runnable_1, last=runnable_2) + sequence.invoke(1) + await sequence.ainvoke(1) + # -> 4 + + sequence.batch([1, 2, 3]) + await sequence.abatch([1, 2, 3]) + # -> [4, 6, 8] + """ + return RunnableSequence(self, *others, name=name) + + def pick(self, keys: Union[str, list[str]]) -> RunnableSerializable[Any, Any]: + """Pick keys from the output dict of this Runnable. + + Pick single key: + .. code-block:: python + + import json + + from langchain_core.runnables import RunnableLambda, RunnableMap + + as_str = RunnableLambda(str) + as_json = RunnableLambda(json.loads) + chain = RunnableMap(str=as_str, json=as_json) + + chain.invoke("[1, 2, 3]") + # -> {"str": "[1, 2, 3]", "json": [1, 2, 3]} + + json_only_chain = chain.pick("json") + json_only_chain.invoke("[1, 2, 3]") + # -> [1, 2, 3] + + Pick list of keys: + .. 
code-block:: python + + from typing import Any + + import json + + from langchain_core.runnables import RunnableLambda, RunnableMap + + as_str = RunnableLambda(str) + as_json = RunnableLambda(json.loads) + + def as_bytes(x: Any) -> bytes: + return bytes(x, "utf-8") + + chain = RunnableMap( + str=as_str, + json=as_json, + bytes=RunnableLambda(as_bytes) + ) + + chain.invoke("[1, 2, 3]") + # -> {"str": "[1, 2, 3]", "json": [1, 2, 3], "bytes": b"[1, 2, 3]"} + + json_and_bytes_chain = chain.pick(["json", "bytes"]) + json_and_bytes_chain.invoke("[1, 2, 3]") + # -> {"json": [1, 2, 3], "bytes": b"[1, 2, 3]"} + + """ + from langchain_core.runnables.passthrough import RunnablePick + + return self | RunnablePick(keys) + + def assign( + self, + **kwargs: Union[ + Runnable[dict[str, Any], Any], + Callable[[dict[str, Any]], Any], + Mapping[ + str, + Union[Runnable[dict[str, Any], Any], Callable[[dict[str, Any]], Any]], + ], + ], + ) -> RunnableSerializable[Any, Any]: + """Assign new fields to the dict output of this Runnable. + + Returns a new Runnable. + + .. code-block:: python + + from langchain_community.llms.fake import FakeStreamingListLLM + from langchain_core.output_parsers import StrOutputParser + from langchain_core.prompts import SystemMessagePromptTemplate + from langchain_core.runnables import Runnable + from operator import itemgetter + + prompt = ( + SystemMessagePromptTemplate.from_template("You are a nice assistant.") + + "{question}" + ) + llm = FakeStreamingListLLM(responses=["foo-lish"]) + + chain: Runnable = prompt | llm | {"str": StrOutputParser()} + + chain_with_assign = chain.assign(hello=itemgetter("str") | llm) + + print(chain_with_assign.input_schema.model_json_schema()) + # {'title': 'PromptInput', 'type': 'object', 'properties': + # {'question': {'title': 'Question', 'type': 'string'}}} + print(chain_with_assign.output_schema.model_json_schema()) + # {'title': 'RunnableSequenceOutput', 'type': 'object', 'properties': + # {'str': {'title': 'Str', 'type': 'string'}, + # 'hello': {'title': 'Hello', 'type': 'string'}}} + + """ + from langchain_core.runnables.passthrough import RunnableAssign + + return self | RunnableAssign(RunnableParallel[dict[str, Any]](kwargs)) + + """ --- Public API --- """ + + @abstractmethod + def invoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + """Transform a single input into an output. + + Args: + input: The input to the Runnable. + config: A config to use when invoking the Runnable. + The config supports standard keys like 'tags', 'metadata' for tracing + purposes, 'max_concurrency' for controlling how much work to do + in parallel, and other keys. Please refer to the RunnableConfig + for more details. + + Returns: + The output of the Runnable. + """ + + async def ainvoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + """Default implementation of ainvoke, calls invoke from a thread. + + The default implementation allows usage of async code even if + the Runnable did not implement a native async version of invoke. + + Subclasses should override this method if they can run asynchronously. + """ + return await run_in_executor(config, self.invoke, input, config, **kwargs) + + def batch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + """Default implementation runs invoke in parallel using a thread pool executor.
+ + The default implementation of batch works well for IO bound runnables. + + Subclasses should override this method if they can batch more efficiently; + e.g., if the underlying Runnable uses an API which supports a batch mode. + """ + if not inputs: + return [] + + configs = get_config_list(config, len(inputs)) + + def invoke(input: Input, config: RunnableConfig) -> Union[Output, Exception]: + if return_exceptions: + try: + return self.invoke(input, config, **kwargs) + except Exception as e: + return e + else: + return self.invoke(input, config, **kwargs) + + # If there's only one input, don't bother with the executor + if len(inputs) == 1: + return cast("list[Output]", [invoke(inputs[0], configs[0])]) + + with get_executor_for_config(configs[0]) as executor: + return cast("list[Output]", list(executor.map(invoke, inputs, configs))) + + @overload + def batch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: Literal[False] = False, + **kwargs: Any, + ) -> Iterator[tuple[int, Output]]: ... + + @overload + def batch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: Literal[True], + **kwargs: Any, + ) -> Iterator[tuple[int, Union[Output, Exception]]]: ... + + def batch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> Iterator[tuple[int, Union[Output, Exception]]]: + """Run invoke in parallel on a list of inputs. + + Yields results as they complete. + """ + if not inputs: + return + + configs = get_config_list(config, len(inputs)) + + def invoke( + i: int, input: Input, config: RunnableConfig + ) -> tuple[int, Union[Output, Exception]]: + if return_exceptions: + try: + out: Union[Output, Exception] = self.invoke(input, config, **kwargs) + except Exception as e: + out = e + else: + out = self.invoke(input, config, **kwargs) + + return (i, out) + + if len(inputs) == 1: + yield invoke(0, inputs[0], configs[0]) + return + + with get_executor_for_config(configs[0]) as executor: + futures = { + executor.submit(invoke, i, input, config) + for i, (input, config) in enumerate(zip(inputs, configs)) + } + + try: + while futures: + done, futures = wait(futures, return_when=FIRST_COMPLETED) + while done: + yield done.pop().result() + finally: + for future in futures: + future.cancel() + + async def abatch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + """Default implementation runs ainvoke in parallel using asyncio.gather. + + The default implementation of batch works well for IO bound runnables. + + Subclasses should override this method if they can batch more efficiently; + e.g., if the underlying Runnable uses an API which supports a batch mode. + + Args: + inputs: A list of inputs to the Runnable. + config: A config to use when invoking the Runnable. + The config supports standard keys like 'tags', 'metadata' for tracing + purposes, 'max_concurrency' for controlling how much work to do + in parallel, and other keys. Please refer to the RunnableConfig + for more details. Defaults to None. + return_exceptions: Whether to return exceptions instead of raising them. + Defaults to False. 
+ kwargs: Additional keyword arguments to pass to the Runnable. + + Returns: + A list of outputs from the Runnable. + """ + if not inputs: + return [] + + configs = get_config_list(config, len(inputs)) + + async def ainvoke( + input: Input, config: RunnableConfig + ) -> Union[Output, Exception]: + if return_exceptions: + try: + return await self.ainvoke(input, config, **kwargs) + except Exception as e: + return e + else: + return await self.ainvoke(input, config, **kwargs) + + coros = map(ainvoke, inputs, configs) + return await gather_with_concurrency(configs[0].get("max_concurrency"), *coros) + + @overload + def abatch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: Literal[False] = False, + **kwargs: Optional[Any], + ) -> AsyncIterator[tuple[int, Output]]: ... + + @overload + def abatch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: Literal[True], + **kwargs: Optional[Any], + ) -> AsyncIterator[tuple[int, Union[Output, Exception]]]: ... + + async def abatch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> AsyncIterator[tuple[int, Union[Output, Exception]]]: + """Run ainvoke in parallel on a list of inputs. + + Yields results as they complete. + + Args: + inputs: A list of inputs to the Runnable. + config: A config to use when invoking the Runnable. + The config supports standard keys like 'tags', 'metadata' for tracing + purposes, 'max_concurrency' for controlling how much work to do + in parallel, and other keys. Please refer to the RunnableConfig + for more details. Defaults to None. + return_exceptions: Whether to return exceptions instead of raising them. + Defaults to False. + kwargs: Additional keyword arguments to pass to the Runnable. + + Yields: + A tuple of the index of the input and the output from the Runnable. + """ + if not inputs: + return + + configs = get_config_list(config, len(inputs)) + # Get max_concurrency from first config, defaulting to None (unlimited) + max_concurrency = configs[0].get("max_concurrency") if configs else None + semaphore = asyncio.Semaphore(max_concurrency) if max_concurrency else None + + async def ainvoke_task( + i: int, input: Input, config: RunnableConfig + ) -> tuple[int, Union[Output, Exception]]: + if return_exceptions: + try: + out: Union[Output, Exception] = await self.ainvoke( + input, config, **kwargs + ) + except Exception as e: + out = e + else: + out = await self.ainvoke(input, config, **kwargs) + return (i, out) + + coros = [ + gated_coro(semaphore, ainvoke_task(i, input, config)) + if semaphore + else ainvoke_task(i, input, config) + for i, (input, config) in enumerate(zip(inputs, configs)) + ] + + for coro in asyncio.as_completed(coros): + yield await coro + + def stream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + """Default implementation of stream, which calls invoke. + + Subclasses should override this method if they support streaming output. + + Args: + input: The input to the Runnable. + config: The config to use for the Runnable. Defaults to None. + kwargs: Additional keyword arguments to pass to the Runnable. + + Yields: + The output of the Runnable.
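+ + Example (a sketch; since this base implementation simply calls invoke, + the stream yields a single chunk): + + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + + runnable = RunnableLambda(lambda x: x + 1) + for chunk in runnable.stream(1): + print(chunk) # -> 2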
+ """ + yield self.invoke(input, config, **kwargs) + + async def astream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + """Default implementation of astream, which calls ainvoke. + + Subclasses should override this method if they support streaming output. + + Args: + input: The input to the Runnable. + config: The config to use for the Runnable. Defaults to None. + kwargs: Additional keyword arguments to pass to the Runnable. + + Yields: + The output of the Runnable. + """ + yield await self.ainvoke(input, config, **kwargs) + + @overload + def astream_log( + self, + input: Any, + config: Optional[RunnableConfig] = None, + *, + diff: Literal[True] = True, + with_streamed_output_list: bool = True, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> AsyncIterator[RunLogPatch]: ... + + @overload + def astream_log( + self, + input: Any, + config: Optional[RunnableConfig] = None, + *, + diff: Literal[False], + with_streamed_output_list: bool = True, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> AsyncIterator[RunLog]: ... + + async def astream_log( + self, + input: Any, + config: Optional[RunnableConfig] = None, + *, + diff: bool = True, + with_streamed_output_list: bool = True, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> Union[AsyncIterator[RunLogPatch], AsyncIterator[RunLog]]: + """Stream all output from a Runnable, as reported to the callback system. + + This includes all inner runs of LLMs, Retrievers, Tools, etc. + + Output is streamed as Log objects, which include a list of + Jsonpatch ops that describe how the state of the run has changed in each + step, and the final state of the run. + + The Jsonpatch ops can be applied in order to construct state. + + Args: + input: The input to the Runnable. + config: The config to use for the Runnable. + diff: Whether to yield diffs between each step or the current state. + with_streamed_output_list: Whether to yield the streamed_output list. + include_names: Only include logs with these names. + include_types: Only include logs with these types. + include_tags: Only include logs with these tags. + exclude_names: Exclude logs with these names. + exclude_types: Exclude logs with these types. + exclude_tags: Exclude logs with these tags. + kwargs: Additional keyword arguments to pass to the Runnable. + + Yields: + A RunLogPatch or RunLog object. 
+ """ + from langchain_core.tracers.log_stream import ( + LogStreamCallbackHandler, + _astream_log_implementation, + ) + + stream = LogStreamCallbackHandler( + auto_close=False, + include_names=include_names, + include_types=include_types, + include_tags=include_tags, + exclude_names=exclude_names, + exclude_types=exclude_types, + exclude_tags=exclude_tags, + _schema_format="original", + ) + + # Mypy isn't resolving the overloads here + # Likely an issue b/c `self` is being passed through + # and it's can't map it to Runnable[Input,Output]? + async for item in _astream_log_implementation( # type: ignore[call-overload] + self, + input, + config, + diff=diff, + stream=stream, + with_streamed_output_list=with_streamed_output_list, + **kwargs, + ): + yield item + + async def astream_events( + self, + input: Any, + config: Optional[RunnableConfig] = None, + *, + version: Literal["v1", "v2"] = "v2", + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> AsyncIterator[StreamEvent]: + """Generate a stream of events. + + Use to create an iterator over StreamEvents that provide real-time information + about the progress of the Runnable, including StreamEvents from intermediate + results. + + A StreamEvent is a dictionary with the following schema: + + - ``event``: **str** - Event names are of the + format: on_[runnable_type]_(start|stream|end). + - ``name``: **str** - The name of the Runnable that generated the event. + - ``run_id``: **str** - randomly generated ID associated with the given execution of + the Runnable that emitted the event. + A child Runnable that gets invoked as part of the execution of a + parent Runnable is assigned its own unique ID. + - ``parent_ids``: **list[str]** - The IDs of the parent runnables that + generated the event. The root Runnable will have an empty list. + The order of the parent IDs is from the root to the immediate parent. + Only available for v2 version of the API. The v1 version of the API + will return an empty list. + - ``tags``: **Optional[list[str]]** - The tags of the Runnable that generated + the event. + - ``metadata``: **Optional[dict[str, Any]]** - The metadata of the Runnable + that generated the event. + - ``data``: **dict[str, Any]** + + + Below is a table that illustrates some events that might be emitted by various + chains. Metadata fields have been omitted from the table for brevity. + Chain definitions have been included after the table. + + **ATTENTION** This reference table is for the V2 version of the schema. 
+ + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | event | name | chunk | input | output | + +======================+==================+=================================+===============================================+=================================================+ + | on_chat_model_start | [model name] | | {"messages": [[SystemMessage, HumanMessage]]} | | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_chat_model_stream | [model name] | AIMessageChunk(content="hello") | | | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_chat_model_end | [model name] | | {"messages": [[SystemMessage, HumanMessage]]} | AIMessageChunk(content="hello world") | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_llm_start | [model name] | | {'input': 'hello'} | | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_llm_stream | [model name] | 'Hello' | | | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_llm_end | [model name] | | 'Hello human!' | | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_chain_start | format_docs | | | | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_chain_stream | format_docs | "hello world!, goodbye world!" | | | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_chain_end | format_docs | | [Document(...)] | "hello world!, goodbye world!" | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_tool_start | some_tool | | {"x": 1, "y": "2"} | | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_tool_end | some_tool | | | {"x": 1, "y": "2"} | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_retriever_start | [retriever name] | | {"query": "hello"} | | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_retriever_end | [retriever name] | | {"query": "hello"} | [Document(...), ..] 
| + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_prompt_start | [template_name] | | {"question": "hello"} | | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + | on_prompt_end | [template_name] | | {"question": "hello"} | ChatPromptValue(messages: [SystemMessage, ...]) | + +----------------------+------------------+---------------------------------+-----------------------------------------------+-------------------------------------------------+ + + In addition to the standard events, users can also dispatch custom events (see example below). + + Custom events will be only be surfaced with in the `v2` version of the API! + + A custom event has following format: + + +-----------+------+-----------------------------------------------------------------------------------------------------------+ + | Attribute | Type | Description | + +===========+======+===========================================================================================================+ + | name | str | A user defined name for the event. | + +-----------+------+-----------------------------------------------------------------------------------------------------------+ + | data | Any | The data associated with the event. This can be anything, though we suggest making it JSON serializable. | + +-----------+------+-----------------------------------------------------------------------------------------------------------+ + + Here are declarations associated with the standard events shown above: + + `format_docs`: + + .. code-block:: python + + def format_docs(docs: list[Document]) -> str: + '''Format the docs.''' + return ", ".join([doc.page_content for doc in docs]) + + format_docs = RunnableLambda(format_docs) + + `some_tool`: + + .. code-block:: python + + @tool + def some_tool(x: int, y: str) -> dict: + '''Some_tool.''' + return {"x": x, "y": y} + + `prompt`: + + .. code-block:: python + + template = ChatPromptTemplate.from_messages( + [("system", "You are Cat Agent 007"), ("human", "{question}")] + ).with_config({"run_name": "my_template", "tags": ["my_template"]}) + + + Example: + + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + + async def reverse(s: str) -> str: + return s[::-1] + + chain = RunnableLambda(func=reverse) + + events = [ + event async for event in chain.astream_events("hello", version="v2") + ] + + # will produce the following events (run_id, and parent_ids + # has been omitted for brevity): + [ + { + "data": {"input": "hello"}, + "event": "on_chain_start", + "metadata": {}, + "name": "reverse", + "tags": [], + }, + { + "data": {"chunk": "olleh"}, + "event": "on_chain_stream", + "metadata": {}, + "name": "reverse", + "tags": [], + }, + { + "data": {"output": "olleh"}, + "event": "on_chain_end", + "metadata": {}, + "name": "reverse", + "tags": [], + }, + ] + + + Example: Dispatch Custom Event + + .. 
code-block:: python + + from langchain_core.callbacks.manager import ( + adispatch_custom_event, + ) + from langchain_core.runnables import RunnableLambda, RunnableConfig + import asyncio + + + async def slow_thing(some_input: str, config: RunnableConfig) -> str: + \"\"\"Do something that takes a long time.\"\"\" + await asyncio.sleep(1) # Placeholder for some slow operation + await adispatch_custom_event( + "progress_event", + {"message": "Finished step 1 of 3"}, + config=config # Must be included for Python < 3.10 + ) + await asyncio.sleep(1) # Placeholder for some slow operation + await adispatch_custom_event( + "progress_event", + {"message": "Finished step 2 of 3"}, + config=config # Must be included for Python < 3.10 + ) + await asyncio.sleep(1) # Placeholder for some slow operation + return "Done" + + slow_thing = RunnableLambda(slow_thing) + + async for event in slow_thing.astream_events("some_input", version="v2"): + print(event) + + Args: + input: The input to the Runnable. + config: The config to use for the Runnable. + version: The version of the schema to use, either `v2` or `v1`. + Users should use `v2`. + `v1` is for backwards compatibility and will be deprecated + in 0.4.0. + No default will be assigned until the API is stabilized. + Custom events will only be surfaced in `v2`. + include_names: Only include events from runnables with matching names. + include_types: Only include events from runnables with matching types. + include_tags: Only include events from runnables with matching tags. + exclude_names: Exclude events from runnables with matching names. + exclude_types: Exclude events from runnables with matching types. + exclude_tags: Exclude events from runnables with matching tags. + kwargs: Additional keyword arguments to pass to the Runnable. + These will be passed to astream_log as this implementation + of astream_events is built on top of astream_log. + + Yields: + An async stream of StreamEvents. + + Raises: + NotImplementedError: If the version is not `v1` or `v2`. + """ # noqa: E501 + from langchain_core.tracers.event_stream import ( + _astream_events_implementation_v1, + _astream_events_implementation_v2, + ) + + if version == "v2": + event_stream = _astream_events_implementation_v2( + self, + input, + config=config, + include_names=include_names, + include_types=include_types, + include_tags=include_tags, + exclude_names=exclude_names, + exclude_types=exclude_types, + exclude_tags=exclude_tags, + **kwargs, + ) + elif version == "v1": + # First implementation, built on top of astream_log API + # This implementation will be deprecated as of 0.2.0 + event_stream = _astream_events_implementation_v1( + self, + input, + config=config, + include_names=include_names, + include_types=include_types, + include_tags=include_tags, + exclude_names=exclude_names, + exclude_types=exclude_types, + exclude_tags=exclude_tags, + **kwargs, + ) + else: + msg = 'Only versions "v1" and "v2" of the schema are currently supported.' + raise NotImplementedError(msg) + + async with aclosing(event_stream): + async for event in event_stream: + yield event + + def transform( + self, + input: Iterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + """Default implementation of transform, which buffers input and calls stream. + + Subclasses should override this method if they can start producing output while + input is still being generated. + + Args: + input: An iterator of inputs to the Runnable.
+ config: The config to use for the Runnable. Defaults to None. + kwargs: Additional keyword arguments to pass to the Runnable. + + Yields: + The output of the Runnable. + """ + final: Input + got_first_val = False + + for ichunk in input: + # The default implementation of transform is to buffer input and + # then call stream. + # It'll attempt to gather all input into a single chunk using + # the `+` operator. + # If the input is not addable, then we'll assume that we can + # only operate on the last chunk, + # and we'll iterate until we get to the last chunk. + if not got_first_val: + final = ichunk + got_first_val = True + else: + try: + final = final + ichunk # type: ignore[operator] + except TypeError: + final = ichunk + + if got_first_val: + yield from self.stream(final, config, **kwargs) + + async def atransform( + self, + input: AsyncIterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + """Default implementation of atransform, which buffers input and calls astream. + + Subclasses should override this method if they can start producing output while + input is still being generated. + + Args: + input: An async iterator of inputs to the Runnable. + config: The config to use for the Runnable. Defaults to None. + kwargs: Additional keyword arguments to pass to the Runnable. + + Yields: + The output of the Runnable. + """ + final: Input + got_first_val = False + + async for ichunk in input: + # The default implementation of transform is to buffer input and + # then call stream. + # It'll attempt to gather all input into a single chunk using + # the `+` operator. + # If the input is not addable, then we'll assume that we can + # only operate on the last chunk, + # and we'll iterate until we get to the last chunk. + if not got_first_val: + final = ichunk + got_first_val = True + else: + try: + final = final + ichunk # type: ignore[operator] + except TypeError: + final = ichunk + + if got_first_val: + async for output in self.astream(final, config, **kwargs): + yield output + + def bind(self, **kwargs: Any) -> Runnable[Input, Output]: + """Bind arguments to a Runnable, returning a new Runnable. + + Useful when a Runnable in a chain requires an argument that is not + in the output of the previous Runnable or included in the user input. + + Args: + kwargs: The arguments to bind to the Runnable. + + Returns: + A new Runnable with the arguments bound. + + Example: + + .. code-block:: python + + from langchain_community.chat_models import ChatOllama + from langchain_core.output_parsers import StrOutputParser + + llm = ChatOllama(model='llama2') + + # Without bind. + chain = ( + llm + | StrOutputParser() + ) + + chain.invoke("Repeat quoted words exactly: 'One two three four five.'") + # Output is 'One two three four five.' + + # With bind. + chain = ( + llm.bind(stop=["three"]) + | StrOutputParser() + ) + + chain.invoke("Repeat quoted words exactly: 'One two three four five.'") + # Output is 'One two' + + """ + return RunnableBinding(bound=self, kwargs=kwargs, config={}) + + def with_config( + self, + config: Optional[RunnableConfig] = None, + # Sadly Unpack is not well-supported by mypy so this will have to be untyped + **kwargs: Any, + ) -> Runnable[Input, Output]: + """Bind config to a Runnable, returning a new Runnable. + + Args: + config: The config to bind to the Runnable. + kwargs: Additional keyword arguments to pass to the Runnable. + + Returns: + A new Runnable with the config bound. 
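+
+        Example:
+
+        A minimal usage sketch; the ``run_name`` and ``tags`` values below are
+        illustrative, not required by the API:
+
+        .. code-block:: python
+
+            from langchain_core.runnables import RunnableLambda
+
+            runnable = RunnableLambda(lambda x: x + 1)
+
+            # Attach tracing metadata; child runs inherit these values.
+            configured = runnable.with_config(
+                {"run_name": "add_one", "tags": ["math"]}
+            )
+
+            configured.invoke(1)  # 2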
+ """ + return RunnableBinding( + bound=self, + config=cast( + "RunnableConfig", + {**(config or {}), **kwargs}, + ), + kwargs={}, + ) + + def with_listeners( + self, + *, + on_start: Optional[ + Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]] + ] = None, + on_end: Optional[ + Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]] + ] = None, + on_error: Optional[ + Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]] + ] = None, + ) -> Runnable[Input, Output]: + """Bind lifecycle listeners to a Runnable, returning a new Runnable. + + on_start: Called before the Runnable starts running, with the Run object. + on_end: Called after the Runnable finishes running, with the Run object. + on_error: Called if the Runnable throws an error, with the Run object. + + The Run object contains information about the run, including its id, + type, input, output, error, start_time, end_time, and any tags or metadata + added to the run. + + Args: + on_start: Called before the Runnable starts running. Defaults to None. + on_end: Called after the Runnable finishes running. Defaults to None. + on_error: Called if the Runnable throws an error. Defaults to None. + + Returns: + A new Runnable with the listeners bound. + + Example: + + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + from langchain_core.tracers.schemas import Run + + import time + + def test_runnable(time_to_sleep : int): + time.sleep(time_to_sleep) + + def fn_start(run_obj: Run): + print("start_time:", run_obj.start_time) + + def fn_end(run_obj: Run): + print("end_time:", run_obj.end_time) + + chain = RunnableLambda(test_runnable).with_listeners( + on_start=fn_start, + on_end=fn_end + ) + chain.invoke(2) + """ + from langchain_core.tracers.root_listeners import RootListenersTracer + + return RunnableBinding( + bound=self, + config_factories=[ + lambda config: { + "callbacks": [ + RootListenersTracer( + config=config, + on_start=on_start, + on_end=on_end, + on_error=on_error, + ) + ], + } + ], + ) + + def with_alisteners( + self, + *, + on_start: Optional[AsyncListener] = None, + on_end: Optional[AsyncListener] = None, + on_error: Optional[AsyncListener] = None, + ) -> Runnable[Input, Output]: + """Bind async lifecycle listeners to a Runnable, returning a new Runnable. + + on_start: Asynchronously called before the Runnable starts running. + on_end: Asynchronously called after the Runnable finishes running. + on_error: Asynchronously called if the Runnable throws an error. + + The Run object contains information about the run, including its id, + type, input, output, error, start_time, end_time, and any tags or metadata + added to the run. + + Args: + on_start: Asynchronously called before the Runnable starts running. + Defaults to None. + on_end: Asynchronously called after the Runnable finishes running. + Defaults to None. + on_error: Asynchronously called if the Runnable throws an error. + Defaults to None. + + Returns: + A new Runnable with the listeners bound. + + Example: + + .. 
code-block:: python
+
+                from langchain_core.runnables import RunnableLambda
+                from langchain_core.tracers.schemas import Run
+                from datetime import datetime, timezone
+                import time
+                import asyncio
+
+                def format_t(timestamp: float) -> str:
+                    return datetime.fromtimestamp(timestamp, tz=timezone.utc).isoformat()
+
+                async def test_runnable(time_to_sleep: int):
+                    print(f"Runnable[{time_to_sleep}s]: starts at {format_t(time.time())}")
+                    await asyncio.sleep(time_to_sleep)
+                    print(f"Runnable[{time_to_sleep}s]: ends at {format_t(time.time())}")
+
+                async def fn_start(run_obj: Run):
+                    print(f"on start callback starts at {format_t(time.time())}")
+                    await asyncio.sleep(3)
+                    print(f"on start callback ends at {format_t(time.time())}")
+
+                async def fn_end(run_obj: Run):
+                    print(f"on end callback starts at {format_t(time.time())}")
+                    await asyncio.sleep(2)
+                    print(f"on end callback ends at {format_t(time.time())}")
+
+                runnable = RunnableLambda(test_runnable).with_alisteners(
+                    on_start=fn_start,
+                    on_end=fn_end
+                )
+
+                async def concurrent_runs():
+                    await asyncio.gather(runnable.ainvoke(2), runnable.ainvoke(3))
+
+                asyncio.run(concurrent_runs())
+
+            Result:
+
+            .. code-block:: text
+
+                on start callback starts at 2025-03-01T07:05:22.875378+00:00
+                on start callback starts at 2025-03-01T07:05:22.875495+00:00
+                on start callback ends at 2025-03-01T07:05:25.878862+00:00
+                on start callback ends at 2025-03-01T07:05:25.878947+00:00
+                Runnable[2s]: starts at 2025-03-01T07:05:25.879392+00:00
+                Runnable[3s]: starts at 2025-03-01T07:05:25.879804+00:00
+                Runnable[2s]: ends at 2025-03-01T07:05:27.881998+00:00
+                on end callback starts at 2025-03-01T07:05:27.882360+00:00
+                Runnable[3s]: ends at 2025-03-01T07:05:28.881737+00:00
+                on end callback starts at 2025-03-01T07:05:28.882428+00:00
+                on end callback ends at 2025-03-01T07:05:29.883893+00:00
+                on end callback ends at 2025-03-01T07:05:30.884831+00:00
+
+        """
+        from langchain_core.tracers.root_listeners import AsyncRootListenersTracer
+
+        return RunnableBinding(
+            bound=self,
+            config_factories=[
+                lambda config: {
+                    "callbacks": [
+                        AsyncRootListenersTracer(
+                            config=config,
+                            on_start=on_start,
+                            on_end=on_end,
+                            on_error=on_error,
+                        )
+                    ],
+                }
+            ],
+        )
+
+    def with_types(
+        self,
+        *,
+        input_type: Optional[type[Input]] = None,
+        output_type: Optional[type[Output]] = None,
+    ) -> Runnable[Input, Output]:
+        """Bind input and output types to a Runnable, returning a new Runnable.
+
+        Args:
+            input_type: The input type to bind to the Runnable. Defaults to None.
+            output_type: The output type to bind to the Runnable. Defaults to None.
+
+        Returns:
+            A new Runnable with the types bound.
+        """
+        return RunnableBinding(
+            bound=self,
+            custom_input_type=input_type,
+            custom_output_type=output_type,
+            kwargs={},
+        )
+
+    def with_retry(
+        self,
+        *,
+        retry_if_exception_type: tuple[type[BaseException], ...] = (Exception,),
+        wait_exponential_jitter: bool = True,
+        exponential_jitter_params: Optional[ExponentialJitterParams] = None,
+        stop_after_attempt: int = 3,
+    ) -> Runnable[Input, Output]:
+        """Create a new Runnable that retries the original Runnable on exceptions.
+
+        Args:
+            retry_if_exception_type: A tuple of exception types to retry on.
+                Defaults to (Exception,).
+            wait_exponential_jitter: Whether to add jitter to the wait
+                time between retries. Defaults to True.
+            stop_after_attempt: The maximum number of attempts to make before
+                giving up. Defaults to 3.
+            exponential_jitter_params: Parameters for
+                ``tenacity.wait_exponential_jitter``. Namely: ``initial``, ``max``,
+                ``exp_base``, and ``jitter`` (all float values).
+
+        Returns:
+            A new Runnable that retries the original Runnable on exceptions.
+
+        Example:
+
+        .. code-block:: python
+
+            from langchain_core.runnables import RunnableLambda
+
+            count = 0
+
+
+            def _lambda(x: int) -> None:
+                global count
+                count = count + 1
+                if x == 1:
+                    raise ValueError("x is 1")
+                else:
+                    pass
+
+
+            runnable = RunnableLambda(_lambda)
+            try:
+                runnable.with_retry(
+                    stop_after_attempt=2,
+                    retry_if_exception_type=(ValueError,),
+                ).invoke(1)
+            except ValueError:
+                pass
+
+            assert count == 2
+
+        """
+        from langchain_core.runnables.retry import RunnableRetry
+
+        return RunnableRetry(
+            bound=self,
+            kwargs={},
+            config={},
+            retry_exception_types=retry_if_exception_type,
+            wait_exponential_jitter=wait_exponential_jitter,
+            max_attempt_number=stop_after_attempt,
+            exponential_jitter_params=exponential_jitter_params,
+        )
+
+    def map(self) -> Runnable[list[Input], list[Output]]:
+        """Return a new Runnable that maps a list of inputs to a list of outputs.
+
+        Calls invoke() with each input.
+
+        Returns:
+            A new Runnable that maps a list of inputs to a list of outputs.
+
+        Example:
+
+        .. code-block:: python
+
+            from langchain_core.runnables import RunnableLambda
+
+            def _lambda(x: int) -> int:
+                return x + 1
+
+            runnable = RunnableLambda(_lambda)
+            print(runnable.map().invoke([1, 2, 3]))  # [2, 3, 4]
+        """
+        return RunnableEach(bound=self)
+
+    def with_fallbacks(
+        self,
+        fallbacks: Sequence[Runnable[Input, Output]],
+        *,
+        exceptions_to_handle: tuple[type[BaseException], ...] = (Exception,),
+        exception_key: Optional[str] = None,
+    ) -> RunnableWithFallbacksT[Input, Output]:
+        """Add fallbacks to a Runnable, returning a new Runnable.
+
+        The new Runnable will try the original Runnable, and then each fallback
+        in order, upon failures.
+
+        Args:
+            fallbacks: A sequence of runnables to try if the original Runnable fails.
+            exceptions_to_handle: A tuple of exception types to handle.
+                Defaults to (Exception,).
+            exception_key: If a string is specified, then handled exceptions will be
+                passed to fallbacks as part of the input under the specified key.
+                If None, exceptions will not be passed to fallbacks. If used, the base
+                Runnable and its fallbacks must accept a dictionary as input.
+                Defaults to None.
+
+        Returns:
+            A new Runnable that will try the original Runnable, and then each
+            fallback in order, upon failures.
+
+        Example:
+
+        .. code-block:: python
+
+            from typing import Iterator
+
+            from langchain_core.runnables import RunnableGenerator
+
+
+            def _generate_immediate_error(input: Iterator) -> Iterator[str]:
+                raise ValueError()
+                yield ""
+
+
+            def _generate(input: Iterator) -> Iterator[str]:
+                yield from "foo bar"
+
+
+            runnable = RunnableGenerator(_generate_immediate_error).with_fallbacks(
+                [RunnableGenerator(_generate)]
+            )
+            print(''.join(runnable.stream({})))  # foo bar
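+
+        Example using ``exception_key`` (a minimal sketch; the key name
+        ``"exception"`` is illustrative):
+
+        .. code-block:: python
+
+            from langchain_core.runnables import RunnableLambda
+
+
+            def _primary(inputs: dict) -> str:
+                raise ValueError("primary failed")
+
+
+            def _fallback(inputs: dict) -> str:
+                # The handled exception is inserted into the input dict
+                # under the configured key before the fallback runs.
+                return f"recovered from: {inputs['exception']}"
+
+
+            runnable = RunnableLambda(_primary).with_fallbacks(
+                [RunnableLambda(_fallback)], exception_key="exception"
+            )
+            print(runnable.invoke({"input": "hi"}))  # recovered from: primary failed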
+ + """ + from langchain_core.runnables.fallbacks import RunnableWithFallbacks + + return RunnableWithFallbacks( + runnable=self, + fallbacks=fallbacks, + exceptions_to_handle=exceptions_to_handle, + exception_key=exception_key, + ) + + """ --- Helper methods for Subclasses --- """ + + def _call_with_config( + self, + func: Union[ + Callable[[Input], Output], + Callable[[Input, CallbackManagerForChainRun], Output], + Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output], + ], + input: Input, + config: Optional[RunnableConfig], + run_type: Optional[str] = None, + serialized: Optional[dict[str, Any]] = None, + **kwargs: Optional[Any], + ) -> Output: + """Helper method to transform an Input value to an Output value, with callbacks. + + Use this method to implement invoke() in subclasses. + """ + config = ensure_config(config) + callback_manager = get_callback_manager_for_config(config) + run_manager = callback_manager.on_chain_start( + serialized, + input, + run_type=run_type, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + try: + child_config = patch_config(config, callbacks=run_manager.get_child()) + with set_config_context(child_config) as context: + output = cast( + "Output", + context.run( + call_func_with_variable_args, # type: ignore[arg-type] + func, + input, + config, + run_manager, + **kwargs, + ), + ) + except BaseException as e: + run_manager.on_chain_error(e) + raise + else: + run_manager.on_chain_end(output) + return output + + async def _acall_with_config( + self, + func: Union[ + Callable[[Input], Awaitable[Output]], + Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]], + Callable[ + [Input, AsyncCallbackManagerForChainRun, RunnableConfig], + Awaitable[Output], + ], + ], + input: Input, + config: Optional[RunnableConfig], + run_type: Optional[str] = None, + serialized: Optional[dict[str, Any]] = None, + **kwargs: Optional[Any], + ) -> Output: + """Helper method to transform an Input value to an Output value, with callbacks. + + Use this method to implement ainvoke() in subclasses. + """ + config = ensure_config(config) + callback_manager = get_async_callback_manager_for_config(config) + run_manager = await callback_manager.on_chain_start( + serialized, + input, + run_type=run_type, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + try: + child_config = patch_config(config, callbacks=run_manager.get_child()) + with set_config_context(child_config) as context: + coro = acall_func_with_variable_args( + func, input, config, run_manager, **kwargs + ) + output: Output = await coro_with_context(coro, context) + except BaseException as e: + await run_manager.on_chain_error(e) + raise + else: + await run_manager.on_chain_end(output) + return output + + def _batch_with_config( + self, + func: Union[ + Callable[[list[Input]], list[Union[Exception, Output]]], + Callable[ + [list[Input], list[CallbackManagerForChainRun]], + list[Union[Exception, Output]], + ], + Callable[ + [list[Input], list[CallbackManagerForChainRun], list[RunnableConfig]], + list[Union[Exception, Output]], + ], + ], + input: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + run_type: Optional[str] = None, + **kwargs: Optional[Any], + ) -> list[Output]: + """Transform a list of inputs to a list of outputs, with callbacks. + + Helper method to transform an Input value to an Output value, + with callbacks. 
Use this method to implement batch() in subclasses.
+        """
+        if not input:
+            return []
+
+        configs = get_config_list(config, len(input))
+        callback_managers = [get_callback_manager_for_config(c) for c in configs]
+        run_managers = [
+            callback_manager.on_chain_start(
+                None,
+                input,
+                run_type=run_type,
+                name=config.get("run_name") or self.get_name(),
+                run_id=config.pop("run_id", None),
+            )
+            for callback_manager, input, config in zip(
+                callback_managers, input, configs
+            )
+        ]
+        try:
+            if accepts_config(func):
+                kwargs["config"] = [
+                    patch_config(c, callbacks=rm.get_child())
+                    for c, rm in zip(configs, run_managers)
+                ]
+            if accepts_run_manager(func):
+                kwargs["run_manager"] = run_managers
+            output = func(input, **kwargs)  # type: ignore[call-arg]
+        except BaseException as e:
+            for run_manager in run_managers:
+                run_manager.on_chain_error(e)
+            if return_exceptions:
+                return cast("list[Output]", [e for _ in input])
+            raise
+        else:
+            first_exception: Optional[Exception] = None
+            for run_manager, out in zip(run_managers, output):
+                if isinstance(out, Exception):
+                    first_exception = first_exception or out
+                    run_manager.on_chain_error(out)
+                else:
+                    run_manager.on_chain_end(out)
+            if return_exceptions or first_exception is None:
+                return cast("list[Output]", output)
+            raise first_exception
+
+    async def _abatch_with_config(
+        self,
+        func: Union[
+            Callable[[list[Input]], Awaitable[list[Union[Exception, Output]]]],
+            Callable[
+                [list[Input], list[AsyncCallbackManagerForChainRun]],
+                Awaitable[list[Union[Exception, Output]]],
+            ],
+            Callable[
+                [
+                    list[Input],
+                    list[AsyncCallbackManagerForChainRun],
+                    list[RunnableConfig],
+                ],
+                Awaitable[list[Union[Exception, Output]]],
+            ],
+        ],
+        input: list[Input],
+        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        *,
+        return_exceptions: bool = False,
+        run_type: Optional[str] = None,
+        **kwargs: Optional[Any],
+    ) -> list[Output]:
+        """Transform a list of inputs to a list of outputs, with callbacks.
+
+        Helper method to transform a list of Input values to a list of Output
+        values, with callbacks.
+        Use this method to implement abatch() in subclasses.
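+
+        A minimal sketch of such a subclass override (``_abatch_impl`` is a
+        hypothetical helper, not part of this module):
+
+        .. code-block:: python
+
+            async def abatch(self, inputs, config=None, **kwargs):
+                return await self._abatch_with_config(
+                    self._abatch_impl, inputs, config, run_type="chain", **kwargs
+                )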
+ """ + if not input: + return [] + + configs = get_config_list(config, len(input)) + callback_managers = [get_async_callback_manager_for_config(c) for c in configs] + run_managers: list[AsyncCallbackManagerForChainRun] = await asyncio.gather( + *( + callback_manager.on_chain_start( + None, + input, + run_type=run_type, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + for callback_manager, input, config in zip( + callback_managers, input, configs + ) + ) + ) + try: + if accepts_config(func): + kwargs["config"] = [ + patch_config(c, callbacks=rm.get_child()) + for c, rm in zip(configs, run_managers) + ] + if accepts_run_manager(func): + kwargs["run_manager"] = run_managers + output = await func(input, **kwargs) # type: ignore[call-arg] + except BaseException as e: + await asyncio.gather( + *(run_manager.on_chain_error(e) for run_manager in run_managers) + ) + if return_exceptions: + return cast("list[Output]", [e for _ in input]) + raise + else: + first_exception: Optional[Exception] = None + coros: list[Awaitable[None]] = [] + for run_manager, out in zip(run_managers, output): + if isinstance(out, Exception): + first_exception = first_exception or out + coros.append(run_manager.on_chain_error(out)) + else: + coros.append(run_manager.on_chain_end(out)) + await asyncio.gather(*coros) + if return_exceptions or first_exception is None: + return cast("list[Output]", output) + raise first_exception + + def _transform_stream_with_config( + self, + input: Iterator[Input], + transformer: Union[ + Callable[[Iterator[Input]], Iterator[Output]], + Callable[[Iterator[Input], CallbackManagerForChainRun], Iterator[Output]], + Callable[ + [ + Iterator[Input], + CallbackManagerForChainRun, + RunnableConfig, + ], + Iterator[Output], + ], + ], + config: Optional[RunnableConfig], + run_type: Optional[str] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + """Transform a stream with config. + + Helper method to transform an Iterator of Input values into an Iterator of + Output values, with callbacks. + Use this to implement `stream()` or `transform()` in Runnable subclasses. 
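+
+        A minimal sketch of such an override (``_transform_impl`` is a
+        hypothetical generator that maps an iterator of inputs to outputs):
+
+        .. code-block:: python
+
+            def transform(self, input, config=None, **kwargs):
+                yield from self._transform_stream_with_config(
+                    input, self._transform_impl, config, **kwargs
+                )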
+ """ + # Mixin that is used by both astream log and astream events implementation + from langchain_core.tracers._streaming import _StreamingCallbackHandler + + # tee the input so we can iterate over it twice + input_for_tracing, input_for_transform = tee(input, 2) + # Start the input iterator to ensure the input Runnable starts before this one + final_input: Optional[Input] = next(input_for_tracing, None) + final_input_supported = True + final_output: Optional[Output] = None + final_output_supported = True + + config = ensure_config(config) + callback_manager = get_callback_manager_for_config(config) + run_manager = callback_manager.on_chain_start( + None, + {"input": ""}, + run_type=run_type, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + try: + child_config = patch_config(config, callbacks=run_manager.get_child()) + if accepts_config(transformer): + kwargs["config"] = child_config + if accepts_run_manager(transformer): + kwargs["run_manager"] = run_manager + with set_config_context(child_config) as context: + iterator = context.run(transformer, input_for_transform, **kwargs) # type: ignore[arg-type] + if stream_handler := next( + ( + cast("_StreamingCallbackHandler", h) + for h in run_manager.handlers + # instance check OK here, it's a mixin + if isinstance(h, _StreamingCallbackHandler) + ), + None, + ): + # populates streamed_output in astream_log() output if needed + iterator = stream_handler.tap_output_iter( + run_manager.run_id, iterator + ) + try: + while True: + chunk: Output = context.run(next, iterator) + yield chunk + if final_output_supported: + if final_output is None: + final_output = chunk + else: + try: + final_output = final_output + chunk # type: ignore[operator] + except TypeError: + final_output = chunk + final_output_supported = False + else: + final_output = chunk + except (StopIteration, GeneratorExit): + pass + for ichunk in input_for_tracing: + if final_input_supported: + if final_input is None: + final_input = ichunk + else: + try: + final_input = final_input + ichunk # type: ignore[operator] + except TypeError: + final_input = ichunk + final_input_supported = False + else: + final_input = ichunk + except BaseException as e: + run_manager.on_chain_error(e, inputs=final_input) + raise + else: + run_manager.on_chain_end(final_output, inputs=final_input) + + async def _atransform_stream_with_config( + self, + input: AsyncIterator[Input], + transformer: Union[ + Callable[[AsyncIterator[Input]], AsyncIterator[Output]], + Callable[ + [AsyncIterator[Input], AsyncCallbackManagerForChainRun], + AsyncIterator[Output], + ], + Callable[ + [ + AsyncIterator[Input], + AsyncCallbackManagerForChainRun, + RunnableConfig, + ], + AsyncIterator[Output], + ], + ], + config: Optional[RunnableConfig], + run_type: Optional[str] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + """Transform a stream with config. + + Helper method to transform an Async Iterator of Input values into an Async + Iterator of Output values, with callbacks. + Use this to implement `astream()` or `atransform()` in Runnable subclasses. 
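+
+        A minimal sketch of such an override (``_atransform_impl`` is a
+        hypothetical async generator that maps an async iterator of inputs
+        to outputs):
+
+        .. code-block:: python
+
+            async def atransform(self, input, config=None, **kwargs):
+                async for chunk in self._atransform_stream_with_config(
+                    input, self._atransform_impl, config, **kwargs
+                ):
+                    yield chunk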
+ """ + # Mixin that is used by both astream log and astream events implementation + from langchain_core.tracers._streaming import _StreamingCallbackHandler + + # tee the input so we can iterate over it twice + input_for_tracing, input_for_transform = atee(input, 2) + # Start the input iterator to ensure the input Runnable starts before this one + final_input: Optional[Input] = await py_anext(input_for_tracing, None) + final_input_supported = True + final_output: Optional[Output] = None + final_output_supported = True + + config = ensure_config(config) + callback_manager = get_async_callback_manager_for_config(config) + run_manager = await callback_manager.on_chain_start( + None, + {"input": ""}, + run_type=run_type, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + try: + child_config = patch_config(config, callbacks=run_manager.get_child()) + if accepts_config(transformer): + kwargs["config"] = child_config + if accepts_run_manager(transformer): + kwargs["run_manager"] = run_manager + with set_config_context(child_config) as context: + iterator_ = context.run(transformer, input_for_transform, **kwargs) # type: ignore[arg-type] + + if stream_handler := next( + ( + cast("_StreamingCallbackHandler", h) + for h in run_manager.handlers + # instance check OK here, it's a mixin + if isinstance(h, _StreamingCallbackHandler) + ), + None, + ): + # populates streamed_output in astream_log() output if needed + iterator = stream_handler.tap_output_aiter( + run_manager.run_id, iterator_ + ) + else: + iterator = iterator_ + try: + while True: + chunk = await coro_with_context(py_anext(iterator), context) + yield chunk + if final_output_supported: + if final_output is None: + final_output = chunk + else: + try: + final_output = final_output + chunk + except TypeError: + final_output = chunk + final_output_supported = False + else: + final_output = chunk + except StopAsyncIteration: + pass + async for ichunk in input_for_tracing: + if final_input_supported: + if final_input is None: + final_input = ichunk + else: + try: + final_input = final_input + ichunk # type: ignore[operator] + except TypeError: + final_input = ichunk + final_input_supported = False + else: + final_input = ichunk + except BaseException as e: + await run_manager.on_chain_error(e, inputs=final_input) + raise + else: + await run_manager.on_chain_end(final_output, inputs=final_input) + finally: + if iterator_ is not None and hasattr(iterator_, "aclose"): + await iterator_.aclose() + + @beta_decorator.beta(message="This API is in beta and may change in the future.") + def as_tool( + self, + args_schema: Optional[type[BaseModel]] = None, + *, + name: Optional[str] = None, + description: Optional[str] = None, + arg_types: Optional[dict[str, type]] = None, + ) -> BaseTool: + """Create a BaseTool from a Runnable. + + ``as_tool`` will instantiate a BaseTool with a name, description, and + ``args_schema`` from a Runnable. Where possible, schemas are inferred + from ``runnable.get_input_schema``. Alternatively (e.g., if the + Runnable takes a dict as input and the specific dict keys are not typed), + the schema can be specified directly with ``args_schema``. You can also + pass ``arg_types`` to just specify the required arguments and their types. + + Args: + args_schema: The schema for the tool. Defaults to None. + name: The name of the tool. Defaults to None. + description: The description of the tool. Defaults to None. + arg_types: A dictionary of argument names to types. Defaults to None. 
+ + Returns: + A BaseTool instance. + + Typed dict input: + + .. code-block:: python + + from typing_extensions import TypedDict + from langchain_core.runnables import RunnableLambda + + class Args(TypedDict): + a: int + b: list[int] + + def f(x: Args) -> str: + return str(x["a"] * max(x["b"])) + + runnable = RunnableLambda(f) + as_tool = runnable.as_tool() + as_tool.invoke({"a": 3, "b": [1, 2]}) + + ``dict`` input, specifying schema via ``args_schema``: + + .. code-block:: python + + from typing import Any + from pydantic import BaseModel, Field + from langchain_core.runnables import RunnableLambda + + def f(x: dict[str, Any]) -> str: + return str(x["a"] * max(x["b"])) + + class FSchema(BaseModel): + \"\"\"Apply a function to an integer and list of integers.\"\"\" + + a: int = Field(..., description="Integer") + b: list[int] = Field(..., description="List of ints") + + runnable = RunnableLambda(f) + as_tool = runnable.as_tool(FSchema) + as_tool.invoke({"a": 3, "b": [1, 2]}) + + ``dict`` input, specifying schema via ``arg_types``: + + .. code-block:: python + + from typing import Any + from langchain_core.runnables import RunnableLambda + + def f(x: dict[str, Any]) -> str: + return str(x["a"] * max(x["b"])) + + runnable = RunnableLambda(f) + as_tool = runnable.as_tool(arg_types={"a": int, "b": list[int]}) + as_tool.invoke({"a": 3, "b": [1, 2]}) + + String input: + + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + + def f(x: str) -> str: + return x + "a" + + def g(x: str) -> str: + return x + "z" + + runnable = RunnableLambda(f) | g + as_tool = runnable.as_tool() + as_tool.invoke("b") + + .. versionadded:: 0.2.14 + """ + # Avoid circular import + from langchain_core.tools import convert_runnable_to_tool + + return convert_runnable_to_tool( + self, + args_schema=args_schema, + name=name, + description=description, + arg_types=arg_types, + ) + + +class RunnableSerializable(Serializable, Runnable[Input, Output]): + """Runnable that can be serialized to JSON.""" + + name: Optional[str] = None + + model_config = ConfigDict( + # Suppress warnings from pydantic protected namespaces + # (e.g., `model_`) + protected_namespaces=(), + ) + + @override + def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]: + """Serialize the Runnable to JSON. + + Returns: + A JSON-serializable representation of the Runnable. + """ + dumped = super().to_json() + with contextlib.suppress(Exception): + dumped["name"] = self.get_name() + return dumped + + def configurable_fields( + self, **kwargs: AnyConfigurableField + ) -> RunnableSerializable[Input, Output]: + """Configure particular Runnable fields at runtime. + + Args: + **kwargs: A dictionary of ConfigurableField instances to configure. + + Returns: + A new Runnable with the fields configured. + + .. 
code-block:: python + + from langchain_core.runnables import ConfigurableField + from langchain_openai import ChatOpenAI + + model = ChatOpenAI(max_tokens=20).configurable_fields( + max_tokens=ConfigurableField( + id="output_token_number", + name="Max tokens in the output", + description="The maximum number of tokens in the output", + ) + ) + + # max_tokens = 20 + print( + "max_tokens_20: ", + model.invoke("tell me something about chess").content + ) + + # max_tokens = 200 + print("max_tokens_200: ", model.with_config( + configurable={"output_token_number": 200} + ).invoke("tell me something about chess").content + ) + """ + from langchain_core.runnables.configurable import RunnableConfigurableFields + + model_fields = type(self).model_fields + for key in kwargs: + if key not in model_fields: + msg = ( + f"Configuration key {key} not found in {self}: " + f"available keys are {model_fields.keys()}" + ) + raise ValueError(msg) + + return RunnableConfigurableFields(default=self, fields=kwargs) + + def configurable_alternatives( + self, + which: ConfigurableField, + *, + default_key: str = "default", + prefix_keys: bool = False, + **kwargs: Union[Runnable[Input, Output], Callable[[], Runnable[Input, Output]]], + ) -> RunnableSerializable[Input, Output]: + """Configure alternatives for Runnables that can be set at runtime. + + Args: + which: The ConfigurableField instance that will be used to select the + alternative. + default_key: The default key to use if no alternative is selected. + Defaults to "default". + prefix_keys: Whether to prefix the keys with the ConfigurableField id. + Defaults to False. + **kwargs: A dictionary of keys to Runnable instances or callables that + return Runnable instances. + + Returns: + A new Runnable with the alternatives configured. + + .. 
code-block:: python + + from langchain_anthropic import ChatAnthropic + from langchain_core.runnables.utils import ConfigurableField + from langchain_openai import ChatOpenAI + + model = ChatAnthropic( + model_name="claude-3-sonnet-20240229" + ).configurable_alternatives( + ConfigurableField(id="llm"), + default_key="anthropic", + openai=ChatOpenAI() + ) + + # uses the default model ChatAnthropic + print(model.invoke("which organization created you?").content) + + # uses ChatOpenAI + print( + model.with_config( + configurable={"llm": "openai"} + ).invoke("which organization created you?").content + ) + """ + from langchain_core.runnables.configurable import ( + RunnableConfigurableAlternatives, + ) + + return RunnableConfigurableAlternatives( + which=which, + default=self, + alternatives=kwargs, + default_key=default_key, + prefix_keys=prefix_keys, + ) + + +def _seq_input_schema( + steps: list[Runnable[Any, Any]], config: Optional[RunnableConfig] +) -> type[BaseModel]: + from langchain_core.runnables.passthrough import RunnableAssign, RunnablePick + + first = steps[0] + if len(steps) == 1: + return first.get_input_schema(config) + if isinstance(first, RunnableAssign): + next_input_schema = _seq_input_schema(steps[1:], config) + if not issubclass(next_input_schema, RootModel): + # it's a dict as expected + return create_model_v2( + "RunnableSequenceInput", + field_definitions={ + k: (v.annotation, v.default) + for k, v in next_input_schema.model_fields.items() + if k not in first.mapper.steps__ + }, + ) + elif isinstance(first, RunnablePick): + return _seq_input_schema(steps[1:], config) + + return first.get_input_schema(config) + + +def _seq_output_schema( + steps: list[Runnable[Any, Any]], config: Optional[RunnableConfig] +) -> type[BaseModel]: + from langchain_core.runnables.passthrough import RunnableAssign, RunnablePick + + last = steps[-1] + if len(steps) == 1: + return last.get_input_schema(config) + if isinstance(last, RunnableAssign): + mapper_output_schema = last.mapper.get_output_schema(config) + prev_output_schema = _seq_output_schema(steps[:-1], config) + if not issubclass(prev_output_schema, RootModel): + # it's a dict as expected + return create_model_v2( + "RunnableSequenceOutput", + field_definitions={ + **{ + k: (v.annotation, v.default) + for k, v in prev_output_schema.model_fields.items() + }, + **{ + k: (v.annotation, v.default) + for k, v in mapper_output_schema.model_fields.items() + }, + }, + ) + elif isinstance(last, RunnablePick): + prev_output_schema = _seq_output_schema(steps[:-1], config) + if not issubclass(prev_output_schema, RootModel): + # it's a dict as expected + if isinstance(last.keys, list): + return create_model_v2( + "RunnableSequenceOutput", + field_definitions={ + k: (v.annotation, v.default) + for k, v in prev_output_schema.model_fields.items() + if k in last.keys + }, + ) + field = prev_output_schema.model_fields[last.keys] + return create_model_v2( + "RunnableSequenceOutput", root=(field.annotation, field.default) + ) + + return last.get_output_schema(config) + + +class RunnableSequence(RunnableSerializable[Input, Output]): + """Sequence of Runnables, where the output of each is the input of the next. + + **RunnableSequence** is the most important composition operator in LangChain + as it is used in virtually every chain. + + A RunnableSequence can be instantiated directly or more commonly by using the `|` + operator where either the left or right operands (or both) must be a Runnable. 
+
+    Any RunnableSequence automatically supports sync, async, and batch.
+
+    The default implementations of `batch` and `abatch` utilize thread pools and
+    asyncio.gather, and will be faster than naive invocation of invoke or ainvoke
+    for IO-bound Runnables.
+
+    Batching is implemented by invoking the batch method on each component of the
+    RunnableSequence in order.
+
+    A RunnableSequence preserves the streaming properties of its components, so if all
+    components of the sequence implement a `transform` method -- which
+    is the method that implements the logic to map a streaming input to a streaming
+    output -- then the sequence will be able to stream input to output!
+
+    If any component of the sequence does not implement transform then the
+    streaming will only begin after this component is run. If there are
+    multiple blocking components, streaming begins after the last one.
+
+    Please note: RunnableLambdas do not support `transform` by default! So if
+    you need to use a RunnableLambda, be careful about where you place it in a
+    RunnableSequence (if you need to use the .stream()/.astream() methods).
+
+    If you need arbitrary logic and need streaming, you can subclass
+    Runnable, and implement `transform` for whatever logic you need.
+
+    Here is a simple example that uses functions to illustrate the use of
+    RunnableSequence:
+
+    .. code-block:: python
+
+        from langchain_core.runnables import RunnableLambda
+
+        def add_one(x: int) -> int:
+            return x + 1
+
+        def mul_two(x: int) -> int:
+            return x * 2
+
+        runnable_1 = RunnableLambda(add_one)
+        runnable_2 = RunnableLambda(mul_two)
+        sequence = runnable_1 | runnable_2
+        # Or equivalently:
+        # sequence = RunnableSequence(first=runnable_1, last=runnable_2)
+        sequence.invoke(1)
+        await sequence.ainvoke(1)
+
+        sequence.batch([1, 2, 3])
+        await sequence.abatch([1, 2, 3])
+
+    Here's an example that streams JSON output generated by an LLM:
+
+    .. code-block:: python
+
+        from langchain_core.output_parsers.json import SimpleJsonOutputParser
+        from langchain_core.prompts import PromptTemplate
+        from langchain_openai import ChatOpenAI
+
+        prompt = PromptTemplate.from_template(
+            'In JSON format, give me a list of {topic} and their '
+            'corresponding names in French, Spanish and in a '
+            'Cat Language.'
+        )
+
+        model = ChatOpenAI()
+        chain = prompt | model | SimpleJsonOutputParser()
+
+        async for chunk in chain.astream({'topic': 'colors'}):
+            print('-')  # noqa: T201
+            print(chunk, sep='', flush=True)  # noqa: T201
+    """
+
+    # The steps are broken into first, middle and last, solely for type checking
+    # purposes. It allows specifying the `Input` on the first type, the `Output` of
+    # the last type.
+    first: Runnable[Input, Any]
+    """The first Runnable in the sequence."""
+    middle: list[Runnable[Any, Any]] = Field(default_factory=list)
+    """The middle Runnables in the sequence."""
+    last: Runnable[Any, Output]
+    """The last Runnable in the sequence."""
+
+    def __init__(
+        self,
+        *steps: RunnableLike,
+        name: Optional[str] = None,
+        first: Optional[Runnable[Any, Any]] = None,
+        middle: Optional[list[Runnable[Any, Any]]] = None,
+        last: Optional[Runnable[Any, Any]] = None,
+    ) -> None:
+        """Create a new RunnableSequence.
+
+        Args:
+            steps: The steps to include in the sequence.
+            name: The name of the Runnable. Defaults to None.
+            first: The first Runnable in the sequence. Defaults to None.
+            middle: The middle Runnables in the sequence. Defaults to None.
+            last: The last Runnable in the sequence. Defaults to None.
+
+        Raises:
+            ValueError: If the sequence has fewer than 2 steps.
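+
+        Example (a minimal sketch of direct construction; composition with
+        the `|` operator is more common):
+
+        .. code-block:: python
+
+            from langchain_core.runnables import RunnableLambda, RunnableSequence
+
+            sequence = RunnableSequence(
+                RunnableLambda(lambda x: x + 1),
+                RunnableLambda(lambda x: x * 2),
+            )
+            sequence.invoke(1)  # returns 4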
+ """ + steps_flat: list[Runnable] = [] + if not steps and first is not None and last is not None: + steps_flat = [first] + (middle or []) + [last] + for step in steps: + if isinstance(step, RunnableSequence): + steps_flat.extend(step.steps) + else: + steps_flat.append(coerce_to_runnable(step)) + if len(steps_flat) < 2: + msg = f"RunnableSequence must have at least 2 steps, got {len(steps_flat)}" + raise ValueError(msg) + super().__init__( # type: ignore[call-arg] + first=steps_flat[0], + middle=list(steps_flat[1:-1]), + last=steps_flat[-1], + name=name, + ) + + @classmethod + @override + def get_lc_namespace(cls) -> list[str]: + return ["langchain", "schema", "runnable"] + + @property + def steps(self) -> list[Runnable[Any, Any]]: + """All the Runnables that make up the sequence in order. + + Returns: + A list of Runnables. + """ + return [self.first] + self.middle + [self.last] + + @classmethod + @override + def is_lc_serializable(cls) -> bool: + """Check if the object is serializable. + + Returns: + True if the object is serializable, False otherwise. + Defaults to True. + """ + return True + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @property + @override + def InputType(self) -> type[Input]: + """The type of the input to the Runnable.""" + return self.first.InputType + + @property + @override + def OutputType(self) -> type[Output]: + """The type of the output of the Runnable.""" + return self.last.OutputType + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + """Get the input schema of the Runnable. + + Args: + config: The config to use. Defaults to None. + + Returns: + The input schema of the Runnable. + """ + return _seq_input_schema(self.steps, config) + + @override + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + """Get the output schema of the Runnable. + + Args: + config: The config to use. Defaults to None. + + Returns: + The output schema of the Runnable. + """ + return _seq_output_schema(self.steps, config) + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + """Get the config specs of the Runnable. + + Returns: + The config specs of the Runnable. + """ + from langchain_core.beta.runnables.context import ( + CONTEXT_CONFIG_PREFIX, + _key_from_id, + ) + + # get all specs + all_specs = [ + (spec, idx) + for idx, step in enumerate(self.steps) + for spec in step.config_specs + ] + # calculate context dependencies + specs_by_pos = groupby( + [tup for tup in all_specs if tup[0].id.startswith(CONTEXT_CONFIG_PREFIX)], + itemgetter(1), + ) + next_deps: set[str] = set() + deps_by_pos: dict[int, set[str]] = {} + for pos, specs in specs_by_pos: + deps_by_pos[pos] = next_deps + next_deps = next_deps | {spec[0].id for spec in specs} + # assign context dependencies + for pos, (spec, idx) in enumerate(all_specs): + if spec.id.startswith(CONTEXT_CONFIG_PREFIX): + all_specs[pos] = ( + ConfigurableFieldSpec( + id=spec.id, + annotation=spec.annotation, + name=spec.name, + default=spec.default, + description=spec.description, + is_shared=spec.is_shared, + dependencies=[ + d + for d in deps_by_pos[idx] + if _key_from_id(d) != _key_from_id(spec.id) + ] + + (spec.dependencies or []), + ), + idx, + ) + + return get_unique_config_specs(spec for spec, _ in all_specs) + + @override + def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph: + """Get the graph representation of the Runnable. 
+ + Args: + config: The config to use. Defaults to None. + + Returns: + The graph representation of the Runnable. + + Raises: + ValueError: If a Runnable has no first or last node. + """ + from langchain_core.runnables.graph import Graph + + graph = Graph() + for step in self.steps: + current_last_node = graph.last_node() + step_graph = step.get_graph(config) + if step is not self.first: + step_graph.trim_first_node() + if step is not self.last: + step_graph.trim_last_node() + step_first_node, _ = graph.extend(step_graph) + if not step_first_node: + msg = f"Runnable {step} has no first node" + raise ValueError(msg) + if current_last_node: + graph.add_edge(current_last_node, step_first_node) + + return graph + + @override + def __repr__(self) -> str: + return "\n| ".join( + repr(s) if i == 0 else indent_lines_after_first(repr(s), "| ") + for i, s in enumerate(self.steps) + ) + + @override + def __or__( + self, + other: Union[ + Runnable[Any, Other], + Callable[[Any], Other], + Callable[[Iterator[Any]], Iterator[Other]], + Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]], + ], + ) -> RunnableSerializable[Input, Other]: + if isinstance(other, RunnableSequence): + return RunnableSequence( + self.first, + *self.middle, + self.last, + other.first, + *other.middle, + other.last, + name=self.name or other.name, + ) + return RunnableSequence( + self.first, + *self.middle, + self.last, + coerce_to_runnable(other), + name=self.name, + ) + + @override + def __ror__( + self, + other: Union[ + Runnable[Other, Any], + Callable[[Other], Any], + Callable[[Iterator[Other]], Iterator[Any]], + Mapping[str, Union[Runnable[Other, Any], Callable[[Other], Any], Any]], + ], + ) -> RunnableSerializable[Other, Output]: + if isinstance(other, RunnableSequence): + return RunnableSequence( + other.first, + *other.middle, + other.last, + self.first, + *self.middle, + self.last, + name=other.name or self.name, + ) + return RunnableSequence( + coerce_to_runnable(other), + self.first, + *self.middle, + self.last, + name=self.name, + ) + + @override + def invoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + from langchain_core.beta.runnables.context import config_with_context + + # setup callbacks and context + config = config_with_context(ensure_config(config), self.steps) + callback_manager = get_callback_manager_for_config(config) + # start the root run + run_manager = callback_manager.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + + # invoke all steps in sequence + try: + for i, step in enumerate(self.steps): + # mark each step as a child run + config = patch_config( + config, callbacks=run_manager.get_child(f"seq:step:{i + 1}") + ) + with set_config_context(config) as context: + if i == 0: + input = context.run(step.invoke, input, config, **kwargs) + else: + input = context.run(step.invoke, input, config) + # finish the root run + except BaseException as e: + run_manager.on_chain_error(e) + raise + else: + run_manager.on_chain_end(input) + return cast("Output", input) + + @override + async def ainvoke( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Output: + from langchain_core.beta.runnables.context import aconfig_with_context + + # setup callbacks and context + config = aconfig_with_context(ensure_config(config), self.steps) + callback_manager = get_async_callback_manager_for_config(config) + # start the root run + 
run_manager = await callback_manager.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + + # invoke all steps in sequence + try: + for i, step in enumerate(self.steps): + # mark each step as a child run + config = patch_config( + config, callbacks=run_manager.get_child(f"seq:step:{i + 1}") + ) + with set_config_context(config) as context: + if i == 0: + part = functools.partial(step.ainvoke, input, config, **kwargs) + else: + part = functools.partial(step.ainvoke, input, config) + input = await coro_with_context(part(), context, create_task=True) + # finish the root run + except BaseException as e: + await run_manager.on_chain_error(e) + raise + else: + await run_manager.on_chain_end(input) + return cast("Output", input) + + @override + def batch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + from langchain_core.beta.runnables.context import config_with_context + from langchain_core.callbacks.manager import CallbackManager + + if not inputs: + return [] + + # setup callbacks and context + configs = [ + config_with_context(c, self.steps) + for c in get_config_list(config, len(inputs)) + ] + callback_managers = [ + CallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + local_callbacks=None, + verbose=False, + inheritable_tags=config.get("tags"), + local_tags=None, + inheritable_metadata=config.get("metadata"), + local_metadata=None, + ) + for config in configs + ] + # start the root runs, one per input + run_managers = [ + cm.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + for cm, input, config in zip(callback_managers, inputs, configs) + ] + + # invoke + try: + if return_exceptions: + # Track which inputs (by index) failed so far + # If an input has failed it will be present in this map, + # and the value will be the exception that was raised. + failed_inputs_map: dict[int, Exception] = {} + for stepidx, step in enumerate(self.steps): + # Assemble the original indexes of the remaining inputs + # (i.e. 
the ones that haven't failed yet) + remaining_idxs = [ + i for i in range(len(configs)) if i not in failed_inputs_map + ] + # Invoke the step on the remaining inputs + inputs = step.batch( + [ + inp + for i, inp in zip(remaining_idxs, inputs) + if i not in failed_inputs_map + ], + [ + # each step a child run of the corresponding root run + patch_config( + config, + callbacks=rm.get_child(f"seq:step:{stepidx + 1}"), + ) + for i, (rm, config) in enumerate(zip(run_managers, configs)) + if i not in failed_inputs_map + ], + return_exceptions=return_exceptions, + **(kwargs if stepidx == 0 else {}), + ) + # If an input failed, add it to the map + failed_inputs_map.update( + { + i: inp + for i, inp in zip(remaining_idxs, inputs) + if isinstance(inp, Exception) + } + ) + inputs = [inp for inp in inputs if not isinstance(inp, Exception)] + # If all inputs have failed, stop processing + if len(failed_inputs_map) == len(configs): + break + + # Reassemble the outputs, inserting Exceptions for failed inputs + inputs_copy = inputs.copy() + inputs = [] + for i in range(len(configs)): + if i in failed_inputs_map: + inputs.append(cast("Input", failed_inputs_map[i])) + else: + inputs.append(inputs_copy.pop(0)) + else: + for i, step in enumerate(self.steps): + inputs = step.batch( + inputs, + [ + # each step a child run of the corresponding root run + patch_config( + config, callbacks=rm.get_child(f"seq:step:{i + 1}") + ) + for rm, config in zip(run_managers, configs) + ], + return_exceptions=return_exceptions, + **(kwargs if i == 0 else {}), + ) + + # finish the root runs + except BaseException as e: + for rm in run_managers: + rm.on_chain_error(e) + if return_exceptions: + return cast("list[Output]", [e for _ in inputs]) + raise + else: + first_exception: Optional[Exception] = None + for run_manager, out in zip(run_managers, inputs): + if isinstance(out, Exception): + first_exception = first_exception or out + run_manager.on_chain_error(out) + else: + run_manager.on_chain_end(out) + if return_exceptions or first_exception is None: + return cast("list[Output]", inputs) + raise first_exception + + @override + async def abatch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + from langchain_core.beta.runnables.context import aconfig_with_context + from langchain_core.callbacks.manager import AsyncCallbackManager + + if not inputs: + return [] + + # setup callbacks and context + configs = [ + aconfig_with_context(c, self.steps) + for c in get_config_list(config, len(inputs)) + ] + callback_managers = [ + AsyncCallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + local_callbacks=None, + verbose=False, + inheritable_tags=config.get("tags"), + local_tags=None, + inheritable_metadata=config.get("metadata"), + local_metadata=None, + ) + for config in configs + ] + # start the root runs, one per input + run_managers: list[AsyncCallbackManagerForChainRun] = await asyncio.gather( + *( + cm.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + for cm, input, config in zip(callback_managers, inputs, configs) + ) + ) + + # invoke .batch() on each step + # this uses batching optimizations in Runnable subclasses, like LLM + try: + if return_exceptions: + # Track which inputs (by index) failed so far + # If an input has failed it will be present in this map, + # and the value will be the 
exception that was raised. + failed_inputs_map: dict[int, Exception] = {} + for stepidx, step in enumerate(self.steps): + # Assemble the original indexes of the remaining inputs + # (i.e. the ones that haven't failed yet) + remaining_idxs = [ + i for i in range(len(configs)) if i not in failed_inputs_map + ] + # Invoke the step on the remaining inputs + inputs = await step.abatch( + [ + inp + for i, inp in zip(remaining_idxs, inputs) + if i not in failed_inputs_map + ], + [ + # each step a child run of the corresponding root run + patch_config( + config, + callbacks=rm.get_child(f"seq:step:{stepidx + 1}"), + ) + for i, (rm, config) in enumerate(zip(run_managers, configs)) + if i not in failed_inputs_map + ], + return_exceptions=return_exceptions, + **(kwargs if stepidx == 0 else {}), + ) + # If an input failed, add it to the map + failed_inputs_map.update( + { + i: inp + for i, inp in zip(remaining_idxs, inputs) + if isinstance(inp, Exception) + } + ) + inputs = [inp for inp in inputs if not isinstance(inp, Exception)] + # If all inputs have failed, stop processing + if len(failed_inputs_map) == len(configs): + break + + # Reassemble the outputs, inserting Exceptions for failed inputs + inputs_copy = inputs.copy() + inputs = [] + for i in range(len(configs)): + if i in failed_inputs_map: + inputs.append(cast("Input", failed_inputs_map[i])) + else: + inputs.append(inputs_copy.pop(0)) + else: + for i, step in enumerate(self.steps): + inputs = await step.abatch( + inputs, + [ + # each step a child run of the corresponding root run + patch_config( + config, callbacks=rm.get_child(f"seq:step:{i + 1}") + ) + for rm, config in zip(run_managers, configs) + ], + return_exceptions=return_exceptions, + **(kwargs if i == 0 else {}), + ) + # finish the root runs + except BaseException as e: + await asyncio.gather(*(rm.on_chain_error(e) for rm in run_managers)) + if return_exceptions: + return cast("list[Output]", [e for _ in inputs]) + raise + else: + first_exception: Optional[Exception] = None + coros: list[Awaitable[None]] = [] + for run_manager, out in zip(run_managers, inputs): + if isinstance(out, Exception): + first_exception = first_exception or out + coros.append(run_manager.on_chain_error(out)) + else: + coros.append(run_manager.on_chain_end(out)) + await asyncio.gather(*coros) + if return_exceptions or first_exception is None: + return cast("list[Output]", inputs) + raise first_exception + + def _transform( + self, + input: Iterator[Input], + run_manager: CallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> Iterator[Output]: + from langchain_core.beta.runnables.context import config_with_context + + steps = [self.first] + self.middle + [self.last] + config = config_with_context(config, self.steps) + + # transform the input stream of each step with the next + # steps that don't natively support transforming an input stream will + # buffer input in memory until all available, and then start emitting output + final_pipeline = cast("Iterator[Output]", input) + for idx, step in enumerate(steps): + config = patch_config( + config, callbacks=run_manager.get_child(f"seq:step:{idx + 1}") + ) + if idx == 0: + final_pipeline = step.transform(final_pipeline, config, **kwargs) + else: + final_pipeline = step.transform(final_pipeline, config) + + yield from final_pipeline + + async def _atransform( + self, + input: AsyncIterator[Input], + run_manager: AsyncCallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> AsyncIterator[Output]: + from 
langchain_core.beta.runnables.context import aconfig_with_context + + steps = [self.first] + self.middle + [self.last] + config = aconfig_with_context(config, self.steps) + + # stream the last steps + # transform the input stream of each step with the next + # steps that don't natively support transforming an input stream will + # buffer input in memory until all available, and then start emitting output + final_pipeline = cast("AsyncIterator[Output]", input) + for idx, step in enumerate(steps): + config = patch_config( + config, + callbacks=run_manager.get_child(f"seq:step:{idx + 1}"), + ) + if idx == 0: + final_pipeline = step.atransform(final_pipeline, config, **kwargs) + else: + final_pipeline = step.atransform(final_pipeline, config) + async for output in final_pipeline: + yield output + + @override + def transform( + self, + input: Iterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + yield from self._transform_stream_with_config( + input, + self._transform, + patch_config(config, run_name=(config or {}).get("run_name") or self.name), + **kwargs, + ) + + @override + def stream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + yield from self.transform(iter([input]), config, **kwargs) + + @override + async def atransform( + self, + input: AsyncIterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + async for chunk in self._atransform_stream_with_config( + input, + self._atransform, + patch_config(config, run_name=(config or {}).get("run_name") or self.name), + **kwargs, + ): + yield chunk + + @override + async def astream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + async def input_aiter() -> AsyncIterator[Input]: + yield input + + async for chunk in self.atransform(input_aiter(), config, **kwargs): + yield chunk + + +class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]): + """Runnable that runs a mapping of Runnables in parallel. + + Returns a mapping of their outputs. + + RunnableParallel is one of the two main composition primitives for the LCEL, + alongside RunnableSequence. It invokes Runnables concurrently, providing the same + input to each. + + A RunnableParallel can be instantiated directly or by using a dict literal within a + sequence. + + Here is a simple example that uses functions to illustrate the use of + RunnableParallel: + + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + + def add_one(x: int) -> int: + return x + 1 + + def mul_two(x: int) -> int: + return x * 2 + + def mul_three(x: int) -> int: + return x * 3 + + runnable_1 = RunnableLambda(add_one) + runnable_2 = RunnableLambda(mul_two) + runnable_3 = RunnableLambda(mul_three) + + sequence = runnable_1 | { # this dict is coerced to a RunnableParallel + "mul_two": runnable_2, + "mul_three": runnable_3, + } + # Or equivalently: + # sequence = runnable_1 | RunnableParallel( + # {"mul_two": runnable_2, "mul_three": runnable_3} + # ) + # Also equivalently: + # sequence = runnable_1 | RunnableParallel( + # mul_two=runnable_2, + # mul_three=runnable_3, + # ) + + sequence.invoke(1) + await sequence.ainvoke(1) + + sequence.batch([1, 2, 3]) + await sequence.abatch([1, 2, 3]) + + RunnableParallel makes it easy to run Runnables in parallel. 
In the below example, + we simultaneously stream output from two different Runnables: + + .. code-block:: python + + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.runnables import RunnableParallel + from langchain_openai import ChatOpenAI + + model = ChatOpenAI() + joke_chain = ( + ChatPromptTemplate.from_template("tell me a joke about {topic}") + | model + ) + poem_chain = ( + ChatPromptTemplate.from_template("write a 2-line poem about {topic}") + | model + ) + + runnable = RunnableParallel(joke=joke_chain, poem=poem_chain) + + # Display stream + output = {key: "" for key, _ in runnable.output_schema()} + for chunk in runnable.stream({"topic": "bear"}): + for key in chunk: + output[key] = output[key] + chunk[key].content + print(output) # noqa: T201 + """ + + steps__: Mapping[str, Runnable[Input, Any]] + + def __init__( + self, + steps__: Optional[ + Mapping[ + str, + Union[ + Runnable[Input, Any], + Callable[[Input], Any], + Mapping[str, Union[Runnable[Input, Any], Callable[[Input], Any]]], + ], + ] + ] = None, + **kwargs: Union[ + Runnable[Input, Any], + Callable[[Input], Any], + Mapping[str, Union[Runnable[Input, Any], Callable[[Input], Any]]], + ], + ) -> None: + """Create a RunnableParallel. + + Args: + steps__: The steps to include. Defaults to None. + **kwargs: Additional steps to include. + """ + merged = {**steps__} if steps__ is not None else {} + merged.update(kwargs) + super().__init__( # type: ignore[call-arg] + steps__={key: coerce_to_runnable(r) for key, r in merged.items()} + ) + + @classmethod + @override + def is_lc_serializable(cls) -> bool: + return True + + @classmethod + @override + def get_lc_namespace(cls) -> list[str]: + return ["langchain", "schema", "runnable"] + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @override + def get_name( + self, suffix: Optional[str] = None, *, name: Optional[str] = None + ) -> str: + """Get the name of the Runnable. + + Args: + suffix: The suffix to use. Defaults to None. + name: The name to use. Defaults to None. + + Returns: + The name of the Runnable. + """ + name = name or self.name or f"RunnableParallel<{','.join(self.steps__.keys())}>" + return super().get_name(suffix, name=name) + + @property + @override + def InputType(self) -> Any: + """The type of the input to the Runnable.""" + for step in self.steps__.values(): + if step.InputType: + return step.InputType + + return Any + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + """Get the input schema of the Runnable. + + Args: + config: The config to use. Defaults to None. + + Returns: + The input schema of the Runnable. + """ + if all( + s.get_input_schema(config).model_json_schema().get("type", "object") + == "object" + for s in self.steps__.values() + ): + # This is correct, but pydantic typings/mypy don't think so. + return create_model_v2( + self.get_name("Input"), + field_definitions={ + k: (v.annotation, v.default) + for step in self.steps__.values() + for k, v in step.get_input_schema(config).model_fields.items() + if k != "__root__" + }, + ) + + return super().get_input_schema(config) + + @override + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + """Get the output schema of the Runnable. + + Args: + config: The config to use. Defaults to None. + + Returns: + The output schema of the Runnable. + """ + fields = {k: (v.OutputType, ...) 
for k, v in self.steps__.items()} + return create_model_v2(self.get_name("Output"), field_definitions=fields) + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + """Get the config specs of the Runnable. + + Returns: + The config specs of the Runnable. + """ + return get_unique_config_specs( + spec for step in self.steps__.values() for spec in step.config_specs + ) + + @override + def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph: + """Get the graph representation of the Runnable. + + Args: + config: The config to use. Defaults to None. + + Returns: + The graph representation of the Runnable. + + Raises: + ValueError: If a Runnable has no first or last node. + """ + from langchain_core.runnables.graph import Graph + + graph = Graph() + input_node = graph.add_node(self.get_input_schema(config)) + output_node = graph.add_node(self.get_output_schema(config)) + for step in self.steps__.values(): + step_graph = step.get_graph() + step_graph.trim_first_node() + step_graph.trim_last_node() + if not step_graph: + graph.add_edge(input_node, output_node) + else: + step_first_node, step_last_node = graph.extend(step_graph) + if not step_first_node: + msg = f"Runnable {step} has no first node" + raise ValueError(msg) + if not step_last_node: + msg = f"Runnable {step} has no last node" + raise ValueError(msg) + graph.add_edge(input_node, step_first_node) + graph.add_edge(step_last_node, output_node) + + return graph + + @override + def __repr__(self) -> str: + map_for_repr = ",\n ".join( + f"{k}: {indent_lines_after_first(repr(v), ' ' + k + ': ')}" + for k, v in self.steps__.items() + ) + return "{\n " + map_for_repr + "\n}" + + @override + def invoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> dict[str, Any]: + from langchain_core.callbacks.manager import CallbackManager + + # setup callbacks + config = ensure_config(config) + callback_manager = CallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + local_callbacks=None, + verbose=False, + inheritable_tags=config.get("tags"), + local_tags=None, + inheritable_metadata=config.get("metadata"), + local_metadata=None, + ) + # start the root run + run_manager = callback_manager.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + + def _invoke_step( + step: Runnable[Input, Any], input: Input, config: RunnableConfig, key: str + ) -> Any: + child_config = patch_config( + config, + # mark each step as a child run + callbacks=run_manager.get_child(f"map:key:{key}"), + ) + with set_config_context(child_config) as context: + return context.run( + step.invoke, + input, + child_config, + ) + + # gather results from all steps + try: + # copy to avoid issues from the caller mutating the steps during invoke() + steps = dict(self.steps__) + + with get_executor_for_config(config) as executor: + futures = [ + executor.submit(_invoke_step, step, input, config, key) + for key, step in steps.items() + ] + output = {key: future.result() for key, future in zip(steps, futures)} + # finish the root run + except BaseException as e: + run_manager.on_chain_error(e) + raise + else: + run_manager.on_chain_end(output) + return output + + @override + async def ainvoke( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> dict[str, Any]: + # setup callbacks + config = ensure_config(config) + callback_manager = 
get_async_callback_manager_for_config(config) + # start the root run + run_manager = await callback_manager.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + + async def _ainvoke_step( + step: Runnable[Input, Any], input: Input, config: RunnableConfig, key: str + ) -> Any: + child_config = patch_config( + config, + callbacks=run_manager.get_child(f"map:key:{key}"), + ) + with set_config_context(child_config) as context: + return await coro_with_context( + step.ainvoke(input, child_config), context, create_task=True + ) + + # gather results from all steps + try: + # copy to avoid issues from the caller mutating the steps during invoke() + steps = dict(self.steps__) + results = await asyncio.gather( + *( + _ainvoke_step( + step, + input, + # mark each step as a child run + config, + key, + ) + for key, step in steps.items() + ) + ) + output = dict(zip(steps, results)) + # finish the root run + except BaseException as e: + await run_manager.on_chain_error(e) + raise + else: + await run_manager.on_chain_end(output) + return output + + def _transform( + self, + input: Iterator[Input], + run_manager: CallbackManagerForChainRun, + config: RunnableConfig, + ) -> Iterator[AddableDict]: + # Shallow copy steps to ignore mutations while in progress + steps = dict(self.steps__) + # Each step gets a copy of the input iterator, + # which is consumed in parallel in a separate thread. + input_copies = list(safetee(input, len(steps), lock=threading.Lock())) + with get_executor_for_config(config) as executor: + # Create the transform() generator for each step + named_generators = [ + ( + name, + step.transform( + input_copies.pop(), + patch_config( + config, callbacks=run_manager.get_child(f"map:key:{name}") + ), + ), + ) + for name, step in steps.items() + ] + # Start the first iteration of each generator + futures = { + executor.submit(next, generator): (step_name, generator) + for step_name, generator in named_generators + } + # Yield chunks from each as they become available, + # and start the next iteration of that generator that yielded it. + # When all generators are exhausted, stop. + while futures: + completed_futures, _ = wait(futures, return_when=FIRST_COMPLETED) + for future in completed_futures: + (step_name, generator) = futures.pop(future) + try: + chunk = AddableDict({step_name: future.result()}) + yield chunk + futures[executor.submit(next, generator)] = ( + step_name, + generator, + ) + except StopIteration: + pass + + @override + def transform( + self, + input: Iterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[dict[str, Any]]: + yield from self._transform_stream_with_config( + input, self._transform, config, **kwargs + ) + + @override + def stream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[dict[str, Any]]: + yield from self.transform(iter([input]), config) + + async def _atransform( + self, + input: AsyncIterator[Input], + run_manager: AsyncCallbackManagerForChainRun, + config: RunnableConfig, + ) -> AsyncIterator[AddableDict]: + # Shallow copy steps to ignore mutations while in progress + steps = dict(self.steps__) + # Each step gets a copy of the input iterator, + # which is consumed in parallel in a separate thread. 
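+        # (in this async variant each input copy is consumed by a separate
+        # asyncio task, created below, rather than by a thread)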
+ input_copies = list(atee(input, len(steps), lock=asyncio.Lock())) + # Create the transform() generator for each step + named_generators = [ + ( + name, + step.atransform( + input_copies.pop(), + patch_config( + config, callbacks=run_manager.get_child(f"map:key:{name}") + ), + ), + ) + for name, step in steps.items() + ] + + # Wrap in a coroutine to satisfy linter + async def get_next_chunk(generator: AsyncIterator) -> Optional[Output]: + return await py_anext(generator) + + # Start the first iteration of each generator + tasks = { + asyncio.create_task(get_next_chunk(generator)): (step_name, generator) + for step_name, generator in named_generators + } + # Yield chunks from each as they become available, + # and start the next iteration of the generator that yielded it. + # When all generators are exhausted, stop. + while tasks: + completed_tasks, _ = await asyncio.wait( + tasks, return_when=asyncio.FIRST_COMPLETED + ) + for task in completed_tasks: + (step_name, generator) = tasks.pop(task) + try: + chunk = AddableDict({step_name: task.result()}) + yield chunk + new_task = asyncio.create_task(get_next_chunk(generator)) + tasks[new_task] = (step_name, generator) + except StopAsyncIteration: + pass + + @override + async def atransform( + self, + input: AsyncIterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[dict[str, Any]]: + async for chunk in self._atransform_stream_with_config( + input, self._atransform, config, **kwargs + ): + yield chunk + + @override + async def astream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[dict[str, Any]]: + async def input_aiter() -> AsyncIterator[Input]: + yield input + + async for chunk in self.atransform(input_aiter(), config): + yield chunk + + +# We support both names +RunnableMap = RunnableParallel + + +class RunnableGenerator(Runnable[Input, Output]): + """Runnable that runs a generator function. + + RunnableGenerators can be instantiated directly or by using a generator within + a sequence. + + RunnableGenerators can be used to implement custom behavior, such as custom output + parsers, while preserving streaming capabilities. Given a generator function with + a signature Iterator[A] -> Iterator[B], wrapping it in a RunnableGenerator allows + it to emit output chunks as soon as they are streamed in from the previous step. + + Note that if a generator function has a signature A -> Iterator[B], such that it + requires its input from the previous step to be completed before emitting chunks + (e.g., most LLMs need the entire prompt available to start generating), it can + instead be wrapped in a RunnableLambda. + + Here is an example to show the basic mechanics of a RunnableGenerator: + + .. 
code-block:: python + + from typing import Any, AsyncIterator, Iterator + + from langchain_core.runnables import RunnableGenerator + + + def gen(input: Iterator[Any]) -> Iterator[str]: + for token in ["Have", " a", " nice", " day"]: + yield token + + + runnable = RunnableGenerator(gen) + runnable.invoke(None) # "Have a nice day" + list(runnable.stream(None)) # ["Have", " a", " nice", " day"] + runnable.batch([None, None]) # ["Have a nice day", "Have a nice day"] + + + # Async version: + async def agen(input: AsyncIterator[Any]) -> AsyncIterator[str]: + for token in ["Have", " a", " nice", " day"]: + yield token + + runnable = RunnableGenerator(agen) + await runnable.ainvoke(None) # "Have a nice day" + [p async for p in runnable.astream(None)] # ["Have", " a", " nice", " day"] + + RunnableGenerator makes it easy to implement custom behavior within a streaming + context. Below we show an example: + + .. code-block:: python + + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.runnables import RunnableGenerator, RunnableLambda + from langchain_openai import ChatOpenAI + from langchain_core.output_parsers import StrOutputParser + + + model = ChatOpenAI() + chant_chain = ( + ChatPromptTemplate.from_template("Give me a 3 word chant about {topic}") + | model + | StrOutputParser() + ) + + def character_generator(input: Iterator[str]) -> Iterator[str]: + for token in input: + if "," in token or "." in token: + yield "👏" + token + else: + yield token + + + runnable = chant_chain | character_generator + assert type(runnable.last) is RunnableGenerator + "".join(runnable.stream({"topic": "waste"})) # Reduce👏, Reuse👏, Recycle👏. + + # Note that RunnableLambda can be used to delay streaming of one step in a + # sequence until the previous step is finished: + def reverse_generator(input: str) -> Iterator[str]: + # Yield characters of input in reverse order. + for character in input[::-1]: + yield character + + runnable = chant_chain | RunnableLambda(reverse_generator) + "".join(runnable.stream({"topic": "waste"})) # ".elcycer ,esuer ,ecudeR" + """ + + def __init__( + self, + transform: Union[ + Callable[[Iterator[Input]], Iterator[Output]], + Callable[[AsyncIterator[Input]], AsyncIterator[Output]], + ], + atransform: Optional[ + Callable[[AsyncIterator[Input]], AsyncIterator[Output]] + ] = None, + *, + name: Optional[str] = None, + ) -> None: + """Initialize a RunnableGenerator. + + Args: + transform: The transform function. + atransform: The async transform function. Defaults to None. + name: The name of the Runnable. Defaults to None. + + Raises: + TypeError: If the transform is not a generator function. + """ + if atransform is not None: + self._atransform = atransform + func_for_name: Callable = atransform + + if is_async_generator(transform): + self._atransform = transform + func_for_name = transform + elif inspect.isgeneratorfunction(transform): + self._transform = transform + func_for_name = transform + else: + msg = ( + "Expected a generator function type for `transform`." 
+ f"Instead got an unsupported type: {type(transform)}" + ) + raise TypeError(msg) + + try: + self.name = name or func_for_name.__name__ + except AttributeError: + self.name = "RunnableGenerator" + + @property + @override + def InputType(self) -> Any: + func = getattr(self, "_transform", None) or self._atransform + try: + params = inspect.signature(func).parameters + first_param = next(iter(params.values()), None) + if first_param and first_param.annotation != inspect.Parameter.empty: + return getattr(first_param.annotation, "__args__", (Any,))[0] + except ValueError: + pass + return Any + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + # Override the default implementation. + # For a runnable generator, we need to bring to provide the + # module of the underlying function when creating the model. + root_type = self.InputType + + func = getattr(self, "_transform", None) or self._atransform + module = getattr(func, "__module__", None) + + if ( + inspect.isclass(root_type) + and not isinstance(root_type, GenericAlias) + and issubclass(root_type, BaseModel) + ): + return root_type + + return create_model_v2( + self.get_name("Input"), + root=root_type, + # To create the schema, we need to provide the module + # where the underlying function is defined. + # This allows pydantic to resolve type annotations appropriately. + module_name=module, + ) + + @property + @override + def OutputType(self) -> Any: + func = getattr(self, "_transform", None) or self._atransform + try: + sig = inspect.signature(func) + return ( + getattr(sig.return_annotation, "__args__", (Any,))[0] + if sig.return_annotation != inspect.Signature.empty + else Any + ) + except ValueError: + return Any + + @override + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + # Override the default implementation. + # For a runnable generator, we need to bring to provide the + # module of the underlying function when creating the model. + root_type = self.OutputType + func = getattr(self, "_transform", None) or self._atransform + module = getattr(func, "__module__", None) + + if ( + inspect.isclass(root_type) + and not isinstance(root_type, GenericAlias) + and issubclass(root_type, BaseModel) + ): + return root_type + + return create_model_v2( + self.get_name("Output"), + root=root_type, + # To create the schema, we need to provide the module + # where the underlying function is defined. + # This allows pydantic to resolve type annotations appropriately. + module_name=module, + ) + + @override + def __eq__(self, other: object) -> bool: + if isinstance(other, RunnableGenerator): + if hasattr(self, "_transform") and hasattr(other, "_transform"): + return self._transform == other._transform + if hasattr(self, "_atransform") and hasattr(other, "_atransform"): + return self._atransform == other._atransform + return False + return False + + @override + def __repr__(self) -> str: + return f"RunnableGenerator({self.name})" + + @override + def transform( + self, + input: Iterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[Output]: + if not hasattr(self, "_transform"): + msg = f"{repr(self)} only supports async methods." 
+ raise NotImplementedError(msg) + return self._transform_stream_with_config( + input, + self._transform, # type: ignore[arg-type] + config, + **kwargs, + ) + + @override + def stream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[Output]: + return self.transform(iter([input]), config, **kwargs) + + @override + def invoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + final: Optional[Output] = None + for output in self.stream(input, config, **kwargs): + final = output if final is None else final + output # type: ignore[operator] + return cast("Output", final) + + @override + def atransform( + self, + input: AsyncIterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[Output]: + if not hasattr(self, "_atransform"): + msg = f"{repr(self)} only supports sync methods." + raise NotImplementedError(msg) + + return self._atransform_stream_with_config( + input, self._atransform, config, **kwargs + ) + + @override + def astream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[Output]: + async def input_aiter() -> AsyncIterator[Input]: + yield input + + return self.atransform(input_aiter(), config, **kwargs) + + @override + async def ainvoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + final: Optional[Output] = None + async for output in self.astream(input, config, **kwargs): + final = output if final is None else final + output # type: ignore[operator] + return cast("Output", final) + + +class RunnableLambda(Runnable[Input, Output]): + """RunnableLambda converts a python callable into a Runnable. + + Wrapping a callable in a RunnableLambda makes the callable usable + within either a sync or async context. + + RunnableLambda can be composed as any other Runnable and provides + seamless integration with LangChain tracing. + + ``RunnableLambda`` is best suited for code that does not need to support + streaming. If you need to support streaming (i.e., be able to operate + on chunks of inputs and yield chunks of outputs), use ``RunnableGenerator`` + instead. + + Note that if a ``RunnableLambda`` returns an instance of ``Runnable``, that + instance is invoked (or streamed) during execution. + + Examples: + + .. 
code-block:: python
+
+            # This is a RunnableLambda
+            from langchain_core.runnables import RunnableLambda
+
+            def add_one(x: int) -> int:
+                return x + 1
+
+            runnable = RunnableLambda(add_one)
+
+            runnable.invoke(1)  # returns 2
+            runnable.batch([1, 2, 3])  # returns [2, 3, 4]
+
+            # Async is supported by default by delegating to the sync implementation
+            await runnable.ainvoke(1)  # returns 2
+            await runnable.abatch([1, 2, 3])  # returns [2, 3, 4]
+
+
+            # Alternatively, can provide both sync and async implementations
+            async def add_one_async(x: int) -> int:
+                return x + 1
+
+            runnable = RunnableLambda(add_one, afunc=add_one_async)
+            runnable.invoke(1)  # Uses add_one
+            await runnable.ainvoke(1)  # Uses add_one_async
+    """
+
+    def __init__(
+        self,
+        func: Union[
+            Union[
+                Callable[[Input], Output],
+                Callable[[Input], Iterator[Output]],
+                Callable[[Input, RunnableConfig], Output],
+                Callable[[Input, CallbackManagerForChainRun], Output],
+                Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output],
+            ],
+            Union[
+                Callable[[Input], Awaitable[Output]],
+                Callable[[Input], AsyncIterator[Output]],
+                Callable[[Input, RunnableConfig], Awaitable[Output]],
+                Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]],
+                Callable[
+                    [Input, AsyncCallbackManagerForChainRun, RunnableConfig],
+                    Awaitable[Output],
+                ],
+            ],
+        ],
+        afunc: Optional[
+            Union[
+                Callable[[Input], Awaitable[Output]],
+                Callable[[Input], AsyncIterator[Output]],
+                Callable[[Input, RunnableConfig], Awaitable[Output]],
+                Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]],
+                Callable[
+                    [Input, AsyncCallbackManagerForChainRun, RunnableConfig],
+                    Awaitable[Output],
+                ],
+            ]
+        ] = None,
+        name: Optional[str] = None,
+    ) -> None:
+        """Create a RunnableLambda from a callable, an async callable, or both.
+
+        Accepts both sync and async variants to allow providing efficient
+        implementations for sync and async execution.
+
+        Args:
+            func: Either sync or async callable
+            afunc: An async callable that takes an input and returns an output.
+                Defaults to None.
+            name: The name of the Runnable. Defaults to None.
+
+        Raises:
+            TypeError: If the func is not a callable type.
+            TypeError: If both func and afunc are provided.
+        """
+        if afunc is not None:
+            self.afunc = afunc
+            func_for_name: Callable = afunc
+
+        if is_async_callable(func) or is_async_generator(func):
+            if afunc is not None:
+                msg = (
+                    "Func was provided as a coroutine function, but afunc was "
+                    "also provided. If providing both, func should be a regular "
+                    "function to avoid ambiguity."
+                )
+                raise TypeError(msg)
+            self.afunc = func
+            func_for_name = func
+        elif callable(func):
+            self.func = cast("Callable[[Input], Output]", func)
+            func_for_name = func
+        else:
+            msg = (
+                "Expected a callable type for `func`."
+ f"Instead got an unsupported type: {type(func)}" + ) + raise TypeError(msg) + + try: + if name is not None: + self.name = name + elif func_for_name.__name__ != "": + self.name = func_for_name.__name__ + except AttributeError: + pass + + self._repr: Optional[str] = None + + @property + @override + def InputType(self) -> Any: + """The type of the input to this Runnable.""" + func = getattr(self, "func", None) or self.afunc + try: + params = inspect.signature(func).parameters + first_param = next(iter(params.values()), None) + if first_param and first_param.annotation != inspect.Parameter.empty: + return first_param.annotation + except ValueError: + pass + return Any + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + """The pydantic schema for the input to this Runnable. + + Args: + config: The config to use. Defaults to None. + + Returns: + The input schema for this Runnable. + """ + func = getattr(self, "func", None) or self.afunc + + if isinstance(func, itemgetter): + # This is terrible, but afaict it's not possible to access _items + # on itemgetter objects, so we have to parse the repr + items = str(func).replace("operator.itemgetter(", "")[:-1].split(", ") + if all( + item[0] == "'" and item[-1] == "'" and len(item) > 2 for item in items + ): + fields = {item[1:-1]: (Any, ...) for item in items} + # It's a dict, lol + return create_model_v2(self.get_name("Input"), field_definitions=fields) + module = getattr(func, "__module__", None) + return create_model_v2( + self.get_name("Input"), + root=list[Any], + # To create the schema, we need to provide the module + # where the underlying function is defined. + # This allows pydantic to resolve type annotations appropriately. + module_name=module, + ) + + if self.InputType != Any: + return super().get_input_schema(config) + + if dict_keys := get_function_first_arg_dict_keys(func): + return create_model_v2( + self.get_name("Input"), + field_definitions=dict.fromkeys(dict_keys, (Any, ...)), + ) + + return super().get_input_schema(config) + + @property + @override + def OutputType(self) -> Any: + """The type of the output of this Runnable as a type annotation. + + Returns: + The type of the output of this Runnable. + """ + func = getattr(self, "func", None) or self.afunc + try: + sig = inspect.signature(func) + if sig.return_annotation != inspect.Signature.empty: + # unwrap iterator types + if getattr(sig.return_annotation, "__origin__", None) in ( + collections.abc.Iterator, + collections.abc.AsyncIterator, + ): + return getattr(sig.return_annotation, "__args__", (Any,))[0] + return sig.return_annotation + except ValueError: + pass + return Any + + @override + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + # Override the default implementation. + # For a runnable lambda, we need to bring to provide the + # module of the underlying function when creating the model. + root_type = self.OutputType + func = getattr(self, "func", None) or self.afunc + module = getattr(func, "__module__", None) + + if ( + inspect.isclass(root_type) + and not isinstance(root_type, GenericAlias) + and issubclass(root_type, BaseModel) + ): + return root_type + + return create_model_v2( + self.get_name("Output"), + root=root_type, + # To create the schema, we need to provide the module + # where the underlying function is defined. + # This allows pydantic to resolve type annotations appropriately. 
+ module_name=module, + ) + + @functools.cached_property + def deps(self) -> list[Runnable]: + """The dependencies of this Runnable. + + Returns: + The dependencies of this Runnable. If the function has nonlocal + variables that are Runnables, they are considered dependencies. + """ + if hasattr(self, "func"): + objects = get_function_nonlocals(self.func) + elif hasattr(self, "afunc"): + objects = get_function_nonlocals(self.afunc) + else: + objects = [] + + deps: list[Runnable] = [] + for obj in objects: + if isinstance(obj, Runnable): + deps.append(obj) + elif isinstance(getattr(obj, "__self__", None), Runnable): + deps.append(obj.__self__) + return deps + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + return get_unique_config_specs( + spec for dep in self.deps for spec in dep.config_specs + ) + + @override + def get_graph(self, config: RunnableConfig | None = None) -> Graph: + if deps := self.deps: + graph = Graph() + input_node = graph.add_node(self.get_input_schema(config)) + output_node = graph.add_node(self.get_output_schema(config)) + for dep in deps: + dep_graph = dep.get_graph() + dep_graph.trim_first_node() + dep_graph.trim_last_node() + if not dep_graph: + graph.add_edge(input_node, output_node) + else: + dep_first_node, dep_last_node = graph.extend(dep_graph) + if not dep_first_node: + msg = f"Runnable {dep} has no first node" + raise ValueError(msg) + if not dep_last_node: + msg = f"Runnable {dep} has no last node" + raise ValueError(msg) + graph.add_edge(input_node, dep_first_node) + graph.add_edge(dep_last_node, output_node) + else: + graph = super().get_graph(config) + + return graph + + @override + def __eq__(self, other: object) -> bool: + if isinstance(other, RunnableLambda): + if hasattr(self, "func") and hasattr(other, "func"): + return self.func == other.func + if hasattr(self, "afunc") and hasattr(other, "afunc"): + return self.afunc == other.afunc + return False + return False + + def __repr__(self) -> str: + """A string representation of this Runnable.""" + if self._repr is None: + if hasattr(self, "func") and isinstance(self.func, itemgetter): + self._repr = f"RunnableLambda({str(self.func)[len('operator.') :]})" + elif hasattr(self, "func"): + self._repr = f"RunnableLambda({get_lambda_source(self.func) or '...'})" + elif hasattr(self, "afunc"): + self._repr = ( + f"RunnableLambda(afunc={get_lambda_source(self.afunc) or '...'})" + ) + else: + self._repr = "RunnableLambda(...)" + return self._repr + + def _invoke( + self, + input: Input, + run_manager: CallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> Output: + if inspect.isgeneratorfunction(self.func): + output: Optional[Output] = None + for chunk in call_func_with_variable_args( + cast("Callable[[Input], Iterator[Output]]", self.func), + input, + config, + run_manager, + **kwargs, + ): + if output is None: + output = chunk + else: + try: + output = output + chunk # type: ignore[operator] + except TypeError: + output = chunk + else: + output = call_func_with_variable_args( + self.func, input, config, run_manager, **kwargs + ) + # If the output is a Runnable, invoke it + if isinstance(output, Runnable): + recursion_limit = config["recursion_limit"] + if recursion_limit <= 0: + msg = ( + f"Recursion limit reached when invoking {self} with input {input}." 
+                )
+                raise RecursionError(msg)
+            output = output.invoke(
+                input,
+                patch_config(
+                    config,
+                    callbacks=run_manager.get_child(),
+                    recursion_limit=recursion_limit - 1,
+                ),
+            )
+        return cast("Output", output)
+
+    async def _ainvoke(
+        self,
+        input: Input,
+        run_manager: AsyncCallbackManagerForChainRun,
+        config: RunnableConfig,
+        **kwargs: Any,
+    ) -> Output:
+        if hasattr(self, "afunc"):
+            afunc = self.afunc
+        else:
+            if inspect.isgeneratorfunction(self.func):
+
+                def func(
+                    input: Input,
+                    run_manager: AsyncCallbackManagerForChainRun,
+                    config: RunnableConfig,
+                    **kwargs: Any,
+                ) -> Output:
+                    output: Optional[Output] = None
+                    for chunk in call_func_with_variable_args(
+                        cast("Callable[[Input], Iterator[Output]]", self.func),
+                        input,
+                        config,
+                        run_manager.get_sync(),
+                        **kwargs,
+                    ):
+                        if output is None:
+                            output = chunk
+                        else:
+                            try:
+                                output = output + chunk  # type: ignore[operator]
+                            except TypeError:
+                                output = chunk
+                    return cast("Output", output)
+
+            else:
+
+                def func(
+                    input: Input,
+                    run_manager: AsyncCallbackManagerForChainRun,
+                    config: RunnableConfig,
+                    **kwargs: Any,
+                ) -> Output:
+                    return call_func_with_variable_args(
+                        self.func, input, config, run_manager.get_sync(), **kwargs
+                    )
+
+            @wraps(func)
+            async def f(*args: Any, **kwargs: Any) -> Any:
+                return await run_in_executor(config, func, *args, **kwargs)
+
+            afunc = f
+
+        if is_async_generator(afunc):
+            output: Optional[Output] = None
+            async with aclosing(
+                cast(
+                    "AsyncGenerator[Any, Any]",
+                    acall_func_with_variable_args(
+                        cast("Callable", afunc),
+                        input,
+                        config,
+                        run_manager,
+                        **kwargs,
+                    ),
+                )
+            ) as stream:
+                async for chunk in cast(
+                    "AsyncIterator[Output]",
+                    stream,
+                ):
+                    if output is None:
+                        output = chunk
+                    else:
+                        try:
+                            output = output + chunk  # type: ignore[operator]
+                        except TypeError:
+                            output = chunk
+        else:
+            output = await acall_func_with_variable_args(
+                cast("Callable", afunc), input, config, run_manager, **kwargs
+            )
+        # If the output is a Runnable, invoke it
+        if isinstance(output, Runnable):
+            recursion_limit = config["recursion_limit"]
+            if recursion_limit <= 0:
+                msg = (
+                    f"Recursion limit reached when invoking {self} with input {input}."
+                )
+                raise RecursionError(msg)
+            output = await output.ainvoke(
+                input,
+                patch_config(
+                    config,
+                    callbacks=run_manager.get_child(),
+                    recursion_limit=recursion_limit - 1,
+                ),
+            )
+        return cast("Output", output)
+
+    @override
+    def invoke(
+        self,
+        input: Input,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Optional[Any],
+    ) -> Output:
+        """Invoke this Runnable synchronously.
+
+        Args:
+            input: The input to this Runnable.
+            config: The config to use. Defaults to None.
+            kwargs: Additional keyword arguments.
+
+        Returns:
+            The output of this Runnable.
+
+        Raises:
+            TypeError: If the Runnable is a coroutine function.
+        """
+        if hasattr(self, "func"):
+            return self._call_with_config(
+                self._invoke,
+                input,
+                ensure_config(config),
+                **kwargs,
+            )
+        msg = "Cannot invoke a coroutine function synchronously. Use `ainvoke` instead."
+        raise TypeError(msg)
+
+    @override
+    async def ainvoke(
+        self,
+        input: Input,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Optional[Any],
+    ) -> Output:
+        """Invoke this Runnable asynchronously.
+
+        Args:
+            input: The input to this Runnable.
+            config: The config to use. Defaults to None.
+            kwargs: Additional keyword arguments.
+
+        Returns:
+            The output of this Runnable.
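+
+        Example:
+            A minimal sketch: when no ``afunc`` was provided, the sync ``func``
+            is run in an executor under the hood.
+
+            .. code-block:: python
+
+                runnable = RunnableLambda(lambda x: x + 1)
+                await runnable.ainvoke(1)  # returns 2 via the sync fallback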
+ """ + return await self._acall_with_config( + self._ainvoke, + input, + ensure_config(config), + **kwargs, + ) + + def _transform( + self, + input: Iterator[Input], + run_manager: CallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> Iterator[Output]: + final: Input + got_first_val = False + for ichunk in input: + # By definitions, RunnableLambdas consume all input before emitting output. + # If the input is not addable, then we'll assume that we can + # only operate on the last chunk. + # So we'll iterate until we get to the last chunk! + if not got_first_val: + final = ichunk + got_first_val = True + else: + try: + final = final + ichunk # type: ignore[operator] + except TypeError: + final = ichunk + + if inspect.isgeneratorfunction(self.func): + output: Optional[Output] = None + for chunk in call_func_with_variable_args( + self.func, final, config, run_manager, **kwargs + ): + yield chunk + if output is None: + output = chunk + else: + try: + output = output + chunk + except TypeError: + output = chunk + else: + output = call_func_with_variable_args( + self.func, final, config, run_manager, **kwargs + ) + + # If the output is a Runnable, use its stream output + if isinstance(output, Runnable): + recursion_limit = config["recursion_limit"] + if recursion_limit <= 0: + msg = ( + f"Recursion limit reached when invoking {self} with input {final}." + ) + raise RecursionError(msg) + for chunk in output.stream( + final, + patch_config( + config, + callbacks=run_manager.get_child(), + recursion_limit=recursion_limit - 1, + ), + ): + yield chunk + elif not inspect.isgeneratorfunction(self.func): + # Otherwise, just yield it + yield cast("Output", output) + + @override + def transform( + self, + input: Iterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + if hasattr(self, "func"): + yield from self._transform_stream_with_config( + input, + self._transform, + ensure_config(config), + **kwargs, + ) + else: + msg = ( + "Cannot stream a coroutine function synchronously." + "Use `astream` instead." + ) + raise TypeError(msg) + + @override + def stream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + return self.transform(iter([input]), config, **kwargs) + + async def _atransform( + self, + input: AsyncIterator[Input], + run_manager: AsyncCallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> AsyncIterator[Output]: + final: Input + got_first_val = False + async for ichunk in input: + # By definitions, RunnableLambdas consume all input before emitting output. + # If the input is not addable, then we'll assume that we can + # only operate on the last chunk. + # So we'll iterate until we get to the last chunk! + if not got_first_val: + final = ichunk + got_first_val = True + else: + try: + final = final + ichunk # type: ignore[operator] + except TypeError: + final = ichunk + + if hasattr(self, "afunc"): + afunc = self.afunc + else: + if inspect.isgeneratorfunction(self.func): + msg = ( + "Cannot stream from a generator function asynchronously." + "Use .stream() instead." 
+ ) + raise TypeError(msg) + + def func( + input: Input, + run_manager: AsyncCallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> Output: + return call_func_with_variable_args( + self.func, input, config, run_manager.get_sync(), **kwargs + ) + + @wraps(func) + async def f(*args: Any, **kwargs: Any) -> Any: + return await run_in_executor(config, func, *args, **kwargs) + + afunc = f + + if is_async_generator(afunc): + output: Optional[Output] = None + async for chunk in cast( + "AsyncIterator[Output]", + acall_func_with_variable_args( + cast("Callable", afunc), + final, + config, + run_manager, + **kwargs, + ), + ): + yield chunk + if output is None: + output = chunk + else: + try: + output = output + chunk # type: ignore[operator] + except TypeError: + output = chunk + else: + output = await acall_func_with_variable_args( + cast("Callable", afunc), + final, + config, + run_manager, + **kwargs, + ) + + # If the output is a Runnable, use its astream output + if isinstance(output, Runnable): + recursion_limit = config["recursion_limit"] + if recursion_limit <= 0: + msg = ( + f"Recursion limit reached when invoking {self} with input {final}." + ) + raise RecursionError(msg) + async for chunk in output.astream( + final, + patch_config( + config, + callbacks=run_manager.get_child(), + recursion_limit=recursion_limit - 1, + ), + ): + yield chunk + elif not is_async_generator(afunc): + # Otherwise, just yield it + yield cast("Output", output) + + @override + async def atransform( + self, + input: AsyncIterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + async for output in self._atransform_stream_with_config( + input, + self._atransform, + ensure_config(config), + **kwargs, + ): + yield output + + @override + async def astream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + async def input_aiter() -> AsyncIterator[Input]: + yield input + + async for chunk in self.atransform(input_aiter(), config, **kwargs): + yield chunk + + +class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]): + """Runnable that calls another Runnable for each element of the input sequence. + + Use only if creating a new RunnableEach subclass with different __init__ args. + + See documentation for RunnableEach for more details. + """ + + bound: Runnable[Input, Output] + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @property + @override + def InputType(self) -> Any: + return list[self.bound.InputType] # type: ignore[name-defined] + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + return create_model_v2( + self.get_name("Input"), + root=( + list[self.bound.get_input_schema(config)], # type: ignore[misc] + None, + ), + # create model needs access to appropriate type annotations to be + # able to construct the pydantic model. + # When we create the model, we pass information about the namespace + # where the model is being created, so the type annotations can + # be resolved correctly as well. + # self.__class__.__module__ handles the case when the Runnable is + # being sub-classed in a different module. 
+ module_name=self.__class__.__module__, + ) + + @property + @override + def OutputType(self) -> type[list[Output]]: + return list[self.bound.OutputType] # type: ignore[name-defined] + + @override + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + schema = self.bound.get_output_schema(config) + return create_model_v2( + self.get_name("Output"), + root=list[schema], # type: ignore[valid-type] + # create model needs access to appropriate type annotations to be + # able to construct the pydantic model. + # When we create the model, we pass information about the namespace + # where the model is being created, so the type annotations can + # be resolved correctly as well. + # self.__class__.__module__ handles the case when the Runnable is + # being sub-classed in a different module. + module_name=self.__class__.__module__, + ) + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + return self.bound.config_specs + + @override + def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph: + return self.bound.get_graph(config) + + @classmethod + @override + def is_lc_serializable(cls) -> bool: + return True + + @classmethod + @override + def get_lc_namespace(cls) -> list[str]: + return ["langchain", "schema", "runnable"] + + def _invoke( + self, + inputs: list[Input], + run_manager: CallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> list[Output]: + configs = [ + patch_config(config, callbacks=run_manager.get_child()) for _ in inputs + ] + return self.bound.batch(inputs, configs, **kwargs) + + @override + def invoke( + self, input: list[Input], config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> list[Output]: + return self._call_with_config(self._invoke, input, config, **kwargs) + + async def _ainvoke( + self, + inputs: list[Input], + run_manager: AsyncCallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> list[Output]: + configs = [ + patch_config(config, callbacks=run_manager.get_child()) for _ in inputs + ] + return await self.bound.abatch(inputs, configs, **kwargs) + + @override + async def ainvoke( + self, input: list[Input], config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> list[Output]: + return await self._acall_with_config(self._ainvoke, input, config, **kwargs) + + @override + async def astream_events( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[StreamEvent]: + for _ in range(1): + msg = "RunnableEach does not support astream_events yet." + raise NotImplementedError(msg) + yield + + +class RunnableEach(RunnableEachBase[Input, Output]): + """Runnable that calls another Runnable for each element of the input sequence. + + It allows you to call multiple inputs with the bounded Runnable. + + RunnableEach makes it easy to run multiple inputs for the Runnable. + In the below example, we associate and run three inputs + with a Runnable: + + .. 
code-block:: python
+
+            from langchain_core.runnables.base import RunnableEach
+            from langchain_openai import ChatOpenAI
+            from langchain_core.prompts import ChatPromptTemplate
+            from langchain_core.output_parsers import StrOutputParser
+            prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
+            model = ChatOpenAI()
+            output_parser = StrOutputParser()
+            runnable = prompt | model | output_parser
+            runnable_each = RunnableEach(bound=runnable)
+            output = runnable_each.invoke([{'topic':'Computer Science'},
+                                           {'topic':'Art'},
+                                           {'topic':'Biology'}])
+            print(output)  # noqa: T201
+    """
+
+    @override
+    def get_name(
+        self, suffix: Optional[str] = None, *, name: Optional[str] = None
+    ) -> str:
+        name = name or self.name or f"RunnableEach<{self.bound.get_name()}>"
+        return super().get_name(suffix, name=name)
+
+    @override
+    def bind(self, **kwargs: Any) -> RunnableEach[Input, Output]:
+        return RunnableEach(bound=self.bound.bind(**kwargs))
+
+    @override
+    def with_config(
+        self, config: Optional[RunnableConfig] = None, **kwargs: Any
+    ) -> RunnableEach[Input, Output]:
+        return RunnableEach(bound=self.bound.with_config(config, **kwargs))
+
+    @override
+    def with_listeners(
+        self,
+        *,
+        on_start: Optional[
+            Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
+        ] = None,
+        on_end: Optional[
+            Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
+        ] = None,
+        on_error: Optional[
+            Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
+        ] = None,
+    ) -> RunnableEach[Input, Output]:
+        """Bind lifecycle listeners to a Runnable, returning a new Runnable.
+
+        Args:
+            on_start: Called before the Runnable starts running, with the Run object.
+                Defaults to None.
+            on_end: Called after the Runnable finishes running, with the Run object.
+                Defaults to None.
+            on_error: Called if the Runnable throws an error, with the Run object.
+                Defaults to None.
+
+        Returns:
+            A new Runnable with the listeners bound.
+
+        The Run object contains information about the run, including its id,
+        type, input, output, error, start_time, end_time, and any tags or metadata
+        added to the run.
+        """
+        return RunnableEach(
+            bound=self.bound.with_listeners(
+                on_start=on_start, on_end=on_end, on_error=on_error
+            )
+        )
+
+    def with_alisteners(
+        self,
+        *,
+        on_start: Optional[AsyncListener] = None,
+        on_end: Optional[AsyncListener] = None,
+        on_error: Optional[AsyncListener] = None,
+    ) -> RunnableEach[Input, Output]:
+        """Bind async lifecycle listeners to a Runnable, returning a new Runnable.
+
+        Args:
+            on_start: Called asynchronously before the Runnable starts running,
+                with the Run object. Defaults to None.
+            on_end: Called asynchronously after the Runnable finishes running,
+                with the Run object. Defaults to None.
+            on_error: Called asynchronously if the Runnable throws an error,
+                with the Run object. Defaults to None.
+
+        Returns:
+            A new Runnable with the listeners bound.
+
+        The Run object contains information about the run, including its id,
+        type, input, output, error, start_time, end_time, and any tags or metadata
+        added to the run.
+        """
+        return RunnableEach(
+            bound=self.bound.with_alisteners(
+                on_start=on_start, on_end=on_end, on_error=on_error
+            )
+        )
+
+
+class RunnableBindingBase(RunnableSerializable[Input, Output]):
+    """Runnable that delegates calls to another Runnable with a set of kwargs.
+
+    Use only if creating a new RunnableBinding subclass with different __init__ args.
+
+    See documentation for RunnableBinding for more details.
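+
+    A minimal sketch of such a subclass (the ``retries`` field is hypothetical,
+    purely for illustration):
+
+    .. code-block:: python
+
+        class RunnableBindingWithRetries(RunnableBindingBase[Input, Output]):
+            # the extra __init__ arg / pydantic field this subclass adds
+            retries: int = 3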
+ """ + + bound: Runnable[Input, Output] + """The underlying Runnable that this Runnable delegates to.""" + + kwargs: Mapping[str, Any] = Field(default_factory=dict) + """kwargs to pass to the underlying Runnable when running. + + For example, when the Runnable binding is invoked the underlying + Runnable will be invoked with the same input but with these additional + kwargs. + """ + + config: RunnableConfig = Field(default_factory=RunnableConfig) # type: ignore[arg-type] + """The config to bind to the underlying Runnable.""" + + config_factories: list[Callable[[RunnableConfig], RunnableConfig]] = Field( + default_factory=list + ) + """The config factories to bind to the underlying Runnable.""" + + # Union[Type[Input], BaseModel] + things like list[str] + custom_input_type: Optional[Any] = None + """Override the input type of the underlying Runnable with a custom type. + + The type can be a pydantic model, or a type annotation (e.g., `list[str]`). + """ + # Union[Type[Output], BaseModel] + things like list[str] + custom_output_type: Optional[Any] = None + """Override the output type of the underlying Runnable with a custom type. + + The type can be a pydantic model, or a type annotation (e.g., `list[str]`). + """ + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def __init__( + self, + *, + bound: Runnable[Input, Output], + kwargs: Optional[Mapping[str, Any]] = None, + config: Optional[RunnableConfig] = None, + config_factories: Optional[ + list[Callable[[RunnableConfig], RunnableConfig]] + ] = None, + custom_input_type: Optional[Union[type[Input], BaseModel]] = None, + custom_output_type: Optional[Union[type[Output], BaseModel]] = None, + **other_kwargs: Any, + ) -> None: + """Create a RunnableBinding from a Runnable and kwargs. + + Args: + bound: The underlying Runnable that this Runnable delegates calls to. + kwargs: optional kwargs to pass to the underlying Runnable, when running + the underlying Runnable (e.g., via `invoke`, `batch`, + `transform`, or `stream` or async variants) + Defaults to None. + config: optional config to bind to the underlying Runnable. + Defaults to None. + config_factories: optional list of config factories to apply to the + config before binding to the underlying Runnable. + Defaults to None. + custom_input_type: Specify to override the input type of the underlying + Runnable with a custom type. Defaults to None. + custom_output_type: Specify to override the output type of the underlying + Runnable with a custom type. Defaults to None. + **other_kwargs: Unpacked into the base class. + """ + super().__init__( # type: ignore[call-arg] + bound=bound, + kwargs=kwargs or {}, + config=config or {}, + config_factories=config_factories or [], + custom_input_type=custom_input_type, + custom_output_type=custom_output_type, + **other_kwargs, + ) + # if we don't explicitly set config to the TypedDict here, + # the pydantic init above will strip out any of the "extra" + # fields even though total=False on the typed dict. 
+ self.config = config or {} + + @override + def get_name( + self, suffix: Optional[str] = None, *, name: Optional[str] = None + ) -> str: + return self.bound.get_name(suffix, name=name) + + @property + @override + def InputType(self) -> type[Input]: + return ( + cast("type[Input]", self.custom_input_type) + if self.custom_input_type is not None + else self.bound.InputType + ) + + @property + @override + def OutputType(self) -> type[Output]: + return ( + cast("type[Output]", self.custom_output_type) + if self.custom_output_type is not None + else self.bound.OutputType + ) + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + if self.custom_input_type is not None: + return super().get_input_schema(config) + return self.bound.get_input_schema(merge_configs(self.config, config)) + + @override + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + if self.custom_output_type is not None: + return super().get_output_schema(config) + return self.bound.get_output_schema(merge_configs(self.config, config)) + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + return self.bound.config_specs + + @override + def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph: + return self.bound.get_graph(self._merge_configs(config)) + + @classmethod + @override + def is_lc_serializable(cls) -> bool: + return True + + @classmethod + @override + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Defaults to ["langchain", "schema", "runnable"]. + """ + return ["langchain", "schema", "runnable"] + + def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig: + config = merge_configs(self.config, *configs) + return merge_configs(config, *(f(config) for f in self.config_factories)) + + @override + def invoke( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Output: + return self.bound.invoke( + input, + self._merge_configs(config), + **{**self.kwargs, **kwargs}, + ) + + @override + async def ainvoke( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Output: + return await self.bound.ainvoke( + input, + self._merge_configs(config), + **{**self.kwargs, **kwargs}, + ) + + @override + def batch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + if isinstance(config, list): + configs = cast( + "list[RunnableConfig]", + [self._merge_configs(conf) for conf in config], + ) + else: + configs = [self._merge_configs(config) for _ in range(len(inputs))] + return self.bound.batch( + inputs, + configs, + return_exceptions=return_exceptions, + **{**self.kwargs, **kwargs}, + ) + + @override + async def abatch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + if isinstance(config, list): + configs = cast( + "list[RunnableConfig]", + [self._merge_configs(conf) for conf in config], + ) + else: + configs = [self._merge_configs(config) for _ in range(len(inputs))] + return await self.bound.abatch( + inputs, + configs, + return_exceptions=return_exceptions, + **{**self.kwargs, **kwargs}, + ) + + @overload + def batch_as_completed( + self, + inputs: 
Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: Literal[False] = False, + **kwargs: Any, + ) -> Iterator[tuple[int, Output]]: ... + + @overload + def batch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: Literal[True], + **kwargs: Any, + ) -> Iterator[tuple[int, Union[Output, Exception]]]: ... + + @override + def batch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> Iterator[tuple[int, Union[Output, Exception]]]: + if isinstance(config, Sequence): + configs = cast( + "list[RunnableConfig]", + [self._merge_configs(conf) for conf in config], + ) + else: + configs = [self._merge_configs(config) for _ in range(len(inputs))] + # lol mypy + if return_exceptions: + yield from self.bound.batch_as_completed( + inputs, + configs, + return_exceptions=return_exceptions, + **{**self.kwargs, **kwargs}, + ) + else: + yield from self.bound.batch_as_completed( + inputs, + configs, + return_exceptions=return_exceptions, + **{**self.kwargs, **kwargs}, + ) + + @overload + def abatch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: Literal[False] = False, + **kwargs: Optional[Any], + ) -> AsyncIterator[tuple[int, Output]]: ... + + @overload + def abatch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: Literal[True], + **kwargs: Optional[Any], + ) -> AsyncIterator[tuple[int, Union[Output, Exception]]]: ... 
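+    # The two overloads above only refine the static types for callers: with
+    # return_exceptions=True the iterator may yield (index, Exception) pairs
+    # in place of outputs. The runtime behavior is identical in both branches
+    # of the implementation below.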
+ + @override + async def abatch_as_completed( + self, + inputs: Sequence[Input], + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> AsyncIterator[tuple[int, Union[Output, Exception]]]: + if isinstance(config, Sequence): + configs = cast( + "list[RunnableConfig]", + [self._merge_configs(conf) for conf in config], + ) + else: + configs = [self._merge_configs(config) for _ in range(len(inputs))] + if return_exceptions: + async for item in self.bound.abatch_as_completed( + inputs, + configs, + return_exceptions=return_exceptions, + **{**self.kwargs, **kwargs}, + ): + yield item + else: + async for item in self.bound.abatch_as_completed( + inputs, + configs, + return_exceptions=return_exceptions, + **{**self.kwargs, **kwargs}, + ): + yield item + + @override + def stream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + yield from self.bound.stream( + input, + self._merge_configs(config), + **{**self.kwargs, **kwargs}, + ) + + @override + async def astream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + async for item in self.bound.astream( + input, + self._merge_configs(config), + **{**self.kwargs, **kwargs}, + ): + yield item + + @override + async def astream_events( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[StreamEvent]: + async for item in self.bound.astream_events( + input, self._merge_configs(config), **{**self.kwargs, **kwargs} + ): + yield item + + @override + def transform( + self, + input: Iterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[Output]: + yield from self.bound.transform( + input, + self._merge_configs(config), + **{**self.kwargs, **kwargs}, + ) + + @override + async def atransform( + self, + input: AsyncIterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[Output]: + async for item in self.bound.atransform( + input, + self._merge_configs(config), + **{**self.kwargs, **kwargs}, + ): + yield item + + +class RunnableBinding(RunnableBindingBase[Input, Output]): + """Wrap a Runnable with additional functionality. + + A RunnableBinding can be thought of as a "runnable decorator" that + preserves the essential features of Runnable; i.e., batching, streaming, + and async support, while adding additional functionality. + + Any class that inherits from Runnable can be bound to a `RunnableBinding`. + Runnables expose a standard set of methods for creating `RunnableBindings` + or sub-classes of `RunnableBindings` (e.g., `RunnableRetry`, + `RunnableWithFallbacks`) that add additional functionality. + + These methods include: + + - ``bind``: Bind kwargs to pass to the underlying Runnable when running it. + - ``with_config``: Bind config to pass to the underlying Runnable when running it. + - ``with_listeners``: Bind lifecycle listeners to the underlying Runnable. + - ``with_types``: Override the input and output types of the underlying Runnable. + - ``with_retry``: Bind a retry policy to the underlying Runnable. + - ``with_fallbacks``: Bind a fallback policy to the underlying Runnable. + + Example: + `bind`: Bind kwargs to pass to the underlying Runnable when running it. + + .. 
code-block:: python
+
+            # Create a Runnable binding that invokes the ChatModel with the
+            # additional kwarg `stop=['-']` when running it.
+            from langchain_community.chat_models import ChatOpenAI
+
+            model = ChatOpenAI()
+            model.invoke('Say "Parrot-MAGIC"', stop=['-'])  # Should return `Parrot`
+
+            # The easier way: the `bind` method returns a new RunnableBinding.
+            runnable_binding = model.bind(stop=['-'])
+            runnable_binding.invoke('Say "Parrot-MAGIC"')  # Should return `Parrot`
+
+    Can also be done by instantiating a RunnableBinding directly (not recommended):
+
+        .. code-block:: python
+
+            from langchain_core.runnables import RunnableBinding
+
+            runnable_binding = RunnableBinding(
+                bound=model,
+                kwargs={'stop': ['-']}  # <-- Note the additional kwargs
+            )
+            runnable_binding.invoke('Say "Parrot-MAGIC"')  # Should return `Parrot`
+    """
+
+    @override
+    def bind(self, **kwargs: Any) -> Runnable[Input, Output]:
+        """Bind additional kwargs to a Runnable, returning a new Runnable.
+
+        Args:
+            **kwargs: The kwargs to bind to the Runnable.
+
+        Returns:
+            A new Runnable with the same type and config as the original,
+            but with the additional kwargs bound.
+        """
+        return self.__class__(
+            bound=self.bound,
+            config=self.config,
+            config_factories=self.config_factories,
+            kwargs={**self.kwargs, **kwargs},
+            custom_input_type=self.custom_input_type,
+            custom_output_type=self.custom_output_type,
+        )
+
+    @override
+    def with_config(
+        self,
+        config: Optional[RunnableConfig] = None,
+        # Sadly Unpack is not well supported by mypy so this will have to be untyped
+        **kwargs: Any,
+    ) -> Runnable[Input, Output]:
+        return self.__class__(
+            bound=self.bound,
+            kwargs=self.kwargs,
+            config=cast("RunnableConfig", {**self.config, **(config or {}), **kwargs}),
+            config_factories=self.config_factories,
+            custom_input_type=self.custom_input_type,
+            custom_output_type=self.custom_output_type,
+        )
+
+    @override
+    def with_listeners(
+        self,
+        *,
+        on_start: Optional[
+            Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
+        ] = None,
+        on_end: Optional[
+            Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
+        ] = None,
+        on_error: Optional[
+            Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
+        ] = None,
+    ) -> Runnable[Input, Output]:
+        """Bind lifecycle listeners to a Runnable, returning a new Runnable.
+
+        Args:
+            on_start: Called before the Runnable starts running, with the Run object.
+                Defaults to None.
+            on_end: Called after the Runnable finishes running, with the Run object.
+                Defaults to None.
+            on_error: Called if the Runnable throws an error, with the Run object.
+                Defaults to None.
+
+        Returns:
+            A new Runnable with the listeners bound. The Run object passed to
+            each listener contains information about the run, including its id,
+            type, input, output, error, start_time, end_time, and any tags or
+            metadata added to the run.
+ """ + from langchain_core.tracers.root_listeners import RootListenersTracer + + def listener_config_factory(config: RunnableConfig) -> RunnableConfig: + return { + "callbacks": [ + RootListenersTracer( + config=config, + on_start=on_start, + on_end=on_end, + on_error=on_error, + ) + ], + } + + return self.__class__( + bound=self.bound, + kwargs=self.kwargs, + config=self.config, + config_factories=[listener_config_factory] + self.config_factories, + custom_input_type=self.custom_input_type, + custom_output_type=self.custom_output_type, + ) + + @override + def with_types( + self, + input_type: Optional[Union[type[Input], BaseModel]] = None, + output_type: Optional[Union[type[Output], BaseModel]] = None, + ) -> Runnable[Input, Output]: + return self.__class__( + bound=self.bound, + kwargs=self.kwargs, + config=self.config, + config_factories=self.config_factories, + custom_input_type=( + input_type if input_type is not None else self.custom_input_type + ), + custom_output_type=( + output_type if output_type is not None else self.custom_output_type + ), + ) + + @override + def with_retry(self, **kwargs: Any) -> Runnable[Input, Output]: + return self.__class__( + bound=self.bound.with_retry(**kwargs), + kwargs=self.kwargs, + config=self.config, + config_factories=self.config_factories, + ) + + @override + def __getattr__(self, name: str) -> Any: # type: ignore[misc] + attr = getattr(self.bound, name) + + if callable(attr) and ( + config_param := inspect.signature(attr).parameters.get("config") + ): + if config_param.kind == inspect.Parameter.KEYWORD_ONLY: + + @wraps(attr) + def wrapper(*args: Any, **kwargs: Any) -> Any: + return attr( + *args, + config=merge_configs(self.config, kwargs.pop("config", None)), + **kwargs, + ) + + return wrapper + if config_param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: + idx = list(inspect.signature(attr).parameters).index("config") + + @wraps(attr) + def wrapper(*args: Any, **kwargs: Any) -> Any: + if len(args) >= idx + 1: + argsl = list(args) + argsl[idx] = merge_configs(self.config, argsl[idx]) + return attr(*argsl, **kwargs) + return attr( + *args, + config=merge_configs(self.config, kwargs.pop("config", None)), + **kwargs, + ) + + return wrapper + + return attr + + +class _RunnableCallableSync(Protocol[Input, Output]): + def __call__(self, _in: Input, /, *, config: RunnableConfig) -> Output: ... + + +class _RunnableCallableAsync(Protocol[Input, Output]): + def __call__( + self, _in: Input, /, *, config: RunnableConfig + ) -> Awaitable[Output]: ... + + +class _RunnableCallableIterator(Protocol[Input, Output]): + def __call__( + self, _in: Iterator[Input], /, *, config: RunnableConfig + ) -> Iterator[Output]: ... + + +class _RunnableCallableAsyncIterator(Protocol[Input, Output]): + def __call__( + self, _in: AsyncIterator[Input], /, *, config: RunnableConfig + ) -> AsyncIterator[Output]: ... + + +RunnableLike = Union[ + Runnable[Input, Output], + Callable[[Input], Output], + Callable[[Input], Awaitable[Output]], + Callable[[Iterator[Input]], Iterator[Output]], + Callable[[AsyncIterator[Input]], AsyncIterator[Output]], + _RunnableCallableSync[Input, Output], + _RunnableCallableAsync[Input, Output], + _RunnableCallableIterator[Input, Output], + _RunnableCallableAsyncIterator[Input, Output], + Mapping[str, Any], +] + + +def coerce_to_runnable(thing: RunnableLike) -> Runnable[Input, Output]: + """Coerce a Runnable-like object into a Runnable. + + Args: + thing: A Runnable-like object. + + Returns: + A Runnable. 
+
+    Raises:
+        TypeError: If the object is not Runnable-like.
+    """
+    if isinstance(thing, Runnable):
+        return thing
+    if is_async_generator(thing) or inspect.isgeneratorfunction(thing):
+        return RunnableGenerator(thing)
+    if callable(thing):
+        return RunnableLambda(cast("Callable[[Input], Output]", thing))
+    if isinstance(thing, dict):
+        return cast("Runnable[Input, Output]", RunnableParallel(thing))
+    msg = (
+        "Expected a Runnable, callable or dict. "
+        f"Instead got an unsupported type: {type(thing)}"
+    )
+    raise TypeError(msg)
+
+
+@overload
+def chain(
+    func: Callable[[Input], Coroutine[Any, Any, Output]],
+) -> Runnable[Input, Output]: ...
+
+
+@overload
+def chain(
+    func: Callable[[Input], Iterator[Output]],
+) -> Runnable[Input, Output]: ...
+
+
+@overload
+def chain(
+    func: Callable[[Input], AsyncIterator[Output]],
+) -> Runnable[Input, Output]: ...
+
+
+@overload
+def chain(
+    func: Callable[[Input], Output],
+) -> Runnable[Input, Output]: ...
+
+
+def chain(
+    func: Union[
+        Callable[[Input], Output],
+        Callable[[Input], Iterator[Output]],
+        Callable[[Input], Coroutine[Any, Any, Output]],
+        Callable[[Input], AsyncIterator[Output]],
+    ],
+) -> Runnable[Input, Output]:
+    """Decorate a function to make it a Runnable.
+
+    Sets the name of the Runnable to the name of the function.
+    Any runnables called by the function will be traced as dependencies.
+
+    Args:
+        func: A callable.
+
+    Returns:
+        A Runnable.
+
+    Example:
+
+        .. code-block:: python
+
+            from langchain_core.runnables import chain
+            from langchain_core.prompts import PromptTemplate
+            from langchain_openai import OpenAI
+
+            @chain
+            def my_func(fields):
+                prompt = PromptTemplate.from_template("Hello, {name}!")
+                llm = OpenAI()
+                formatted = prompt.invoke(fields)
+
+                for chunk in llm.stream(formatted):
+                    yield chunk
+    """
+    return RunnableLambda(func)
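A short sketch of the coercion rules and the `@chain` decorator in action; the function names are illustrative:

```python
from langchain_core.runnables import RunnableLambda, chain

@chain
def shout(text: str) -> str:
    return text.upper() + "!"

print(shout.invoke("hello"))  # 'HELLO!'

# Dicts are coerced into RunnableParallel when composed with other runnables,
# and plain callables are coerced into RunnableLambda.
fan_out = RunnableLambda(lambda s: s.strip()) | {"loud": shout, "length": lambda x: len(x)}
print(fan_out.invoke("  hi  "))  # {'loud': 'HI!', 'length': 2}
```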
diff --git a/venv/Lib/site-packages/langchain_core/runnables/branch.py b/venv/Lib/site-packages/langchain_core/runnables/branch.py
new file mode 100644
index 00000000..686426e9
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/runnables/branch.py
@@ -0,0 +1,475 @@
+"""Runnable that selects which branch to run based on a condition."""
+
+from collections.abc import AsyncIterator, Awaitable, Iterator, Mapping, Sequence
+from typing import (
+    Any,
+    Callable,
+    Optional,
+    Union,
+    cast,
+)
+
+from pydantic import BaseModel, ConfigDict
+from typing_extensions import override
+
+from langchain_core.runnables.base import (
+    Runnable,
+    RunnableLike,
+    RunnableSerializable,
+    coerce_to_runnable,
+)
+from langchain_core.runnables.config import (
+    RunnableConfig,
+    ensure_config,
+    get_async_callback_manager_for_config,
+    get_callback_manager_for_config,
+    patch_config,
+)
+from langchain_core.runnables.utils import (
+    ConfigurableFieldSpec,
+    Input,
+    Output,
+    get_unique_config_specs,
+)
+
+
+class RunnableBranch(RunnableSerializable[Input, Output]):
+    """Runnable that selects which branch to run based on a condition.
+
+    The Runnable is initialized with a list of (condition, Runnable) pairs and
+    a default branch.
+
+    When operating on an input, the first condition that evaluates to True is
+    selected, and the corresponding Runnable is run on the input.
+
+    If no condition evaluates to True, the default branch is run on the input.
+
+    Parameters:
+        branches: A list of (condition, Runnable) pairs.
+        default: A Runnable to run if no condition is met.
+
+    Examples:
+
+        .. code-block:: python
+
+            from langchain_core.runnables import RunnableBranch
+
+            branch = RunnableBranch(
+                (lambda x: isinstance(x, str), lambda x: x.upper()),
+                (lambda x: isinstance(x, int), lambda x: x + 1),
+                (lambda x: isinstance(x, float), lambda x: x * 2),
+                lambda x: "goodbye",
+            )
+
+            branch.invoke("hello")  # "HELLO"
+            branch.invoke(None)  # "goodbye"
+    """
+
+    branches: Sequence[tuple[Runnable[Input, bool], Runnable[Input, Output]]]
+    default: Runnable[Input, Output]
+
+    def __init__(
+        self,
+        *branches: Union[
+            tuple[
+                Union[
+                    Runnable[Input, bool],
+                    Callable[[Input], bool],
+                    Callable[[Input], Awaitable[bool]],
+                ],
+                RunnableLike,
+            ],
+            RunnableLike,  # To accommodate the default branch
+        ],
+    ) -> None:
+        """Create a RunnableBranch from (condition, Runnable) pairs and a default.
+
+        Args:
+            *branches: (condition, Runnable) pairs, followed by a final default
+                Runnable that is run if no condition is met.
+
+        Raises:
+            ValueError: If the number of branches is less than 2.
+            TypeError: If the default branch is not Runnable, Callable or Mapping.
+            TypeError: If a branch is not a tuple or list.
+            ValueError: If a branch is not of length 2.
+        """
+        if len(branches) < 2:
+            msg = "RunnableBranch requires at least two branches"
+            raise ValueError(msg)
+
+        default = branches[-1]
+
+        if not isinstance(
+            default,
+            (Runnable, Callable, Mapping),  # type: ignore[arg-type]
+        ):
+            msg = "RunnableBranch default must be Runnable, callable or mapping."
+            raise TypeError(msg)
+
+        default_ = cast(
+            "Runnable[Input, Output]", coerce_to_runnable(cast("RunnableLike", default))
+        )
+
+        _branches = []
+
+        for branch in branches[:-1]:
+            if not isinstance(branch, (tuple, list)):
+                msg = (
+                    f"RunnableBranch branches must be "
+                    f"tuples or lists, not {type(branch)}"
+                )
+                raise TypeError(msg)
+
+            if len(branch) != 2:
+                msg = (
+                    f"RunnableBranch branches must be "
+                    f"tuples or lists of length 2, not {len(branch)}"
+                )
+                raise ValueError(msg)
+            condition, runnable = branch
+            condition = cast("Runnable[Input, bool]", coerce_to_runnable(condition))
+            runnable = coerce_to_runnable(runnable)
+            _branches.append((condition, runnable))
+
+        super().__init__(
+            branches=_branches,
+            default=default_,
+        )  # type: ignore[call-arg]
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )
+
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        """RunnableBranch is serializable if all its branches are serializable."""
+        return True
+
+    @classmethod
+    @override
+    def get_lc_namespace(cls) -> list[str]:
+        return ["langchain", "schema", "runnable"]
+
+    @override
+    def get_input_schema(
+        self, config: Optional[RunnableConfig] = None
+    ) -> type[BaseModel]:
+        runnables = (
+            [self.default]
+            + [r for _, r in self.branches]
+            + [r for r, _ in self.branches]
+        )
+
+        for runnable in runnables:
+            if (
+                runnable.get_input_schema(config).model_json_schema().get("type")
+                is not None
+            ):
+                return runnable.get_input_schema(config)
+
+        return super().get_input_schema(config)
+
+    @property
+    @override
+    def config_specs(self) -> list[ConfigurableFieldSpec]:
+        from langchain_core.beta.runnables.context import (
+            CONTEXT_CONFIG_PREFIX,
+            CONTEXT_CONFIG_SUFFIX_SET,
+        )
+
+        specs = get_unique_config_specs(
+            spec
+            for step in (
+                [self.default]
+                + [r for _, r in self.branches]
+                + [r for r, _ in self.branches]
+            )
+            for spec in step.config_specs
+        )
+        if any(
+            s.id.startswith(CONTEXT_CONFIG_PREFIX)
+            and s.id.endswith(CONTEXT_CONFIG_SUFFIX_SET)
+            for s in specs
+        ):
+            msg = "RunnableBranch cannot contain context setters."
+            raise ValueError(msg)
+        return specs
+
+    def invoke(
+        self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
+    ) -> Output:
+        """Evaluate the conditions in order and run the first matching branch.
+
+        If no condition evaluates to True, the default branch is run.
+
+        Args:
+            input: The input to the Runnable.
+            config: The configuration for the Runnable. Defaults to None.
+            kwargs: Additional keyword arguments to pass to the Runnable.
+
+        Returns:
+            The output of the branch that was run.
+        """
+        config = ensure_config(config)
+        callback_manager = get_callback_manager_for_config(config)
+        run_manager = callback_manager.on_chain_start(
+            None,
+            input,
+            name=config.get("run_name") or self.get_name(),
+            run_id=config.pop("run_id", None),
+        )
+
+        try:
+            for idx, branch in enumerate(self.branches):
+                condition, runnable = branch
+
+                expression_value = condition.invoke(
+                    input,
+                    config=patch_config(
+                        config,
+                        callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"),
+                    ),
+                )
+
+                if expression_value:
+                    output = runnable.invoke(
+                        input,
+                        config=patch_config(
+                            config,
+                            callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"),
+                        ),
+                        **kwargs,
+                    )
+                    break
+            else:
+                output = self.default.invoke(
+                    input,
+                    config=patch_config(
+                        config, callbacks=run_manager.get_child(tag="branch:default")
+                    ),
+                    **kwargs,
+                )
+        except BaseException as e:
+            run_manager.on_chain_error(e)
+            raise
+        run_manager.on_chain_end(output)
+        return output
+
+    async def ainvoke(
+        self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
+    ) -> Output:
+        """Async version of invoke."""
+        config = ensure_config(config)
+        callback_manager = get_async_callback_manager_for_config(config)
+        run_manager = await callback_manager.on_chain_start(
+            None,
+            input,
+            name=config.get("run_name") or self.get_name(),
+            run_id=config.pop("run_id", None),
+        )
+        try:
+            for idx, branch in enumerate(self.branches):
+                condition, runnable = branch
+
+                expression_value = await condition.ainvoke(
+                    input,
+                    config=patch_config(
+                        config,
+                        callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"),
+                    ),
+                )
+
+                if expression_value:
+                    output = await runnable.ainvoke(
+                        input,
+                        config=patch_config(
+                            config,
+                            callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"),
+                        ),
+                        **kwargs,
+                    )
+                    break
+            else:
+                output = await self.default.ainvoke(
+                    input,
+                    config=patch_config(
+                        config, callbacks=run_manager.get_child(tag="branch:default")
+                    ),
+                    **kwargs,
+                )
+        except BaseException as e:
+            await run_manager.on_chain_error(e)
+            raise
+        await run_manager.on_chain_end(output)
+        return output
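To make the streaming path that follows concrete, a small sketch relying on the generator-function coercion from `coerce_to_runnable`; the `spell` function is illustrative. A generator branch streams chunk by chunk, while `invoke` would typically return the accumulated value:

```python
from collections.abc import Iterator

from langchain_core.runnables import RunnableBranch

def spell(chunks: Iterator[str]) -> Iterator[str]:
    # Generator functions are coerced to RunnableGenerator, so the chosen
    # branch can stream; each input string is re-emitted character by character.
    for chunk in chunks:
        yield from chunk

branch = RunnableBranch(
    (lambda x: isinstance(x, str), spell),
    lambda x: str(x),  # default branch
)

print(list(branch.stream("abc")))  # ['a', 'b', 'c']
```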
+ """ + config = ensure_config(config) + callback_manager = get_callback_manager_for_config(config) + run_manager = callback_manager.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + final_output: Optional[Output] = None + final_output_supported = True + + try: + for idx, branch in enumerate(self.branches): + condition, runnable = branch + + expression_value = condition.invoke( + input, + config=patch_config( + config, + callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"), + ), + ) + + if expression_value: + for chunk in runnable.stream( + input, + config=patch_config( + config, + callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"), + ), + **kwargs, + ): + yield chunk + if final_output_supported: + if final_output is None: + final_output = chunk + else: + try: + final_output = final_output + chunk # type: ignore[operator] + except TypeError: + final_output = None + final_output_supported = False + break + else: + for chunk in self.default.stream( + input, + config=patch_config( + config, + callbacks=run_manager.get_child(tag="branch:default"), + ), + **kwargs, + ): + yield chunk + if final_output_supported: + if final_output is None: + final_output = chunk + else: + try: + final_output = final_output + chunk # type: ignore[operator] + except TypeError: + final_output = None + final_output_supported = False + except BaseException as e: + run_manager.on_chain_error(e) + raise + run_manager.on_chain_end(final_output) + + async def astream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + """First evaluates the condition, then delegate to true or false branch. + + Args: + input: The input to the Runnable. + config: The configuration for the Runnable. Defaults to None. + kwargs: Additional keyword arguments to pass to the Runnable. + + Yields: + The output of the branch that was run. + + Raises: + BaseException: If an error occurs during the execution of the Runnable. 
+ """ + config = ensure_config(config) + callback_manager = get_async_callback_manager_for_config(config) + run_manager = await callback_manager.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + final_output: Optional[Output] = None + final_output_supported = True + + try: + for idx, branch in enumerate(self.branches): + condition, runnable = branch + + expression_value = await condition.ainvoke( + input, + config=patch_config( + config, + callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"), + ), + ) + + if expression_value: + async for chunk in runnable.astream( + input, + config=patch_config( + config, + callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"), + ), + **kwargs, + ): + yield chunk + if final_output_supported: + if final_output is None: + final_output = chunk + else: + try: + final_output = final_output + chunk # type: ignore[operator] + except TypeError: + final_output = None + final_output_supported = False + break + else: + async for chunk in self.default.astream( + input, + config=patch_config( + config, + callbacks=run_manager.get_child(tag="branch:default"), + ), + **kwargs, + ): + yield chunk + if final_output_supported: + if final_output is None: + final_output = chunk + else: + try: + final_output = final_output + chunk # type: ignore[operator] + except TypeError: + final_output = None + final_output_supported = False + except BaseException as e: + await run_manager.on_chain_error(e) + raise + await run_manager.on_chain_end(final_output) diff --git a/venv/Lib/site-packages/langchain_core/runnables/config.py b/venv/Lib/site-packages/langchain_core/runnables/config.py new file mode 100644 index 00000000..6eae4f23 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/config.py @@ -0,0 +1,621 @@ +"""Configuration utilities for Runnables.""" + +from __future__ import annotations + +import asyncio +import uuid +import warnings +from collections.abc import Awaitable, Generator, Iterable, Iterator, Sequence +from concurrent.futures import Executor, Future, ThreadPoolExecutor +from contextlib import contextmanager +from contextvars import Context, ContextVar, Token, copy_context +from functools import partial +from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union, cast + +from typing_extensions import ParamSpec, TypedDict + +from langchain_core.runnables.utils import ( + Input, + Output, + accepts_config, + accepts_run_manager, +) + +if TYPE_CHECKING: + from langchain_core.callbacks.base import BaseCallbackManager, Callbacks + from langchain_core.callbacks.manager import ( + AsyncCallbackManager, + AsyncCallbackManagerForChainRun, + CallbackManager, + CallbackManagerForChainRun, + ) +else: + # Pydantic validates through typed dicts, but + # the callbacks need forward refs updated + Callbacks = Optional[Union[list, Any]] + + +class EmptyDict(TypedDict, total=False): + """Empty dict type.""" + + +class RunnableConfig(TypedDict, total=False): + """Configuration for a Runnable.""" + + tags: list[str] + """ + Tags for this call and any sub-calls (eg. a Chain calling an LLM). + You can use these to filter calls. + """ + + metadata: dict[str, Any] + """ + Metadata for this call and any sub-calls (eg. a Chain calling an LLM). + Keys should be strings, values should be JSON-serializable. + """ + + callbacks: Callbacks + """ + Callbacks for this call and any sub-calls (eg. a Chain calling an LLM). 
+ Tags are passed to all callbacks, metadata is passed to handle*Start callbacks. + """ + + run_name: str + """ + Name for the tracer run for this call. Defaults to the name of the class. + """ + + max_concurrency: Optional[int] + """ + Maximum number of parallel calls to make. If not provided, defaults to + ThreadPoolExecutor's default. + """ + + recursion_limit: int + """ + Maximum number of times a call can recurse. If not provided, defaults to 25. + """ + + configurable: dict[str, Any] + """ + Runtime values for attributes previously made configurable on this Runnable, + or sub-Runnables, through .configurable_fields() or .configurable_alternatives(). + Check .output_schema() for a description of the attributes that have been made + configurable. + """ + + run_id: Optional[uuid.UUID] + """ + Unique identifier for the tracer run for this call. If not provided, a new UUID + will be generated. + """ + + +CONFIG_KEYS = [ + "tags", + "metadata", + "callbacks", + "run_name", + "max_concurrency", + "recursion_limit", + "configurable", + "run_id", +] + +COPIABLE_KEYS = [ + "tags", + "metadata", + "callbacks", + "configurable", +] + +DEFAULT_RECURSION_LIMIT = 25 + + +var_child_runnable_config: ContextVar[RunnableConfig | None] = ContextVar( + "child_runnable_config", default=None +) + + +# This is imported and used in langgraph, so don't break. +def _set_config_context( + config: RunnableConfig, +) -> tuple[Token[Optional[RunnableConfig]], Optional[dict[str, Any]]]: + """Set the child Runnable config + tracing context. + + Args: + config (RunnableConfig): The config to set. + """ + from langchain_core.tracers.langchain import LangChainTracer + + config_token = var_child_runnable_config.set(config) + current_context = None + if ( + (callbacks := config.get("callbacks")) + and ( + parent_run_id := getattr(callbacks, "parent_run_id", None) + ) # Is callback manager + and ( + tracer := next( + ( + handler + for handler in getattr(callbacks, "handlers", []) + if isinstance(handler, LangChainTracer) + ), + None, + ) + ) + and (run := tracer.run_map.get(str(parent_run_id))) + ): + from langsmith.run_helpers import _set_tracing_context, get_tracing_context + + current_context = get_tracing_context() + _set_tracing_context({"parent": run}) + return config_token, current_context + + +@contextmanager +def set_config_context(config: RunnableConfig) -> Generator[Context, None, None]: + """Set the child Runnable config + tracing context. + + Args: + config (RunnableConfig): The config to set. + """ + from langsmith.run_helpers import _set_tracing_context + + ctx = copy_context() + config_token, _ = ctx.run(_set_config_context, config) + try: + yield ctx + finally: + ctx.run(var_child_runnable_config.reset, config_token) + ctx.run( + _set_tracing_context, + { + "parent": None, + "project_name": None, + "tags": None, + "metadata": None, + "enabled": None, + "client": None, + }, + ) + + +def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig: + """Ensure that a config is a dict with all keys present. + + Args: + config (Optional[RunnableConfig], optional): The config to ensure. + Defaults to None. + + Returns: + RunnableConfig: The ensured config. 
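A sketch of the normalization performed here, with an illustrative `user_id` key: defaults are filled in, unknown top-level keys are folded into `configurable`, and simple scalar configurable values are mirrored into `metadata` for tracing:

```python
from langchain_core.runnables.config import ensure_config

cfg = ensure_config({"tags": ["demo"], "user_id": "u-123"})

print(cfg["recursion_limit"])  # 25 -- the default is filled in
print(cfg["tags"])             # ['demo']
print(cfg["configurable"])     # {'user_id': 'u-123'}
print(cfg["metadata"])         # {'user_id': 'u-123'}
```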
+ """ + empty = RunnableConfig( + tags=[], + metadata={}, + callbacks=None, + recursion_limit=DEFAULT_RECURSION_LIMIT, + configurable={}, + ) + if var_config := var_child_runnable_config.get(): + empty.update( + cast( + "RunnableConfig", + { + k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined] + for k, v in var_config.items() + if v is not None + }, + ) + ) + if config is not None: + empty.update( + cast( + "RunnableConfig", + { + k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined] + for k, v in config.items() + if v is not None and k in CONFIG_KEYS + }, + ) + ) + if config is not None: + for k, v in config.items(): + if k not in CONFIG_KEYS and v is not None: + empty["configurable"][k] = v + for key, value in empty.get("configurable", {}).items(): + if ( + not key.startswith("__") + and isinstance(value, (str, int, float, bool)) + and key not in empty["metadata"] + ): + empty["metadata"][key] = value + return empty + + +def get_config_list( + config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]], length: int +) -> list[RunnableConfig]: + """Get a list of configs from a single config or a list of configs. + + It is useful for subclasses overriding batch() or abatch(). + + Args: + config (Optional[Union[RunnableConfig, list[RunnableConfig]]]): + The config or list of configs. + length (int): The length of the list. + + Returns: + list[RunnableConfig]: The list of configs. + + Raises: + ValueError: If the length of the list is not equal to the length of the inputs. + + """ + if length < 0: + msg = f"length must be >= 0, but got {length}" + raise ValueError(msg) + if isinstance(config, Sequence) and len(config) != length: + msg = ( + f"config must be a list of the same length as inputs, " + f"but got {len(config)} configs for {length} inputs" + ) + raise ValueError(msg) + + if isinstance(config, Sequence): + return list(map(ensure_config, config)) + if length > 1 and isinstance(config, dict) and config.get("run_id") is not None: + warnings.warn( + "Provided run_id be used only for the first element of the batch.", + category=RuntimeWarning, + stacklevel=3, + ) + subsequent = cast( + "RunnableConfig", {k: v for k, v in config.items() if k != "run_id"} + ) + return [ + ensure_config(subsequent) if i else ensure_config(config) + for i in range(length) + ] + return [ensure_config(config) for i in range(length)] + + +def patch_config( + config: Optional[RunnableConfig], + *, + callbacks: Optional[BaseCallbackManager] = None, + recursion_limit: Optional[int] = None, + max_concurrency: Optional[int] = None, + run_name: Optional[str] = None, + configurable: Optional[dict[str, Any]] = None, +) -> RunnableConfig: + """Patch a config with new values. + + Args: + config (Optional[RunnableConfig]): The config to patch. + callbacks (Optional[BaseCallbackManager], optional): The callbacks to set. + Defaults to None. + recursion_limit (Optional[int], optional): The recursion limit to set. + Defaults to None. + max_concurrency (Optional[int], optional): The max concurrency to set. + Defaults to None. + run_name (Optional[str], optional): The run name to set. Defaults to None. + configurable (Optional[dict[str, Any]], optional): The configurable to set. + Defaults to None. + + Returns: + RunnableConfig: The patched config. 
+ """ + config = ensure_config(config) + if callbacks is not None: + # If we're replacing callbacks, we need to unset run_name + # As that should apply only to the same run as the original callbacks + config["callbacks"] = callbacks + if "run_name" in config: + del config["run_name"] + if "run_id" in config: + del config["run_id"] + if recursion_limit is not None: + config["recursion_limit"] = recursion_limit + if max_concurrency is not None: + config["max_concurrency"] = max_concurrency + if run_name is not None: + config["run_name"] = run_name + if configurable is not None: + config["configurable"] = {**config.get("configurable", {}), **configurable} + return config + + +def merge_configs(*configs: Optional[RunnableConfig]) -> RunnableConfig: + """Merge multiple configs into one. + + Args: + *configs (Optional[RunnableConfig]): The configs to merge. + + Returns: + RunnableConfig: The merged config. + """ + base: RunnableConfig = {} + # Even though the keys aren't literals, this is correct + # because both dicts are the same type + for config in (ensure_config(c) for c in configs if c is not None): + for key in config: + if key == "metadata": + base["metadata"] = { + **base.get("metadata", {}), + **(config.get("metadata") or {}), + } + elif key == "tags": + base["tags"] = sorted( + set(base.get("tags", []) + (config.get("tags") or [])), + ) + elif key == "configurable": + base["configurable"] = { + **base.get("configurable", {}), + **(config.get("configurable") or {}), + } + elif key == "callbacks": + base_callbacks = base.get("callbacks") + these_callbacks = config["callbacks"] + # callbacks can be either None, list[handler] or manager + # so merging two callbacks values has 6 cases + if isinstance(these_callbacks, list): + if base_callbacks is None: + base["callbacks"] = these_callbacks.copy() + elif isinstance(base_callbacks, list): + base["callbacks"] = base_callbacks + these_callbacks + else: + # base_callbacks is a manager + mngr = base_callbacks.copy() + for callback in these_callbacks: + mngr.add_handler(callback, inherit=True) + base["callbacks"] = mngr + elif these_callbacks is not None: + # these_callbacks is a manager + if base_callbacks is None: + base["callbacks"] = these_callbacks.copy() + elif isinstance(base_callbacks, list): + mngr = these_callbacks.copy() + for callback in base_callbacks: + mngr.add_handler(callback, inherit=True) + base["callbacks"] = mngr + else: + # base_callbacks is also a manager + base["callbacks"] = base_callbacks.merge(these_callbacks) + elif key == "recursion_limit": + if config["recursion_limit"] != DEFAULT_RECURSION_LIMIT: + base["recursion_limit"] = config["recursion_limit"] + elif key in COPIABLE_KEYS and config[key] is not None: # type: ignore[literal-required] + base[key] = config[key].copy() # type: ignore[literal-required] + else: + base[key] = config[key] or base.get(key) # type: ignore[literal-required] + return base + + +def call_func_with_variable_args( + func: Union[ + Callable[[Input], Output], + Callable[[Input, RunnableConfig], Output], + Callable[[Input, CallbackManagerForChainRun], Output], + Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output], + ], + input: Input, + config: RunnableConfig, + run_manager: Optional[CallbackManagerForChainRun] = None, + **kwargs: Any, +) -> Output: + """Call function that may optionally accept a run_manager and/or config. + + Args: + func: The function to call. + input: The input to the function. + config: The config to pass to the function. 
+ run_manager: The run manager to pass to the function. Defaults to None. + **kwargs: The keyword arguments to pass to the function. + + Returns: + The output of the function. + """ + if accepts_config(func): + if run_manager is not None: + kwargs["config"] = patch_config(config, callbacks=run_manager.get_child()) + else: + kwargs["config"] = config + if run_manager is not None and accepts_run_manager(func): + kwargs["run_manager"] = run_manager + return func(input, **kwargs) # type: ignore[call-arg] + + +def acall_func_with_variable_args( + func: Union[ + Callable[[Input], Awaitable[Output]], + Callable[[Input, RunnableConfig], Awaitable[Output]], + Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]], + Callable[ + [Input, AsyncCallbackManagerForChainRun, RunnableConfig], + Awaitable[Output], + ], + ], + input: Input, + config: RunnableConfig, + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + **kwargs: Any, +) -> Awaitable[Output]: + """Async call function that may optionally accept a run_manager and/or config. + + Args: + func: The function to call. + input: The input to the function. + config: The config to pass to the function. + run_manager: The run manager to pass to the function. Defaults to None. + **kwargs: The keyword arguments to pass to the function. + + Returns: + The output of the function. + """ + if accepts_config(func): + if run_manager is not None: + kwargs["config"] = patch_config(config, callbacks=run_manager.get_child()) + else: + kwargs["config"] = config + if run_manager is not None and accepts_run_manager(func): + kwargs["run_manager"] = run_manager + return func(input, **kwargs) # type: ignore[call-arg] + + +def get_callback_manager_for_config(config: RunnableConfig) -> CallbackManager: + """Get a callback manager for a config. + + Args: + config (RunnableConfig): The config. + + Returns: + CallbackManager: The callback manager. + """ + from langchain_core.callbacks.manager import CallbackManager + + return CallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + inheritable_tags=config.get("tags"), + inheritable_metadata=config.get("metadata"), + ) + + +def get_async_callback_manager_for_config( + config: RunnableConfig, +) -> AsyncCallbackManager: + """Get an async callback manager for a config. + + Args: + config (RunnableConfig): The config. + + Returns: + AsyncCallbackManager: The async callback manager. + """ + from langchain_core.callbacks.manager import AsyncCallbackManager + + return AsyncCallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + inheritable_tags=config.get("tags"), + inheritable_metadata=config.get("metadata"), + ) + + +P = ParamSpec("P") +T = TypeVar("T") + + +class ContextThreadPoolExecutor(ThreadPoolExecutor): + """ThreadPoolExecutor that copies the context to the child thread.""" + + def submit( # type: ignore[override] + self, + func: Callable[P, T], + *args: P.args, + **kwargs: P.kwargs, + ) -> Future[T]: + """Submit a function to the executor. + + Args: + func (Callable[..., T]): The function to submit. + *args (Any): The positional arguments to the function. + **kwargs (Any): The keyword arguments to the function. + + Returns: + Future[T]: The future for the function. 
+ """ + return super().submit( + cast("Callable[..., T]", partial(copy_context().run, func, *args, **kwargs)) + ) + + def map( + self, + fn: Callable[..., T], + *iterables: Iterable[Any], + timeout: float | None = None, + chunksize: int = 1, + ) -> Iterator[T]: + """Map a function to multiple iterables. + + Args: + fn (Callable[..., T]): The function to map. + *iterables (Iterable[Any]): The iterables to map over. + timeout (float | None, optional): The timeout for the map. + Defaults to None. + chunksize (int, optional): The chunksize for the map. Defaults to 1. + + Returns: + Iterator[T]: The iterator for the mapped function. + """ + contexts = [copy_context() for _ in range(len(iterables[0]))] # type: ignore[arg-type] + + def _wrapped_fn(*args: Any) -> T: + return contexts.pop().run(fn, *args) + + return super().map( + _wrapped_fn, + *iterables, + timeout=timeout, + chunksize=chunksize, + ) + + +@contextmanager +def get_executor_for_config( + config: Optional[RunnableConfig], +) -> Generator[Executor, None, None]: + """Get an executor for a config. + + Args: + config (RunnableConfig): The config. + + Yields: + Generator[Executor, None, None]: The executor. + """ + config = config or {} + with ContextThreadPoolExecutor( + max_workers=config.get("max_concurrency") + ) as executor: + yield executor + + +async def run_in_executor( + executor_or_config: Optional[Union[Executor, RunnableConfig]], + func: Callable[P, T], + *args: P.args, + **kwargs: P.kwargs, +) -> T: + """Run a function in an executor. + + Args: + executor_or_config: The executor or config to run in. + func (Callable[P, Output]): The function. + *args (Any): The positional arguments to the function. + **kwargs (Any): The keyword arguments to the function. + + Returns: + Output: The output of the function. + + Raises: + RuntimeError: If the function raises a StopIteration. 
+ """ + + def wrapper() -> T: + try: + return func(*args, **kwargs) + except StopIteration as exc: + # StopIteration can't be set on an asyncio.Future + # it raises a TypeError and leaves the Future pending forever + # so we need to convert it to a RuntimeError + raise RuntimeError from exc + + if executor_or_config is None or isinstance(executor_or_config, dict): + # Use default executor with context copied from current context + return await asyncio.get_running_loop().run_in_executor( + None, + cast("Callable[..., T]", partial(copy_context().run, wrapper)), + ) + + return await asyncio.get_running_loop().run_in_executor(executor_or_config, wrapper) diff --git a/venv/Lib/site-packages/langchain_core/runnables/configurable.py b/venv/Lib/site-packages/langchain_core/runnables/configurable.py new file mode 100644 index 00000000..0fdb2869 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/configurable.py @@ -0,0 +1,716 @@ +"""Runnables that can be dynamically configured.""" + +from __future__ import annotations + +import enum +import threading +from abc import abstractmethod +from collections.abc import ( + AsyncIterator, + Iterator, + Sequence, +) +from functools import wraps +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Union, + cast, +) +from weakref import WeakValueDictionary + +from pydantic import BaseModel, ConfigDict +from typing_extensions import override + +from langchain_core.runnables.base import Runnable, RunnableSerializable +from langchain_core.runnables.config import ( + RunnableConfig, + ensure_config, + get_config_list, + get_executor_for_config, + merge_configs, +) +from langchain_core.runnables.utils import ( + AnyConfigurableField, + ConfigurableField, + ConfigurableFieldMultiOption, + ConfigurableFieldSingleOption, + ConfigurableFieldSpec, + Input, + Output, + gather_with_concurrency, + get_unique_config_specs, +) + +if TYPE_CHECKING: + from langchain_core.runnables.graph import Graph + + +class DynamicRunnable(RunnableSerializable[Input, Output]): + """Serializable Runnable that can be dynamically configured. + + A DynamicRunnable should be initiated using the `configurable_fields` or + `configurable_alternatives` method of a Runnable. + + Parameters: + default: The default Runnable to use. + config: The configuration to use. 
+ """ + + default: RunnableSerializable[Input, Output] + + config: Optional[RunnableConfig] = None + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @classmethod + @override + def is_lc_serializable(cls) -> bool: + return True + + @classmethod + @override + def get_lc_namespace(cls) -> list[str]: + return ["langchain", "schema", "runnable"] + + @property + @override + def InputType(self) -> type[Input]: + return self.default.InputType + + @property + @override + def OutputType(self) -> type[Output]: + return self.default.OutputType + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + runnable, config = self.prepare(config) + return runnable.get_input_schema(config) + + @override + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + runnable, config = self.prepare(config) + return runnable.get_output_schema(config) + + @override + def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph: + runnable, config = self.prepare(config) + return runnable.get_graph(config) + + @override + def with_config( + self, + config: Optional[RunnableConfig] = None, + # Sadly Unpack is not well supported by mypy so this will have to be untyped + **kwargs: Any, + ) -> Runnable[Input, Output]: + return self.__class__( + **{**self.__dict__, "config": ensure_config(merge_configs(config, kwargs))} # type: ignore[arg-type] + ) + + def prepare( + self, config: Optional[RunnableConfig] = None + ) -> tuple[Runnable[Input, Output], RunnableConfig]: + """Prepare the Runnable for invocation. + + Args: + config: The configuration to use. Defaults to None. + + Returns: + tuple[Runnable[Input, Output], RunnableConfig]: The prepared Runnable and + configuration. + """ + runnable: Runnable[Input, Output] = self + while isinstance(runnable, DynamicRunnable): + runnable, config = runnable._prepare(merge_configs(runnable.config, config)) + return runnable, cast("RunnableConfig", config) + + @abstractmethod + def _prepare( + self, config: Optional[RunnableConfig] = None + ) -> tuple[Runnable[Input, Output], RunnableConfig]: ... 
+ + @override + def invoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + runnable, config = self.prepare(config) + return runnable.invoke(input, config, **kwargs) + + @override + async def ainvoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + runnable, config = self.prepare(config) + return await runnable.ainvoke(input, config, **kwargs) + + @override + def batch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + configs = get_config_list(config, len(inputs)) + prepared = [self.prepare(c) for c in configs] + + if all(p is self.default for p, _ in prepared): + return self.default.batch( + inputs, + [c for _, c in prepared], + return_exceptions=return_exceptions, + **kwargs, + ) + + if not inputs: + return [] + + def invoke( + prepared: tuple[Runnable[Input, Output], RunnableConfig], + input: Input, + ) -> Union[Output, Exception]: + bound, config = prepared + if return_exceptions: + try: + return bound.invoke(input, config, **kwargs) + except Exception as e: + return e + else: + return bound.invoke(input, config, **kwargs) + + # If there's only one input, don't bother with the executor + if len(inputs) == 1: + return cast("list[Output]", [invoke(prepared[0], inputs[0])]) + + with get_executor_for_config(configs[0]) as executor: + return cast("list[Output]", list(executor.map(invoke, prepared, inputs))) + + @override + async def abatch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + configs = get_config_list(config, len(inputs)) + prepared = [self.prepare(c) for c in configs] + + if all(p is self.default for p, _ in prepared): + return await self.default.abatch( + inputs, + [c for _, c in prepared], + return_exceptions=return_exceptions, + **kwargs, + ) + + if not inputs: + return [] + + async def ainvoke( + prepared: tuple[Runnable[Input, Output], RunnableConfig], + input: Input, + ) -> Union[Output, Exception]: + bound, config = prepared + if return_exceptions: + try: + return await bound.ainvoke(input, config, **kwargs) + except Exception as e: + return e + else: + return await bound.ainvoke(input, config, **kwargs) + + coros = map(ainvoke, prepared, inputs) + return await gather_with_concurrency(configs[0].get("max_concurrency"), *coros) + + @override + def stream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + runnable, config = self.prepare(config) + return runnable.stream(input, config, **kwargs) + + @override + async def astream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + runnable, config = self.prepare(config) + async for chunk in runnable.astream(input, config, **kwargs): + yield chunk + + @override + def transform( + self, + input: Iterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + runnable, config = self.prepare(config) + return runnable.transform(input, config, **kwargs) + + @override + async def atransform( + self, + input: AsyncIterator[Input], + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + runnable, config = 
self.prepare(config) + async for chunk in runnable.atransform(input, config, **kwargs): + yield chunk + + @override + def __getattr__(self, name: str) -> Any: # type: ignore[misc] + attr = getattr(self.default, name) + if callable(attr): + + @wraps(attr) + def wrapper(*args: Any, **kwargs: Any) -> Any: + for key, arg in kwargs.items(): + if key == "config" and ( + isinstance(arg, dict) + and "configurable" in arg + and isinstance(arg["configurable"], dict) + ): + runnable, config = self.prepare(cast("RunnableConfig", arg)) + kwargs = {**kwargs, "config": config} + return getattr(runnable, name)(*args, **kwargs) + + for idx, arg in enumerate(args): + if ( + isinstance(arg, dict) + and "configurable" in arg + and isinstance(arg["configurable"], dict) + ): + runnable, config = self.prepare(cast("RunnableConfig", arg)) + argsl = list(args) + argsl[idx] = config + return getattr(runnable, name)(*argsl, **kwargs) + + if self.config: + runnable, config = self.prepare() + return getattr(runnable, name)(*args, **kwargs) + + return attr(*args, **kwargs) + + return wrapper + + return attr + + +class RunnableConfigurableFields(DynamicRunnable[Input, Output]): + """Runnable that can be dynamically configured. + + A RunnableConfigurableFields should be initiated using the + `configurable_fields` method of a Runnable. + + Parameters: + fields: The configurable fields to use. + + Here is an example of using a RunnableConfigurableFields with LLMs: + + .. code-block:: python + + from langchain_core.prompts import PromptTemplate + from langchain_core.runnables import ConfigurableField + from langchain_openai import ChatOpenAI + + model = ChatOpenAI(temperature=0).configurable_fields( + temperature=ConfigurableField( + id="temperature", + name="LLM Temperature", + description="The temperature of the LLM", + ) + ) + # This creates a RunnableConfigurableFields for a chat model. + + # When invoking the created RunnableSequence, you can pass in the + # value for your ConfigurableField's id which in this case + # will be change in temperature + + prompt = PromptTemplate.from_template("Pick a random number above {x}") + chain = prompt | model + + chain.invoke({"x": 0}) + chain.invoke({"x": 0}, config={"configurable": {"temperature": 0.9}}) + + + Here is an example of using a RunnableConfigurableFields with HubRunnables: + + .. code-block:: python + + from langchain_core.prompts import PromptTemplate + from langchain_core.runnables import ConfigurableField + from langchain_openai import ChatOpenAI + from langchain.runnables.hub import HubRunnable + + prompt = HubRunnable("rlm/rag-prompt").configurable_fields( + owner_repo_commit=ConfigurableField( + id="hub_commit", + name="Hub Commit", + description="The Hub commit to pull from", + ) + ) + + prompt.invoke({"question": "foo", "context": "bar"}) + + # Invoking prompt with `with_config` method + + prompt.invoke( + {"question": "foo", "context": "bar"}, + config={"configurable": {"hub_commit": "rlm/rag-prompt-llama"}}, + ) + """ + + fields: dict[str, AnyConfigurableField] + + @property + def config_specs(self) -> list[ConfigurableFieldSpec]: + """Get the configuration specs for the RunnableConfigurableFields. + + Returns: + list[ConfigurableFieldSpec]: The configuration specs. 
+ """ + config_specs = [] + + default_fields = type(self.default).model_fields + for field_name, spec in self.fields.items(): + if isinstance(spec, ConfigurableField): + config_specs.append( + ConfigurableFieldSpec( + id=spec.id, + name=spec.name, + description=spec.description + or default_fields[field_name].description, + annotation=spec.annotation + or default_fields[field_name].annotation, + default=getattr(self.default, field_name), + is_shared=spec.is_shared, + ) + ) + else: + config_specs.append( + make_options_spec(spec, default_fields[field_name].description) + ) + + config_specs.extend(self.default.config_specs) + + return get_unique_config_specs(config_specs) + + @override + def configurable_fields( + self, **kwargs: AnyConfigurableField + ) -> RunnableSerializable[Input, Output]: + return self.default.configurable_fields(**{**self.fields, **kwargs}) + + def _prepare( + self, config: Optional[RunnableConfig] = None + ) -> tuple[Runnable[Input, Output], RunnableConfig]: + config = ensure_config(config) + specs_by_id = {spec.id: (key, spec) for key, spec in self.fields.items()} + configurable_fields = { + specs_by_id[k][0]: v + for k, v in config.get("configurable", {}).items() + if k in specs_by_id and isinstance(specs_by_id[k][1], ConfigurableField) + } + configurable_single_options = { + k: v.options[(config.get("configurable", {}).get(v.id) or v.default)] + for k, v in self.fields.items() + if isinstance(v, ConfigurableFieldSingleOption) + } + configurable_multi_options = { + k: [ + v.options[o] + for o in config.get("configurable", {}).get(v.id, v.default) + ] + for k, v in self.fields.items() + if isinstance(v, ConfigurableFieldMultiOption) + } + configurable = { + **configurable_fields, + **configurable_single_options, + **configurable_multi_options, + } + + if configurable: + init_params = { + k: v + for k, v in self.default.__dict__.items() + if k in type(self.default).model_fields + } + return ( + self.default.__class__(**{**init_params, **configurable}), + config, + ) + return (self.default, config) + + +# Before Python 3.11 native StrEnum is not available +class StrEnum(str, enum.Enum): + """String enum.""" + + +_enums_for_spec: WeakValueDictionary[ + Union[ + ConfigurableFieldSingleOption, ConfigurableFieldMultiOption, ConfigurableField + ], + type[StrEnum], +] = WeakValueDictionary() + +_enums_for_spec_lock = threading.Lock() + + +class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]): + """Runnable that can be dynamically configured. + + A RunnableConfigurableAlternatives should be initiated using the + `configurable_alternatives` method of a Runnable or can be + initiated directly as well. + + Here is an example of using a RunnableConfigurableAlternatives that uses + alternative prompts to illustrate its functionality: + + .. code-block:: python + + from langchain_core.runnables import ConfigurableField + from langchain_openai import ChatOpenAI + + # This creates a RunnableConfigurableAlternatives for Prompt Runnable + # with two alternatives. + prompt = PromptTemplate.from_template( + "Tell me a joke about {topic}" + ).configurable_alternatives( + ConfigurableField(id="prompt"), + default_key="joke", + poem=PromptTemplate.from_template("Write a short poem about {topic}") + ) + + # When invoking the created RunnableSequence, you can pass in the + # value for your ConfigurableField's id which in this case will either be + # `joke` or `poem`. 
+            chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
+
+            # The `with_config` method selects which prompt alternative the
+            # sequence will use.
+            chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
+
+
+    Equivalently, you can initialize RunnableConfigurableAlternatives directly
+    and use it in LCEL in the same way:
+
+        .. code-block:: python
+
+            from langchain_core.runnables import ConfigurableField
+            from langchain_core.runnables.configurable import RunnableConfigurableAlternatives
+            from langchain_openai import ChatOpenAI
+
+            prompt = RunnableConfigurableAlternatives(
+                which=ConfigurableField(id='prompt'),
+                default=PromptTemplate.from_template("Tell me a joke about {topic}"),
+                default_key='joke',
+                prefix_keys=False,
+                alternatives={"poem": PromptTemplate.from_template("Write a short poem about {topic}")}
+            )
+            chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
+            chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
+
+    """  # noqa: E501
+
+    which: ConfigurableField
+    """The ConfigurableField to use to choose between alternatives."""
+
+    alternatives: dict[
+        str,
+        Union[Runnable[Input, Output], Callable[[], Runnable[Input, Output]]],
+    ]
+    """The alternatives to choose from."""
+
+    default_key: str = "default"
+    """The enum value to use for the default option. Defaults to "default"."""
+
+    prefix_keys: bool
+    """Whether to prefix configurable fields of each alternative with a namespace
+    of the form "<which.id>==<alternative key>/<field name>", e.g. a key named
+    "temperature" used by the alternative named "gpt3" becomes
+    "model==gpt3/temperature"."""
+
+    @property
+    @override
+    def config_specs(self) -> list[ConfigurableFieldSpec]:
+        with _enums_for_spec_lock:
+            if which_enum := _enums_for_spec.get(self.which):
+                pass
+            else:
+                which_enum = StrEnum(  # type: ignore[call-overload]
+                    self.which.name or self.which.id,
+                    (
+                        (v, v)
+                        for v in list(self.alternatives.keys()) + [self.default_key]
+                    ),
+                )
+                _enums_for_spec[self.which] = cast("type[StrEnum]", which_enum)
+        return get_unique_config_specs(
+            # which alternative
+            [
+                ConfigurableFieldSpec(
+                    id=self.which.id,
+                    name=self.which.name,
+                    description=self.which.description,
+                    annotation=which_enum,
+                    default=self.default_key,
+                    is_shared=self.which.is_shared,
+                ),
+            ]
+            # config specs of the default option
+            + (
+                [
+                    prefix_config_spec(s, f"{self.which.id}=={self.default_key}")
+                    for s in self.default.config_specs
+                ]
+                if self.prefix_keys
+                else self.default.config_specs
+            )
+            # config specs of the alternatives
+            + [
+                (
+                    prefix_config_spec(s, f"{self.which.id}=={alt_key}")
+                    if self.prefix_keys
+                    else s
+                )
+                for alt_key, alt in self.alternatives.items()
+                if isinstance(alt, RunnableSerializable)
+                for s in alt.config_specs
+            ]
+        )
+
+    @override
+    def configurable_fields(
+        self, **kwargs: AnyConfigurableField
+    ) -> RunnableSerializable[Input, Output]:
+        return self.__class__(
+            which=self.which,
+            default=self.default.configurable_fields(**kwargs),
+            alternatives=self.alternatives,
+            default_key=self.default_key,
+            prefix_keys=self.prefix_keys,
+        )
+
+    def _prepare(
+        self, config: Optional[RunnableConfig] = None
+    ) -> tuple[Runnable[Input, Output], RunnableConfig]:
+        config = ensure_config(config)
+        which = config.get("configurable", {}).get(self.which.id, self.default_key)
+        # remap configurable keys for the chosen alternative
+        if self.prefix_keys:
+            config = cast(
+                "RunnableConfig",
+                {
+                    **config,
+                    "configurable": {
+                        _strremoveprefix(k, 
f"{self.which.id}=={which}/"): v + for k, v in config.get("configurable", {}).items() + }, + }, + ) + # return the chosen alternative + if which == self.default_key: + return (self.default, config) + if which in self.alternatives: + alt = self.alternatives[which] + if isinstance(alt, Runnable): + return (alt, config) + return (alt(), config) + msg = f"Unknown alternative: {which}" + raise ValueError(msg) + + +def _strremoveprefix(s: str, prefix: str) -> str: + """str.removeprefix() is only available in Python 3.9+.""" + return s.replace(prefix, "", 1) if s.startswith(prefix) else s + + +def prefix_config_spec( + spec: ConfigurableFieldSpec, prefix: str +) -> ConfigurableFieldSpec: + """Prefix the id of a ConfigurableFieldSpec. + + This is useful when a RunnableConfigurableAlternatives is used as a + ConfigurableField of another RunnableConfigurableAlternatives. + + Args: + spec: The ConfigurableFieldSpec to prefix. + prefix: The prefix to add. + + Returns: + ConfigurableFieldSpec: The prefixed ConfigurableFieldSpec. + """ + return ( + ConfigurableFieldSpec( + id=f"{prefix}/{spec.id}", + name=spec.name, + description=spec.description, + annotation=spec.annotation, + default=spec.default, + is_shared=spec.is_shared, + ) + if not spec.is_shared + else spec + ) + + +def make_options_spec( + spec: Union[ConfigurableFieldSingleOption, ConfigurableFieldMultiOption], + description: Optional[str], +) -> ConfigurableFieldSpec: + """Make a ConfigurableFieldSpec for a ConfigurableFieldSingleOption or ConfigurableFieldMultiOption. + + Args: + spec: The ConfigurableFieldSingleOption or ConfigurableFieldMultiOption. + description: The description to use if the spec does not have one. + + Returns: + The ConfigurableFieldSpec. + """ # noqa: E501 + with _enums_for_spec_lock: + if enum := _enums_for_spec.get(spec): + pass + else: + enum = StrEnum( # type: ignore[call-overload] + spec.name or spec.id, + ((v, v) for v in list(spec.options.keys())), + ) + _enums_for_spec[spec] = cast("type[StrEnum]", enum) + if isinstance(spec, ConfigurableFieldSingleOption): + return ConfigurableFieldSpec( + id=spec.id, + name=spec.name, + description=spec.description or description, + annotation=enum, + default=spec.default, + is_shared=spec.is_shared, + ) + return ConfigurableFieldSpec( + id=spec.id, + name=spec.name, + description=spec.description or description, + annotation=Sequence[enum], # type: ignore[valid-type] + default=spec.default, + is_shared=spec.is_shared, + ) diff --git a/venv/Lib/site-packages/langchain_core/runnables/fallbacks.py b/venv/Lib/site-packages/langchain_core/runnables/fallbacks.py new file mode 100644 index 00000000..2150b00b --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/fallbacks.py @@ -0,0 +1,656 @@ +"""Runnable that can fallback to other Runnables if it fails.""" + +import asyncio +import inspect +import typing +from collections.abc import AsyncIterator, Iterator, Sequence +from functools import wraps +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Union, +) + +from pydantic import BaseModel, ConfigDict +from typing_extensions import override + +from langchain_core.runnables.base import Runnable, RunnableSerializable +from langchain_core.runnables.config import ( + RunnableConfig, + ensure_config, + get_async_callback_manager_for_config, + get_callback_manager_for_config, + get_config_list, + patch_config, + set_config_context, +) +from langchain_core.runnables.utils import ( + ConfigurableFieldSpec, + Input, + Output, + coro_with_context, + 
get_unique_config_specs, +) +from langchain_core.utils.aiter import py_anext + +if TYPE_CHECKING: + from langchain_core.callbacks.manager import AsyncCallbackManagerForChainRun + + +class RunnableWithFallbacks(RunnableSerializable[Input, Output]): + """Runnable that can fallback to other Runnables if it fails. + + External APIs (e.g., APIs for a language model) may at times experience + degraded performance or even downtime. + + In these cases, it can be useful to have a fallback Runnable that can be + used in place of the original Runnable (e.g., fallback to another LLM provider). + + Fallbacks can be defined at the level of a single Runnable, or at the level + of a chain of Runnables. Fallbacks are tried in order until one succeeds or + all fail. + + While you can instantiate a ``RunnableWithFallbacks`` directly, it is usually + more convenient to use the ``with_fallbacks`` method on a Runnable. + + Example: + + .. code-block:: python + + from langchain_core.chat_models.openai import ChatOpenAI + from langchain_core.chat_models.anthropic import ChatAnthropic + + model = ChatAnthropic( + model="claude-3-haiku-20240307" + ).with_fallbacks([ChatOpenAI(model="gpt-3.5-turbo-0125")]) + # Will usually use ChatAnthropic, but fallback to ChatOpenAI + # if ChatAnthropic fails. + model.invoke('hello') + + # And you can also use fallbacks at the level of a chain. + # Here if both LLM providers fail, we'll fallback to a good hardcoded + # response. + + from langchain_core.prompts import PromptTemplate + from langchain_core.output_parser import StrOutputParser + from langchain_core.runnables import RunnableLambda + + def when_all_is_lost(inputs): + return ("Looks like our LLM providers are down. " + "Here's a nice 🦜️ emoji for you instead.") + + chain_with_fallback = ( + PromptTemplate.from_template('Tell me a joke about {topic}') + | model + | StrOutputParser() + ).with_fallbacks([RunnableLambda(when_all_is_lost)]) + """ + + runnable: Runnable[Input, Output] + """The Runnable to run first.""" + fallbacks: Sequence[Runnable[Input, Output]] + """A sequence of fallbacks to try.""" + exceptions_to_handle: tuple[type[BaseException], ...] = (Exception,) + """The exceptions on which fallbacks should be tried. + + Any exception that is not a subclass of these exceptions will be raised immediately. + """ + exception_key: Optional[str] = None + """If string is specified then handled exceptions will be passed to fallbacks as + part of the input under the specified key. If None, exceptions + will not be passed to fallbacks. 
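As a quick sketch of how these two options combine (the `primary`/`backup` names and the `last_error` key are invented for the example; the behavior follows the `invoke` implementation below):

```python
from langchain_core.runnables import RunnableLambda

def primary(inputs: dict) -> str:
    # Simulate a flaky upstream service.
    raise TimeoutError("primary backend unavailable")

def backup(inputs: dict) -> str:
    # With exception_key set, the handled error arrives in the input under that key.
    return f"backup answer (primary failed with: {inputs.get('last_error')})"

chain = RunnableLambda(primary).with_fallbacks(
    [RunnableLambda(backup)],
    exceptions_to_handle=(TimeoutError,),  # only these exceptions trigger fallbacks
    exception_key="last_error",  # input must be a dict when this is set
)
print(chain.invoke({"question": "hi"}))
```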
If used, the base Runnable and its fallbacks + must accept a dictionary as input.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @property + @override + def InputType(self) -> type[Input]: + return self.runnable.InputType + + @property + @override + def OutputType(self) -> type[Output]: + return self.runnable.OutputType + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + return self.runnable.get_input_schema(config) + + @override + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + return self.runnable.get_output_schema(config) + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + return get_unique_config_specs( + spec + for step in [self.runnable, *self.fallbacks] + for spec in step.config_specs + ) + + @classmethod + @override + def is_lc_serializable(cls) -> bool: + return True + + @classmethod + @override + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object. + + Defaults to ["langchain", "schema", "runnable"]. + """ + return ["langchain", "schema", "runnable"] + + @property + def runnables(self) -> Iterator[Runnable[Input, Output]]: + """Iterator over the Runnable and its fallbacks.""" + yield self.runnable + yield from self.fallbacks + + @override + def invoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + if self.exception_key is not None and not isinstance(input, dict): + msg = ( + "If 'exception_key' is specified then input must be a dictionary." + f"However found a type of {type(input)} for input" + ) + raise ValueError(msg) + # setup callbacks + config = ensure_config(config) + callback_manager = get_callback_manager_for_config(config) + # start the root run + run_manager = callback_manager.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + first_error = None + last_error = None + for runnable in self.runnables: + try: + if self.exception_key and last_error is not None: + input[self.exception_key] = last_error # type: ignore[index] + child_config = patch_config(config, callbacks=run_manager.get_child()) + with set_config_context(child_config) as context: + output = context.run( + runnable.invoke, + input, + config, + **kwargs, + ) + except self.exceptions_to_handle as e: + if first_error is None: + first_error = e + last_error = e + except BaseException as e: + run_manager.on_chain_error(e) + raise + else: + run_manager.on_chain_end(output) + return output + if first_error is None: + msg = "No error stored at end of fallbacks." + raise ValueError(msg) + run_manager.on_chain_error(first_error) + raise first_error + + @override + async def ainvoke( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Output: + if self.exception_key is not None and not isinstance(input, dict): + msg = ( + "If 'exception_key' is specified then input must be a dictionary." 
+ f"However found a type of {type(input)} for input" + ) + raise ValueError(msg) + # setup callbacks + config = ensure_config(config) + callback_manager = get_async_callback_manager_for_config(config) + # start the root run + run_manager = await callback_manager.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + + first_error = None + last_error = None + for runnable in self.runnables: + try: + if self.exception_key and last_error is not None: + input[self.exception_key] = last_error # type: ignore[index] + child_config = patch_config(config, callbacks=run_manager.get_child()) + with set_config_context(child_config) as context: + coro = context.run(runnable.ainvoke, input, config, **kwargs) + output = await coro_with_context(coro, context) + except self.exceptions_to_handle as e: + if first_error is None: + first_error = e + last_error = e + except BaseException as e: + await run_manager.on_chain_error(e) + raise + else: + await run_manager.on_chain_end(output) + return output + if first_error is None: + msg = "No error stored at end of fallbacks." + raise ValueError(msg) + await run_manager.on_chain_error(first_error) + raise first_error + + @override + def batch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + from langchain_core.callbacks.manager import CallbackManager + + if self.exception_key is not None and not all( + isinstance(input, dict) for input in inputs + ): + msg = ( + "If 'exception_key' is specified then inputs must be dictionaries." + f"However found a type of {type(inputs[0])} for input" + ) + raise ValueError(msg) + + if not inputs: + return [] + + # setup callbacks + configs = get_config_list(config, len(inputs)) + callback_managers = [ + CallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + local_callbacks=None, + verbose=False, + inheritable_tags=config.get("tags"), + local_tags=None, + inheritable_metadata=config.get("metadata"), + local_metadata=None, + ) + for config in configs + ] + # start the root runs, one per input + run_managers = [ + cm.on_chain_start( + None, + input if isinstance(input, dict) else {"input": input}, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + for cm, input, config in zip(callback_managers, inputs, configs) + ] + + to_return: dict[int, Any] = {} + run_again = dict(enumerate(inputs)) + handled_exceptions: dict[int, BaseException] = {} + first_to_raise = None + for runnable in self.runnables: + outputs = runnable.batch( + [input for _, input in sorted(run_again.items())], + [ + # each step a child run of the corresponding root run + patch_config(configs[i], callbacks=run_managers[i].get_child()) + for i in sorted(run_again) + ], + return_exceptions=True, + **kwargs, + ) + for (i, input), output in zip(sorted(run_again.copy().items()), outputs): + if isinstance(output, BaseException) and not isinstance( + output, self.exceptions_to_handle + ): + if not return_exceptions: + first_to_raise = first_to_raise or output + else: + handled_exceptions[i] = output + run_again.pop(i) + elif isinstance(output, self.exceptions_to_handle): + if self.exception_key: + input[self.exception_key] = output # type: ignore[index] + handled_exceptions[i] = output + else: + run_managers[i].on_chain_end(output) + to_return[i] = output + run_again.pop(i) + handled_exceptions.pop(i, 
None) + if first_to_raise: + raise first_to_raise + if not run_again: + break + + sorted_handled_exceptions = sorted(handled_exceptions.items()) + for i, error in sorted_handled_exceptions: + run_managers[i].on_chain_error(error) + if not return_exceptions and sorted_handled_exceptions: + raise sorted_handled_exceptions[0][1] + to_return.update(handled_exceptions) + return [output for _, output in sorted(to_return.items())] + + @override + async def abatch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + from langchain_core.callbacks.manager import AsyncCallbackManager + + if self.exception_key is not None and not all( + isinstance(input, dict) for input in inputs + ): + msg = ( + "If 'exception_key' is specified then inputs must be dictionaries." + f"However found a type of {type(inputs[0])} for input" + ) + raise ValueError(msg) + + if not inputs: + return [] + + # setup callbacks + configs = get_config_list(config, len(inputs)) + callback_managers = [ + AsyncCallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + local_callbacks=None, + verbose=False, + inheritable_tags=config.get("tags"), + local_tags=None, + inheritable_metadata=config.get("metadata"), + local_metadata=None, + ) + for config in configs + ] + # start the root runs, one per input + run_managers: list[AsyncCallbackManagerForChainRun] = await asyncio.gather( + *( + cm.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + for cm, input, config in zip(callback_managers, inputs, configs) + ) + ) + + to_return = {} + run_again = dict(enumerate(inputs)) + handled_exceptions: dict[int, BaseException] = {} + first_to_raise = None + for runnable in self.runnables: + outputs = await runnable.abatch( + [input for _, input in sorted(run_again.items())], + [ + # each step a child run of the corresponding root run + patch_config(configs[i], callbacks=run_managers[i].get_child()) + for i in sorted(run_again) + ], + return_exceptions=True, + **kwargs, + ) + + for (i, input), output in zip(sorted(run_again.copy().items()), outputs): + if isinstance(output, BaseException) and not isinstance( + output, self.exceptions_to_handle + ): + if not return_exceptions: + first_to_raise = first_to_raise or output + else: + handled_exceptions[i] = output + run_again.pop(i) + elif isinstance(output, self.exceptions_to_handle): + if self.exception_key: + input[self.exception_key] = output # type: ignore[index] + handled_exceptions[i] = output + else: + to_return[i] = output + await run_managers[i].on_chain_end(output) + run_again.pop(i) + handled_exceptions.pop(i, None) + + if first_to_raise: + raise first_to_raise + if not run_again: + break + + sorted_handled_exceptions = sorted(handled_exceptions.items()) + await asyncio.gather( + *( + run_managers[i].on_chain_error(error) + for i, error in sorted_handled_exceptions + ) + ) + if not return_exceptions and sorted_handled_exceptions: + raise sorted_handled_exceptions[0][1] + to_return.update(handled_exceptions) + return [output for _, output in sorted(to_return.items())] # type: ignore[misc] + + @override + def stream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + if self.exception_key is not None and not isinstance(input, dict): + msg = ( + "If 'exception_key' is specified then input must be a 
dictionary." + f"However found a type of {type(input)} for input" + ) + raise ValueError(msg) + # setup callbacks + config = ensure_config(config) + callback_manager = get_callback_manager_for_config(config) + # start the root run + run_manager = callback_manager.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + first_error = None + last_error = None + for runnable in self.runnables: + try: + if self.exception_key and last_error is not None: + input[self.exception_key] = last_error # type: ignore[index] + child_config = patch_config(config, callbacks=run_manager.get_child()) + with set_config_context(child_config) as context: + stream = context.run( + runnable.stream, + input, + **kwargs, + ) + chunk: Output = context.run(next, stream) + except self.exceptions_to_handle as e: + first_error = e if first_error is None else first_error + last_error = e + except BaseException as e: + run_manager.on_chain_error(e) + raise + else: + first_error = None + break + if first_error: + run_manager.on_chain_error(first_error) + raise first_error + + yield chunk + output: Optional[Output] = chunk + try: + for chunk in stream: + yield chunk + try: + output = output + chunk # type: ignore[operator] + except TypeError: + output = None + except BaseException as e: + run_manager.on_chain_error(e) + raise + run_manager.on_chain_end(output) + + @override + async def astream( + self, + input: Input, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + if self.exception_key is not None and not isinstance(input, dict): + msg = ( + "If 'exception_key' is specified then input must be a dictionary." + f"However found a type of {type(input)} for input" + ) + raise ValueError(msg) + # setup callbacks + config = ensure_config(config) + callback_manager = get_async_callback_manager_for_config(config) + # start the root run + run_manager = await callback_manager.on_chain_start( + None, + input, + name=config.get("run_name") or self.get_name(), + run_id=config.pop("run_id", None), + ) + first_error = None + last_error = None + for runnable in self.runnables: + try: + if self.exception_key and last_error is not None: + input[self.exception_key] = last_error # type: ignore[index] + child_config = patch_config(config, callbacks=run_manager.get_child()) + with set_config_context(child_config) as context: + stream = runnable.astream( + input, + child_config, + **kwargs, + ) + chunk = await coro_with_context(py_anext(stream), context) + except self.exceptions_to_handle as e: + first_error = e if first_error is None else first_error + last_error = e + except BaseException as e: + await run_manager.on_chain_error(e) + raise + else: + first_error = None + break + if first_error: + await run_manager.on_chain_error(first_error) + raise first_error + + yield chunk + output: Optional[Output] = chunk + try: + async for chunk in stream: + yield chunk + try: + output = output + chunk + except TypeError: + output = None + except BaseException as e: + await run_manager.on_chain_error(e) + raise + await run_manager.on_chain_end(output) + + def __getattr__(self, name: str) -> Any: + """Get an attribute from the wrapped Runnable and its fallbacks. + + Returns: + If the attribute is anything other than a method that outputs a Runnable, + returns getattr(self.runnable, name). If the attribute is a method that + does return a new Runnable (e.g. 
llm.bind_tools([...]) outputs a new + RunnableBinding) then self.runnable and each of the runnables in + self.fallbacks is replaced with getattr(x, name). + + Example: + .. code-block:: python + + from langchain_openai import ChatOpenAI + from langchain_anthropic import ChatAnthropic + + gpt_4o = ChatOpenAI(model="gpt-4o") + claude_3_sonnet = ChatAnthropic(model="claude-3-sonnet-20240229") + llm = gpt_4o.with_fallbacks([claude_3_sonnet]) + + llm.model_name + # -> "gpt-4o" + + # .bind_tools() is called on both ChatOpenAI and ChatAnthropic + # Equivalent to: + # gpt_4o.bind_tools([...]).with_fallbacks([claude_3_sonnet.bind_tools([...])]) + llm.bind_tools([...]) + # -> RunnableWithFallbacks( + runnable=RunnableBinding(bound=ChatOpenAI(...), kwargs={"tools": [...]}), + fallbacks=[RunnableBinding(bound=ChatAnthropic(...), kwargs={"tools": [...]})], + ) + + """ # noqa: E501 + attr = getattr(self.runnable, name) + if _returns_runnable(attr): + + @wraps(attr) + def wrapped(*args: Any, **kwargs: Any) -> Any: + new_runnable = attr(*args, **kwargs) + new_fallbacks = [] + for fallback in self.fallbacks: + fallback_attr = getattr(fallback, name) + new_fallbacks.append(fallback_attr(*args, **kwargs)) + + return self.__class__( + **{ + **self.model_dump(), + "runnable": new_runnable, + "fallbacks": new_fallbacks, + } + ) + + return wrapped + + return attr + + +def _returns_runnable(attr: Any) -> bool: + if not callable(attr): + return False + return_type = typing.get_type_hints(attr).get("return") + return bool(return_type and _is_runnable_type(return_type)) + + +def _is_runnable_type(type_: Any) -> bool: + if inspect.isclass(type_): + return issubclass(type_, Runnable) + origin = getattr(type_, "__origin__", None) + if inspect.isclass(origin): + return issubclass(origin, Runnable) + if origin is typing.Union: + return all(_is_runnable_type(t) for t in type_.__args__) + return False diff --git a/venv/Lib/site-packages/langchain_core/runnables/graph.py b/venv/Lib/site-packages/langchain_core/runnables/graph.py new file mode 100644 index 00000000..323dcdd2 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/graph.py @@ -0,0 +1,725 @@ +"""Graph used in Runnables.""" + +from __future__ import annotations + +import inspect +from collections import defaultdict +from dataclasses import dataclass, field +from enum import Enum +from typing import ( + TYPE_CHECKING, + Any, + Callable, + NamedTuple, + Optional, + Protocol, + TypedDict, + Union, + overload, +) +from uuid import UUID, uuid4 + +from langchain_core.utils.pydantic import _IgnoreUnserializable, is_basemodel_subclass + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pydantic import BaseModel + + from langchain_core.runnables.base import Runnable as RunnableType + + +class Stringifiable(Protocol): + """Protocol for objects that can be converted to a string.""" + + def __str__(self) -> str: + """Convert the object to a string.""" + + +class LabelsDict(TypedDict): + """Dictionary of labels for nodes and edges in a graph.""" + + nodes: dict[str, str] + """Labels for nodes.""" + edges: dict[str, str] + """Labels for edges.""" + + +def is_uuid(value: str) -> bool: + """Check if a string is a valid UUID. + + Args: + value: The string to check. + + Returns: + True if the string is a valid UUID, False otherwise. + """ + try: + UUID(value) + except ValueError: + return False + return True + + +class Edge(NamedTuple): + """Edge in a graph. + + Parameters: + source: The source node id. + target: The target node id. 
+ data: Optional data associated with the edge. Defaults to None. + conditional: Whether the edge is conditional. Defaults to False. + """ + + source: str + target: str + data: Optional[Stringifiable] = None + conditional: bool = False + + def copy( + self, *, source: Optional[str] = None, target: Optional[str] = None + ) -> Edge: + """Return a copy of the edge with optional new source and target nodes. + + Args: + source: The new source node id. Defaults to None. + target: The new target node id. Defaults to None. + + Returns: + A copy of the edge with the new source and target nodes. + """ + return Edge( + source=source or self.source, + target=target or self.target, + data=self.data, + conditional=self.conditional, + ) + + +class Node(NamedTuple): + """Node in a graph. + + Parameters: + id: The unique identifier of the node. + name: The name of the node. + data: The data of the node. + metadata: Optional metadata for the node. Defaults to None. + """ + + id: str + name: str + data: Union[type[BaseModel], RunnableType, None] + metadata: Optional[dict[str, Any]] + + def copy(self, *, id: Optional[str] = None, name: Optional[str] = None) -> Node: + """Return a copy of the node with optional new id and name. + + Args: + id: The new node id. Defaults to None. + name: The new node name. Defaults to None. + + Returns: + A copy of the node with the new id and name. + """ + return Node( + id=id or self.id, + name=name or self.name, + data=self.data, + metadata=self.metadata, + ) + + +class Branch(NamedTuple): + """Branch in a graph. + + Parameters: + condition: A callable that returns a string representation of the condition. + ends: Optional dictionary of end node ids for the branches. Defaults + to None. + """ + + condition: Callable[..., str] + ends: Optional[dict[str, str]] + + +class CurveStyle(Enum): + """Enum for different curve styles supported by Mermaid.""" + + BASIS = "basis" + BUMP_X = "bumpX" + BUMP_Y = "bumpY" + CARDINAL = "cardinal" + CATMULL_ROM = "catmullRom" + LINEAR = "linear" + MONOTONE_X = "monotoneX" + MONOTONE_Y = "monotoneY" + NATURAL = "natural" + STEP = "step" + STEP_AFTER = "stepAfter" + STEP_BEFORE = "stepBefore" + + +@dataclass +class NodeStyles: + """Schema for Hexadecimal color codes for different node types. + + Parameters: + default: The default color code. Defaults to "fill:#f2f0ff,line-height:1.2". + first: The color code for the first node. Defaults to "fill-opacity:0". + last: The color code for the last node. Defaults to "fill:#bfb6fc". + """ + + default: str = "fill:#f2f0ff,line-height:1.2" + first: str = "fill-opacity:0" + last: str = "fill:#bfb6fc" + + +class MermaidDrawMethod(Enum): + """Enum for different draw methods supported by Mermaid.""" + + PYPPETEER = "pyppeteer" # Uses Pyppeteer to render the graph + API = "api" # Uses Mermaid.INK API to render the graph + + +def node_data_str(id: str, data: Union[type[BaseModel], RunnableType, None]) -> str: + """Convert the data of a node to a string. + + Args: + id: The node id. + data: The node data. + + Returns: + A string representation of the data. + """ + from langchain_core.runnables.base import Runnable + + if not is_uuid(id) or data is None: + return id + data_str = data.get_name() if isinstance(data, Runnable) else data.__name__ + return data_str if not data_str.startswith("Runnable") else data_str[8:] + + +def node_data_json( + node: Node, *, with_schemas: bool = False +) -> dict[str, Union[str, dict[str, Any]]]: + """Convert the data of a node to a JSON-serializable format. 
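For intuition about `node_data_str` above: non-UUID ids take precedence, and a `Runnable` name prefix is stripped. A minimal sketch using only this module's definitions:

```python
from uuid import uuid4

from langchain_core.runnables import RunnablePassthrough
from langchain_core.runnables.graph import node_data_str

# A human-readable id wins over the data's name; a UUID id falls through to the data.
print(node_data_str("my_node", RunnablePassthrough()))    # -> "my_node"
print(node_data_str(uuid4().hex, RunnablePassthrough()))  # -> "Passthrough"
```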
+ + Args: + node: The node to convert. + with_schemas: Whether to include the schema of the data if + it is a Pydantic model. Defaults to False. + + Returns: + A dictionary with the type of the data and the data itself. + """ + from langchain_core.load.serializable import to_json_not_implemented + from langchain_core.runnables.base import Runnable, RunnableSerializable + + if node.data is None: + json: dict[str, Any] = {} + elif isinstance(node.data, RunnableSerializable): + json = { + "type": "runnable", + "data": { + "id": node.data.lc_id(), + "name": node_data_str(node.id, node.data), + }, + } + elif isinstance(node.data, Runnable): + json = { + "type": "runnable", + "data": { + "id": to_json_not_implemented(node.data)["id"], + "name": node_data_str(node.id, node.data), + }, + } + elif inspect.isclass(node.data) and is_basemodel_subclass(node.data): + json = ( + { + "type": "schema", + "data": node.data.model_json_schema( + schema_generator=_IgnoreUnserializable + ), + } + if with_schemas + else { + "type": "schema", + "data": node_data_str(node.id, node.data), + } + ) + else: + json = { + "type": "unknown", + "data": node_data_str(node.id, node.data), + } + if node.metadata is not None: + json["metadata"] = node.metadata + return json + + +@dataclass +class Graph: + """Graph of nodes and edges. + + Parameters: + nodes: Dictionary of nodes in the graph. Defaults to an empty dictionary. + edges: List of edges in the graph. Defaults to an empty list. + """ + + nodes: dict[str, Node] = field(default_factory=dict) + edges: list[Edge] = field(default_factory=list) + + def to_json(self, *, with_schemas: bool = False) -> dict[str, list[dict[str, Any]]]: + """Convert the graph to a JSON-serializable format. + + Args: + with_schemas: Whether to include the schemas of the nodes if they are + Pydantic models. Defaults to False. + + Returns: + A dictionary with the nodes and edges of the graph. + """ + stable_node_ids = { + node.id: i if is_uuid(node.id) else node.id + for i, node in enumerate(self.nodes.values()) + } + edges: list[dict[str, Any]] = [] + for edge in self.edges: + edge_dict = { + "source": stable_node_ids[edge.source], + "target": stable_node_ids[edge.target], + } + if edge.data is not None: + edge_dict["data"] = edge.data # type: ignore[assignment] + if edge.conditional: + edge_dict["conditional"] = True + edges.append(edge_dict) + + return { + "nodes": [ + { + "id": stable_node_ids[node.id], + **node_data_json(node, with_schemas=with_schemas), + } + for node in self.nodes.values() + ], + "edges": edges, + } + + def __bool__(self) -> bool: + """Return whether the graph has any nodes.""" + return bool(self.nodes) + + def next_id(self) -> str: + """Return a new unique node identifier. + + It that can be used to add a node to the graph. + """ + return uuid4().hex + + def add_node( + self, + data: Union[type[BaseModel], RunnableType, None], + id: Optional[str] = None, + *, + metadata: Optional[dict[str, Any]] = None, + ) -> Node: + """Add a node to the graph and return it. + + Args: + data: The data of the node. + id: The id of the node. Defaults to None. + metadata: Optional metadata for the node. Defaults to None. + + Returns: + The node that was added to the graph. + + Raises: + ValueError: If a node with the same id already exists. 
+ """ + if id is not None and id in self.nodes: + msg = f"Node with id {id} already exists" + raise ValueError(msg) + id = id or self.next_id() + node = Node(id=id, data=data, metadata=metadata, name=node_data_str(id, data)) + self.nodes[node.id] = node + return node + + def remove_node(self, node: Node) -> None: + """Remove a node from the graph and all edges connected to it. + + Args: + node: The node to remove. + """ + self.nodes.pop(node.id) + self.edges = [ + edge for edge in self.edges if node.id not in (edge.source, edge.target) + ] + + def add_edge( + self, + source: Node, + target: Node, + data: Optional[Stringifiable] = None, + conditional: bool = False, # noqa: FBT001,FBT002 + ) -> Edge: + """Add an edge to the graph and return it. + + Args: + source: The source node of the edge. + target: The target node of the edge. + data: Optional data associated with the edge. Defaults to None. + conditional: Whether the edge is conditional. Defaults to False. + + Returns: + The edge that was added to the graph. + + Raises: + ValueError: If the source or target node is not in the graph. + """ + if source.id not in self.nodes: + msg = f"Source node {source.id} not in graph" + raise ValueError(msg) + if target.id not in self.nodes: + msg = f"Target node {target.id} not in graph" + raise ValueError(msg) + edge = Edge( + source=source.id, target=target.id, data=data, conditional=conditional + ) + self.edges.append(edge) + return edge + + def extend( + self, graph: Graph, *, prefix: str = "" + ) -> tuple[Optional[Node], Optional[Node]]: + """Add all nodes and edges from another graph. + + Note this doesn't check for duplicates, nor does it connect the graphs. + + Args: + graph: The graph to add. + prefix: The prefix to add to the node ids. Defaults to "". + + Returns: + A tuple of the first and last nodes of the subgraph. + """ + if all(is_uuid(node.id) for node in graph.nodes.values()): + prefix = "" + + def prefixed(id: str) -> str: + return f"{prefix}:{id}" if prefix else id + + # prefix each node + self.nodes.update( + {prefixed(k): v.copy(id=prefixed(k)) for k, v in graph.nodes.items()} + ) + # prefix each edge's source and target + self.edges.extend( + [ + edge.copy(source=prefixed(edge.source), target=prefixed(edge.target)) + for edge in graph.edges + ] + ) + # return (prefixed) first and last nodes of the subgraph + first, last = graph.first_node(), graph.last_node() + return ( + first.copy(id=prefixed(first.id)) if first else None, + last.copy(id=prefixed(last.id)) if last else None, + ) + + def reid(self) -> Graph: + """Return a new graph with all nodes re-identified. + + Uses their unique, readable names where possible. + """ + node_name_to_ids = defaultdict(list) + for node in self.nodes.values(): + node_name_to_ids[node.name].append(node.id) + + unique_labels = { + node_id: node_name if len(node_ids) == 1 else f"{node_name}_{i + 1}" + for node_name, node_ids in node_name_to_ids.items() + for i, node_id in enumerate(node_ids) + } + + def _get_node_id(node_id: str) -> str: + label = unique_labels[node_id] + if is_uuid(node_id): + return label + return node_id + + return Graph( + nodes={ + _get_node_id(id): node.copy(id=_get_node_id(id)) + for id, node in self.nodes.items() + }, + edges=[ + edge.copy( + source=_get_node_id(edge.source), + target=_get_node_id(edge.target), + ) + for edge in self.edges + ], + ) + + def first_node(self) -> Optional[Node]: + """Find the single node that is not a target of any edge. + + If there is no such node, or there are multiple, return None. 
+ When drawing the graph, this node would be the origin. + """ + return _first_node(self) + + def last_node(self) -> Optional[Node]: + """Find the single node that is not a source of any edge. + + If there is no such node, or there are multiple, return None. + When drawing the graph, this node would be the destination. + """ + return _last_node(self) + + def trim_first_node(self) -> None: + """Remove the first node if it exists and has a single outgoing edge. + + i.e., if removing it would not leave the graph without a "first" node. + """ + first_node = self.first_node() + if ( + first_node + and _first_node(self, exclude=[first_node.id]) + and len({e for e in self.edges if e.source == first_node.id}) == 1 + ): + self.remove_node(first_node) + + def trim_last_node(self) -> None: + """Remove the last node if it exists and has a single incoming edge. + + i.e., if removing it would not leave the graph without a "last" node. + """ + last_node = self.last_node() + if ( + last_node + and _last_node(self, exclude=[last_node.id]) + and len({e for e in self.edges if e.target == last_node.id}) == 1 + ): + self.remove_node(last_node) + + def draw_ascii(self) -> str: + """Draw the graph as an ASCII art string.""" + from langchain_core.runnables.graph_ascii import draw_ascii + + return draw_ascii( + {node.id: node.name for node in self.nodes.values()}, + self.edges, + ) + + def print_ascii(self) -> None: + """Print the graph as an ASCII art string.""" + print(self.draw_ascii()) # noqa: T201 + + @overload + def draw_png( + self, + output_file_path: str, + fontname: Optional[str] = None, + labels: Optional[LabelsDict] = None, + ) -> None: ... + + @overload + def draw_png( + self, + output_file_path: None, + fontname: Optional[str] = None, + labels: Optional[LabelsDict] = None, + ) -> bytes: ... + + def draw_png( + self, + output_file_path: Optional[str] = None, + fontname: Optional[str] = None, + labels: Optional[LabelsDict] = None, + ) -> Union[bytes, None]: + """Draw the graph as a PNG image. + + Args: + output_file_path: The path to save the image to. If None, the image + is not saved. Defaults to None. + fontname: The name of the font to use. Defaults to None. + labels: Optional labels for nodes and edges in the graph. Defaults to None. + + Returns: + The PNG image as bytes if output_file_path is None, None otherwise. + """ + from langchain_core.runnables.graph_png import PngDrawer + + default_node_labels = {node.id: node.name for node in self.nodes.values()} + + return PngDrawer( + fontname, + LabelsDict( + nodes={ + **default_node_labels, + **(labels["nodes"] if labels is not None else {}), + }, + edges=labels["edges"] if labels is not None else {}, + ), + ).draw(self, output_file_path) + + def draw_mermaid( + self, + *, + with_styles: bool = True, + curve_style: CurveStyle = CurveStyle.LINEAR, + node_colors: Optional[NodeStyles] = None, + wrap_label_n_words: int = 9, + frontmatter_config: Optional[dict[str, Any]] = None, + ) -> str: + """Draw the graph as a Mermaid syntax string. + + Args: + with_styles: Whether to include styles in the syntax. Defaults to True. + curve_style: The style of the edges. Defaults to CurveStyle.LINEAR. + node_colors: The colors of the nodes. Defaults to NodeStyles(). + wrap_label_n_words: The number of words to wrap the node labels at. + Defaults to 9. + frontmatter_config (dict[str, Any], optional): Mermaid frontmatter config. + Can be used to customize theme and styles. Will be converted to YAML and + added to the beginning of the mermaid graph. Defaults to None. 
+ + See more here: https://mermaid.js.org/config/configuration.html. + + Example config: + + .. code-block:: python + + { + "config": { + "theme": "neutral", + "look": "handDrawn", + "themeVariables": { "primaryColor": "#e2e2e2"}, + } + } + + + Returns: + The Mermaid syntax string. + """ + from langchain_core.runnables.graph_mermaid import draw_mermaid + + graph = self.reid() + first_node = graph.first_node() + last_node = graph.last_node() + + return draw_mermaid( + nodes=graph.nodes, + edges=graph.edges, + first_node=first_node.id if first_node else None, + last_node=last_node.id if last_node else None, + with_styles=with_styles, + curve_style=curve_style, + node_styles=node_colors, + wrap_label_n_words=wrap_label_n_words, + frontmatter_config=frontmatter_config, + ) + + def draw_mermaid_png( + self, + *, + curve_style: CurveStyle = CurveStyle.LINEAR, + node_colors: Optional[NodeStyles] = None, + wrap_label_n_words: int = 9, + output_file_path: Optional[str] = None, + draw_method: MermaidDrawMethod = MermaidDrawMethod.API, + background_color: str = "white", + padding: int = 10, + max_retries: int = 1, + retry_delay: float = 1.0, + frontmatter_config: Optional[dict[str, Any]] = None, + ) -> bytes: + """Draw the graph as a PNG image using Mermaid. + + Args: + curve_style: The style of the edges. Defaults to CurveStyle.LINEAR. + node_colors: The colors of the nodes. Defaults to NodeStyles(). + wrap_label_n_words: The number of words to wrap the node labels at. + Defaults to 9. + output_file_path: The path to save the image to. If None, the image + is not saved. Defaults to None. + draw_method: The method to use to draw the graph. + Defaults to MermaidDrawMethod.API. + background_color: The color of the background. Defaults to "white". + padding: The padding around the graph. Defaults to 10. + max_retries: The maximum number of retries (MermaidDrawMethod.API). + Defaults to 1. + retry_delay: The delay between retries (MermaidDrawMethod.API). + Defaults to 1.0. + frontmatter_config (dict[str, Any], optional): Mermaid frontmatter config. + Can be used to customize theme and styles. Will be converted to YAML and + added to the beginning of the mermaid graph. Defaults to None. + + See more here: https://mermaid.js.org/config/configuration.html. + + Example config: + + .. code-block:: python + + { + "config": { + "theme": "neutral", + "look": "handDrawn", + "themeVariables": { "primaryColor": "#e2e2e2"}, + } + } + + Returns: + The PNG image as bytes. + """ + from langchain_core.runnables.graph_mermaid import draw_mermaid_png + + mermaid_syntax = self.draw_mermaid( + curve_style=curve_style, + node_colors=node_colors, + wrap_label_n_words=wrap_label_n_words, + frontmatter_config=frontmatter_config, + ) + return draw_mermaid_png( + mermaid_syntax=mermaid_syntax, + output_file_path=output_file_path, + draw_method=draw_method, + background_color=background_color, + padding=padding, + max_retries=max_retries, + retry_delay=retry_delay, + ) + + +def _first_node(graph: Graph, exclude: Sequence[str] = ()) -> Optional[Node]: + """Find the single node that is not a target of any edge. + + Exclude nodes/sources with ids in the exclude list. + If there is no such node, or there are multiple, return None. + When drawing the graph, this node would be the origin. 
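As a usage sketch for the drawing methods above, assuming any composed runnable `chain` (the theme keys follow the Mermaid docs linked above):

```python
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
mermaid_src = chain.get_graph().draw_mermaid(
    frontmatter_config={"config": {"theme": "neutral", "look": "handDrawn"}}
)
print(mermaid_src)  # starts with a "---" YAML frontmatter block, then "graph TD;"
```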
+ """ + targets = {edge.target for edge in graph.edges if edge.source not in exclude} + found: list[Node] = [ + node + for node in graph.nodes.values() + if node.id not in exclude and node.id not in targets + ] + return found[0] if len(found) == 1 else None + + +def _last_node(graph: Graph, exclude: Sequence[str] = ()) -> Optional[Node]: + """Find the single node that is not a source of any edge. + + Exclude nodes/targets with ids in the exclude list. + If there is no such node, or there are multiple, return None. + When drawing the graph, this node would be the destination. + """ + sources = {edge.source for edge in graph.edges if edge.target not in exclude} + found: list[Node] = [ + node + for node in graph.nodes.values() + if node.id not in exclude and node.id not in sources + ] + return found[0] if len(found) == 1 else None diff --git a/venv/Lib/site-packages/langchain_core/runnables/graph_ascii.py b/venv/Lib/site-packages/langchain_core/runnables/graph_ascii.py new file mode 100644 index 00000000..9f0b40c3 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/graph_ascii.py @@ -0,0 +1,334 @@ +"""Draws DAG in ASCII. + +Adapted from https://github.com/iterative/dvc/blob/main/dvc/dagascii.py. +""" + +import math +import os +from collections.abc import Mapping, Sequence +from typing import Any + +from langchain_core.runnables.graph import Edge as LangEdge + + +class VertexViewer: + """VertexViewer class. + + Class to define vertex box boundaries that will be accounted for during + graph building by grandalf. + """ + + HEIGHT = 3 # top and bottom box edges + text + """Height of the box.""" + + def __init__(self, name: str) -> None: + """Create a VertexViewer. + + Args: + name: name of the vertex. + """ + self._h = self.HEIGHT # top and bottom box edges + text + self._w = len(name) + 2 # right and left bottom edges + text + + @property + def h(self) -> int: + """Height of the box.""" + return self._h + + @property + def w(self) -> int: + """Width of the box.""" + return self._w + + +class AsciiCanvas: + """Class for drawing in ASCII.""" + + TIMEOUT = 10 + + def __init__(self, cols: int, lines: int) -> None: + """Create an ASCII canvas. + + Args: + cols (int): number of columns in the canvas. Should be > 1. + lines (int): number of lines in the canvas. Should be > 1. + """ + if cols <= 1 or lines <= 1: + msg = "Canvas dimensions should be > 1" + raise ValueError(msg) + + self.cols = cols + self.lines = lines + + self.canvas = [[" "] * cols for line in range(lines)] + + def draw(self) -> str: + """Draws ASCII canvas on the screen.""" + lines = map("".join, self.canvas) + return os.linesep.join(lines) + + def point(self, x: int, y: int, char: str) -> None: + """Create a point on ASCII canvas. + + Args: + x (int): x coordinate. Should be >= 0 and < number of columns in + the canvas. + y (int): y coordinate. Should be >= 0 an < number of lines in the + canvas. + char (str): character to place in the specified point on the + canvas. + """ + if len(char) != 1: + msg = "char should be a single character" + raise ValueError(msg) + if x >= self.cols or x < 0: + msg = "x should be >= 0 and < number of columns" + raise ValueError(msg) + if y >= self.lines or y < 0: + msg = "y should be >= 0 and < number of lines" + raise ValueError(msg) + + self.canvas[y][x] = char + + def line(self, x0: int, y0: int, x1: int, y1: int, char: str) -> None: + """Create a line on ASCII canvas. + + Args: + x0 (int): x coordinate where the line should start. 
+ y0 (int): y coordinate where the line should start. + x1 (int): x coordinate where the line should end. + y1 (int): y coordinate where the line should end. + char (str): character to draw the line with. + """ + if x0 > x1: + x1, x0 = x0, x1 + y1, y0 = y0, y1 + + dx = x1 - x0 + dy = y1 - y0 + + if dx == 0 and dy == 0: + self.point(x0, y0, char) + elif abs(dx) >= abs(dy): + for x in range(x0, x1 + 1): + y = y0 if dx == 0 else y0 + int(round((x - x0) * dy / float(dx))) + self.point(x, y, char) + elif y0 < y1: + for y in range(y0, y1 + 1): + x = x0 if dy == 0 else x0 + int(round((y - y0) * dx / float(dy))) + self.point(x, y, char) + else: + for y in range(y1, y0 + 1): + x = x0 if dy == 0 else x1 + int(round((y - y1) * dx / float(dy))) + self.point(x, y, char) + + def text(self, x: int, y: int, text: str) -> None: + """Print a text on ASCII canvas. + + Args: + x (int): x coordinate where the text should start. + y (int): y coordinate where the text should start. + text (str): string that should be printed. + """ + for i, char in enumerate(text): + self.point(x + i, y, char) + + def box(self, x0: int, y0: int, width: int, height: int) -> None: + """Create a box on ASCII canvas. + + Args: + x0 (int): x coordinate of the box corner. + y0 (int): y coordinate of the box corner. + width (int): box width. + height (int): box height. + """ + if width <= 1 or height <= 1: + msg = "Box dimensions should be > 1" + raise ValueError(msg) + + width -= 1 + height -= 1 + + for x in range(x0, x0 + width): + self.point(x, y0, "-") + self.point(x, y0 + height, "-") + + for y in range(y0, y0 + height): + self.point(x0, y, "|") + self.point(x0 + width, y, "|") + + self.point(x0, y0, "+") + self.point(x0 + width, y0, "+") + self.point(x0, y0 + height, "+") + self.point(x0 + width, y0 + height, "+") + + +def _build_sugiyama_layout( + vertices: Mapping[str, str], edges: Sequence[LangEdge] +) -> Any: + try: + from grandalf.graphs import Edge, Graph, Vertex # type: ignore[import-untyped] + from grandalf.layouts import SugiyamaLayout # type: ignore[import-untyped] + from grandalf.routing import ( # type: ignore[import-untyped] + EdgeViewer, + route_with_lines, + ) + except ImportError as exc: + msg = "Install grandalf to draw graphs: `pip install grandalf`." + raise ImportError(msg) from exc + + # + # Just a reminder about naming conventions: + # +------------X + # | + # | + # | + # | + # Y + # + + vertices_ = {id: Vertex(f" {data} ") for id, data in vertices.items()} + edges_ = [Edge(vertices_[s], vertices_[e], data=cond) for s, e, _, cond in edges] + vertices_list = vertices_.values() + graph = Graph(vertices_list, edges_) + + for vertex in vertices_list: + vertex.view = VertexViewer(vertex.data) + + # NOTE: determine min box length to create the best layout + minw = min(v.view.w for v in vertices_list) + + for edge in edges_: + edge.view = EdgeViewer() + + sug = SugiyamaLayout(graph.C[0]) + graph = graph.C[0] + roots = list(filter(lambda x: len(x.e_in()) == 0, graph.sV)) + + sug.init_all(roots=roots, optimize=True) + + sug.yspace = VertexViewer.HEIGHT + sug.xspace = minw + sug.route_edge = route_with_lines + + sug.draw() + + return sug + + +def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str: + """Build a DAG and draw it in ASCII. + + Args: + vertices (list): list of graph vertices. + edges (list): list of graph edges. + + Returns: + str: ASCII representation + + Example: + + .. 
code-block:: python + + from langchain_core.runnables.graph_ascii import draw_ascii + + vertices = {1: "1", 2: "2", 3: "3", 4: "4"} + edges = [ + (source, target, None, None) + for source, target in [(1, 2), (2, 3), (2, 4), (1, 4)] + ] + + + print(draw_ascii(vertices, edges)) + + .. code-block:: none + + +---+ + | 1 | + +---+ + * * + * * + * * + +---+ * + | 2 | * + +---+** * + * ** * + * ** * + * ** + +---+ +---+ + | 3 | | 4 | + +---+ +---+ + """ + # NOTE: coordinates might me negative, so we need to shift + # everything to the positive plane before we actually draw it. + xlist: list[float] = [] + ylist: list[float] = [] + + sug = _build_sugiyama_layout(vertices, edges) + + for vertex in sug.g.sV: + # NOTE: moving boxes w/2 to the left + xlist.extend( + ( + vertex.view.xy[0] - vertex.view.w / 2.0, + vertex.view.xy[0] + vertex.view.w / 2.0, + ) + ) + ylist.extend((vertex.view.xy[1], vertex.view.xy[1] + vertex.view.h)) + + for edge in sug.g.sE: + for x, y in edge.view._pts: + xlist.append(x) + ylist.append(y) + + minx = min(xlist) + miny = min(ylist) + maxx = max(xlist) + maxy = max(ylist) + + canvas_cols = int(math.ceil(math.ceil(maxx) - math.floor(minx))) + 1 + canvas_lines = int(round(maxy - miny)) + + canvas = AsciiCanvas(canvas_cols, canvas_lines) + + # NOTE: first draw edges so that node boxes could overwrite them + for edge in sug.g.sE: + if len(edge.view._pts) <= 1: + msg = "Not enough points to draw an edge" + raise ValueError(msg) + for index in range(1, len(edge.view._pts)): + start = edge.view._pts[index - 1] + end = edge.view._pts[index] + + start_x = int(round(start[0] - minx)) + start_y = int(round(start[1] - miny)) + end_x = int(round(end[0] - minx)) + end_y = int(round(end[1] - miny)) + + if start_x < 0 or start_y < 0 or end_x < 0 or end_y < 0: + msg = ( + "Invalid edge coordinates: " + f"start_x={start_x}, " + f"start_y={start_y}, " + f"end_x={end_x}, " + f"end_y={end_y}" + ) + raise ValueError(msg) + + canvas.line(start_x, start_y, end_x, end_y, "." if edge.data else "*") + + for vertex in sug.g.sV: + # NOTE: moving boxes w/2 to the left + x = vertex.view.xy[0] - vertex.view.w / 2.0 + y = vertex.view.xy[1] + + canvas.box( + int(round(x - minx)), + int(round(y - miny)), + vertex.view.w, + vertex.view.h, + ) + + canvas.text(int(round(x - minx)) + 1, int(round(y - miny)) + 1, vertex.data) + + return canvas.draw() diff --git a/venv/Lib/site-packages/langchain_core/runnables/graph_mermaid.py b/venv/Lib/site-packages/langchain_core/runnables/graph_mermaid.py new file mode 100644 index 00000000..410c6c56 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/graph_mermaid.py @@ -0,0 +1,469 @@ +"""Mermaid graph drawing utilities.""" + +import asyncio +import base64 +import random +import re +import time +from dataclasses import asdict +from pathlib import Path +from typing import Any, Literal, Optional + +import yaml + +from langchain_core.runnables.graph import ( + CurveStyle, + Edge, + MermaidDrawMethod, + Node, + NodeStyles, +) + +MARKDOWN_SPECIAL_CHARS = "*_`" + + +def draw_mermaid( + nodes: dict[str, Node], + edges: list[Edge], + *, + first_node: Optional[str] = None, + last_node: Optional[str] = None, + with_styles: bool = True, + curve_style: CurveStyle = CurveStyle.LINEAR, + node_styles: Optional[NodeStyles] = None, + wrap_label_n_words: int = 9, + frontmatter_config: Optional[dict[str, Any]] = None, +) -> str: + """Draws a Mermaid graph using the provided graph data. + + Args: + nodes (dict[str, str]): List of node ids. 
+ edges (list[Edge]): List of edges, object with a source, + target and data. + first_node (str, optional): Id of the first node. Defaults to None. + last_node (str, optional): Id of the last node. Defaults to None. + with_styles (bool, optional): Whether to include styles in the graph. + Defaults to True. + curve_style (CurveStyle, optional): Curve style for the edges. + Defaults to CurveStyle.LINEAR. + node_styles (NodeStyles, optional): Node colors for different types. + Defaults to NodeStyles(). + wrap_label_n_words (int, optional): Words to wrap the edge labels. + Defaults to 9. + frontmatter_config (dict[str, Any], optional): Mermaid frontmatter config. + Can be used to customize theme and styles. Will be converted to YAML and + added to the beginning of the mermaid graph. Defaults to None. + + See more here: https://mermaid.js.org/config/configuration.html. + + Example config: + + .. code-block:: python + + { + "config": { + "theme": "neutral", + "look": "handDrawn", + "themeVariables": { "primaryColor": "#e2e2e2"}, + } + } + + Returns: + str: Mermaid graph syntax. + """ + # Initialize Mermaid graph configuration + original_frontmatter_config = frontmatter_config or {} + original_flowchart_config = original_frontmatter_config.get("config", {}).get( + "flowchart", {} + ) + frontmatter_config = { + **original_frontmatter_config, + "config": { + **original_frontmatter_config.get("config", {}), + "flowchart": {**original_flowchart_config, "curve": curve_style.value}, + }, + } + + mermaid_graph = ( + ( + "---\n" + + yaml.dump(frontmatter_config, default_flow_style=False) + + "---\ngraph TD;\n" + ) + if with_styles + else "graph TD;\n" + ) + # Group nodes by subgraph + subgraph_nodes: dict[str, dict[str, Node]] = {} + regular_nodes: dict[str, Node] = {} + + for key, node in nodes.items(): + if ":" in key: + # For nodes with colons, add them only to their deepest subgraph level + prefix = ":".join(key.split(":")[:-1]) + subgraph_nodes.setdefault(prefix, {})[key] = node + else: + regular_nodes[key] = node + + # Node formatting templates + default_class_label = "default" + format_dict = {default_class_label: "{0}({1})"} + if first_node is not None: + format_dict[first_node] = "{0}([{1}]):::first" + if last_node is not None: + format_dict[last_node] = "{0}([{1}]):::last" + + def render_node(key: str, node: Node, indent: str = "\t") -> str: + """Helper function to render a node with consistent formatting.""" + node_name = node.name.split(":")[-1] + label = ( + f"
<p>{node_name}</p>" + if node_name.startswith(tuple(MARKDOWN_SPECIAL_CHARS)) + and node_name.endswith(tuple(MARKDOWN_SPECIAL_CHARS)) + else node_name + ) + if node.metadata: + label = ( + f"{label}<hr/><small><em>" + "\n".join(f"{k} = {value}" for k, value in node.metadata.items()) + + "</em></small>" + ) + node_label = format_dict.get(key, format_dict[default_class_label]).format( + _escape_node_label(key), label + ) + return f"{indent}{node_label}\n" + + # Add non-subgraph nodes to the graph + if with_styles: + for key, node in regular_nodes.items(): + mermaid_graph += render_node(key, node) + + # Group edges by their common prefixes + edge_groups: dict[str, list[Edge]] = {} + for edge in edges: + src_parts = edge.source.split(":") + tgt_parts = edge.target.split(":") + common_prefix = ":".join( + src for src, tgt in zip(src_parts, tgt_parts) if src == tgt + ) + edge_groups.setdefault(common_prefix, []).append(edge) + + seen_subgraphs = set() + + def add_subgraph(edges: list[Edge], prefix: str) -> None: + nonlocal mermaid_graph + self_loop = len(edges) == 1 and edges[0].source == edges[0].target + if prefix and not self_loop: + subgraph = prefix.split(":")[-1] + if subgraph in seen_subgraphs: + msg = ( + f"Found duplicate subgraph '{subgraph}' -- this likely means that " + "you're reusing a subgraph node with the same name. " + "Please adjust your graph to have subgraph nodes with unique names." + ) + raise ValueError(msg) + + seen_subgraphs.add(subgraph) + mermaid_graph += f"\tsubgraph {subgraph}\n" + + # Add nodes that belong to this subgraph + if with_styles and prefix in subgraph_nodes: + for key, node in subgraph_nodes[prefix].items(): + mermaid_graph += render_node(key, node) + + for edge in edges: + source, target = edge.source, edge.target + + # Add BR every wrap_label_n_words words + if edge.data is not None: + edge_data = edge.data + words = str(edge_data).split() # Split the string into words + # Group words into chunks of wrap_label_n_words size + if len(words) > wrap_label_n_words: + edge_data = "&nbsp;<br>&nbsp;".join( + " ".join(words[i : i + wrap_label_n_words]) + for i in range(0, len(words), wrap_label_n_words) + ) + if edge.conditional: + edge_label = f" -. &nbsp;{edge_data}&nbsp; .-> " + else: + edge_label = f" -- &nbsp;{edge_data}&nbsp; --> " + else: + edge_label = " -.-> " if edge.conditional else " --> " + + mermaid_graph += ( + f"\t{_escape_node_label(source)}{edge_label}" + f"{_escape_node_label(target)};\n" + ) + + # Recursively add nested subgraphs + for nested_prefix, edges_ in edge_groups.items(): + if not nested_prefix.startswith(prefix + ":") or nested_prefix == prefix: + continue + # only go to first level subgraphs + if ":" in nested_prefix[len(prefix) + 1 :]: + continue + add_subgraph(edges_, nested_prefix) + + if prefix and not self_loop: + mermaid_graph += "\tend\n" + + # Start with the top-level edges (no common prefix) + add_subgraph(edge_groups.get("", []), "") + + # Add remaining subgraphs with edges + for prefix, edges_ in edge_groups.items(): + if ":" in prefix or prefix == "": + continue + add_subgraph(edges_, prefix) + seen_subgraphs.add(prefix) + + # Add empty subgraphs (subgraphs with no internal edges) + if with_styles: + for prefix, subgraph_node in subgraph_nodes.items(): + if ":" not in prefix and prefix not in seen_subgraphs: + mermaid_graph += f"\tsubgraph {prefix}\n" + + # Add nodes that belong to this subgraph + for key, node in subgraph_node.items(): + mermaid_graph += render_node(key, node) + + mermaid_graph += "\tend\n" + seen_subgraphs.add(prefix) + + # Add custom styles for nodes + if with_styles: + mermaid_graph += _generate_mermaid_graph_styles(node_styles or NodeStyles()) + return mermaid_graph + + +def _escape_node_label(node_label: str) -> str: + """Escapes the node label for Mermaid syntax.""" + return re.sub(r"[^a-zA-Z-_0-9]", "_", node_label) + + +def _generate_mermaid_graph_styles(node_colors: NodeStyles) -> str: + """Generates Mermaid graph styles for different node types.""" + styles = "" + for class_name, style in asdict(node_colors).items(): + styles += f"\tclassDef {class_name} {style}\n" + return styles + + +def draw_mermaid_png( + mermaid_syntax: str, + output_file_path: Optional[str] = None, + draw_method: MermaidDrawMethod = MermaidDrawMethod.API, + background_color: Optional[str] = "white", + padding: int = 10, + max_retries: int = 1, + retry_delay: float = 1.0, +) -> bytes: + """Draws a Mermaid graph as PNG using provided syntax. + + Args: + mermaid_syntax (str): Mermaid graph syntax. + output_file_path (str, optional): Path to save the PNG image. + Defaults to None. + draw_method (MermaidDrawMethod, optional): Method to draw the graph. + Defaults to MermaidDrawMethod.API. + background_color (str, optional): Background color of the image. + Defaults to "white". + padding (int, optional): Padding around the image. Defaults to 10. + max_retries (int, optional): Maximum number of retries (MermaidDrawMethod.API). + Defaults to 1. + retry_delay (float, optional): Delay between retries (MermaidDrawMethod.API). + Defaults to 1.0. + + Returns: + bytes: PNG image bytes. + + Raises: + ValueError: If an invalid draw method is provided.
+ """ + if draw_method == MermaidDrawMethod.PYPPETEER: + import asyncio + + img_bytes = asyncio.run( + _render_mermaid_using_pyppeteer( + mermaid_syntax, output_file_path, background_color, padding + ) + ) + elif draw_method == MermaidDrawMethod.API: + img_bytes = _render_mermaid_using_api( + mermaid_syntax, + output_file_path=output_file_path, + background_color=background_color, + max_retries=max_retries, + retry_delay=retry_delay, + ) + else: + supported_methods = ", ".join([m.value for m in MermaidDrawMethod]) + msg = ( + f"Invalid draw method: {draw_method}. " + f"Supported draw methods are: {supported_methods}" + ) + raise ValueError(msg) + + return img_bytes + + +async def _render_mermaid_using_pyppeteer( + mermaid_syntax: str, + output_file_path: Optional[str] = None, + background_color: Optional[str] = "white", + padding: int = 10, + device_scale_factor: int = 3, +) -> bytes: + """Renders Mermaid graph using Pyppeteer.""" + try: + from pyppeteer import launch # type: ignore[import-not-found] + except ImportError as e: + msg = "Install Pyppeteer to use the Pyppeteer method: `pip install pyppeteer`." + raise ImportError(msg) from e + + browser = await launch() + page = await browser.newPage() + + # Setup Mermaid JS + await page.goto("about:blank") + await page.addScriptTag( + {"url": "https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"} + ) + await page.evaluate( + """() => { + mermaid.initialize({startOnLoad:true}); + }""" + ) + + # Render SVG + svg_code = await page.evaluate( + """(mermaidGraph) => { + return mermaid.mermaidAPI.render('mermaid', mermaidGraph); + }""", + mermaid_syntax, + ) + + # Set the page background to white + await page.evaluate( + """(svg, background_color) => { + document.body.innerHTML = svg; + document.body.style.background = background_color; + }""", + svg_code["svg"], + background_color, + ) + + # Take a screenshot + dimensions = await page.evaluate( + """() => { + const svgElement = document.querySelector('svg'); + const rect = svgElement.getBoundingClientRect(); + return { width: rect.width, height: rect.height }; + }""" + ) + await page.setViewport( + { + "width": int(dimensions["width"] + padding), + "height": int(dimensions["height"] + padding), + "deviceScaleFactor": device_scale_factor, + } + ) + + img_bytes = await page.screenshot({"fullPage": False}) + await browser.close() + + if output_file_path is not None: + await asyncio.get_event_loop().run_in_executor( + None, Path(output_file_path).write_bytes, img_bytes + ) + + return img_bytes + + +def _render_mermaid_using_api( + mermaid_syntax: str, + *, + output_file_path: Optional[str] = None, + background_color: Optional[str] = "white", + file_type: Optional[Literal["jpeg", "png", "webp"]] = "png", + max_retries: int = 1, + retry_delay: float = 1.0, +) -> bytes: + """Renders Mermaid graph using the Mermaid.INK API.""" + try: + import requests + except ImportError as e: + msg = ( + "Install the `requests` module to use the Mermaid.INK API: " + "`pip install requests`." 
+ ) + raise ImportError(msg) from e + + # Use Mermaid API to render the image + mermaid_syntax_encoded = base64.b64encode(mermaid_syntax.encode("utf8")).decode( + "ascii" + ) + + # Check if the background color is a hexadecimal color code using regex + if background_color is not None: + hex_color_pattern = re.compile(r"^#(?:[0-9a-fA-F]{3}){1,2}$") + if not hex_color_pattern.match(background_color): + background_color = f"!{background_color}" + + image_url = ( + f"https://mermaid.ink/img/{mermaid_syntax_encoded}" + f"?type={file_type}&bgColor={background_color}" + ) + + error_msg_suffix = ( + "To resolve this issue:\n" + "1. Check your internet connection and try again\n" + "2. Try with higher retry settings: " + "`draw_mermaid_png(..., max_retries=5, retry_delay=2.0)`\n" + "3. Use the Pyppeteer rendering method which will render your graph locally " + "in a browser: `draw_mermaid_png(..., draw_method=MermaidDrawMethod.PYPPETEER)`" + ) + + for attempt in range(max_retries + 1): + try: + response = requests.get(image_url, timeout=10) + if response.status_code == requests.codes.ok: + img_bytes = response.content + if output_file_path is not None: + Path(output_file_path).write_bytes(response.content) + + return img_bytes + + # If we get a server error (5xx), retry + if 500 <= response.status_code < 600 and attempt < max_retries: + # Exponential backoff with jitter + sleep_time = retry_delay * (2**attempt) * (0.5 + 0.5 * random.random()) # noqa: S311 not used for crypto + time.sleep(sleep_time) + continue + + # For other status codes, fail immediately + msg = ( + "Failed to reach https://mermaid.ink/ API while trying to render " + f"your graph. Status code: {response.status_code}.\n\n" + ) + error_msg_suffix + raise ValueError(msg) + + except (requests.RequestException, requests.Timeout) as e: + if attempt < max_retries: + # Exponential backoff with jitter + sleep_time = retry_delay * (2**attempt) * (0.5 + 0.5 * random.random()) # noqa: S311 not used for crypto + time.sleep(sleep_time) + else: + msg = ( + "Failed to reach https://mermaid.ink/ API while trying to render " + f"your graph after {max_retries} retries. " + ) + error_msg_suffix + raise ValueError(msg) from e + + # This should not be reached, but just in case + msg = ( + "Failed to reach https://mermaid.ink/ API while trying to render " + f"your graph after {max_retries} retries. " + ) + error_msg_suffix + raise ValueError(msg) diff --git a/venv/Lib/site-packages/langchain_core/runnables/graph_png.py b/venv/Lib/site-packages/langchain_core/runnables/graph_png.py new file mode 100644 index 00000000..50423618 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/graph_png.py @@ -0,0 +1,191 @@ +"""Helper class to draw a state graph into a PNG file.""" + +from typing import Any, Optional + +from langchain_core.runnables.graph import Graph, LabelsDict + + +class PngDrawer: + """Helper class to draw a state graph into a PNG file. + + It requires `graphviz` and `pygraphviz` to be installed. + :param fontname: The font to use for the labels + :param labels: A dictionary of label overrides. The dictionary + should have the following format: + { + "nodes": { + "node1": "CustomLabel1", + "node2": "CustomLabel2", + "__end__": "End Node" + }, + "edges": { + "continue": "ContinueLabel", + "end": "EndLabel" + } + } + The keys are the original labels, and the values are the new labels. 
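+
+    Example of label overrides (a minimal sketch; the values are illustrative):
+        drawer = PngDrawer(labels={"nodes": {"__end__": "End"}, "edges": {}})
+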
+ Usage: + drawer = PngDrawer() + drawer.draw(state_graph, 'graph.png') + """ + + def __init__( + self, fontname: Optional[str] = None, labels: Optional[LabelsDict] = None + ) -> None: + """Initializes the PNG drawer. + + Args: + fontname: The font to use for the labels. Defaults to "arial". + labels: A dictionary of label overrides. The dictionary + should have the following format: + { + "nodes": { + "node1": "CustomLabel1", + "node2": "CustomLabel2", + "__end__": "End Node" + }, + "edges": { + "continue": "ContinueLabel", + "end": "EndLabel" + } + } + The keys are the original labels, and the values are the new labels. + Defaults to None. + """ + self.fontname = fontname or "arial" + self.labels = labels or LabelsDict(nodes={}, edges={}) + + def get_node_label(self, label: str) -> str: + """Returns the label to use for a node. + + Args: + label: The original label. + + Returns: + The new label. + """ + label = self.labels.get("nodes", {}).get(label, label) + return f"<{label}>" + + def get_edge_label(self, label: str) -> str: + """Returns the label to use for an edge. + + Args: + label: The original label. + + Returns: + The new label. + """ + label = self.labels.get("edges", {}).get(label, label) + return f"<{label}>" + + def add_node(self, viz: Any, node: str) -> None: + """Adds a node to the graph. + + Args: + viz: The graphviz object. + node: The node to add. + + Returns: + None + """ + viz.add_node( + node, + label=self.get_node_label(node), + style="filled", + fillcolor="yellow", + fontsize=15, + fontname=self.fontname, + ) + + def add_edge( + self, + viz: Any, + source: str, + target: str, + label: Optional[str] = None, + conditional: bool = False, # noqa: FBT001,FBT002 + ) -> None: + """Adds an edge to the graph. + + Args: + viz: The graphviz object. + source: The source node. + target: The target node. + label: The label for the edge. Defaults to None. + conditional: Whether the edge is conditional. Defaults to False. + + Returns: + None + """ + viz.add_edge( + source, + target, + label=self.get_edge_label(label) if label else "", + fontsize=12, + fontname=self.fontname, + style="dotted" if conditional else "solid", + ) + + def draw(self, graph: Graph, output_path: Optional[str] = None) -> Optional[bytes]: + """Draw the given state graph into a PNG file. + + Requires `graphviz` and `pygraphviz` to be installed. + :param graph: The graph to draw + :param output_path: The path to save the PNG. If None, PNG bytes are returned. + """ + try: + import pygraphviz as pgv # type: ignore[import-not-found] + except ImportError as exc: + msg = "Install pygraphviz to draw graphs: `pip install pygraphviz`." + raise ImportError(msg) from exc + + # Create a directed graph + viz = pgv.AGraph(directed=True, nodesep=0.9, ranksep=1.0) + + # Add nodes, conditional edges, and edges to the graph + self.add_nodes(viz, graph) + self.add_edges(viz, graph) + + # Update entrypoint and END styles + self.update_styles(viz, graph) + + # Save the graph as PNG + try: + return viz.draw(output_path, format="png", prog="dot") + finally: + viz.close() + + def add_nodes(self, viz: Any, graph: Graph) -> None: + """Add nodes to the graph. + + Args: + viz: The graphviz object. + graph: The graph to draw. + """ + for node in graph.nodes: + self.add_node(viz, node) + + def add_edges(self, viz: Any, graph: Graph) -> None: + """Add edges to the graph. + + Args: + viz: The graphviz object. + graph: The graph to draw. 
+ """ + for start, end, data, cond in graph.edges: + self.add_edge( + viz, start, end, str(data) if data is not None else None, cond + ) + + def update_styles(self, viz: Any, graph: Graph) -> None: + """Update the styles of the entrypoint and END nodes. + + Args: + viz: The graphviz object. + graph: The graph to draw. + """ + if first := graph.first_node(): + viz.get_node(first.id).attr.update(fillcolor="lightblue") + if last := graph.last_node(): + viz.get_node(last.id).attr.update(fillcolor="orange") diff --git a/venv/Lib/site-packages/langchain_core/runnables/history.py b/venv/Lib/site-packages/langchain_core/runnables/history.py new file mode 100644 index 00000000..4c553f0f --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/history.py @@ -0,0 +1,622 @@ +"""Runnable that manages chat message history for another Runnable.""" + +from __future__ import annotations + +import inspect +from collections.abc import Sequence +from types import GenericAlias +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Union, +) + +from pydantic import BaseModel +from typing_extensions import override + +from langchain_core.chat_history import BaseChatMessageHistory +from langchain_core.load.load import load +from langchain_core.runnables.base import Runnable, RunnableBindingBase, RunnableLambda +from langchain_core.runnables.passthrough import RunnablePassthrough +from langchain_core.runnables.utils import ( + ConfigurableFieldSpec, + Output, + get_unique_config_specs, +) +from langchain_core.utils.pydantic import create_model_v2 + +if TYPE_CHECKING: + from langchain_core.language_models.base import LanguageModelLike + from langchain_core.messages.base import BaseMessage + from langchain_core.runnables.config import RunnableConfig + from langchain_core.tracers.schemas import Run + + +MessagesOrDictWithMessages = Union[Sequence["BaseMessage"], dict[str, Any]] +GetSessionHistoryCallable = Callable[..., BaseChatMessageHistory] + + +class RunnableWithMessageHistory(RunnableBindingBase): + """Runnable that manages chat message history for another Runnable. + + A chat message history is a sequence of messages that represent a conversation. + + RunnableWithMessageHistory wraps another Runnable and manages the chat message + history for it; it is responsible for reading and updating the chat message + history. + + The formats supported for the inputs and outputs of the wrapped Runnable + are described below. + + RunnableWithMessageHistory must always be called with a config that contains + the appropriate parameters for the chat message history factory. + + By default, the Runnable is expected to take a single configuration parameter + called `session_id` which is a string. This parameter is used to create a new + or look up an existing chat message history that matches the given session_id. + + In this case, the invocation would look like this: + + `with_history.invoke(..., config={"configurable": {"session_id": "bar"}})` + ; e.g., ``{"configurable": {"session_id": ""}}``. + + The configuration can be customized by passing in a list of + ``ConfigurableFieldSpec`` objects to the ``history_factory_config`` parameter (see + example below). + + In the examples, we will use a chat message history with an in-memory + implementation to make it easy to experiment and see the results. + + For production use cases, you will want to use a persistent implementation + of chat message history, such as ``RedisChatMessageHistory``. 
+ + Parameters: + get_session_history: Function that returns a new BaseChatMessageHistory. + This function should either take a single positional argument + `session_id` of type string and return a corresponding + chat message history instance. + input_messages_key: Must be specified if the base runnable accepts a dict + as input. The key in the input dict that contains the messages. + output_messages_key: Must be specified if the base Runnable returns a dict + as output. The key in the output dict that contains the messages. + history_messages_key: Must be specified if the base runnable accepts a dict + as input and expects a separate key for historical messages. + history_factory_config: Configure fields that should be passed to the + chat history factory. See ``ConfigurableFieldSpec`` for more details. + + Example: Chat message history with an in-memory implementation for testing. + + .. code-block:: python + + from operator import itemgetter + + from langchain_openai.chat_models import ChatOpenAI + + from langchain_core.chat_history import BaseChatMessageHistory + from langchain_core.documents import Document + from langchain_core.messages import BaseMessage, AIMessage + from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder + from pydantic import BaseModel, Field + from langchain_core.runnables import ( + RunnableLambda, + ConfigurableFieldSpec, + RunnablePassthrough, + ) + from langchain_core.runnables.history import RunnableWithMessageHistory + + + class InMemoryHistory(BaseChatMessageHistory, BaseModel): + \"\"\"In memory implementation of chat message history.\"\"\" + + messages: list[BaseMessage] = Field(default_factory=list) + + def add_messages(self, messages: list[BaseMessage]) -> None: + \"\"\"Add a list of messages to the store\"\"\" + self.messages.extend(messages) + + def clear(self) -> None: + self.messages = [] + + # Here we use a global variable to store the chat message history. + # This will make it easier to inspect it to see the underlying results. + store = {} + + def get_by_session_id(session_id: str) -> BaseChatMessageHistory: + if session_id not in store: + store[session_id] = InMemoryHistory() + return store[session_id] + + + history = get_by_session_id("1") + history.add_message(AIMessage(content="hello")) + print(store) # noqa: T201 + + + Example where the wrapped Runnable takes a dictionary input: + + .. code-block:: python + + from typing import Optional + + from langchain_community.chat_models import ChatAnthropic + from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder + from langchain_core.runnables.history import RunnableWithMessageHistory + + + prompt = ChatPromptTemplate.from_messages([ + ("system", "You're an assistant who's good at {ability}"), + MessagesPlaceholder(variable_name="history"), + ("human", "{question}"), + ]) + + chain = prompt | ChatAnthropic(model="claude-2") + + chain_with_history = RunnableWithMessageHistory( + chain, + # Uses the get_by_session_id function defined in the example + # above. + get_by_session_id, + input_messages_key="question", + history_messages_key="history", + ) + + print(chain_with_history.invoke( # noqa: T201 + {"ability": "math", "question": "What does cosine mean?"}, + config={"configurable": {"session_id": "foo"}} + )) + + # Uses the store defined in the example above. 
+            print(store)  # noqa: T201
+
+            print(chain_with_history.invoke(  # noqa: T201
+                {"ability": "math", "question": "What's its inverse"},
+                config={"configurable": {"session_id": "foo"}}
+            ))
+
+            print(store)  # noqa: T201
+
+
+    Example where the session factory takes two keys (user_id and conversation_id):
+
+        .. code-block:: python
+
+            store = {}
+
+            def get_session_history(
+                user_id: str, conversation_id: str
+            ) -> BaseChatMessageHistory:
+                if (user_id, conversation_id) not in store:
+                    store[(user_id, conversation_id)] = InMemoryHistory()
+                return store[(user_id, conversation_id)]
+
+            prompt = ChatPromptTemplate.from_messages([
+                ("system", "You're an assistant who's good at {ability}"),
+                MessagesPlaceholder(variable_name="history"),
+                ("human", "{question}"),
+            ])
+
+            chain = prompt | ChatAnthropic(model="claude-2")
+
+            with_message_history = RunnableWithMessageHistory(
+                chain,
+                get_session_history=get_session_history,
+                input_messages_key="question",
+                history_messages_key="history",
+                history_factory_config=[
+                    ConfigurableFieldSpec(
+                        id="user_id",
+                        annotation=str,
+                        name="User ID",
+                        description="Unique identifier for the user.",
+                        default="",
+                        is_shared=True,
+                    ),
+                    ConfigurableFieldSpec(
+                        id="conversation_id",
+                        annotation=str,
+                        name="Conversation ID",
+                        description="Unique identifier for the conversation.",
+                        default="",
+                        is_shared=True,
+                    ),
+                ],
+            )
+
+            with_message_history.invoke(
+                {"ability": "math", "question": "What does cosine mean?"},
+                config={"configurable": {"user_id": "123", "conversation_id": "1"}}
+            )
+
+    """
+
+    get_session_history: GetSessionHistoryCallable
+    input_messages_key: Optional[str] = None
+    output_messages_key: Optional[str] = None
+    history_messages_key: Optional[str] = None
+    history_factory_config: Sequence[ConfigurableFieldSpec]
+
+    def __init__(
+        self,
+        runnable: Union[
+            Runnable[
+                Union[MessagesOrDictWithMessages],
+                Union[str, BaseMessage, MessagesOrDictWithMessages],
+            ],
+            LanguageModelLike,
+        ],
+        get_session_history: GetSessionHistoryCallable,
+        *,
+        input_messages_key: Optional[str] = None,
+        output_messages_key: Optional[str] = None,
+        history_messages_key: Optional[str] = None,
+        history_factory_config: Optional[Sequence[ConfigurableFieldSpec]] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Initialize RunnableWithMessageHistory.
+
+        Args:
+            runnable: The base Runnable to be wrapped. Must take as input one of:
+                1. A sequence of BaseMessages
+                2. A dict with one key for all messages
+                3. A dict with one key for the current input string/message(s) and
+                   a separate key for historical messages. If the input key points
+                   to a string, it will be treated as a HumanMessage in history.
+
+                Must return as output one of:
+                1. A string which can be treated as an AIMessage
+                2. A BaseMessage or sequence of BaseMessages
+                3. A dict with a key for a BaseMessage or sequence of BaseMessages
+
+            get_session_history: Function that returns a new BaseChatMessageHistory.
+                This function should either take a single positional argument
+                `session_id` of type string and return a corresponding
+                chat message history instance.
+
+                .. code-block:: python
+
+                    def get_session_history(
+                        session_id: str,
+                        *,
+                        user_id: Optional[str]=None
+                    ) -> BaseChatMessageHistory:
+                        ...
+
+                Or it should take keyword arguments that match the keys of
+                `session_history_config_specs` and return a corresponding
+                chat message history instance.
+
+                .. code-block:: python
+
+                    def get_session_history(
+                        *,
+                        user_id: str,
+                        thread_id: str,
+                    ) -> BaseChatMessageHistory:
+                        ...
+ + input_messages_key: Must be specified if the base runnable accepts a dict + as input. Default is None. + output_messages_key: Must be specified if the base runnable returns a dict + as output. Default is None. + history_messages_key: Must be specified if the base runnable accepts a dict + as input and expects a separate key for historical messages. + history_factory_config: Configure fields that should be passed to the + chat history factory. See ``ConfigurableFieldSpec`` for more details. + Specifying these allows you to pass multiple config keys + into the get_session_history factory. + **kwargs: Arbitrary additional kwargs to pass to parent class + ``RunnableBindingBase`` init. + """ + history_chain: Runnable = RunnableLambda( + self._enter_history, self._aenter_history + ).with_config(run_name="load_history") + messages_key = history_messages_key or input_messages_key + if messages_key: + history_chain = RunnablePassthrough.assign( + **{messages_key: history_chain} + ).with_config(run_name="insert_history") + + runnable_sync: Runnable = runnable.with_listeners(on_end=self._exit_history) + runnable_async: Runnable = runnable.with_alisteners(on_end=self._aexit_history) + + def _call_runnable_sync(_input: Any) -> Runnable: + return runnable_sync + + async def _call_runnable_async(_input: Any) -> Runnable: + return runnable_async + + bound: Runnable = ( + history_chain + | RunnableLambda( + _call_runnable_sync, + _call_runnable_async, + ).with_config(run_name="check_sync_or_async") + ).with_config(run_name="RunnableWithMessageHistory") + + if history_factory_config: + _config_specs = history_factory_config + else: + # If not provided, then we'll use the default session_id field + _config_specs = [ + ConfigurableFieldSpec( + id="session_id", + annotation=str, + name="Session ID", + description="Unique identifier for a session.", + default="", + is_shared=True, + ), + ] + + super().__init__( + get_session_history=get_session_history, + input_messages_key=input_messages_key, + output_messages_key=output_messages_key, + bound=bound, + history_messages_key=history_messages_key, + history_factory_config=_config_specs, + **kwargs, + ) + self._history_chain = history_chain + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + """Get the configuration specs for the RunnableWithMessageHistory.""" + return get_unique_config_specs( + super().config_specs + list(self.history_factory_config) + ) + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + from langchain_core.messages import BaseMessage + + fields: dict = {} + if self.input_messages_key and self.history_messages_key: + fields[self.input_messages_key] = ( + Union[str, BaseMessage, Sequence[BaseMessage]], + ..., + ) + elif self.input_messages_key: + fields[self.input_messages_key] = (Sequence[BaseMessage], ...) + else: + return create_model_v2( + "RunnableWithChatHistoryInput", + module_name=self.__class__.__module__, + root=(Sequence[BaseMessage], ...), + ) + return create_model_v2( + "RunnableWithChatHistoryInput", + field_definitions=fields, + module_name=self.__class__.__module__, + ) + + @property + @override + def OutputType(self) -> type[Output]: + return self._history_chain.OutputType + + @override + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + """Get a pydantic model that can be used to validate output to the Runnable. 
+ + Runnables that leverage the configurable_fields and configurable_alternatives + methods will have a dynamic output schema that depends on which + configuration the Runnable is invoked with. + + This method allows to get an output schema for a specific configuration. + + Args: + config: A config to use when generating the schema. + + Returns: + A pydantic model that can be used to validate output. + """ + root_type = self.OutputType + + if ( + inspect.isclass(root_type) + and not isinstance(root_type, GenericAlias) + and issubclass(root_type, BaseModel) + ): + return root_type + + return create_model_v2( + "RunnableWithChatHistoryOutput", + root=root_type, + module_name=self.__class__.__module__, + ) + + def _get_input_messages( + self, input_val: Union[str, BaseMessage, Sequence[BaseMessage], dict] + ) -> list[BaseMessage]: + from langchain_core.messages import BaseMessage + + # If dictionary, try to pluck the single key representing messages + if isinstance(input_val, dict): + if self.input_messages_key: + key = self.input_messages_key + elif len(input_val) == 1: + key = list(input_val.keys())[0] + else: + key = "input" + input_val = input_val[key] + + # If value is a string, convert to a human message + if isinstance(input_val, str): + from langchain_core.messages import HumanMessage + + return [HumanMessage(content=input_val)] + # If value is a single message, convert to a list + if isinstance(input_val, BaseMessage): + return [input_val] + # If value is a list or tuple... + if isinstance(input_val, (list, tuple)): + # Handle empty case + if len(input_val) == 0: + return list(input_val) + # If is a list of list, then return the first value + # This occurs for chat models - since we batch inputs + if isinstance(input_val[0], list): + if len(input_val) != 1: + msg = f"Expected a single list of messages. Got {input_val}." + raise ValueError(msg) + return input_val[0] + return list(input_val) + msg = ( + f"Expected str, BaseMessage, list[BaseMessage], or tuple[BaseMessage]. " + f"Got {input_val}." + ) + raise ValueError(msg) # noqa: TRY004 + + def _get_output_messages( + self, output_val: Union[str, BaseMessage, Sequence[BaseMessage], dict] + ) -> list[BaseMessage]: + from langchain_core.messages import BaseMessage + + # If dictionary, try to pluck the single key representing messages + if isinstance(output_val, dict): + if self.output_messages_key: + key = self.output_messages_key + elif len(output_val) == 1: + key = list(output_val.keys())[0] + else: + key = "output" + # If you are wrapping a chat model directly + # The output is actually this weird generations object + if key not in output_val and "generations" in output_val: + output_val = output_val["generations"][0][0]["message"] + else: + output_val = output_val[key] + + if isinstance(output_val, str): + from langchain_core.messages import AIMessage + + return [AIMessage(content=output_val)] + # If value is a single message, convert to a list + if isinstance(output_val, BaseMessage): + return [output_val] + if isinstance(output_val, (list, tuple)): + return list(output_val) + msg = ( + f"Expected str, BaseMessage, list[BaseMessage], or tuple[BaseMessage]. " + f"Got {output_val}." 
+ ) + raise ValueError(msg) # noqa: TRY004 + + def _enter_history(self, input: Any, config: RunnableConfig) -> list[BaseMessage]: + hist: BaseChatMessageHistory = config["configurable"]["message_history"] + messages = hist.messages.copy() + + if not self.history_messages_key: + # return all messages + input_val = ( + input if not self.input_messages_key else input[self.input_messages_key] + ) + messages += self._get_input_messages(input_val) + return messages + + async def _aenter_history( + self, input: dict[str, Any], config: RunnableConfig + ) -> list[BaseMessage]: + hist: BaseChatMessageHistory = config["configurable"]["message_history"] + messages = (await hist.aget_messages()).copy() + + if not self.history_messages_key: + # return all messages + input_val = ( + input if not self.input_messages_key else input[self.input_messages_key] + ) + messages += self._get_input_messages(input_val) + return messages + + def _exit_history(self, run: Run, config: RunnableConfig) -> None: + hist: BaseChatMessageHistory = config["configurable"]["message_history"] + + # Get the input messages + inputs = load(run.inputs) + input_messages = self._get_input_messages(inputs) + # If historic messages were prepended to the input messages, remove them to + # avoid adding duplicate messages to history. + if not self.history_messages_key: + historic_messages = config["configurable"]["message_history"].messages + input_messages = input_messages[len(historic_messages) :] + + # Get the output messages + output_val = load(run.outputs) + output_messages = self._get_output_messages(output_val) + hist.add_messages(input_messages + output_messages) + + async def _aexit_history(self, run: Run, config: RunnableConfig) -> None: + hist: BaseChatMessageHistory = config["configurable"]["message_history"] + + # Get the input messages + inputs = load(run.inputs) + input_messages = self._get_input_messages(inputs) + # If historic messages were prepended to the input messages, remove them to + # avoid adding duplicate messages to history. + if not self.history_messages_key: + historic_messages = await hist.aget_messages() + input_messages = input_messages[len(historic_messages) :] + + # Get the output messages + output_val = load(run.outputs) + output_messages = self._get_output_messages(output_val) + await hist.aadd_messages(input_messages + output_messages) + + def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig: + config = super()._merge_configs(*configs) + expected_keys = [field_spec.id for field_spec in self.history_factory_config] + + configurable = config.get("configurable", {}) + + missing_keys = set(expected_keys) - set(configurable.keys()) + parameter_names = _get_parameter_names(self.get_session_history) + + if missing_keys and parameter_names: + example_input = {self.input_messages_key: "foo"} + example_configurable = dict.fromkeys(missing_keys, "[your-value-here]") + example_config = {"configurable": example_configurable} + msg = ( + f"Missing keys {sorted(missing_keys)} in config['configurable'] " + f"Expected keys are {sorted(expected_keys)}." 
+ f"When using via .invoke() or .stream(), pass in a config; " + f"e.g., chain.invoke({example_input}, {example_config})" + ) + raise ValueError(msg) + + if len(expected_keys) == 1: + if parameter_names: + # If arity = 1, then invoke function by positional arguments + message_history = self.get_session_history( + configurable[expected_keys[0]] + ) + else: + if not config: + config["configurable"] = {} + message_history = self.get_session_history() + else: + # otherwise verify that names of keys patch and invoke by named arguments + if set(expected_keys) != set(parameter_names): + msg = ( + f"Expected keys {sorted(expected_keys)} do not match parameter " + f"names {sorted(parameter_names)} of get_session_history." + ) + raise ValueError(msg) + + message_history = self.get_session_history( + **{key: configurable[key] for key in expected_keys} + ) + config["configurable"]["message_history"] = message_history + return config + + +def _get_parameter_names(callable_: GetSessionHistoryCallable) -> list[str]: + """Get the parameter names of the callable.""" + sig = inspect.signature(callable_) + return list(sig.parameters.keys()) diff --git a/venv/Lib/site-packages/langchain_core/runnables/passthrough.py b/venv/Lib/site-packages/langchain_core/runnables/passthrough.py new file mode 100644 index 00000000..098ce791 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/passthrough.py @@ -0,0 +1,837 @@ +"""Implementation of the RunnablePassthrough.""" + +from __future__ import annotations + +import asyncio +import inspect +import threading +from collections.abc import Awaitable +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Union, + cast, +) + +from pydantic import BaseModel, RootModel +from typing_extensions import override + +from langchain_core.runnables.base import ( + Other, + Runnable, + RunnableParallel, + RunnableSerializable, +) +from langchain_core.runnables.config import ( + RunnableConfig, + acall_func_with_variable_args, + call_func_with_variable_args, + ensure_config, + get_executor_for_config, + patch_config, +) +from langchain_core.runnables.utils import ( + AddableDict, + ConfigurableFieldSpec, +) +from langchain_core.utils.aiter import atee, py_anext +from langchain_core.utils.iter import safetee +from langchain_core.utils.pydantic import create_model_v2 + +if TYPE_CHECKING: + from collections.abc import AsyncIterator, Iterator, Mapping + + from langchain_core.callbacks.manager import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, + ) + from langchain_core.runnables.graph import Graph + + +def identity(x: Other) -> Other: + """Identity function. + + Args: + x (Other): input. + + Returns: + Other: output. + """ + return x + + +async def aidentity(x: Other) -> Other: + """Async identity function. + + Args: + x (Other): input. + + Returns: + Other: output. + """ + return x + + +class RunnablePassthrough(RunnableSerializable[Other, Other]): + """Runnable to passthrough inputs unchanged or with additional keys. + + This Runnable behaves almost like the identity function, except that it + can be configured to add additional keys to the output, if the input is a + dict. + + The examples below demonstrate this Runnable works using a few simple + chains. The chains rely on simple lambdas to make the examples easy to execute + and experiment with. + + Examples: + + .. 
+        .. code-block:: python
+
+            from langchain_core.runnables import (
+                RunnableLambda,
+                RunnableParallel,
+                RunnablePassthrough,
+            )
+
+            runnable = RunnableParallel(
+                origin=RunnablePassthrough(),
+                modified=lambda x: x+1
+            )
+
+            runnable.invoke(1) # {'origin': 1, 'modified': 2}
+
+
+            def fake_llm(prompt: str) -> str: # Fake LLM for the example
+                return "completion"
+
+            chain = RunnableLambda(fake_llm) | {
+                'original': RunnablePassthrough(), # Original LLM output
+                'parsed': lambda text: text[::-1] # Parsing logic
+            }
+
+            chain.invoke('hello') # {'original': 'completion', 'parsed': 'noitelpmoc'}
+
+    In some cases, it may be useful to pass the input through while adding some
+    keys to the output. In this case, you can use the `assign` method:
+
+        .. code-block:: python
+
+            from langchain_core.runnables import RunnablePassthrough
+
+            def fake_llm(prompt: str) -> str: # Fake LLM for the example
+                return "completion"
+
+            runnable = {
+                'llm1': fake_llm,
+                'llm2': fake_llm,
+            } | RunnablePassthrough.assign(
+                total_chars=lambda inputs: len(inputs['llm1'] + inputs['llm2'])
+            )
+
+            runnable.invoke('hello')
+            # {'llm1': 'completion', 'llm2': 'completion', 'total_chars': 20}
+    """
+
+    input_type: Optional[type[Other]] = None
+
+    func: Optional[
+        Union[Callable[[Other], None], Callable[[Other, RunnableConfig], None]]
+    ] = None
+
+    afunc: Optional[
+        Union[
+            Callable[[Other], Awaitable[None]],
+            Callable[[Other, RunnableConfig], Awaitable[None]],
+        ]
+    ] = None
+
+    @override
+    def __repr_args__(self) -> Any:
+        # Without this repr(self) raises a RecursionError
+        # See https://github.com/pydantic/pydantic/issues/7327
+        return []
+
+    def __init__(
+        self,
+        func: Optional[
+            Union[
+                Union[Callable[[Other], None], Callable[[Other, RunnableConfig], None]],
+                Union[
+                    Callable[[Other], Awaitable[None]],
+                    Callable[[Other, RunnableConfig], Awaitable[None]],
+                ],
+            ]
+        ] = None,
+        afunc: Optional[
+            Union[
+                Callable[[Other], Awaitable[None]],
+                Callable[[Other, RunnableConfig], Awaitable[None]],
+            ]
+        ] = None,
+        *,
+        input_type: Optional[type[Other]] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Create a RunnablePassthrough.
+
+        Args:
+            func: Function to be called with the input.
+            afunc: Async function to be called with the input.
+            input_type: Type of the input.
+        """
+        if inspect.iscoroutinefunction(func):
+            afunc = func
+            func = None
+
+        super().__init__(func=func, afunc=afunc, input_type=input_type, **kwargs)  # type: ignore[call-arg]
+
+    @classmethod
+    @override
+    def is_lc_serializable(cls) -> bool:
+        return True
+
+    @classmethod
+    @override
+    def get_lc_namespace(cls) -> list[str]:
+        return ["langchain", "schema", "runnable"]
+
+    @property
+    @override
+    def InputType(self) -> Any:
+        return self.input_type or Any
+
+    @property
+    @override
+    def OutputType(self) -> Any:
+        return self.input_type or Any
+
+    @classmethod
+    @override
+    def assign(
+        cls,
+        **kwargs: Union[
+            Runnable[dict[str, Any], Any],
+            Callable[[dict[str, Any]], Any],
+            Mapping[
+                str,
+                Union[Runnable[dict[str, Any], Any], Callable[[dict[str, Any]], Any]],
+            ],
+        ],
+    ) -> RunnableAssign:
+        """Merge the Dict input with the output produced by the mapping argument.
+
+        Args:
+            **kwargs: Runnable, Callable or a Mapping from keys to Runnables
+                or Callables.
+
+        Returns:
+            A Runnable that merges the Dict input with the output produced by the
+            mapping argument.
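+
+        Example:
+            A minimal sketch of ``assign`` on a dict input (the key names are
+            illustrative):
+
+            .. code-block:: python
+
+                runnable = RunnablePassthrough.assign(doubled=lambda d: d["x"] * 2)
+                runnable.invoke({"x": 3})  # -> {'x': 3, 'doubled': 6}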
+ """ + return RunnableAssign(RunnableParallel[dict[str, Any]](kwargs)) + + @override + def invoke( + self, input: Other, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Other: + if self.func is not None: + call_func_with_variable_args( + self.func, input, ensure_config(config), **kwargs + ) + return self._call_with_config(identity, input, config) + + @override + async def ainvoke( + self, + input: Other, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Other: + if self.afunc is not None: + await acall_func_with_variable_args( + self.afunc, input, ensure_config(config), **kwargs + ) + elif self.func is not None: + call_func_with_variable_args( + self.func, input, ensure_config(config), **kwargs + ) + return await self._acall_with_config(aidentity, input, config) + + @override + def transform( + self, + input: Iterator[Other], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[Other]: + if self.func is None: + for chunk in self._transform_stream_with_config(input, identity, config): + yield chunk + else: + final: Other + got_first_chunk = False + + for chunk in self._transform_stream_with_config(input, identity, config): + yield chunk + + if not got_first_chunk: + final = chunk + got_first_chunk = True + else: + try: + final = final + chunk # type: ignore[operator] + except TypeError: + final = chunk + + if got_first_chunk: + call_func_with_variable_args( + self.func, final, ensure_config(config), **kwargs + ) + + @override + async def atransform( + self, + input: AsyncIterator[Other], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[Other]: + if self.afunc is None and self.func is None: + async for chunk in self._atransform_stream_with_config( + input, identity, config + ): + yield chunk + else: + got_first_chunk = False + + async for chunk in self._atransform_stream_with_config( + input, identity, config + ): + yield chunk + + # By definitions, a function will operate on the aggregated + # input. So we'll aggregate the input until we get to the last + # chunk. + # If the input is not addable, then we'll assume that we can + # only operate on the last chunk. + if not got_first_chunk: + final = chunk + got_first_chunk = True + else: + try: + final = final + chunk # type: ignore[operator] + except TypeError: + final = chunk + + if got_first_chunk: + config = ensure_config(config) + if self.afunc is not None: + await acall_func_with_variable_args( + self.afunc, final, config, **kwargs + ) + elif self.func is not None: + call_func_with_variable_args(self.func, final, config, **kwargs) + + @override + def stream( + self, + input: Other, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[Other]: + return self.transform(iter([input]), config, **kwargs) + + @override + async def astream( + self, + input: Other, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[Other]: + async def input_aiter() -> AsyncIterator[Other]: + yield input + + async for chunk in self.atransform(input_aiter(), config, **kwargs): + yield chunk + + +_graph_passthrough: RunnablePassthrough = RunnablePassthrough() + + +class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]): + """Runnable that assigns key-value pairs to dict[str, Any] inputs. 
+ + The `RunnableAssign` class takes input dictionaries and, through a + `RunnableParallel` instance, applies transformations, then combines + these with the original data, introducing new key-value pairs based + on the mapper's logic. + + Examples: + .. code-block:: python + + # This is a RunnableAssign + from langchain_core.runnables.passthrough import ( + RunnableAssign, + RunnableParallel, + ) + from langchain_core.runnables.base import RunnableLambda + + def add_ten(x: dict[str, int]) -> dict[str, int]: + return {"added": x["input"] + 10} + + mapper = RunnableParallel( + {"add_step": RunnableLambda(add_ten),} + ) + + runnable_assign = RunnableAssign(mapper) + + # Synchronous example + runnable_assign.invoke({"input": 5}) + # returns {'input': 5, 'add_step': {'added': 15}} + + # Asynchronous example + await runnable_assign.ainvoke({"input": 5}) + # returns {'input': 5, 'add_step': {'added': 15}} + """ + + mapper: RunnableParallel + + def __init__(self, mapper: RunnableParallel[dict[str, Any]], **kwargs: Any) -> None: + """Create a RunnableAssign. + + Args: + mapper: A ``RunnableParallel`` instance that will be used to transform the + input dictionary. + """ + super().__init__(mapper=mapper, **kwargs) # type: ignore[call-arg] + + @classmethod + @override + def is_lc_serializable(cls) -> bool: + return True + + @classmethod + @override + def get_lc_namespace(cls) -> list[str]: + return ["langchain", "schema", "runnable"] + + @override + def get_name( + self, suffix: Optional[str] = None, *, name: Optional[str] = None + ) -> str: + name = ( + name + or self.name + or f"RunnableAssign<{','.join(self.mapper.steps__.keys())}>" + ) + return super().get_name(suffix, name=name) + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + map_input_schema = self.mapper.get_input_schema(config) + if not issubclass(map_input_schema, RootModel): + # ie. it's a dict + return map_input_schema + + return super().get_input_schema(config) + + @override + def get_output_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + map_input_schema = self.mapper.get_input_schema(config) + map_output_schema = self.mapper.get_output_schema(config) + if not issubclass(map_input_schema, RootModel) and not issubclass( + map_output_schema, RootModel + ): + fields = {} + + for name, field_info in map_input_schema.model_fields.items(): + fields[name] = (field_info.annotation, field_info.default) + + for name, field_info in map_output_schema.model_fields.items(): + fields[name] = (field_info.annotation, field_info.default) + + return create_model_v2("RunnableAssignOutput", field_definitions=fields) + if not issubclass(map_output_schema, RootModel): + # ie. only map output is a dict + # ie. 
input type is either unknown or inferred incorrectly + return map_output_schema + + return super().get_output_schema(config) + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + return self.mapper.config_specs + + @override + def get_graph(self, config: RunnableConfig | None = None) -> Graph: + # get graph from mapper + graph = self.mapper.get_graph(config) + # add passthrough node and edges + input_node = graph.first_node() + output_node = graph.last_node() + if input_node is not None and output_node is not None: + passthrough_node = graph.add_node(_graph_passthrough) + graph.add_edge(input_node, passthrough_node) + graph.add_edge(passthrough_node, output_node) + return graph + + def _invoke( + self, + input: dict[str, Any], + run_manager: CallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> dict[str, Any]: + if not isinstance(input, dict): + msg = "The input to RunnablePassthrough.assign() must be a dict." + raise ValueError(msg) # noqa: TRY004 + + return { + **input, + **self.mapper.invoke( + input, + patch_config(config, callbacks=run_manager.get_child()), + **kwargs, + ), + } + + @override + def invoke( + self, + input: dict[str, Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> dict[str, Any]: + return self._call_with_config(self._invoke, input, config, **kwargs) + + async def _ainvoke( + self, + input: dict[str, Any], + run_manager: AsyncCallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> dict[str, Any]: + if not isinstance(input, dict): + msg = "The input to RunnablePassthrough.assign() must be a dict." + raise ValueError(msg) # noqa: TRY004 + + return { + **input, + **await self.mapper.ainvoke( + input, + patch_config(config, callbacks=run_manager.get_child()), + **kwargs, + ), + } + + @override + async def ainvoke( + self, + input: dict[str, Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> dict[str, Any]: + return await self._acall_with_config(self._ainvoke, input, config, **kwargs) + + def _transform( + self, + input: Iterator[dict[str, Any]], + run_manager: CallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> Iterator[dict[str, Any]]: + # collect mapper keys + mapper_keys = set(self.mapper.steps__.keys()) + # create two streams, one for the map and one for the passthrough + for_passthrough, for_map = safetee(input, 2, lock=threading.Lock()) + + # create map output stream + map_output = self.mapper.transform( + for_map, + patch_config( + config, + callbacks=run_manager.get_child(), + ), + **kwargs, + ) + + # get executor to start map output stream in background + with get_executor_for_config(config) as executor: + # start map output stream + first_map_chunk_future = executor.submit( + next, + map_output, + None, + ) + # consume passthrough stream + for chunk in for_passthrough: + if not isinstance(chunk, dict): + msg = "The input to RunnablePassthrough.assign() must be a dict." 
+ raise ValueError(msg) # noqa: TRY004 + # remove mapper keys from passthrough chunk, to be overwritten by map + filtered = AddableDict( + {k: v for k, v in chunk.items() if k not in mapper_keys} + ) + if filtered: + yield filtered + # yield map output + yield cast("dict[str, Any]", first_map_chunk_future.result()) + for chunk in map_output: + yield chunk + + @override + def transform( + self, + input: Iterator[dict[str, Any]], + config: Optional[RunnableConfig] = None, + **kwargs: Any | None, + ) -> Iterator[dict[str, Any]]: + yield from self._transform_stream_with_config( + input, self._transform, config, **kwargs + ) + + async def _atransform( + self, + input: AsyncIterator[dict[str, Any]], + run_manager: AsyncCallbackManagerForChainRun, + config: RunnableConfig, + **kwargs: Any, + ) -> AsyncIterator[dict[str, Any]]: + # collect mapper keys + mapper_keys = set(self.mapper.steps__.keys()) + # create two streams, one for the map and one for the passthrough + for_passthrough, for_map = atee(input, 2, lock=asyncio.Lock()) + # create map output stream + map_output = self.mapper.atransform( + for_map, + patch_config( + config, + callbacks=run_manager.get_child(), + ), + **kwargs, + ) + # start map output stream + first_map_chunk_task: asyncio.Task = asyncio.create_task( + py_anext(map_output, None), # type: ignore[arg-type] + ) + # consume passthrough stream + async for chunk in for_passthrough: + if not isinstance(chunk, dict): + msg = "The input to RunnablePassthrough.assign() must be a dict." + raise ValueError(msg) # noqa: TRY004 + + # remove mapper keys from passthrough chunk, to be overwritten by map output + filtered = AddableDict( + {k: v for k, v in chunk.items() if k not in mapper_keys} + ) + if filtered: + yield filtered + # yield map output + yield await first_map_chunk_task + async for chunk in map_output: + yield chunk + + @override + async def atransform( + self, + input: AsyncIterator[dict[str, Any]], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[dict[str, Any]]: + async for chunk in self._atransform_stream_with_config( + input, self._atransform, config, **kwargs + ): + yield chunk + + @override + def stream( + self, + input: dict[str, Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[dict[str, Any]]: + return self.transform(iter([input]), config, **kwargs) + + @override + async def astream( + self, + input: dict[str, Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[dict[str, Any]]: + async def input_aiter() -> AsyncIterator[dict[str, Any]]: + yield input + + async for chunk in self.atransform(input_aiter(), config, **kwargs): + yield chunk + + +class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]): + """Runnable that picks keys from dict[str, Any] inputs. + + RunnablePick class represents a Runnable that selectively picks keys from a + dictionary input. It allows you to specify one or more keys to extract + from the input dictionary. It returns a new dictionary containing only + the selected keys. + + Example: + .. 
code-block:: python + + from langchain_core.runnables.passthrough import RunnablePick + + input_data = { + 'name': 'John', + 'age': 30, + 'city': 'New York', + 'country': 'USA' + } + + runnable = RunnablePick(keys=['name', 'age']) + + output_data = runnable.invoke(input_data) + + print(output_data) # Output: {'name': 'John', 'age': 30} + """ + + keys: Union[str, list[str]] + + def __init__(self, keys: Union[str, list[str]], **kwargs: Any) -> None: + """Create a RunnablePick. + + Args: + keys: A single key or a list of keys to pick from the input dictionary. + """ + super().__init__(keys=keys, **kwargs) # type: ignore[call-arg] + + @classmethod + @override + def is_lc_serializable(cls) -> bool: + return True + + @classmethod + @override + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object.""" + return ["langchain", "schema", "runnable"] + + @override + def get_name( + self, suffix: Optional[str] = None, *, name: Optional[str] = None + ) -> str: + name = ( + name + or self.name + or f"RunnablePick<{','.join([self.keys] if isinstance(self.keys, str) else self.keys)}>" # noqa: E501 + ) + return super().get_name(suffix, name=name) + + def _pick(self, input: dict[str, Any]) -> Any: + if not isinstance(input, dict): + msg = "The input to RunnablePassthrough.assign() must be a dict." + raise ValueError(msg) # noqa: TRY004 + + if isinstance(self.keys, str): + return input.get(self.keys) + picked = {k: input.get(k) for k in self.keys if k in input} + if picked: + return AddableDict(picked) + return None + + def _invoke( + self, + input: dict[str, Any], + ) -> dict[str, Any]: + return self._pick(input) + + @override + def invoke( + self, + input: dict[str, Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> dict[str, Any]: + return self._call_with_config(self._invoke, input, config, **kwargs) + + async def _ainvoke( + self, + input: dict[str, Any], + ) -> dict[str, Any]: + return self._pick(input) + + @override + async def ainvoke( + self, + input: dict[str, Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> dict[str, Any]: + return await self._acall_with_config(self._ainvoke, input, config, **kwargs) + + def _transform( + self, + input: Iterator[dict[str, Any]], + ) -> Iterator[dict[str, Any]]: + for chunk in input: + picked = self._pick(chunk) + if picked is not None: + yield picked + + @override + def transform( + self, + input: Iterator[dict[str, Any]], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[dict[str, Any]]: + yield from self._transform_stream_with_config( + input, self._transform, config, **kwargs + ) + + async def _atransform( + self, + input: AsyncIterator[dict[str, Any]], + ) -> AsyncIterator[dict[str, Any]]: + async for chunk in input: + picked = self._pick(chunk) + if picked is not None: + yield picked + + @override + async def atransform( + self, + input: AsyncIterator[dict[str, Any]], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[dict[str, Any]]: + async for chunk in self._atransform_stream_with_config( + input, self._atransform, config, **kwargs + ): + yield chunk + + @override + def stream( + self, + input: dict[str, Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[dict[str, Any]]: + return self.transform(iter([input]), config, **kwargs) + + @override + async def astream( + self, + input: dict[str, Any], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[dict[str, Any]]: + 
async def input_aiter() -> AsyncIterator[dict[str, Any]]: + yield input + + async for chunk in self.atransform(input_aiter(), config, **kwargs): + yield chunk diff --git a/venv/Lib/site-packages/langchain_core/runnables/retry.py b/venv/Lib/site-packages/langchain_core/runnables/retry.py new file mode 100644 index 00000000..3ceb396f --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/retry.py @@ -0,0 +1,362 @@ +"""Runnable that retries a Runnable if it fails.""" + +from typing import ( + TYPE_CHECKING, + Any, + Optional, + TypeVar, + Union, + cast, +) + +from tenacity import ( + AsyncRetrying, + RetryCallState, + RetryError, + Retrying, + retry_if_exception_type, + stop_after_attempt, + wait_exponential_jitter, +) +from typing_extensions import TypedDict, override + +from langchain_core.runnables.base import RunnableBindingBase +from langchain_core.runnables.config import RunnableConfig, patch_config +from langchain_core.runnables.utils import Input, Output + +if TYPE_CHECKING: + from langchain_core.callbacks.manager import ( + AsyncCallbackManagerForChainRun, + CallbackManagerForChainRun, + ) + + T = TypeVar("T", CallbackManagerForChainRun, AsyncCallbackManagerForChainRun) +U = TypeVar("U") + + +class ExponentialJitterParams(TypedDict, total=False): + """Parameters for ``tenacity.wait_exponential_jitter``.""" + + initial: float + """Initial wait.""" + max: float + """Maximum wait.""" + exp_base: float + """Base for exponential backoff.""" + jitter: float + """Random additional wait sampled from random.uniform(0, jitter).""" + + +class RunnableRetry(RunnableBindingBase[Input, Output]): + """Retry a Runnable if it fails. + + RunnableRetry can be used to add retry logic to any object + that subclasses the base Runnable. + + Such retries are especially useful for network calls that may fail + due to transient errors. + + The RunnableRetry is implemented as a RunnableBinding. The easiest + way to use it is through the `.with_retry()` method on all Runnables. + + Example: + Here's an example that uses a RunnableLambda to raise an exception + + .. code-block:: python + + import time + + def foo(input) -> None: + '''Fake function that raises an exception.''' + raise ValueError(f"Invoking foo failed. At time {time.time()}") + + runnable = RunnableLambda(foo) + + runnable_with_retries = runnable.with_retry( + retry_if_exception_type=(ValueError,), # Retry only on ValueError + wait_exponential_jitter=True, # Add jitter to the exponential backoff + stop_after_attempt=2, # Try twice + exponential_jitter_params={"initial": 2}, # if desired, customize backoff + ) + + # The method invocation above is equivalent to the longer form below: + + runnable_with_retries = RunnableRetry( + bound=runnable, + retry_exception_types=(ValueError,), + max_attempt_number=2, + wait_exponential_jitter=True, + exponential_jitter_params={"initial": 2}, + ) + + This logic can be used to retry any Runnable, including a chain of Runnables, + but in general it's best practice to keep the scope of the retry as small as + possible. For example, if you have a chain of Runnables, you should only retry + the Runnable that is likely to fail, not the entire chain. + + Example: + + .. 
code-block:: python + + from langchain_core.chat_models import ChatOpenAI + from langchain_core.prompts import PromptTemplate + + template = PromptTemplate.from_template("tell me a joke about {topic}.") + model = ChatOpenAI(temperature=0.5) + + # Good + chain = template | model.with_retry() + + # Bad + chain = template | model + retryable_chain = chain.with_retry() + """ # noqa: E501 + + retry_exception_types: tuple[type[BaseException], ...] = (Exception,) + """The exception types to retry on. By default all exceptions are retried. + + In general you should only retry on exceptions that are likely to be + transient, such as network errors. + + Good exceptions to retry are all server errors (5xx) and selected client + errors (4xx) such as 429 Too Many Requests. + """ + + wait_exponential_jitter: bool = True + """Whether to add jitter to the exponential backoff.""" + + exponential_jitter_params: Optional[ExponentialJitterParams] = None + """Parameters for ``tenacity.wait_exponential_jitter``. Namely: ``initial``, + ``max``, ``exp_base``, and ``jitter`` (all float values). + """ + + max_attempt_number: int = 3 + """The maximum number of attempts to retry the Runnable.""" + + @property + def _kwargs_retrying(self) -> dict[str, Any]: + kwargs: dict[str, Any] = {} + + if self.max_attempt_number: + kwargs["stop"] = stop_after_attempt(self.max_attempt_number) + + if self.wait_exponential_jitter: + kwargs["wait"] = wait_exponential_jitter( + **(self.exponential_jitter_params or {}) + ) + + if self.retry_exception_types: + kwargs["retry"] = retry_if_exception_type(self.retry_exception_types) + + return kwargs + + def _sync_retrying(self, **kwargs: Any) -> Retrying: + return Retrying(**self._kwargs_retrying, **kwargs) + + def _async_retrying(self, **kwargs: Any) -> AsyncRetrying: + return AsyncRetrying(**self._kwargs_retrying, **kwargs) + + @staticmethod + def _patch_config( + config: RunnableConfig, + run_manager: "T", + retry_state: RetryCallState, + ) -> RunnableConfig: + attempt = retry_state.attempt_number + tag = f"retry:attempt:{attempt}" if attempt > 1 else None + return patch_config(config, callbacks=run_manager.get_child(tag)) + + def _patch_config_list( + self, + config: list[RunnableConfig], + run_manager: list["T"], + retry_state: RetryCallState, + ) -> list[RunnableConfig]: + return [ + self._patch_config(c, rm, retry_state) for c, rm in zip(config, run_manager) + ] + + def _invoke( + self, + input: Input, + run_manager: "CallbackManagerForChainRun", + config: RunnableConfig, + **kwargs: Any, + ) -> Output: + for attempt in self._sync_retrying(reraise=True): + with attempt: + result = super().invoke( + input, + self._patch_config(config, run_manager, attempt.retry_state), + **kwargs, + ) + if attempt.retry_state.outcome and not attempt.retry_state.outcome.failed: + attempt.retry_state.set_result(result) + return result + + @override + def invoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + return self._call_with_config(self._invoke, input, config, **kwargs) + + async def _ainvoke( + self, + input: Input, + run_manager: "AsyncCallbackManagerForChainRun", + config: RunnableConfig, + **kwargs: Any, + ) -> Output: + async for attempt in self._async_retrying(reraise=True): + with attempt: + result = await super().ainvoke( + input, + self._patch_config(config, run_manager, attempt.retry_state), + **kwargs, + ) + if attempt.retry_state.outcome and not attempt.retry_state.outcome.failed: + attempt.retry_state.set_result(result) + return result + 
+ @override + async def ainvoke( + self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + return await self._acall_with_config(self._ainvoke, input, config, **kwargs) + + def _batch( + self, + inputs: list[Input], + run_manager: list["CallbackManagerForChainRun"], + config: list[RunnableConfig], + **kwargs: Any, + ) -> list[Union[Output, Exception]]: + results_map: dict[int, Output] = {} + + def pending(iterable: list[U]) -> list[U]: + return [item for idx, item in enumerate(iterable) if idx not in results_map] + + not_set: list[Output] = [] + result = not_set + try: + for attempt in self._sync_retrying(): + with attempt: + # Get the results of the inputs that have not succeeded yet. + result = super().batch( + pending(inputs), + self._patch_config_list( + pending(config), pending(run_manager), attempt.retry_state + ), + return_exceptions=True, + **kwargs, + ) + # Register the results of the inputs that have succeeded. + first_exception = None + for i, r in enumerate(result): + if isinstance(r, Exception): + if not first_exception: + first_exception = r + continue + results_map[i] = r + # If any exception occurred, raise it, to retry the failed ones + if first_exception: + raise first_exception + if ( + attempt.retry_state.outcome + and not attempt.retry_state.outcome.failed + ): + attempt.retry_state.set_result(result) + except RetryError as e: + if result is not_set: + result = cast("list[Output]", [e] * len(inputs)) + + outputs: list[Union[Output, Exception]] = [] + for idx in range(len(inputs)): + if idx in results_map: + outputs.append(results_map[idx]) + else: + outputs.append(result.pop(0)) + return outputs + + @override + def batch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Any, + ) -> list[Output]: + return self._batch_with_config( + self._batch, inputs, config, return_exceptions=return_exceptions, **kwargs + ) + + async def _abatch( + self, + inputs: list[Input], + run_manager: list["AsyncCallbackManagerForChainRun"], + config: list[RunnableConfig], + **kwargs: Any, + ) -> list[Union[Output, Exception]]: + results_map: dict[int, Output] = {} + + def pending(iterable: list[U]) -> list[U]: + return [item for idx, item in enumerate(iterable) if idx not in results_map] + + not_set: list[Output] = [] + result = not_set + try: + async for attempt in self._async_retrying(): + with attempt: + # Get the results of the inputs that have not succeeded yet. + result = await super().abatch( + pending(inputs), + self._patch_config_list( + pending(config), pending(run_manager), attempt.retry_state + ), + return_exceptions=True, + **kwargs, + ) + # Register the results of the inputs that have succeeded. 
+ first_exception = None + for i, r in enumerate(result): + if isinstance(r, Exception): + if not first_exception: + first_exception = r + continue + results_map[i] = r + # If any exception occurred, raise it, to retry the failed ones + if first_exception: + raise first_exception + if ( + attempt.retry_state.outcome + and not attempt.retry_state.outcome.failed + ): + attempt.retry_state.set_result(result) + except RetryError as e: + if result is not_set: + result = cast("list[Output]", [e] * len(inputs)) + + outputs: list[Union[Output, Exception]] = [] + for idx in range(len(inputs)): + if idx in results_map: + outputs.append(results_map[idx]) + else: + outputs.append(result.pop(0)) + return outputs + + @override + async def abatch( + self, + inputs: list[Input], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Any, + ) -> list[Output]: + return await self._abatch_with_config( + self._abatch, inputs, config, return_exceptions=return_exceptions, **kwargs + ) + + # stream() and transform() are not retried because retrying a stream + # is not very intuitive. diff --git a/venv/Lib/site-packages/langchain_core/runnables/router.py b/venv/Lib/site-packages/langchain_core/runnables/router.py new file mode 100644 index 00000000..192c6239 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/router.py @@ -0,0 +1,243 @@ +"""Runnable that routes to a set of Runnables.""" + +from __future__ import annotations + +from collections.abc import Mapping +from itertools import starmap +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Union, + cast, +) + +from pydantic import ConfigDict +from typing_extensions import TypedDict, override + +from langchain_core.runnables.base import ( + Runnable, + RunnableSerializable, + coerce_to_runnable, +) +from langchain_core.runnables.config import ( + RunnableConfig, + get_config_list, + get_executor_for_config, +) +from langchain_core.runnables.utils import ( + ConfigurableFieldSpec, + Input, + Output, + gather_with_concurrency, + get_unique_config_specs, +) + +if TYPE_CHECKING: + from collections.abc import AsyncIterator, Iterator + + +class RouterInput(TypedDict): + """Router input. + + Attributes: + key: The key to route on. + input: The input to pass to the selected Runnable. + """ + + key: str + input: Any + + +class RouterRunnable(RunnableSerializable[RouterInput, Output]): + """Runnable that routes to a set of Runnables based on Input['key']. + + Returns the output of the selected Runnable. + + Example: + + .. code-block:: python + + from langchain_core.runnables.router import RouterRunnable + from langchain_core.runnables import RunnableLambda + + add = RunnableLambda(func=lambda x: x + 1) + square = RunnableLambda(func=lambda x: x**2) + + router = RouterRunnable(runnables={"add": add, "square": square}) + router.invoke({"key": "square", "input": 3}) + """ + + runnables: Mapping[str, Runnable[Any, Output]] + + @property + @override + def config_specs(self) -> list[ConfigurableFieldSpec]: + return get_unique_config_specs( + spec for step in self.runnables.values() for spec in step.config_specs + ) + + def __init__( + self, + runnables: Mapping[str, Union[Runnable[Any, Output], Callable[[Any], Output]]], + ) -> None: + """Create a RouterRunnable. + + Args: + runnables: A mapping of keys to Runnables. 
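+
+        Plain callables are accepted too; this sketch relies only on that
+        coercion (via ``coerce_to_runnable``) and the ``{"key", "input"}``
+        input schema:
+
+        .. code-block:: python
+
+            router = RouterRunnable(
+                runnables={"double": lambda x: x * 2, "negate": lambda x: -x}
+            )
+            router.invoke({"key": "double", "input": 3})  # -> 6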
+ """ + super().__init__( # type: ignore[call-arg] + runnables={key: coerce_to_runnable(r) for key, r in runnables.items()} + ) + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @classmethod + @override + def is_lc_serializable(cls) -> bool: + """Return whether this class is serializable.""" + return True + + @classmethod + @override + def get_lc_namespace(cls) -> list[str]: + """Get the namespace of the langchain object.""" + return ["langchain", "schema", "runnable"] + + @override + def invoke( + self, input: RouterInput, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> Output: + key = input["key"] + actual_input = input["input"] + if key not in self.runnables: + msg = f"No runnable associated with key '{key}'" + raise ValueError(msg) + + runnable = self.runnables[key] + return runnable.invoke(actual_input, config) + + @override + async def ainvoke( + self, + input: RouterInput, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Output: + key = input["key"] + actual_input = input["input"] + if key not in self.runnables: + msg = f"No runnable associated with key '{key}'" + raise ValueError(msg) + + runnable = self.runnables[key] + return await runnable.ainvoke(actual_input, config) + + @override + def batch( + self, + inputs: list[RouterInput], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + if not inputs: + return [] + + keys = [input["key"] for input in inputs] + actual_inputs = [input["input"] for input in inputs] + if any(key not in self.runnables for key in keys): + msg = "One or more keys do not have a corresponding runnable" + raise ValueError(msg) + + def invoke( + runnable: Runnable, input: Input, config: RunnableConfig + ) -> Union[Output, Exception]: + if return_exceptions: + try: + return runnable.invoke(input, config, **kwargs) + except Exception as e: + return e + else: + return runnable.invoke(input, config, **kwargs) + + runnables = [self.runnables[key] for key in keys] + configs = get_config_list(config, len(inputs)) + with get_executor_for_config(configs[0]) as executor: + return cast( + "list[Output]", + list(executor.map(invoke, runnables, actual_inputs, configs)), + ) + + @override + async def abatch( + self, + inputs: list[RouterInput], + config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None, + *, + return_exceptions: bool = False, + **kwargs: Optional[Any], + ) -> list[Output]: + if not inputs: + return [] + + keys = [input["key"] for input in inputs] + actual_inputs = [input["input"] for input in inputs] + if any(key not in self.runnables for key in keys): + msg = "One or more keys do not have a corresponding runnable" + raise ValueError(msg) + + async def ainvoke( + runnable: Runnable, input: Input, config: RunnableConfig + ) -> Union[Output, Exception]: + if return_exceptions: + try: + return await runnable.ainvoke(input, config, **kwargs) + except Exception as e: + return e + else: + return await runnable.ainvoke(input, config, **kwargs) + + runnables = [self.runnables[key] for key in keys] + configs = get_config_list(config, len(inputs)) + return await gather_with_concurrency( + configs[0].get("max_concurrency"), + *starmap(ainvoke, zip(runnables, actual_inputs, configs)), + ) + + @override + def stream( + self, + input: RouterInput, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> Iterator[Output]: + key = input["key"] + actual_input = 
input["input"] + if key not in self.runnables: + msg = f"No runnable associated with key '{key}'" + raise ValueError(msg) + + runnable = self.runnables[key] + yield from runnable.stream(actual_input, config) + + @override + async def astream( + self, + input: RouterInput, + config: Optional[RunnableConfig] = None, + **kwargs: Optional[Any], + ) -> AsyncIterator[Output]: + key = input["key"] + actual_input = input["input"] + if key not in self.runnables: + msg = f"No runnable associated with key '{key}'" + raise ValueError(msg) + + runnable = self.runnables[key] + async for output in runnable.astream(actual_input, config): + yield output diff --git a/venv/Lib/site-packages/langchain_core/runnables/schema.py b/venv/Lib/site-packages/langchain_core/runnables/schema.py new file mode 100644 index 00000000..20ad5800 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/schema.py @@ -0,0 +1,177 @@ +"""Module contains typedefs that are used with Runnables.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Literal, Union + +from typing_extensions import NotRequired, TypedDict + +if TYPE_CHECKING: + from collections.abc import Sequence + + +class EventData(TypedDict, total=False): + """Data associated with a streaming event.""" + + input: Any + """The input passed to the Runnable that generated the event. + + Inputs will sometimes be available at the *START* of the Runnable, and + sometimes at the *END* of the Runnable. + + If a Runnable is able to stream its inputs, then its input by definition + won't be known until the *END* of the Runnable when it has finished streaming + its inputs. + """ + output: Any + """The output of the Runnable that generated the event. + + Outputs will only be available at the *END* of the Runnable. + + For most Runnables, this field can be inferred from the `chunk` field, + though there might be some exceptions for special cased Runnables (e.g., like + chat models), which may return more information. + """ + chunk: Any + """A streaming chunk from the output that generated the event. + + chunks support addition in general, and adding them up should result + in the output of the Runnable that generated the event. + """ + + +class BaseStreamEvent(TypedDict): + """Streaming event. + + Schema of a streaming event which is produced from the astream_events method. + + Example: + + .. code-block:: python + + from langchain_core.runnables import RunnableLambda + + async def reverse(s: str) -> str: + return s[::-1] + + chain = RunnableLambda(func=reverse) + + events = [event async for event in chain.astream_events("hello")] + + # will produce the following events + # (where some fields have been omitted for brevity): + [ + { + "data": {"input": "hello"}, + "event": "on_chain_start", + "metadata": {}, + "name": "reverse", + "tags": [], + }, + { + "data": {"chunk": "olleh"}, + "event": "on_chain_stream", + "metadata": {}, + "name": "reverse", + "tags": [], + }, + { + "data": {"output": "olleh"}, + "event": "on_chain_end", + "metadata": {}, + "name": "reverse", + "tags": [], + }, + ] + """ + + event: str + """Event names are of the format: on_[runnable_type]_(start|stream|end). 
+
+    Runnable types are one of:
+
+    - **llm** - used by non chat models
+    - **chat_model** - used by chat models
+    - **prompt** -- e.g., ChatPromptTemplate
+    - **tool** -- from tools defined via @tool decorator or inheriting
+      from Tool/BaseTool
+    - **chain** - most Runnables are of this type
+
+    Further, the events are categorized as one of:
+
+    - **start** - when the Runnable starts
+    - **stream** - when the Runnable is streaming
+    - **end** - when the Runnable ends
+
+    start, stream and end are associated with slightly different `data`
+    payloads.
+
+    Please see the documentation for `EventData` for more details.
+    """
+    run_id: str
+    """A randomly generated ID to keep track of the execution of the given Runnable.
+
+    Each child Runnable that gets invoked as part of the execution of a parent
+    Runnable is assigned its own unique ID.
+    """
+    tags: NotRequired[list[str]]
+    """Tags associated with the Runnable that generated this event.
+
+    Tags are always inherited from parent Runnables.
+
+    Tags can either be bound to a Runnable using `.with_config({"tags": ["hello"]})`
+    or passed at run time using `.astream_events(..., {"tags": ["hello"]})`.
+    """
+    metadata: NotRequired[dict[str, Any]]
+    """Metadata associated with the Runnable that generated this event.
+
+    Metadata can either be bound to a Runnable using
+
+        `.with_config({"metadata": { "foo": "bar" }})`
+
+    or passed at run time using
+
+        `.astream_events(..., {"metadata": {"foo": "bar"}})`.
+    """
+
+    parent_ids: Sequence[str]
+    """A list of the parent IDs associated with this event.
+
+    Root Events will have an empty list.
+
+    For example, if a Runnable A calls Runnable B, then the event generated by
+    Runnable B will have Runnable A's ID in the parent_ids field.
+
+    The order of the parent IDs is from the root parent to the immediate parent.
+
+    Only supported as of v2 of the astream events API. v1 will return an empty list.
+    """
+
+
+class StandardStreamEvent(BaseStreamEvent):
+    """A standard stream event that follows LangChain convention for event data."""
+
+    data: EventData
+    """Event data.
+
+    The contents of the event data depend on the event type.
+    """
+    name: str
+    """The name of the Runnable that generated the event."""
+
+
+class CustomStreamEvent(BaseStreamEvent):
+    """Custom stream event created by the user.
+
+    .. versionadded:: 0.2.15
+    """
+
+    # Overwrite the event field to be more specific.
+    event: Literal["on_custom_event"]  # type: ignore[misc]
+    """The event type."""
+    name: str
+    """User defined name for the event."""
+    data: Any
+    """The data associated with the event.
Free form and can be anything.""" + + +StreamEvent = Union[StandardStreamEvent, CustomStreamEvent] diff --git a/venv/Lib/site-packages/langchain_core/runnables/utils.py b/venv/Lib/site-packages/langchain_core/runnables/utils.py new file mode 100644 index 00000000..b9fd1b20 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/runnables/utils.py @@ -0,0 +1,798 @@ +"""Utility code for runnables.""" + +from __future__ import annotations + +import ast +import asyncio +import inspect +import textwrap +from collections.abc import Mapping, Sequence +from contextvars import Context +from functools import lru_cache +from inspect import signature +from itertools import groupby +from typing import ( + TYPE_CHECKING, + Any, + Callable, + NamedTuple, + Optional, + Protocol, + TypeVar, + Union, +) + +from typing_extensions import TypeGuard, override + +# Re-export create-model for backwards compatibility +from langchain_core.utils.pydantic import create_model # noqa: F401 + +if TYPE_CHECKING: + from collections.abc import ( + AsyncIterable, + AsyncIterator, + Awaitable, + Coroutine, + Iterable, + ) + + from langchain_core.runnables.schema import StreamEvent + +Input = TypeVar("Input", contravariant=True) # noqa: PLC0105 +# Output type should implement __concat__, as eg str, list, dict do +Output = TypeVar("Output", covariant=True) # noqa: PLC0105 + + +async def gated_coro(semaphore: asyncio.Semaphore, coro: Coroutine) -> Any: + """Run a coroutine with a semaphore. + + Args: + semaphore: The semaphore to use. + coro: The coroutine to run. + + Returns: + The result of the coroutine. + """ + async with semaphore: + return await coro + + +async def gather_with_concurrency(n: Union[int, None], *coros: Coroutine) -> list: + """Gather coroutines with a limit on the number of concurrent coroutines. + + Args: + n: The number of coroutines to run concurrently. + *coros: The coroutines to run. + + Returns: + The results of the coroutines. + """ + if n is None: + return await asyncio.gather(*coros) + + semaphore = asyncio.Semaphore(n) + + return await asyncio.gather(*(gated_coro(semaphore, c) for c in coros)) + + +def accepts_run_manager(callable: Callable[..., Any]) -> bool: + """Check if a callable accepts a run_manager argument. + + Args: + callable: The callable to check. + + Returns: + bool: True if the callable accepts a run_manager argument, False otherwise. + """ + try: + return signature(callable).parameters.get("run_manager") is not None + except ValueError: + return False + + +def accepts_config(callable: Callable[..., Any]) -> bool: + """Check if a callable accepts a config argument. + + Args: + callable: The callable to check. + + Returns: + bool: True if the callable accepts a config argument, False otherwise. + """ + try: + return signature(callable).parameters.get("config") is not None + except ValueError: + return False + + +def accepts_context(callable: Callable[..., Any]) -> bool: + """Check if a callable accepts a context argument. + + Args: + callable: The callable to check. + + Returns: + bool: True if the callable accepts a context argument, False otherwise. 
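+
+    For example, ``accepts_context(asyncio.create_task)`` is ``True`` on
+    Python 3.11+, where ``create_task`` gained a ``context`` keyword argument.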
+ """ + try: + return signature(callable).parameters.get("context") is not None + except ValueError: + return False + + +@lru_cache(maxsize=1) +def asyncio_accepts_context() -> bool: + """Cache the result of checking if asyncio.create_task accepts a ``context`` arg.""" + return accepts_context(asyncio.create_task) + + +def coro_with_context( + coro: Awaitable[Any], context: Context, *, create_task: bool = False +) -> Awaitable[Any]: + """Await a coroutine with a context. + + Args: + coro: The coroutine to await. + context: The context to use. + create_task: Whether to create a task. Defaults to False. + + Returns: + The coroutine with the context. + """ + if asyncio_accepts_context(): + return asyncio.create_task(coro, context=context) # type: ignore[arg-type,call-arg,unused-ignore] + if create_task: + return asyncio.create_task(coro) # type: ignore[arg-type] + return coro + + +class IsLocalDict(ast.NodeVisitor): + """Check if a name is a local dict.""" + + def __init__(self, name: str, keys: set[str]) -> None: + """Initialize the visitor. + + Args: + name: The name to check. + keys: The keys to populate. + """ + self.name = name + self.keys = keys + + @override + def visit_Subscript(self, node: ast.Subscript) -> Any: + """Visit a subscript node. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. + """ + if ( + isinstance(node.ctx, ast.Load) + and isinstance(node.value, ast.Name) + and node.value.id == self.name + and isinstance(node.slice, ast.Constant) + and isinstance(node.slice.value, str) + ): + # we've found a subscript access on the name we're looking for + self.keys.add(node.slice.value) + + @override + def visit_Call(self, node: ast.Call) -> Any: + """Visit a call node. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. + """ + if ( + isinstance(node.func, ast.Attribute) + and isinstance(node.func.value, ast.Name) + and node.func.value.id == self.name + and node.func.attr == "get" + and len(node.args) in (1, 2) + and isinstance(node.args[0], ast.Constant) + and isinstance(node.args[0].value, str) + ): + # we've found a .get() call on the name we're looking for + self.keys.add(node.args[0].value) + + +class IsFunctionArgDict(ast.NodeVisitor): + """Check if the first argument of a function is a dict.""" + + def __init__(self) -> None: + """Create a IsFunctionArgDict visitor.""" + self.keys: set[str] = set() + + @override + def visit_Lambda(self, node: ast.Lambda) -> Any: + """Visit a lambda function. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. + """ + if not node.args.args: + return + input_arg_name = node.args.args[0].arg + IsLocalDict(input_arg_name, self.keys).visit(node.body) + + @override + def visit_FunctionDef(self, node: ast.FunctionDef) -> Any: + """Visit a function definition. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. + """ + if not node.args.args: + return + input_arg_name = node.args.args[0].arg + IsLocalDict(input_arg_name, self.keys).visit(node) + + @override + def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> Any: + """Visit an async function definition. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. 
+ """ + if not node.args.args: + return + input_arg_name = node.args.args[0].arg + IsLocalDict(input_arg_name, self.keys).visit(node) + + +class NonLocals(ast.NodeVisitor): + """Get nonlocal variables accessed.""" + + def __init__(self) -> None: + """Create a NonLocals visitor.""" + self.loads: set[str] = set() + self.stores: set[str] = set() + + @override + def visit_Name(self, node: ast.Name) -> Any: + """Visit a name node. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. + """ + if isinstance(node.ctx, ast.Load): + self.loads.add(node.id) + elif isinstance(node.ctx, ast.Store): + self.stores.add(node.id) + + @override + def visit_Attribute(self, node: ast.Attribute) -> Any: + """Visit an attribute node. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. + """ + if isinstance(node.ctx, ast.Load): + parent = node.value + attr_expr = node.attr + while isinstance(parent, ast.Attribute): + attr_expr = parent.attr + "." + attr_expr + parent = parent.value + if isinstance(parent, ast.Name): + self.loads.add(parent.id + "." + attr_expr) + self.loads.discard(parent.id) + elif isinstance(parent, ast.Call): + if isinstance(parent.func, ast.Name): + self.loads.add(parent.func.id) + else: + parent = parent.func + attr_expr = "" + while isinstance(parent, ast.Attribute): + if attr_expr: + attr_expr = parent.attr + "." + attr_expr + else: + attr_expr = parent.attr + parent = parent.value + if isinstance(parent, ast.Name): + self.loads.add(parent.id + "." + attr_expr) + + +class FunctionNonLocals(ast.NodeVisitor): + """Get the nonlocal variables accessed of a function.""" + + def __init__(self) -> None: + """Create a FunctionNonLocals visitor.""" + self.nonlocals: set[str] = set() + + @override + def visit_FunctionDef(self, node: ast.FunctionDef) -> Any: + """Visit a function definition. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. + """ + visitor = NonLocals() + visitor.visit(node) + self.nonlocals.update(visitor.loads - visitor.stores) + + @override + def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> Any: + """Visit an async function definition. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. + """ + visitor = NonLocals() + visitor.visit(node) + self.nonlocals.update(visitor.loads - visitor.stores) + + @override + def visit_Lambda(self, node: ast.Lambda) -> Any: + """Visit a lambda function. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. + """ + visitor = NonLocals() + visitor.visit(node) + self.nonlocals.update(visitor.loads - visitor.stores) + + +class GetLambdaSource(ast.NodeVisitor): + """Get the source code of a lambda function.""" + + def __init__(self) -> None: + """Initialize the visitor.""" + self.source: Optional[str] = None + self.count = 0 + + @override + def visit_Lambda(self, node: ast.Lambda) -> Any: + """Visit a lambda function. + + Args: + node: The node to visit. + + Returns: + Any: The result of the visit. + """ + self.count += 1 + if hasattr(ast, "unparse"): + self.source = ast.unparse(node) + + +def get_function_first_arg_dict_keys(func: Callable) -> Optional[list[str]]: + """Get the keys of the first argument of a function if it is a dict. + + Args: + func: The function to check. + + Returns: + Optional[list[str]]: The keys of the first argument if it is a dict, + None otherwise. 
+ """ + try: + code = inspect.getsource(func) + tree = ast.parse(textwrap.dedent(code)) + visitor = IsFunctionArgDict() + visitor.visit(tree) + return sorted(visitor.keys) if visitor.keys else None + except (SyntaxError, TypeError, OSError, SystemError): + return None + + +def get_lambda_source(func: Callable) -> Optional[str]: + """Get the source code of a lambda function. + + Args: + func: a Callable that can be a lambda function. + + Returns: + str: the source code of the lambda function. + """ + try: + name = func.__name__ if func.__name__ != "" else None + except AttributeError: + name = None + try: + code = inspect.getsource(func) + tree = ast.parse(textwrap.dedent(code)) + visitor = GetLambdaSource() + visitor.visit(tree) + except (SyntaxError, TypeError, OSError, SystemError): + return name + return visitor.source if visitor.count == 1 else name + + +@lru_cache(maxsize=256) +def get_function_nonlocals(func: Callable) -> list[Any]: + """Get the nonlocal variables accessed by a function. + + Args: + func: The function to check. + + Returns: + list[Any]: The nonlocal variables accessed by the function. + """ + try: + code = inspect.getsource(func) + tree = ast.parse(textwrap.dedent(code)) + visitor = FunctionNonLocals() + visitor.visit(tree) + values: list[Any] = [] + closure = ( + inspect.getclosurevars(func.__wrapped__) + if hasattr(func, "__wrapped__") and callable(func.__wrapped__) + else inspect.getclosurevars(func) + ) + candidates = {**closure.globals, **closure.nonlocals} + for k, v in candidates.items(): + if k in visitor.nonlocals: + values.append(v) + for kk in visitor.nonlocals: + if "." in kk and kk.startswith(k): + vv = v + for part in kk.split(".")[1:]: + if vv is None: + break + try: + vv = getattr(vv, part) + except AttributeError: + break + else: + values.append(vv) + except (SyntaxError, TypeError, OSError, SystemError): + return [] + + return values + + +def indent_lines_after_first(text: str, prefix: str) -> str: + """Indent all lines of text after the first line. + + Args: + text: The text to indent. + prefix: Used to determine the number of spaces to indent. + + Returns: + str: The indented text. + """ + n_spaces = len(prefix) + spaces = " " * n_spaces + lines = text.splitlines() + return "\n".join([lines[0]] + [spaces + line for line in lines[1:]]) + + +class AddableDict(dict[str, Any]): + """Dictionary that can be added to another dictionary.""" + + def __add__(self, other: AddableDict) -> AddableDict: + """Add a dictionary to this dictionary. + + Args: + other: The other dictionary to add. + """ + chunk = AddableDict(self) + for key in other: + if key not in chunk or chunk[key] is None: + chunk[key] = other[key] + elif other[key] is not None: + try: + added = chunk[key] + other[key] + except TypeError: + added = other[key] + chunk[key] = added + return chunk + + def __radd__(self, other: AddableDict) -> AddableDict: + """Add this dictionary to another dictionary. + + Args: + other: The other dictionary to be added to. 
+ """ + chunk = AddableDict(other) + for key in self: + if key not in chunk or chunk[key] is None: + chunk[key] = self[key] + elif self[key] is not None: + try: + added = chunk[key] + self[key] + except TypeError: + added = self[key] + chunk[key] = added + return chunk + + +_T_co = TypeVar("_T_co", covariant=True) +_T_contra = TypeVar("_T_contra", contravariant=True) + + +class SupportsAdd(Protocol[_T_contra, _T_co]): + """Protocol for objects that support addition.""" + + def __add__(self, x: _T_contra, /) -> _T_co: + """Add the object to another object.""" + + +Addable = TypeVar("Addable", bound=SupportsAdd[Any, Any]) + + +def add(addables: Iterable[Addable]) -> Optional[Addable]: + """Add a sequence of addable objects together. + + Args: + addables: The addable objects to add. + + Returns: + Optional[Addable]: The result of adding the addable objects. + """ + final: Optional[Addable] = None + for chunk in addables: + final = chunk if final is None else final + chunk + return final + + +async def aadd(addables: AsyncIterable[Addable]) -> Optional[Addable]: + """Asynchronously add a sequence of addable objects together. + + Args: + addables: The addable objects to add. + + Returns: + Optional[Addable]: The result of adding the addable objects. + """ + final: Optional[Addable] = None + async for chunk in addables: + final = chunk if final is None else final + chunk + return final + + +class ConfigurableField(NamedTuple): + """Field that can be configured by the user. + + Parameters: + id: The unique identifier of the field. + name: The name of the field. Defaults to None. + description: The description of the field. Defaults to None. + annotation: The annotation of the field. Defaults to None. + is_shared: Whether the field is shared. Defaults to False. + """ + + id: str + + name: Optional[str] = None + description: Optional[str] = None + annotation: Optional[Any] = None + is_shared: bool = False + + @override + def __hash__(self) -> int: + return hash((self.id, self.annotation)) + + +class ConfigurableFieldSingleOption(NamedTuple): + """Field that can be configured by the user with a default value. + + Parameters: + id: The unique identifier of the field. + options: The options for the field. + default: The default value for the field. + name: The name of the field. Defaults to None. + description: The description of the field. Defaults to None. + is_shared: Whether the field is shared. Defaults to False. + """ + + id: str + options: Mapping[str, Any] + default: str + + name: Optional[str] = None + description: Optional[str] = None + is_shared: bool = False + + @override + def __hash__(self) -> int: + return hash((self.id, tuple(self.options.keys()), self.default)) + + +class ConfigurableFieldMultiOption(NamedTuple): + """Field that can be configured by the user with multiple default values. + + Parameters: + id: The unique identifier of the field. + options: The options for the field. + default: The default values for the field. + name: The name of the field. Defaults to None. + description: The description of the field. Defaults to None. + is_shared: Whether the field is shared. Defaults to False. 
+ """ + + id: str + options: Mapping[str, Any] + default: Sequence[str] + + name: Optional[str] = None + description: Optional[str] = None + is_shared: bool = False + + @override + def __hash__(self) -> int: + return hash((self.id, tuple(self.options.keys()), tuple(self.default))) + + +AnyConfigurableField = Union[ + ConfigurableField, ConfigurableFieldSingleOption, ConfigurableFieldMultiOption +] + + +class ConfigurableFieldSpec(NamedTuple): + """Field that can be configured by the user. It is a specification of a field. + + Parameters: + id: The unique identifier of the field. + annotation: The annotation of the field. + name: The name of the field. Defaults to None. + description: The description of the field. Defaults to None. + default: The default value for the field. Defaults to None. + is_shared: Whether the field is shared. Defaults to False. + dependencies: The dependencies of the field. Defaults to None. + """ + + id: str + annotation: Any + + name: Optional[str] = None + description: Optional[str] = None + default: Any = None + is_shared: bool = False + dependencies: Optional[list[str]] = None + + +def get_unique_config_specs( + specs: Iterable[ConfigurableFieldSpec], +) -> list[ConfigurableFieldSpec]: + """Get the unique config specs from a sequence of config specs. + + Args: + specs: The config specs. + + Returns: + list[ConfigurableFieldSpec]: The unique config specs. + + Raises: + ValueError: If the runnable sequence contains conflicting config specs. + """ + grouped = groupby( + sorted(specs, key=lambda s: (s.id, *(s.dependencies or []))), lambda s: s.id + ) + unique: list[ConfigurableFieldSpec] = [] + for id, dupes in grouped: + first = next(dupes) + others = list(dupes) + if len(others) == 0 or all(o == first for o in others): + unique.append(first) + else: + msg = ( + "RunnableSequence contains conflicting config specs" + f"for {id}: {[first] + others}" + ) + raise ValueError(msg) + return unique + + +class _RootEventFilter: + def __init__( + self, + *, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + ) -> None: + """Utility to filter the root event in the astream_events implementation. + + This is simply binding the arguments to the namespace to make save on + a bit of typing in the astream_events implementation. 
+ """ + self.include_names = include_names + self.include_types = include_types + self.include_tags = include_tags + self.exclude_names = exclude_names + self.exclude_types = exclude_types + self.exclude_tags = exclude_tags + + def include_event(self, event: StreamEvent, root_type: str) -> bool: + """Determine whether to include an event.""" + if ( + self.include_names is None + and self.include_types is None + and self.include_tags is None + ): + include = True + else: + include = False + + event_tags = event.get("tags") or [] + + if self.include_names is not None: + include = include or event["name"] in self.include_names + if self.include_types is not None: + include = include or root_type in self.include_types + if self.include_tags is not None: + include = include or any(tag in self.include_tags for tag in event_tags) + + if self.exclude_names is not None: + include = include and event["name"] not in self.exclude_names + if self.exclude_types is not None: + include = include and root_type not in self.exclude_types + if self.exclude_tags is not None: + include = include and all( + tag not in self.exclude_tags for tag in event_tags + ) + + return include + + +def is_async_generator( + func: Any, +) -> TypeGuard[Callable[..., AsyncIterator]]: + """Check if a function is an async generator. + + Args: + func: The function to check. + + Returns: + TypeGuard[Callable[..., AsyncIterator]: True if the function is + an async generator, False otherwise. + """ + return ( + inspect.isasyncgenfunction(func) + or hasattr(func, "__call__") # noqa: B004 + and inspect.isasyncgenfunction(func.__call__) + ) + + +def is_async_callable( + func: Any, +) -> TypeGuard[Callable[..., Awaitable]]: + """Check if a function is async. + + Args: + func: The function to check. + + Returns: + TypeGuard[Callable[..., Awaitable]: True if the function is async, + False otherwise. + """ + return ( + asyncio.iscoroutinefunction(func) + or hasattr(func, "__call__") # noqa: B004 + and asyncio.iscoroutinefunction(func.__call__) + ) diff --git a/venv/Lib/site-packages/langchain_core/stores.py b/venv/Lib/site-packages/langchain_core/stores.py new file mode 100644 index 00000000..2d40a633 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/stores.py @@ -0,0 +1,334 @@ +"""**Store** implements the key-value stores and storage helpers. + +Module provides implementations of various key-value stores that conform +to a simple key-value interface. + +The primary goal of these storages is to support implementation of caching. +""" + +from abc import ABC, abstractmethod +from collections.abc import AsyncIterator, Iterator, Sequence +from typing import ( + Any, + Generic, + Optional, + TypeVar, + Union, +) + +from langchain_core.exceptions import LangChainException +from langchain_core.runnables import run_in_executor + +K = TypeVar("K") +V = TypeVar("V") + + +class BaseStore(Generic[K, V], ABC): + """Abstract interface for a key-value store. + + This is an interface that's meant to abstract away the details of + different key-value stores. It provides a simple interface for + getting, setting, and deleting key-value pairs. + + The basic methods are `mget`, `mset`, and `mdelete` for getting, + setting, and deleting multiple key-value pairs at once. The `yield_keys` + method is used to iterate over keys that match a given prefix. + + The async versions of these methods are also provided, which are + meant to be used in async contexts. 
The async methods are named with + an `a` prefix, e.g., `amget`, `amset`, `amdelete`, and `ayield_keys`. + + By default, the `amget`, `amset`, `amdelete`, and `ayield_keys` methods + are implemented using the synchronous methods. If the store can natively + support async operations, it should override these methods. + + By design the methods only accept batches of keys and values, and not + single keys or values. This is done to force user code to work with batches + which will usually be more efficient by saving on round trips to the store. + + Examples: + + .. code-block:: python + + from langchain.storage import BaseStore + + class MyInMemoryStore(BaseStore[str, int]): + + def __init__(self): + self.store = {} + + def mget(self, keys): + return [self.store.get(key) for key in keys] + + def mset(self, key_value_pairs): + for key, value in key_value_pairs: + self.store[key] = value + + def mdelete(self, keys): + for key in keys: + if key in self.store: + del self.store[key] + + def yield_keys(self, prefix=None): + if prefix is None: + yield from self.store.keys() + else: + for key in self.store.keys(): + if key.startswith(prefix): + yield key + """ + + @abstractmethod + def mget(self, keys: Sequence[K]) -> list[Optional[V]]: + """Get the values associated with the given keys. + + Args: + keys (Sequence[K]): A sequence of keys. + + Returns: + A sequence of optional values associated with the keys. + If a key is not found, the corresponding value will be None. + """ + + async def amget(self, keys: Sequence[K]) -> list[Optional[V]]: + """Async get the values associated with the given keys. + + Args: + keys (Sequence[K]): A sequence of keys. + + Returns: + A sequence of optional values associated with the keys. + If a key is not found, the corresponding value will be None. + """ + return await run_in_executor(None, self.mget, keys) + + @abstractmethod + def mset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None: + """Set the values for the given keys. + + Args: + key_value_pairs (Sequence[tuple[K, V]]): A sequence of key-value pairs. + """ + + async def amset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None: + """Async set the values for the given keys. + + Args: + key_value_pairs (Sequence[tuple[K, V]]): A sequence of key-value pairs. + """ + return await run_in_executor(None, self.mset, key_value_pairs) + + @abstractmethod + def mdelete(self, keys: Sequence[K]) -> None: + """Delete the given keys and their associated values. + + Args: + keys (Sequence[K]): A sequence of keys to delete. + """ + + async def amdelete(self, keys: Sequence[K]) -> None: + """Async delete the given keys and their associated values. + + Args: + keys (Sequence[K]): A sequence of keys to delete. + """ + return await run_in_executor(None, self.mdelete, keys) + + @abstractmethod + def yield_keys( + self, *, prefix: Optional[str] = None + ) -> Union[Iterator[K], Iterator[str]]: + """Get an iterator over keys that match the given prefix. + + Args: + prefix (str): The prefix to match. + + Yields: + Iterator[K | str]: An iterator over keys that match the given prefix. + This method is allowed to return an iterator over either K or str + depending on what makes more sense for the given store. + """ + + async def ayield_keys( + self, *, prefix: Optional[str] = None + ) -> Union[AsyncIterator[K], AsyncIterator[str]]: + """Async get an iterator over keys that match the given prefix. + + Args: + prefix (str): The prefix to match. + + Yields: + Iterator[K | str]: An iterator over keys that match the given prefix. 
+ This method is allowed to return an iterator over either K or str + depending on what makes more sense for the given store. + """ + iterator = await run_in_executor(None, self.yield_keys, prefix=prefix) + done = object() + while True: + item = await run_in_executor(None, lambda it: next(it, done), iterator) + if item is done: + break + yield item # type: ignore[misc] + + +ByteStore = BaseStore[str, bytes] + + +class InMemoryBaseStore(BaseStore[str, V], Generic[V]): + """In-memory implementation of the BaseStore using a dictionary.""" + + def __init__(self) -> None: + """Initialize an empty store.""" + self.store: dict[str, V] = {} + + def mget(self, keys: Sequence[str]) -> list[Optional[V]]: + """Get the values associated with the given keys. + + Args: + keys (Sequence[str]): A sequence of keys. + + Returns: + A sequence of optional values associated with the keys. + If a key is not found, the corresponding value will be None. + """ + return [self.store.get(key) for key in keys] + + async def amget(self, keys: Sequence[str]) -> list[Optional[V]]: + """Async get the values associated with the given keys. + + Args: + keys (Sequence[str]): A sequence of keys. + + Returns: + A sequence of optional values associated with the keys. + If a key is not found, the corresponding value will be None. + """ + return self.mget(keys) + + def mset(self, key_value_pairs: Sequence[tuple[str, V]]) -> None: + """Set the values for the given keys. + + Args: + key_value_pairs (Sequence[tuple[str, V]]): A sequence of key-value pairs. + + Returns: + None + """ + for key, value in key_value_pairs: + self.store[key] = value + + async def amset(self, key_value_pairs: Sequence[tuple[str, V]]) -> None: + """Async set the values for the given keys. + + Args: + key_value_pairs (Sequence[tuple[str, V]]): A sequence of key-value pairs. + + Returns: + None + """ + return self.mset(key_value_pairs) + + def mdelete(self, keys: Sequence[str]) -> None: + """Delete the given keys and their associated values. + + Args: + keys (Sequence[str]): A sequence of keys to delete. + """ + for key in keys: + if key in self.store: + del self.store[key] + + async def amdelete(self, keys: Sequence[str]) -> None: + """Async delete the given keys and their associated values. + + Args: + keys (Sequence[str]): A sequence of keys to delete. + """ + self.mdelete(keys) + + def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]: + """Get an iterator over keys that match the given prefix. + + Args: + prefix (str, optional): The prefix to match. Defaults to None. + + Yields: + Iterator[str]: An iterator over keys that match the given prefix. + """ + if prefix is None: + yield from self.store.keys() + else: + for key in self.store: + if key.startswith(prefix): + yield key + + async def ayield_keys(self, prefix: Optional[str] = None) -> AsyncIterator[str]: + """Async get an async iterator over keys that match the given prefix. + + Args: + prefix (str, optional): The prefix to match. Defaults to None. + + Yields: + AsyncIterator[str]: An async iterator over keys that match the given prefix. + """ + if prefix is None: + for key in self.store: + yield key + else: + for key in self.store: + if key.startswith(prefix): + yield key + + +class InMemoryStore(InMemoryBaseStore[Any]): + """In-memory store for any type of data. + + Attributes: + store (dict[str, Any]): The underlying dictionary that stores + the key-value pairs. + + Examples: + + .. 
code-block:: python + + from langchain.storage import InMemoryStore + + store = InMemoryStore() + store.mset([('key1', 'value1'), ('key2', 'value2')]) + store.mget(['key1', 'key2']) + # ['value1', 'value2'] + store.mdelete(['key1']) + list(store.yield_keys()) + # ['key2'] + list(store.yield_keys(prefix='k')) + # ['key2'] + """ + + +class InMemoryByteStore(InMemoryBaseStore[bytes]): + """In-memory store for bytes. + + Attributes: + store (dict[str, bytes]): The underlying dictionary that stores + the key-value pairs. + + Examples: + + .. code-block:: python + + from langchain.storage import InMemoryByteStore + + store = InMemoryByteStore() + store.mset([('key1', b'value1'), ('key2', b'value2')]) + store.mget(['key1', 'key2']) + # [b'value1', b'value2'] + store.mdelete(['key1']) + list(store.yield_keys()) + # ['key2'] + list(store.yield_keys(prefix='k')) + # ['key2'] + """ + + +class InvalidKeyException(LangChainException): + """Raised when a key is invalid; e.g., uses incorrect characters.""" diff --git a/venv/Lib/site-packages/langchain_core/structured_query.py b/venv/Lib/site-packages/langchain_core/structured_query.py new file mode 100644 index 00000000..746174e5 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/structured_query.py @@ -0,0 +1,201 @@ +"""Internal representation of a structured query language.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from enum import Enum +from typing import TYPE_CHECKING, Any, Optional, Union + +from pydantic import BaseModel + +if TYPE_CHECKING: + from collections.abc import Sequence + + +class Visitor(ABC): + """Defines interface for IR translation using a visitor pattern.""" + + allowed_comparators: Optional[Sequence[Comparator]] = None + """Allowed comparators for the visitor.""" + allowed_operators: Optional[Sequence[Operator]] = None + """Allowed operators for the visitor.""" + + def _validate_func(self, func: Union[Operator, Comparator]) -> None: + if ( + isinstance(func, Operator) + and self.allowed_operators is not None + and func not in self.allowed_operators + ): + msg = ( + f"Received disallowed operator {func}. Allowed " + f"comparators are {self.allowed_operators}" + ) + raise ValueError(msg) + if ( + isinstance(func, Comparator) + and self.allowed_comparators is not None + and func not in self.allowed_comparators + ): + msg = ( + f"Received disallowed comparator {func}. Allowed " + f"comparators are {self.allowed_comparators}" + ) + raise ValueError(msg) + + @abstractmethod + def visit_operation(self, operation: Operation) -> Any: + """Translate an Operation. + + Args: + operation: Operation to translate. + """ + + @abstractmethod + def visit_comparison(self, comparison: Comparison) -> Any: + """Translate a Comparison. + + Args: + comparison: Comparison to translate. + """ + + @abstractmethod + def visit_structured_query(self, structured_query: StructuredQuery) -> Any: + """Translate a StructuredQuery. + + Args: + structured_query: StructuredQuery to translate. + """ + + +def _to_snake_case(name: str) -> str: + """Convert a name into snake_case.""" + snake_case = "" + for i, char in enumerate(name): + if char.isupper() and i != 0: + snake_case += "_" + char.lower() + else: + snake_case += char.lower() + return snake_case + + +class Expr(BaseModel): + """Base class for all expressions.""" + + def accept(self, visitor: Visitor) -> Any: + """Accept a visitor. + + Args: + visitor: visitor to accept. + + Returns: + result of visiting. 
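+
+        Dispatch is by snake-cased class name: visiting a ``Comparison``
+        calls ``visitor.visit_comparison``, and a ``StructuredQuery`` calls
+        ``visitor.visit_structured_query``.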
+ """ + return getattr(visitor, f"visit_{_to_snake_case(self.__class__.__name__)}")( + self + ) + + +class Operator(str, Enum): + """Enumerator of the operations.""" + + AND = "and" + OR = "or" + NOT = "not" + + +class Comparator(str, Enum): + """Enumerator of the comparison operators.""" + + EQ = "eq" + NE = "ne" + GT = "gt" + GTE = "gte" + LT = "lt" + LTE = "lte" + CONTAIN = "contain" + LIKE = "like" + IN = "in" + NIN = "nin" + + +class FilterDirective(Expr, ABC): + """Filtering expression.""" + + +class Comparison(FilterDirective): + """Comparison to a value.""" + + comparator: Comparator + """The comparator to use.""" + attribute: str + """The attribute to compare.""" + value: Any + """The value to compare to.""" + + def __init__( + self, comparator: Comparator, attribute: str, value: Any, **kwargs: Any + ) -> None: + """Create a Comparison. + + Args: + comparator: The comparator to use. + attribute: The attribute to compare. + value: The value to compare to. + """ + # super exists from BaseModel + super().__init__( # type: ignore[call-arg] + comparator=comparator, attribute=attribute, value=value, **kwargs + ) + + +class Operation(FilterDirective): + """Logical operation over other directives.""" + + operator: Operator + """The operator to use.""" + arguments: list[FilterDirective] + """The arguments to the operator.""" + + def __init__( + self, operator: Operator, arguments: list[FilterDirective], **kwargs: Any + ) -> None: + """Create an Operation. + + Args: + operator: The operator to use. + arguments: The arguments to the operator. + """ + # super exists from BaseModel + super().__init__( # type: ignore[call-arg] + operator=operator, arguments=arguments, **kwargs + ) + + +class StructuredQuery(Expr): + """Structured query.""" + + query: str + """Query string.""" + filter: Optional[FilterDirective] + """Filtering expression.""" + limit: Optional[int] + """Limit on the number of results.""" + + def __init__( + self, + query: str, + filter: Optional[FilterDirective], + limit: Optional[int] = None, + **kwargs: Any, + ) -> None: + """Create a StructuredQuery. + + Args: + query: The query string. + filter: The filtering expression. + limit: The limit on the number of results. + """ + # super exists from BaseModel + super().__init__( # type: ignore[call-arg] + query=query, filter=filter, limit=limit, **kwargs + ) diff --git a/venv/Lib/site-packages/langchain_core/sys_info.py b/venv/Lib/site-packages/langchain_core/sys_info.py new file mode 100644 index 00000000..38d53e15 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/sys_info.py @@ -0,0 +1,135 @@ +"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501 + +from collections.abc import Sequence + + +def _get_sub_deps(packages: Sequence[str]) -> list[str]: + """Get any specified sub-dependencies.""" + from importlib import metadata + + sub_deps = set() + _underscored_packages = {pkg.replace("-", "_") for pkg in packages} + + for pkg in packages: + try: + required = metadata.requires(pkg) + except metadata.PackageNotFoundError: + continue + + if not required: + continue + + for req in required: + cleaned_req = req.split(" ")[0] + if cleaned_req.replace("-", "_") not in _underscored_packages: + sub_deps.add(cleaned_req) + + return sorted(sub_deps, key=lambda x: x.lower()) + + +def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None: + """Print information about the environment for debugging purposes. 
+ + Args: + additional_pkgs: Additional packages to include in the output. + """ + import pkgutil + import platform + import sys + from importlib import metadata, util + + # Packages that do not start with "langchain" prefix. + other_langchain_packages = [ + "langserve", + "langsmith", + ] + + langchain_pkgs = [ + name for _, name, _ in pkgutil.iter_modules() if name.startswith("langchain") + ] + + langgraph_pkgs = [ + name for _, name, _ in pkgutil.iter_modules() if name.startswith("langgraph") + ] + + all_packages = sorted( + set( + langchain_pkgs + + langgraph_pkgs + + other_langchain_packages + + list(additional_pkgs) + ) + ) + + # Always surface these packages to the top + order_by = ["langchain_core", "langchain", "langchain_community", "langsmith"] + + for pkg in reversed(order_by): + if pkg in all_packages: + all_packages.remove(pkg) + all_packages = [pkg] + list(all_packages) + + system_info = { + "OS": platform.system(), + "OS Version": platform.version(), + "Python Version": sys.version, + } + print() # noqa: T201 + print("System Information") # noqa: T201 + print("------------------") # noqa: T201 + print("> OS: ", system_info["OS"]) # noqa: T201 + print("> OS Version: ", system_info["OS Version"]) # noqa: T201 + print("> Python Version: ", system_info["Python Version"]) # noqa: T201 + + # Print out only langchain packages + print() # noqa: T201 + print("Package Information") # noqa: T201 + print("-------------------") # noqa: T201 + + not_installed = [] + + for pkg in all_packages: + try: + found_package = util.find_spec(pkg) + except Exception: + found_package = None + if found_package is None: + not_installed.append(pkg) + continue + + # Package version + try: + package_version = metadata.version(pkg) + except Exception: + package_version = None + + # Print package with version + if package_version is not None: + print(f"> {pkg}: {package_version}") # noqa: T201 + else: + print(f"> {pkg}: Installed. No version info available.") # noqa: T201 + + if not_installed: + print() # noqa: T201 + print("Optional packages not installed") # noqa: T201 + print("-------------------------------") # noqa: T201 + for pkg in not_installed: + print(f"> {pkg}") # noqa: T201 + + sub_dependencies = _get_sub_deps(all_packages) + + if sub_dependencies: + print() # noqa: T201 + print("Other Dependencies") # noqa: T201 + print("------------------") # noqa: T201 + + for dep in sub_dependencies: + try: + dep_version = metadata.version(dep) + print(f"> {dep}: {dep_version}") # noqa: T201 + except Exception: + print(f"> {dep}: Installed. No version info available.") # noqa: T201 + + +if __name__ == "__main__": + print_sys_info() diff --git a/venv/Lib/site-packages/langchain_core/tools/__init__.py b/venv/Lib/site-packages/langchain_core/tools/__init__.py new file mode 100644 index 00000000..f13b3167 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tools/__init__.py @@ -0,0 +1,108 @@ +"""**Tools** are classes that an Agent uses to interact with the world. + +Each tool has a **description**. Agent uses the description to choose the right +tool for the job. + +**Class hierarchy:** + +.. code-block:: + + RunnableSerializable --> BaseTool --> Tool # Examples: AIPluginTool, BaseGraphQLTool + # Examples: BraveSearch, HumanInputRun + +**Main helpers:** + +.. 
code-block:: + + CallbackManagerForToolRun, AsyncCallbackManagerForToolRun +""" # noqa: E501 + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.tools.base import ( + FILTERED_ARGS, + ArgsSchema, + BaseTool, + BaseToolkit, + InjectedToolArg, + InjectedToolCallId, + SchemaAnnotationError, + ToolException, + _get_runnable_config_param, + create_schema_from_function, + ) + from langchain_core.tools.convert import ( + convert_runnable_to_tool, + tool, + ) + from langchain_core.tools.render import ( + ToolsRenderer, + render_text_description, + render_text_description_and_args, + ) + from langchain_core.tools.retriever import ( + RetrieverInput, + create_retriever_tool, + ) + from langchain_core.tools.simple import Tool + from langchain_core.tools.structured import StructuredTool + +__all__ = ( + "ArgsSchema", + "BaseTool", + "BaseToolkit", + "FILTERED_ARGS", + "SchemaAnnotationError", + "ToolException", + "InjectedToolArg", + "InjectedToolCallId", + "_get_runnable_config_param", + "create_schema_from_function", + "convert_runnable_to_tool", + "tool", + "ToolsRenderer", + "render_text_description", + "render_text_description_and_args", + "RetrieverInput", + "create_retriever_tool", + "Tool", + "StructuredTool", +) + +_dynamic_imports = { + "FILTERED_ARGS": "base", + "ArgsSchema": "base", + "BaseTool": "base", + "BaseToolkit": "base", + "InjectedToolArg": "base", + "InjectedToolCallId": "base", + "SchemaAnnotationError": "base", + "ToolException": "base", + "_get_runnable_config_param": "base", + "create_schema_from_function": "base", + "convert_runnable_to_tool": "convert", + "tool": "convert", + "ToolsRenderer": "render", + "render_text_description": "render", + "render_text_description_and_args": "render", + "RetrieverInput": "retriever", + "create_retriever_tool": "retriever", + "Tool": "simple", + "StructuredTool": "structured", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/tools/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tools/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..77a63b0c Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tools/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tools/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tools/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..a9b06577 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tools/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tools/__pycache__/convert.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tools/__pycache__/convert.cpython-312.pyc new file mode 100644 index 00000000..423a4c13 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tools/__pycache__/convert.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tools/__pycache__/render.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tools/__pycache__/render.cpython-312.pyc new file mode 100644 index 00000000..392946ca Binary files /dev/null and 
b/venv/Lib/site-packages/langchain_core/tools/__pycache__/render.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tools/__pycache__/retriever.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tools/__pycache__/retriever.cpython-312.pyc new file mode 100644 index 00000000..935f1b02 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tools/__pycache__/retriever.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tools/__pycache__/simple.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tools/__pycache__/simple.cpython-312.pyc new file mode 100644 index 00000000..64137fdd Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tools/__pycache__/simple.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tools/__pycache__/structured.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tools/__pycache__/structured.cpython-312.pyc new file mode 100644 index 00000000..8cc9b96e Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tools/__pycache__/structured.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tools/base.py b/venv/Lib/site-packages/langchain_core/tools/base.py new file mode 100644 index 00000000..daee8604 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tools/base.py @@ -0,0 +1,1162 @@ +"""Base for Tools.""" + +from __future__ import annotations + +import functools +import inspect +import json +import warnings +from abc import ABC, abstractmethod +from inspect import signature +from typing import ( + TYPE_CHECKING, + Annotated, + Any, + Callable, + Literal, + Optional, + TypeVar, + Union, + cast, + get_args, + get_origin, + get_type_hints, +) + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + PydanticDeprecationWarning, + SkipValidation, + ValidationError, + model_validator, + validate_arguments, +) +from pydantic.v1 import BaseModel as BaseModelV1 +from pydantic.v1 import ValidationError as ValidationErrorV1 +from pydantic.v1 import validate_arguments as validate_arguments_v1 +from typing_extensions import override + +from langchain_core._api import deprecated +from langchain_core.callbacks import ( + AsyncCallbackManager, + BaseCallbackManager, + CallbackManager, + Callbacks, +) +from langchain_core.messages.tool import ToolCall, ToolMessage, ToolOutputMixin +from langchain_core.runnables import ( + RunnableConfig, + RunnableSerializable, + ensure_config, + patch_config, + run_in_executor, +) +from langchain_core.runnables.config import set_config_context +from langchain_core.runnables.utils import coro_with_context +from langchain_core.utils.function_calling import ( + _parse_google_docstring, + _py_38_safe_origin, +) +from langchain_core.utils.pydantic import ( + TypeBaseModel, + _create_subset_model, + get_fields, + is_basemodel_subclass, + is_pydantic_v1_subclass, + is_pydantic_v2_subclass, +) + +if TYPE_CHECKING: + import uuid + from collections.abc import Sequence + +FILTERED_ARGS = ("run_manager", "callbacks") + + +class SchemaAnnotationError(TypeError): + """Raised when 'args_schema' is missing or has an incorrect type annotation.""" + + +def _is_annotated_type(typ: type[Any]) -> bool: + return get_origin(typ) is Annotated + + +def _get_annotation_description(arg_type: type) -> str | None: + if _is_annotated_type(arg_type): + annotated_args = get_args(arg_type) + for annotation in annotated_args[1:]: + if isinstance(annotation, str): + return annotation + return None + + +def _get_filtered_args( + 
inferred_model: type[BaseModel],
+    func: Callable,
+    *,
+    filter_args: Sequence[str],
+    include_injected: bool = True,
+) -> dict:
+    """Get the arguments from a function's signature."""
+    schema = inferred_model.model_json_schema()["properties"]
+    valid_keys = signature(func).parameters
+    return {
+        k: schema[k]
+        for i, (k, param) in enumerate(valid_keys.items())
+        if k not in filter_args
+        and (i > 0 or param.name not in ("self", "cls"))
+        and (include_injected or not _is_injected_arg_type(param.annotation))
+    }
+
+
+def _parse_python_function_docstring(
+    function: Callable, annotations: dict, *, error_on_invalid_docstring: bool = False
+) -> tuple[str, dict]:
+    """Parse the function and argument descriptions from the docstring of a function.
+
+    Assumes the function docstring follows Google Python style guide.
+    """
+    docstring = inspect.getdoc(function)
+    return _parse_google_docstring(
+        docstring,
+        list(annotations),
+        error_on_invalid_docstring=error_on_invalid_docstring,
+    )
+
+
+def _validate_docstring_args_against_annotations(
+    arg_descriptions: dict, annotations: dict
+) -> None:
+    """Raise error if docstring arg is not in type annotations."""
+    for docstring_arg in arg_descriptions:
+        if docstring_arg not in annotations:
+            msg = f"Arg {docstring_arg} in docstring not found in function signature."
+            raise ValueError(msg)
+
+
+def _infer_arg_descriptions(
+    fn: Callable,
+    *,
+    parse_docstring: bool = False,
+    error_on_invalid_docstring: bool = False,
+) -> tuple[str, dict]:
+    """Infer argument descriptions from a function's docstring."""
+    if hasattr(inspect, "get_annotations"):
+        # inspect.get_annotations is only available on Python 3.10+
+        annotations = inspect.get_annotations(fn)
+    else:
+        annotations = getattr(fn, "__annotations__", {})
+    if parse_docstring:
+        description, arg_descriptions = _parse_python_function_docstring(
+            fn, annotations, error_on_invalid_docstring=error_on_invalid_docstring
+        )
+    else:
+        description = inspect.getdoc(fn) or ""
+        arg_descriptions = {}
+    if parse_docstring:
+        _validate_docstring_args_against_annotations(arg_descriptions, annotations)
+    for arg, arg_type in annotations.items():
+        if arg in arg_descriptions:
+            continue
+        if desc := _get_annotation_description(arg_type):
+            arg_descriptions[arg] = desc
+    return description, arg_descriptions
+
+
+def _is_pydantic_annotation(annotation: Any, pydantic_version: str = "v2") -> bool:
+    """Determine if a type annotation is a Pydantic model."""
+    base_model_class = BaseModelV1 if pydantic_version == "v1" else BaseModel
+    try:
+        return issubclass(annotation, base_model_class)
+    except TypeError:
+        return False
+
+
+def _function_annotations_are_pydantic_v1(
+    signature: inspect.Signature, func: Callable
+) -> bool:
+    """Determine if all Pydantic annotations in a function signature are from V1."""
+    any_v1_annotations = any(
+        _is_pydantic_annotation(parameter.annotation, pydantic_version="v1")
+        for parameter in signature.parameters.values()
+    )
+    any_v2_annotations = any(
+        _is_pydantic_annotation(parameter.annotation, pydantic_version="v2")
+        for parameter in signature.parameters.values()
+    )
+    if any_v1_annotations and any_v2_annotations:
+        msg = (
+            f"Function {func} contains a mix of Pydantic v1 and v2 annotations. "
+            "Only one version of Pydantic annotations per function is supported."
+        )
+        raise NotImplementedError(msg)
+    return any_v1_annotations and not any_v2_annotations
+
+
+class _SchemaConfig:
+    """Configuration for the pydantic model.
+ + This is used to configure the pydantic model created from + a function's signature. + + Parameters: + extra: Whether to allow extra fields in the model. + arbitrary_types_allowed: Whether to allow arbitrary types in the model. + Defaults to True. + """ + + extra: str = "forbid" + arbitrary_types_allowed: bool = True + + +def create_schema_from_function( + model_name: str, + func: Callable, + *, + filter_args: Optional[Sequence[str]] = None, + parse_docstring: bool = False, + error_on_invalid_docstring: bool = False, + include_injected: bool = True, +) -> type[BaseModel]: + """Create a pydantic schema from a function's signature. + + Args: + model_name: Name to assign to the generated pydantic schema. + func: Function to generate the schema from. + filter_args: Optional list of arguments to exclude from the schema. + Defaults to FILTERED_ARGS. + parse_docstring: Whether to parse the function's docstring for descriptions + for each argument. Defaults to False. + error_on_invalid_docstring: if ``parse_docstring`` is provided, configure + whether to raise ValueError on invalid Google Style docstrings. + Defaults to False. + include_injected: Whether to include injected arguments in the schema. + Defaults to True, since we want to include them in the schema + when *validating* tool inputs. + + Returns: + A pydantic model with the same arguments as the function. + """ + sig = inspect.signature(func) + + if _function_annotations_are_pydantic_v1(sig, func): + validated = validate_arguments_v1(func, config=_SchemaConfig) # type: ignore[call-overload] + else: + # https://docs.pydantic.dev/latest/usage/validation_decorator/ + with warnings.catch_warnings(): + # We are using deprecated functionality here. + # This code should be re-written to simply construct a pydantic model + # using inspect.signature and create_model. + warnings.simplefilter("ignore", category=PydanticDeprecationWarning) + validated = validate_arguments(func, config=_SchemaConfig) # type: ignore[operator] + + # Let's ignore `self` and `cls` arguments for class and instance methods + # If qualified name has a ".", then it likely belongs in a class namespace + in_class = bool(func.__qualname__ and "." 
in func.__qualname__) + + has_args = False + has_kwargs = False + + for param in sig.parameters.values(): + if param.kind == param.VAR_POSITIONAL: + has_args = True + elif param.kind == param.VAR_KEYWORD: + has_kwargs = True + + inferred_model = validated.model + + if filter_args: + filter_args_ = filter_args + else: + # Handle classmethods and instance methods + existing_params: list[str] = list(sig.parameters.keys()) + if existing_params and existing_params[0] in ("self", "cls") and in_class: + filter_args_ = [existing_params[0]] + list(FILTERED_ARGS) + else: + filter_args_ = list(FILTERED_ARGS) + + for existing_param in existing_params: + if not include_injected and _is_injected_arg_type( + sig.parameters[existing_param].annotation + ): + filter_args_.append(existing_param) + + description, arg_descriptions = _infer_arg_descriptions( + func, + parse_docstring=parse_docstring, + error_on_invalid_docstring=error_on_invalid_docstring, + ) + # Pydantic adds placeholder virtual fields we need to strip + valid_properties = [] + for field in get_fields(inferred_model): + if not has_args and field == "args": + continue + if not has_kwargs and field == "kwargs": + continue + + if field == "v__duplicate_kwargs": # Internal pydantic field + continue + + if field not in filter_args_: + valid_properties.append(field) + + return _create_subset_model( + model_name, + inferred_model, + list(valid_properties), + descriptions=arg_descriptions, + fn_description=description, + ) + + +class ToolException(Exception): # noqa: N818 + """Optional exception that tool throws when execution error occurs. + + When this exception is thrown, the agent will not stop working, + but it will handle the exception according to the handle_tool_error + variable of the tool, and the processing result will be returned + to the agent as observation, and printed in red on the console. + """ + + +ArgsSchema = Union[TypeBaseModel, dict[str, Any]] + + +class BaseTool(RunnableSerializable[Union[str, dict, ToolCall], Any]): + """Interface LangChain tools must implement.""" + + def __init_subclass__(cls, **kwargs: Any) -> None: + """Create the definition of the new tool class.""" + super().__init_subclass__(**kwargs) + + args_schema_type = cls.__annotations__.get("args_schema", None) + + if args_schema_type is not None and args_schema_type == BaseModel: + # Throw errors for common mis-annotations. + # TODO: Use get_args / get_origin and fully + # specify valid annotations. + typehint_mandate = """ +class ChildTool(BaseTool): + ... + args_schema: Type[BaseModel] = SchemaClass + ...""" + name = cls.__name__ + msg = ( + f"Tool definition for {name} must include valid type annotations" + f" for argument 'args_schema' to behave as expected.\n" + f"Expected annotation of 'Type[BaseModel]'" + f" but got '{args_schema_type}'.\n" + f"Expected class looks like:\n" + f"{typehint_mandate}" + ) + raise SchemaAnnotationError(msg) + + name: str + """The unique name of the tool that clearly communicates its purpose.""" + description: str + """Used to tell the model how/when/why to use the tool. + + You can provide few-shot examples as a part of the description. + """ + + args_schema: Annotated[Optional[ArgsSchema], SkipValidation()] = Field( + default=None, description="The tool schema." + ) + """Pydantic model class to validate and parse the tool's input arguments. + + Args schema should be either: + + - A subclass of pydantic.BaseModel. 
+ or + - A subclass of pydantic.v1.BaseModel if accessing v1 namespace in pydantic 2 + or + - a JSON schema dict + """ + return_direct: bool = False + """Whether to return the tool's output directly. + + Setting this to True means + that after the tool is called, the AgentExecutor will stop looping. + """ + verbose: bool = False + """Whether to log the tool's progress.""" + + callbacks: Callbacks = Field(default=None, exclude=True) + """Callbacks to be called during tool execution.""" + + callback_manager: Optional[BaseCallbackManager] = deprecated( + name="callback_manager", since="0.1.7", removal="1.0", alternative="callbacks" + )( + Field( + default=None, + exclude=True, + description="Callback manager to add to the run trace.", + ) + ) + tags: Optional[list[str]] = None + """Optional list of tags associated with the tool. Defaults to None. + These tags will be associated with each call to this tool, + and passed as arguments to the handlers defined in `callbacks`. + You can use these to eg identify a specific instance of a tool with its use case. + """ + metadata: Optional[dict[str, Any]] = None + """Optional metadata associated with the tool. Defaults to None. + This metadata will be associated with each call to this tool, + and passed as arguments to the handlers defined in `callbacks`. + You can use these to eg identify a specific instance of a tool with its use case. + """ + + handle_tool_error: Optional[Union[bool, str, Callable[[ToolException], str]]] = ( + False + ) + """Handle the content of the ToolException thrown.""" + + handle_validation_error: Optional[ + Union[bool, str, Callable[[Union[ValidationError, ValidationErrorV1]], str]] + ] = False + """Handle the content of the ValidationError thrown.""" + + response_format: Literal["content", "content_and_artifact"] = "content" + """The tool response format. Defaults to 'content'. + + If "content" then the output of the tool is interpreted as the contents of a + ToolMessage. If "content_and_artifact" then the output is expected to be a + two-tuple corresponding to the (content, artifact) of a ToolMessage. + """ + + def __init__(self, **kwargs: Any) -> None: + """Initialize the tool.""" + if ( + "args_schema" in kwargs + and kwargs["args_schema"] is not None + and not is_basemodel_subclass(kwargs["args_schema"]) + and not isinstance(kwargs["args_schema"], dict) + ): + msg = ( + "args_schema must be a subclass of pydantic BaseModel or " + f"a JSON schema dict. Got: {kwargs['args_schema']}." 
+ ) + raise TypeError(msg) + super().__init__(**kwargs) + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @property + def is_single_input(self) -> bool: + """Whether the tool only accepts a single input.""" + keys = {k for k in self.args if k != "kwargs"} + return len(keys) == 1 + + @property + def args(self) -> dict: + """The arguments of the tool.""" + if isinstance(self.args_schema, dict): + json_schema = self.args_schema + else: + input_schema = self.get_input_schema() + json_schema = input_schema.model_json_schema() + return json_schema["properties"] + + @property + def tool_call_schema(self) -> ArgsSchema: + """The schema for a tool call.""" + if isinstance(self.args_schema, dict): + if self.description: + return { + **self.args_schema, + "description": self.description, + } + + return self.args_schema + + full_schema = self.get_input_schema() + fields = [] + for name, type_ in get_all_basemodel_annotations(full_schema).items(): + if not _is_injected_arg_type(type_): + fields.append(name) + return _create_subset_model( + self.name, full_schema, fields, fn_description=self.description + ) + + # --- Runnable --- + + @override + def get_input_schema( + self, config: Optional[RunnableConfig] = None + ) -> type[BaseModel]: + """The tool's input schema. + + Args: + config: The configuration for the tool. + + Returns: + The input schema for the tool. + """ + if self.args_schema is not None: + if isinstance(self.args_schema, dict): + return super().get_input_schema(config) + return self.args_schema + return create_schema_from_function(self.name, self._run) + + @override + def invoke( + self, + input: Union[str, dict, ToolCall], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Any: + tool_input, kwargs = _prep_run_args(input, config, **kwargs) + return self.run(tool_input, **kwargs) + + @override + async def ainvoke( + self, + input: Union[str, dict, ToolCall], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Any: + tool_input, kwargs = _prep_run_args(input, config, **kwargs) + return await self.arun(tool_input, **kwargs) + + # --- Tool --- + + def _parse_input( + self, tool_input: Union[str, dict], tool_call_id: Optional[str] + ) -> Union[str, dict[str, Any]]: + """Convert tool input to a pydantic model. + + Args: + tool_input: The input to the tool. + tool_call_id: The id of the tool call. + """ + input_args = self.args_schema + if isinstance(tool_input, str): + if input_args is not None: + if isinstance(input_args, dict): + msg = ( + "String tool inputs are not allowed when " + "using tools with JSON schema args_schema." 
+ ) + raise ValueError(msg) + key_ = next(iter(get_fields(input_args).keys())) + if hasattr(input_args, "model_validate"): + input_args.model_validate({key_: tool_input}) + else: + input_args.parse_obj({key_: tool_input}) + return tool_input + if input_args is not None: + if isinstance(input_args, dict): + return tool_input + if issubclass(input_args, BaseModel): + for k, v in get_all_basemodel_annotations(input_args).items(): + if ( + _is_injected_arg_type(v, injected_type=InjectedToolCallId) + and k not in tool_input + ): + if tool_call_id is None: + msg = ( + "When tool includes an InjectedToolCallId " + "argument, tool must always be invoked with a full " + "model ToolCall of the form: {'args': {...}, " + "'name': '...', 'type': 'tool_call', " + "'tool_call_id': '...'}" + ) + raise ValueError(msg) + tool_input[k] = tool_call_id + result = input_args.model_validate(tool_input) + result_dict = result.model_dump() + elif issubclass(input_args, BaseModelV1): + for k, v in get_all_basemodel_annotations(input_args).items(): + if ( + _is_injected_arg_type(v, injected_type=InjectedToolCallId) + and k not in tool_input + ): + if tool_call_id is None: + msg = ( + "When tool includes an InjectedToolCallId " + "argument, tool must always be invoked with a full " + "model ToolCall of the form: {'args': {...}, " + "'name': '...', 'type': 'tool_call', " + "'tool_call_id': '...'}" + ) + raise ValueError(msg) + tool_input[k] = tool_call_id + result = input_args.parse_obj(tool_input) + result_dict = result.dict() + else: + msg = ( + f"args_schema must be a Pydantic BaseModel, got {self.args_schema}" + ) + raise NotImplementedError(msg) + return { + k: getattr(result, k) for k, v in result_dict.items() if k in tool_input + } + return tool_input + + @model_validator(mode="before") + @classmethod + def raise_deprecation(cls, values: dict) -> Any: + """Raise deprecation warning if callback_manager is used. + + Args: + values: The values to validate. + + Returns: + The validated values. + """ + if values.get("callback_manager") is not None: + warnings.warn( + "callback_manager is deprecated. Please use callbacks instead.", + DeprecationWarning, + stacklevel=6, + ) + values["callbacks"] = values.pop("callback_manager", None) + return values + + @abstractmethod + def _run(self, *args: Any, **kwargs: Any) -> Any: + """Use the tool. + + Add run_manager: Optional[CallbackManagerForToolRun] = None + to child implementations to enable tracing. + """ + + async def _arun(self, *args: Any, **kwargs: Any) -> Any: + """Use the tool asynchronously. + + Add run_manager: Optional[AsyncCallbackManagerForToolRun] = None + to child implementations to enable tracing. + """ + if kwargs.get("run_manager") and signature(self._run).parameters.get( + "run_manager" + ): + kwargs["run_manager"] = kwargs["run_manager"].get_sync() + return await run_in_executor(None, self._run, *args, **kwargs) + + def _to_args_and_kwargs( + self, tool_input: Union[str, dict], tool_call_id: Optional[str] + ) -> tuple[tuple, dict]: + if ( + self.args_schema is not None + and isinstance(self.args_schema, type) + and is_basemodel_subclass(self.args_schema) + and not get_fields(self.args_schema) + ): + # StructuredTool with no args + return (), {} + tool_input = self._parse_input(tool_input, tool_call_id) + # For backwards compatibility, if run_input is a string, + # pass as a positional argument. 
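+        # For example (an illustrative sketch; `search_tool` stands in for any
+        # hypothetical single-input tool, not something defined in this module):
+        #     search_tool.run("langchain")     -> self._run("langchain")
+        #     search_tool.run({"query": "x"})  -> self._run(query="x")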
+ if isinstance(tool_input, str): + return (tool_input,), {} + if isinstance(tool_input, dict): + # Make a shallow copy of the input to allow downstream code + # to modify the root level of the input without affecting the + # original input. + # This is used by the tool to inject run time information like + # the callback manager. + return (), tool_input.copy() + # This code path is not expected to be reachable. + msg = f"Invalid tool input type: {type(tool_input)}" + raise TypeError(msg) + + def run( + self, + tool_input: Union[str, dict[str, Any]], + verbose: Optional[bool] = None, + start_color: Optional[str] = "green", + color: Optional[str] = "green", + callbacks: Callbacks = None, + *, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + run_name: Optional[str] = None, + run_id: Optional[uuid.UUID] = None, + config: Optional[RunnableConfig] = None, + tool_call_id: Optional[str] = None, + **kwargs: Any, + ) -> Any: + """Run the tool. + + Args: + tool_input: The input to the tool. + verbose: Whether to log the tool's progress. Defaults to None. + start_color: The color to use when starting the tool. Defaults to 'green'. + color: The color to use when ending the tool. Defaults to 'green'. + callbacks: Callbacks to be called during tool execution. Defaults to None. + tags: Optional list of tags associated with the tool. Defaults to None. + metadata: Optional metadata associated with the tool. Defaults to None. + run_name: The name of the run. Defaults to None. + run_id: The id of the run. Defaults to None. + config: The configuration for the tool. Defaults to None. + tool_call_id: The id of the tool call. Defaults to None. + kwargs: Keyword arguments to be passed to tool callbacks + + Returns: + The output of the tool. + + Raises: + ToolException: If an error occurs during tool execution. + """ + callback_manager = CallbackManager.configure( + callbacks, + self.callbacks, + self.verbose or bool(verbose), + tags, + self.tags, + metadata, + self.metadata, + ) + + run_manager = callback_manager.on_tool_start( + {"name": self.name, "description": self.description}, + tool_input if isinstance(tool_input, str) else str(tool_input), + color=start_color, + name=run_name, + run_id=run_id, + # Inputs by definition should always be dicts. + # For now, it's unclear whether this assumption is ever violated, + # but if it is we will send a `None` value to the callback instead + # TODO: will need to address issue via a patch. + inputs=tool_input if isinstance(tool_input, dict) else None, + **kwargs, + ) + + content = None + artifact = None + status = "success" + error_to_raise: Union[Exception, KeyboardInterrupt, None] = None + try: + child_config = patch_config(config, callbacks=run_manager.get_child()) + with set_config_context(child_config) as context: + tool_args, tool_kwargs = self._to_args_and_kwargs( + tool_input, tool_call_id + ) + if signature(self._run).parameters.get("run_manager"): + tool_kwargs = tool_kwargs | {"run_manager": run_manager} + if config_param := _get_runnable_config_param(self._run): + tool_kwargs = tool_kwargs | {config_param: config} + response = context.run(self._run, *tool_args, **tool_kwargs) + if self.response_format == "content_and_artifact": + if not isinstance(response, tuple) or len(response) != 2: + msg = ( + "Since response_format='content_and_artifact' " + "a two-tuple of the message content and raw tool output is " + f"expected. Instead generated response of type: " + f"{type(response)}." 
+ ) + error_to_raise = ValueError(msg) + else: + content, artifact = response + else: + content = response + except (ValidationError, ValidationErrorV1) as e: + if not self.handle_validation_error: + error_to_raise = e + else: + content = _handle_validation_error(e, flag=self.handle_validation_error) + status = "error" + except ToolException as e: + if not self.handle_tool_error: + error_to_raise = e + else: + content = _handle_tool_error(e, flag=self.handle_tool_error) + status = "error" + except (Exception, KeyboardInterrupt) as e: + error_to_raise = e + + if error_to_raise: + run_manager.on_tool_error(error_to_raise) + raise error_to_raise + output = _format_output(content, artifact, tool_call_id, self.name, status) + run_manager.on_tool_end(output, color=color, name=self.name, **kwargs) + return output + + async def arun( + self, + tool_input: Union[str, dict], + verbose: Optional[bool] = None, + start_color: Optional[str] = "green", + color: Optional[str] = "green", + callbacks: Callbacks = None, + *, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + run_name: Optional[str] = None, + run_id: Optional[uuid.UUID] = None, + config: Optional[RunnableConfig] = None, + tool_call_id: Optional[str] = None, + **kwargs: Any, + ) -> Any: + """Run the tool asynchronously. + + Args: + tool_input: The input to the tool. + verbose: Whether to log the tool's progress. Defaults to None. + start_color: The color to use when starting the tool. Defaults to 'green'. + color: The color to use when ending the tool. Defaults to 'green'. + callbacks: Callbacks to be called during tool execution. Defaults to None. + tags: Optional list of tags associated with the tool. Defaults to None. + metadata: Optional metadata associated with the tool. Defaults to None. + run_name: The name of the run. Defaults to None. + run_id: The id of the run. Defaults to None. + config: The configuration for the tool. Defaults to None. + tool_call_id: The id of the tool call. Defaults to None. + kwargs: Keyword arguments to be passed to tool callbacks + + Returns: + The output of the tool. + + Raises: + ToolException: If an error occurs during tool execution. + """ + callback_manager = AsyncCallbackManager.configure( + callbacks, + self.callbacks, + self.verbose or bool(verbose), + tags, + self.tags, + metadata, + self.metadata, + ) + run_manager = await callback_manager.on_tool_start( + {"name": self.name, "description": self.description}, + tool_input if isinstance(tool_input, str) else str(tool_input), + color=start_color, + name=run_name, + run_id=run_id, + # Inputs by definition should always be dicts. + # For now, it's unclear whether this assumption is ever violated, + # but if it is we will send a `None` value to the callback instead + # TODO: will need to address issue via a patch. 
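+            # (Illustrative) for tool_input={"query": "x"} the handler receives
+            # the stringified dict as the input string plus the dict via `inputs`;
+            # a bare string input arrives as-is with inputs=None.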
+ inputs=tool_input if isinstance(tool_input, dict) else None, + **kwargs, + ) + content = None + artifact = None + status = "success" + error_to_raise: Optional[Union[Exception, KeyboardInterrupt]] = None + try: + tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input, tool_call_id) + child_config = patch_config(config, callbacks=run_manager.get_child()) + with set_config_context(child_config) as context: + func_to_check = ( + self._run if self.__class__._arun is BaseTool._arun else self._arun + ) + if signature(func_to_check).parameters.get("run_manager"): + tool_kwargs["run_manager"] = run_manager + if config_param := _get_runnable_config_param(func_to_check): + tool_kwargs[config_param] = config + + coro = self._arun(*tool_args, **tool_kwargs) + response = await coro_with_context(coro, context) + if self.response_format == "content_and_artifact": + if not isinstance(response, tuple) or len(response) != 2: + msg = ( + "Since response_format='content_and_artifact' " + "a two-tuple of the message content and raw tool output is " + f"expected. Instead generated response of type: " + f"{type(response)}." + ) + error_to_raise = ValueError(msg) + else: + content, artifact = response + else: + content = response + except ValidationError as e: + if not self.handle_validation_error: + error_to_raise = e + else: + content = _handle_validation_error(e, flag=self.handle_validation_error) + status = "error" + except ToolException as e: + if not self.handle_tool_error: + error_to_raise = e + else: + content = _handle_tool_error(e, flag=self.handle_tool_error) + status = "error" + except (Exception, KeyboardInterrupt) as e: + error_to_raise = e + + if error_to_raise: + await run_manager.on_tool_error(error_to_raise) + raise error_to_raise + + output = _format_output(content, artifact, tool_call_id, self.name, status) + await run_manager.on_tool_end(output, color=color, name=self.name, **kwargs) + return output + + @deprecated("0.1.47", alternative="invoke", removal="1.0") + def __call__(self, tool_input: str, callbacks: Callbacks = None) -> str: + """Make tool callable.""" + return self.run(tool_input, callbacks=callbacks) + + +def _is_tool_call(x: Any) -> bool: + return isinstance(x, dict) and x.get("type") == "tool_call" + + +def _handle_validation_error( + e: Union[ValidationError, ValidationErrorV1], + *, + flag: Union[ + Literal[True], str, Callable[[Union[ValidationError, ValidationErrorV1]], str] + ], +) -> str: + if isinstance(flag, bool): + content = "Tool input validation error" + elif isinstance(flag, str): + content = flag + elif callable(flag): + content = flag(e) + else: + msg = ( + f"Got unexpected type of `handle_validation_error`. Expected bool, " + f"str or callable. Received: {flag}" + ) + raise ValueError(msg) # noqa: TRY004 + return content + + +def _handle_tool_error( + e: ToolException, + *, + flag: Optional[Union[Literal[True], str, Callable[[ToolException], str]]], +) -> str: + if isinstance(flag, bool): + content = e.args[0] if e.args else "Tool execution error" + elif isinstance(flag, str): + content = flag + elif callable(flag): + content = flag(e) + else: + msg = ( + f"Got unexpected type of `handle_tool_error`. Expected bool, str " + f"or callable. 
Received: {flag}" + ) + raise ValueError(msg) # noqa: TRY004 + return content + + +def _prep_run_args( + input: Union[str, dict, ToolCall], + config: Optional[RunnableConfig], + **kwargs: Any, +) -> tuple[Union[str, dict], dict]: + config = ensure_config(config) + if _is_tool_call(input): + tool_call_id: Optional[str] = cast("ToolCall", input)["id"] + tool_input: Union[str, dict] = cast("ToolCall", input)["args"].copy() + else: + tool_call_id = None + tool_input = cast("Union[str, dict]", input) + return ( + tool_input, + dict( + callbacks=config.get("callbacks"), + tags=config.get("tags"), + metadata=config.get("metadata"), + run_name=config.get("run_name"), + run_id=config.pop("run_id", None), + config=config, + tool_call_id=tool_call_id, + **kwargs, + ), + ) + + +def _format_output( + content: Any, + artifact: Any, + tool_call_id: Optional[str], + name: str, + status: str, +) -> Union[ToolOutputMixin, Any]: + if isinstance(content, ToolOutputMixin) or tool_call_id is None: + return content + if not _is_message_content_type(content): + content = _stringify(content) + return ToolMessage( + content, + artifact=artifact, + tool_call_id=tool_call_id, + name=name, + status=status, + ) + + +def _is_message_content_type(obj: Any) -> bool: + """Check for OpenAI or Anthropic format tool message content.""" + return ( + isinstance(obj, str) + or isinstance(obj, list) + and all(_is_message_content_block(e) for e in obj) + ) + + +def _is_message_content_block(obj: Any) -> bool: + """Check for OpenAI or Anthropic format tool message content blocks.""" + if isinstance(obj, str): + return True + if isinstance(obj, dict): + return obj.get("type", None) in ("text", "image_url", "image", "json") + return False + + +def _stringify(content: Any) -> str: + try: + return json.dumps(content, ensure_ascii=False) + except Exception: + return str(content) + + +def _get_type_hints(func: Callable) -> Optional[dict[str, type]]: + if isinstance(func, functools.partial): + func = func.func + try: + return get_type_hints(func) + except Exception: + return None + + +def _get_runnable_config_param(func: Callable) -> Optional[str]: + type_hints = _get_type_hints(func) + if not type_hints: + return None + for name, type_ in type_hints.items(): + if type_ is RunnableConfig: + return name + return None + + +class InjectedToolArg: + """Annotation for a Tool arg that is **not** meant to be generated by a model.""" + + +class InjectedToolCallId(InjectedToolArg): + """Annotation for injecting the tool_call_id. + + Example: + ..code-block:: python + + from typing_extensions import Annotated + + from langchain_core.messages import ToolMessage + from langchain_core.tools import tool, InjectedToolCallID + + @tool + def foo(x: int, tool_call_id: Annotated[str, InjectedToolCallID]) -> ToolMessage: + \"\"\"Return x.\"\"\" + return ToolMessage(str(x), artifact=x, name="foo", tool_call_id=tool_call_id) + """ # noqa: E501 + + +def _is_injected_arg_type( + type_: type, injected_type: Optional[type[InjectedToolArg]] = None +) -> bool: + injected_type = injected_type or InjectedToolArg + return any( + isinstance(arg, injected_type) + or (isinstance(arg, type) and issubclass(arg, injected_type)) + for arg in get_args(type_)[1:] + ) + + +def get_all_basemodel_annotations( + cls: Union[TypeBaseModel, Any], *, default_to_bound: bool = True +) -> dict[str, type]: + """Get all annotations from a Pydantic BaseModel and its parents. + + Args: + cls: The Pydantic BaseModel class. 
default_to_bound: Whether to default to the bound of a TypeVar if it exists.
+    """
+    # cls has no subscript: cls = FooBar
+    if isinstance(cls, type):
+        annotations: dict[str, type] = {}
+        for name, param in inspect.signature(cls).parameters.items():
+            # Exclude hidden init args added by pydantic Config. For example if
+            # BaseModel(extra="allow") then "extra_data" will be part of init sig.
+            if (
+                fields := getattr(cls, "model_fields", {})  # pydantic v2+
+                or getattr(cls, "__fields__", {})  # pydantic v1
+            ) and name not in fields:
+                continue
+            annotations[name] = param.annotation
+        orig_bases: tuple = getattr(cls, "__orig_bases__", ())
+    # cls has subscript: cls = FooBar[int]
+    else:
+        annotations = get_all_basemodel_annotations(
+            get_origin(cls), default_to_bound=False
+        )
+        orig_bases = (cls,)
+
+    # Pydantic v2 automatically resolves inherited generics, Pydantic v1 does not.
+    if not (isinstance(cls, type) and is_pydantic_v2_subclass(cls)):
+        # if cls = FooBar inherits from Baz[str], orig_bases will contain Baz[str]
+        # if cls = FooBar inherits from Baz, orig_bases will contain Baz
+        # if cls = FooBar[int], orig_bases will contain FooBar[int]
+        for parent in orig_bases:
+            # if class = FooBar inherits from Baz, parent = Baz
+            if isinstance(parent, type) and is_pydantic_v1_subclass(parent):
+                annotations.update(
+                    get_all_basemodel_annotations(parent, default_to_bound=False)
+                )
+                continue
+
+            parent_origin = get_origin(parent)
+
+            # if class = FooBar inherits from non-pydantic class
+            if not parent_origin:
+                continue
+
+            # if class = FooBar inherits from Baz[str]:
+            # parent = Baz[str],
+            # parent_origin = Baz,
+            # generic_type_vars = (type vars in Baz)
+            # generic_map = {type var in Baz: str}
+            generic_type_vars: tuple = getattr(parent_origin, "__parameters__", ())
+            generic_map = dict(zip(generic_type_vars, get_args(parent)))
+            for field in getattr(parent_origin, "__annotations__", {}):
+                annotations[field] = _replace_type_vars(
+                    annotations[field], generic_map, default_to_bound=default_to_bound
+                )
+
+    return {
+        k: _replace_type_vars(v, default_to_bound=default_to_bound)
+        for k, v in annotations.items()
+    }
+
+
+def _replace_type_vars(
+    type_: type,
+    generic_map: Optional[dict[TypeVar, type]] = None,
+    *,
+    default_to_bound: bool = True,
+) -> type:
+    generic_map = generic_map or {}
+    if isinstance(type_, TypeVar):
+        if type_ in generic_map:
+            return generic_map[type_]
+        if default_to_bound:
+            return type_.__bound__ or Any
+        return type_
+    if (origin := get_origin(type_)) and (args := get_args(type_)):
+        new_args = tuple(
+            _replace_type_vars(arg, generic_map, default_to_bound=default_to_bound)
+            for arg in args
+        )
+        return _py_38_safe_origin(origin)[new_args]  # type: ignore[index]
+    return type_
+
+
+class BaseToolkit(BaseModel, ABC):
+    """Base Toolkit representing a collection of related tools."""
+
+    @abstractmethod
+    def get_tools(self) -> list[BaseTool]:
+        """Get the tools in the toolkit."""
diff --git a/venv/Lib/site-packages/langchain_core/tools/convert.py b/venv/Lib/site-packages/langchain_core/tools/convert.py
new file mode 100644
index 00000000..ec20a7ba
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/tools/convert.py
@@ -0,0 +1,436 @@
+"""Convert functions and runnables to tools."""
+
+import inspect
+from typing import Any, Callable, Literal, Optional, Union, get_type_hints, overload
+
+from pydantic import BaseModel, Field, create_model
+
+from langchain_core.callbacks import Callbacks
+from langchain_core.runnables import Runnable
+from
langchain_core.tools.base import ArgsSchema, BaseTool +from langchain_core.tools.simple import Tool +from langchain_core.tools.structured import StructuredTool + + +@overload +def tool( + *, + description: Optional[str] = None, + return_direct: bool = False, + args_schema: Optional[ArgsSchema] = None, + infer_schema: bool = True, + response_format: Literal["content", "content_and_artifact"] = "content", + parse_docstring: bool = False, + error_on_invalid_docstring: bool = True, +) -> Callable[[Union[Callable, Runnable]], BaseTool]: ... + + +@overload +def tool( + name_or_callable: str, + runnable: Runnable, + *, + description: Optional[str] = None, + return_direct: bool = False, + args_schema: Optional[ArgsSchema] = None, + infer_schema: bool = True, + response_format: Literal["content", "content_and_artifact"] = "content", + parse_docstring: bool = False, + error_on_invalid_docstring: bool = True, +) -> BaseTool: ... + + +@overload +def tool( + name_or_callable: Callable, + *, + description: Optional[str] = None, + return_direct: bool = False, + args_schema: Optional[ArgsSchema] = None, + infer_schema: bool = True, + response_format: Literal["content", "content_and_artifact"] = "content", + parse_docstring: bool = False, + error_on_invalid_docstring: bool = True, +) -> BaseTool: ... + + +@overload +def tool( + name_or_callable: str, + *, + description: Optional[str] = None, + return_direct: bool = False, + args_schema: Optional[ArgsSchema] = None, + infer_schema: bool = True, + response_format: Literal["content", "content_and_artifact"] = "content", + parse_docstring: bool = False, + error_on_invalid_docstring: bool = True, +) -> Callable[[Union[Callable, Runnable]], BaseTool]: ... + + +def tool( + name_or_callable: Optional[Union[str, Callable]] = None, + runnable: Optional[Runnable] = None, + *args: Any, + description: Optional[str] = None, + return_direct: bool = False, + args_schema: Optional[ArgsSchema] = None, + infer_schema: bool = True, + response_format: Literal["content", "content_and_artifact"] = "content", + parse_docstring: bool = False, + error_on_invalid_docstring: bool = True, +) -> Union[ + BaseTool, + Callable[[Union[Callable, Runnable]], BaseTool], +]: + """Make tools out of functions, can be used with or without arguments. + + Args: + name_or_callable: Optional name of the tool or the callable to be + converted to a tool. Must be provided as a positional argument. + runnable: Optional runnable to convert to a tool. Must be provided as a + positional argument. + description: Optional description for the tool. + Precedence for the tool description value is as follows: + - `description` argument + (used even if docstring and/or `args_schema` are provided) + - tool function docstring + (used even if `args_schema` is provided) + - `args_schema` description + (used only if `description` / docstring are not provided) + *args: Extra positional arguments. Must be empty. + return_direct: Whether to return directly from the tool rather + than continuing the agent loop. Defaults to False. + args_schema: optional argument schema for user to specify. + Defaults to None. + infer_schema: Whether to infer the schema of the arguments from + the function's signature. This also makes the resultant tool + accept a dictionary input to its `run()` function. + Defaults to True. + response_format: The tool response format. If "content" then the output of + the tool is interpreted as the contents of a ToolMessage. 
If + "content_and_artifact" then the output is expected to be a two-tuple + corresponding to the (content, artifact) of a ToolMessage. + Defaults to "content". + parse_docstring: if ``infer_schema`` and ``parse_docstring``, will attempt to + parse parameter descriptions from Google Style function docstrings. + Defaults to False. + error_on_invalid_docstring: if ``parse_docstring`` is provided, configure + whether to raise ValueError on invalid Google Style docstrings. + Defaults to True. + + Returns: + The tool. + + Requires: + - Function must be of type (str) -> str + - Function must have a docstring + + Examples: + .. code-block:: python + + @tool + def search_api(query: str) -> str: + # Searches the API for the query. + return + + @tool("search", return_direct=True) + def search_api(query: str) -> str: + # Searches the API for the query. + return + + @tool(response_format="content_and_artifact") + def search_api(query: str) -> tuple[str, dict]: + return "partial json of results", {"full": "object of results"} + + .. versionadded:: 0.2.14 + Parse Google-style docstrings: + + .. code-block:: python + + @tool(parse_docstring=True) + def foo(bar: str, baz: int) -> str: + \"\"\"The foo. + + Args: + bar: The bar. + baz: The baz. + \"\"\" + return bar + + foo.args_schema.model_json_schema() + + .. code-block:: python + + { + "title": "foo", + "description": "The foo.", + "type": "object", + "properties": { + "bar": { + "title": "Bar", + "description": "The bar.", + "type": "string" + }, + "baz": { + "title": "Baz", + "description": "The baz.", + "type": "integer" + } + }, + "required": [ + "bar", + "baz" + ] + } + + Note that parsing by default will raise ``ValueError`` if the docstring + is considered invalid. A docstring is considered invalid if it contains + arguments not in the function signature, or is unable to be parsed into + a summary and "Args:" blocks. Examples below: + + .. code-block:: python + + # No args section + def invalid_docstring_1(bar: str, baz: int) -> str: + \"\"\"The foo.\"\"\" + return bar + + # Improper whitespace between summary and args section + def invalid_docstring_2(bar: str, baz: int) -> str: + \"\"\"The foo. + Args: + bar: The bar. + baz: The baz. + \"\"\" + return bar + + # Documented args absent from function signature + def invalid_docstring_3(bar: str, baz: int) -> str: + \"\"\"The foo. + + Args: + banana: The bar. + monkey: The baz. + \"\"\" + return bar + """ # noqa: D214,D405,D410,D411,D412,D416 + + def _create_tool_factory( + tool_name: str, + ) -> Callable[[Union[Callable, Runnable]], BaseTool]: + """Create a decorator that takes a callable and returns a tool. + + Args: + tool_name: The name that will be assigned to the tool. + + Returns: + A function that takes a callable or Runnable and returns a tool. + """ + + def _tool_factory(dec_func: Union[Callable, Runnable]) -> BaseTool: + tool_description = description + if isinstance(dec_func, Runnable): + runnable = dec_func + + if runnable.input_schema.model_json_schema().get("type") != "object": + msg = "Runnable must have an object schema." 
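+                    # For example (illustrative): a bare chat model exposes a
+                    # string/messages input schema and is rejected here, while
+                    # a `prompt | model` chain, whose input schema is an object
+                    # of prompt variables, passes this check.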
+                    raise ValueError(msg)
+
+                async def ainvoke_wrapper(
+                    callbacks: Optional[Callbacks] = None, **kwargs: Any
+                ) -> Any:
+                    return await runnable.ainvoke(kwargs, {"callbacks": callbacks})
+
+                def invoke_wrapper(
+                    callbacks: Optional[Callbacks] = None, **kwargs: Any
+                ) -> Any:
+                    return runnable.invoke(kwargs, {"callbacks": callbacks})
+
+                coroutine = ainvoke_wrapper
+                func = invoke_wrapper
+                schema: Optional[ArgsSchema] = runnable.input_schema
+                tool_description = description or repr(runnable)
+            elif inspect.iscoroutinefunction(dec_func):
+                coroutine = dec_func
+                func = None
+                schema = args_schema
+            else:
+                coroutine = None
+                func = dec_func
+                schema = args_schema
+
+            if infer_schema or args_schema is not None:
+                return StructuredTool.from_function(
+                    func,
+                    coroutine,
+                    name=tool_name,
+                    description=tool_description,
+                    return_direct=return_direct,
+                    args_schema=schema,
+                    infer_schema=infer_schema,
+                    response_format=response_format,
+                    parse_docstring=parse_docstring,
+                    error_on_invalid_docstring=error_on_invalid_docstring,
+                )
+            # If someone doesn't want a schema applied, we must treat it as
+            # a simple string->string function
+            if dec_func.__doc__ is None:
+                msg = (
+                    "Function must have a docstring if "
+                    "description not provided and infer_schema is False."
+                )
+                raise ValueError(msg)
+            return Tool(
+                name=tool_name,
+                func=func,
+                description=f"{tool_name} tool",
+                return_direct=return_direct,
+                coroutine=coroutine,
+                response_format=response_format,
+            )
+
+        return _tool_factory
+
+    if len(args) != 0:
+        # Triggered if a user attempts to use positional arguments that
+        # do not exist in the function signature
+        # e.g., @tool("name", runnable, "extra_arg")
+        # Here, "extra_arg" is not a valid argument
+        msg = "Too many arguments for tool decorator."
+        raise ValueError(msg)
+
+    if runnable is not None:
+        # tool is used as a function
+        # tool_from_runnable = tool("name", runnable)
+        if not name_or_callable:
+            msg = "Runnable without name for tool constructor"
+            raise ValueError(msg)
+        if not isinstance(name_or_callable, str):
+            msg = "Name must be a string for tool constructor"
+            raise ValueError(msg)
+        return _create_tool_factory(name_or_callable)(runnable)
+    if name_or_callable is not None:
+        if callable(name_or_callable) and hasattr(name_or_callable, "__name__"):
+            # Used as a decorator without parameters
+            # @tool
+            # def my_tool():
+            #    pass
+            return _create_tool_factory(name_or_callable.__name__)(name_or_callable)
+        if isinstance(name_or_callable, str):
+            # Used with a new name for the tool
+            # @tool("search")
+            # def my_tool():
+            #    pass
+            #
+            # or
+            #
+            # @tool("search", parse_docstring=True)
+            # def my_tool():
+            #    pass
+            return _create_tool_factory(name_or_callable)
+        msg = (
+            f"The first argument must be a string or a callable with a __name__ "
+            f"for tool decorator. Got {type(name_or_callable)}"
+        )
+        raise ValueError(msg)
+
+    # Tool is used as a decorator with parameters specified
+    # @tool(parse_docstring=True)
+    # def my_tool():
+    #    pass
+    def _partial(func: Union[Callable, Runnable]) -> BaseTool:
+        """Partial function that takes a callable and returns a tool."""
+        name_ = func.get_name() if isinstance(func, Runnable) else func.__name__
+        tool_factory = _create_tool_factory(name_)
+        return tool_factory(func)
+
+    return _partial
+
+
+def _get_description_from_runnable(runnable: Runnable) -> str:
+    """Generate a placeholder description of a runnable."""
+    input_schema = runnable.input_schema.model_json_schema()
+    return f"Takes {input_schema}."
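+
+
+# A usage sketch (illustrative only; `to_upper` is a hypothetical runnable, not
+# part of this module). `Runnable.as_tool` funnels into `convert_runnable_to_tool`
+# below, and an explicit `arg_types` mapping is turned into an args schema by the
+# helper defined next:
+#
+#     from langchain_core.runnables import RunnableLambda
+#
+#     to_upper = RunnableLambda(lambda d: d["text"].upper())
+#     tool = to_upper.as_tool(
+#         name="to_upper",
+#         description="Uppercase the `text` argument.",
+#         arg_types={"text": str},
+#     )
+#     tool.invoke({"text": "hi"})  # -> "HI"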
+ + +def _get_schema_from_runnable_and_arg_types( + runnable: Runnable, + name: str, + arg_types: Optional[dict[str, type]] = None, +) -> type[BaseModel]: + """Infer args_schema for tool.""" + if arg_types is None: + try: + arg_types = get_type_hints(runnable.InputType) + except TypeError as e: + msg = ( + "Tool input must be str or dict. If dict, dict arguments must be " + "typed. Either annotate types (e.g., with TypedDict) or pass " + f"arg_types into `.as_tool` to specify. {str(e)}" + ) + raise TypeError(msg) from e + fields = {key: (key_type, Field(...)) for key, key_type in arg_types.items()} + return create_model(name, **fields) # type: ignore[call-overload] + + +def convert_runnable_to_tool( + runnable: Runnable, + args_schema: Optional[type[BaseModel]] = None, + *, + name: Optional[str] = None, + description: Optional[str] = None, + arg_types: Optional[dict[str, type]] = None, +) -> BaseTool: + """Convert a Runnable into a BaseTool. + + Args: + runnable: The runnable to convert. + args_schema: The schema for the tool's input arguments. Defaults to None. + name: The name of the tool. Defaults to None. + description: The description of the tool. Defaults to None. + arg_types: The types of the arguments. Defaults to None. + + Returns: + The tool. + """ + if args_schema: + runnable = runnable.with_types(input_type=args_schema) + description = description or _get_description_from_runnable(runnable) + name = name or runnable.get_name() + + schema = runnable.input_schema.model_json_schema() + if schema.get("type") == "string": + return Tool( + name=name, + func=runnable.invoke, + coroutine=runnable.ainvoke, + description=description, + ) + + async def ainvoke_wrapper( + callbacks: Optional[Callbacks] = None, **kwargs: Any + ) -> Any: + return await runnable.ainvoke(kwargs, config={"callbacks": callbacks}) + + def invoke_wrapper(callbacks: Optional[Callbacks] = None, **kwargs: Any) -> Any: + return runnable.invoke(kwargs, config={"callbacks": callbacks}) + + if ( + arg_types is None + and schema.get("type") == "object" + and schema.get("properties") + ): + args_schema = runnable.input_schema + else: + args_schema = _get_schema_from_runnable_and_arg_types( + runnable, name, arg_types=arg_types + ) + + return StructuredTool.from_function( + name=name, + func=invoke_wrapper, + coroutine=ainvoke_wrapper, + description=description, + args_schema=args_schema, + ) diff --git a/venv/Lib/site-packages/langchain_core/tools/render.py b/venv/Lib/site-packages/langchain_core/tools/render.py new file mode 100644 index 00000000..eeb6f3d5 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tools/render.py @@ -0,0 +1,67 @@ +"""Utilities to render tools.""" + +from __future__ import annotations + +from inspect import signature +from typing import Callable + +from langchain_core.tools.base import BaseTool + +ToolsRenderer = Callable[[list[BaseTool]], str] + + +def render_text_description(tools: list[BaseTool]) -> str: + """Render the tool name and description in plain text. + + Args: + tools: The tools to render. + + Returns: + The rendered text. + + Output will be in the format of: + + .. 
code-block:: markdown + + search: This tool is used for search + calculator: This tool is used for math + """ + descriptions = [] + for tool in tools: + if hasattr(tool, "func") and tool.func: + sig = signature(tool.func) + description = f"{tool.name}{sig} - {tool.description}" + else: + description = f"{tool.name} - {tool.description}" + + descriptions.append(description) + return "\n".join(descriptions) + + +def render_text_description_and_args(tools: list[BaseTool]) -> str: + """Render the tool name, description, and args in plain text. + + Args: + tools: The tools to render. + + Returns: + The rendered text. + + Output will be in the format of: + + .. code-block:: markdown + + search: This tool is used for search, args: {"query": {"type": "string"}} + calculator: This tool is used for math, \ +args: {"expression": {"type": "string"}} + """ + tool_strings = [] + for tool in tools: + args_schema = str(tool.args) + if hasattr(tool, "func") and tool.func: + sig = signature(tool.func) + description = f"{tool.name}{sig} - {tool.description}" + else: + description = f"{tool.name} - {tool.description}" + tool_strings.append(f"{description}, args: {args_schema}") + return "\n".join(tool_strings) diff --git a/venv/Lib/site-packages/langchain_core/tools/retriever.py b/venv/Lib/site-packages/langchain_core/tools/retriever.py new file mode 100644 index 00000000..002fa5e8 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tools/retriever.py @@ -0,0 +1,117 @@ +"""Retriever tool.""" + +from __future__ import annotations + +from functools import partial +from typing import TYPE_CHECKING, Literal, Optional, Union + +from pydantic import BaseModel, Field + +from langchain_core.prompts import ( + BasePromptTemplate, + PromptTemplate, + aformat_document, + format_document, +) +from langchain_core.tools.simple import Tool + +if TYPE_CHECKING: + from langchain_core.callbacks import Callbacks + from langchain_core.documents import Document + from langchain_core.retrievers import BaseRetriever + + +class RetrieverInput(BaseModel): + """Input to the retriever.""" + + query: str = Field(description="query to look up in retriever") + + +def _get_relevant_documents( + query: str, + retriever: BaseRetriever, + document_prompt: BasePromptTemplate, + document_separator: str, + callbacks: Callbacks = None, + response_format: Literal["content", "content_and_artifact"] = "content", +) -> Union[str, tuple[str, list[Document]]]: + docs = retriever.invoke(query, config={"callbacks": callbacks}) + content = document_separator.join( + format_document(doc, document_prompt) for doc in docs + ) + if response_format == "content_and_artifact": + return (content, docs) + + return content + + +async def _aget_relevant_documents( + query: str, + retriever: BaseRetriever, + document_prompt: BasePromptTemplate, + document_separator: str, + callbacks: Callbacks = None, + response_format: Literal["content", "content_and_artifact"] = "content", +) -> Union[str, tuple[str, list[Document]]]: + docs = await retriever.ainvoke(query, config={"callbacks": callbacks}) + content = document_separator.join( + [await aformat_document(doc, document_prompt) for doc in docs] + ) + + if response_format == "content_and_artifact": + return (content, docs) + + return content + + +def create_retriever_tool( + retriever: BaseRetriever, + name: str, + description: str, + *, + document_prompt: Optional[BasePromptTemplate] = None, + document_separator: str = "\n\n", + response_format: Literal["content", "content_and_artifact"] = "content", +) -> Tool: + 
r"""Create a tool to do retrieval of documents. + + Args: + retriever: The retriever to use for the retrieval + name: The name for the tool. This will be passed to the language model, + so should be unique and somewhat descriptive. + description: The description for the tool. This will be passed to the language + model, so should be descriptive. + document_prompt: The prompt to use for the document. Defaults to None. + document_separator: The separator to use between documents. Defaults to "\n\n". + response_format: The tool response format. If "content" then the output of + the tool is interpreted as the contents of a ToolMessage. If + "content_and_artifact" then the output is expected to be a two-tuple + corresponding to the (content, artifact) of a ToolMessage (artifact + being a list of documents in this case). Defaults to "content". + + Returns: + Tool class to pass to an agent. + """ + document_prompt = document_prompt or PromptTemplate.from_template("{page_content}") + func = partial( + _get_relevant_documents, + retriever=retriever, + document_prompt=document_prompt, + document_separator=document_separator, + response_format=response_format, + ) + afunc = partial( + _aget_relevant_documents, + retriever=retriever, + document_prompt=document_prompt, + document_separator=document_separator, + response_format=response_format, + ) + return Tool( + name=name, + description=description, + func=func, + coroutine=afunc, + args_schema=RetrieverInput, + response_format=response_format, + ) diff --git a/venv/Lib/site-packages/langchain_core/tools/simple.py b/venv/Lib/site-packages/langchain_core/tools/simple.py new file mode 100644 index 00000000..05dc1917 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tools/simple.py @@ -0,0 +1,178 @@ +"""Tool that takes in function or coroutine directly.""" + +from __future__ import annotations + +from collections.abc import Awaitable +from inspect import signature +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Union, +) + +from typing_extensions import override + +from langchain_core.callbacks import ( + AsyncCallbackManagerForToolRun, + CallbackManagerForToolRun, +) +from langchain_core.runnables import RunnableConfig, run_in_executor +from langchain_core.tools.base import ( + ArgsSchema, + BaseTool, + ToolException, + _get_runnable_config_param, +) + +if TYPE_CHECKING: + from langchain_core.messages import ToolCall + + +class Tool(BaseTool): + """Tool that takes in function or coroutine directly.""" + + description: str = "" + func: Optional[Callable[..., str]] + """The function to run when the tool is called.""" + coroutine: Optional[Callable[..., Awaitable[str]]] = None + """The asynchronous version of the function.""" + + # --- Runnable --- + + @override + async def ainvoke( + self, + input: Union[str, dict, ToolCall], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Any: + if not self.coroutine: + # If the tool does not implement async, fall back to default implementation + return await run_in_executor(config, self.invoke, input, config, **kwargs) + + return await super().ainvoke(input, config, **kwargs) + + # --- Tool --- + + @property + def args(self) -> dict: + """The tool's input arguments. + + Returns: + The input arguments for the tool. 
+ """ + if self.args_schema is not None: + if isinstance(self.args_schema, dict): + json_schema = self.args_schema + else: + json_schema = self.args_schema.model_json_schema() + return json_schema["properties"] + # For backwards compatibility, if the function signature is ambiguous, + # assume it takes a single string input. + return {"tool_input": {"type": "string"}} + + def _to_args_and_kwargs( + self, tool_input: Union[str, dict], tool_call_id: Optional[str] + ) -> tuple[tuple, dict]: + """Convert tool input to pydantic model.""" + args, kwargs = super()._to_args_and_kwargs(tool_input, tool_call_id) + # For backwards compatibility. The tool must be run with a single input + all_args = list(args) + list(kwargs.values()) + if len(all_args) != 1: + msg = ( + f"""Too many arguments to single-input tool {self.name}. + Consider using StructuredTool instead.""" + f" Args: {all_args}" + ) + raise ToolException(msg) + return tuple(all_args), {} + + def _run( + self, + *args: Any, + config: RunnableConfig, + run_manager: Optional[CallbackManagerForToolRun] = None, + **kwargs: Any, + ) -> Any: + """Use the tool.""" + if self.func: + if run_manager and signature(self.func).parameters.get("callbacks"): + kwargs["callbacks"] = run_manager.get_child() + if config_param := _get_runnable_config_param(self.func): + kwargs[config_param] = config + return self.func(*args, **kwargs) + msg = "Tool does not support sync invocation." + raise NotImplementedError(msg) + + async def _arun( + self, + *args: Any, + config: RunnableConfig, + run_manager: Optional[AsyncCallbackManagerForToolRun] = None, + **kwargs: Any, + ) -> Any: + """Use the tool asynchronously.""" + if self.coroutine: + if run_manager and signature(self.coroutine).parameters.get("callbacks"): + kwargs["callbacks"] = run_manager.get_child() + if config_param := _get_runnable_config_param(self.coroutine): + kwargs[config_param] = config + return await self.coroutine(*args, **kwargs) + + # NOTE: this code is unreachable since _arun is only called if coroutine is not + # None. + return await super()._arun( + *args, config=config, run_manager=run_manager, **kwargs + ) + + # TODO: this is for backwards compatibility, remove in future + def __init__( + self, name: str, func: Optional[Callable], description: str, **kwargs: Any + ) -> None: + """Initialize tool.""" + super().__init__(name=name, func=func, description=description, **kwargs) + + @classmethod + def from_function( + cls, + func: Optional[Callable], + name: str, # We keep these required to support backwards compatibility + description: str, + return_direct: bool = False, # noqa: FBT001,FBT002 + args_schema: Optional[ArgsSchema] = None, + coroutine: Optional[ + Callable[..., Awaitable[Any]] + ] = None, # This is last for compatibility, but should be after func + **kwargs: Any, + ) -> Tool: + """Initialize tool from a function. + + Args: + func: The function to create the tool from. + name: The name of the tool. + description: The description of the tool. + return_direct: Whether to return the output directly. Defaults to False. + args_schema: The schema of the tool's input arguments. Defaults to None. + coroutine: The asynchronous version of the function. Defaults to None. + kwargs: Additional arguments to pass to the tool. + + Returns: + The tool. + + Raises: + ValueError: If the function is not provided. 
+ """ + if func is None and coroutine is None: + msg = "Function and/or coroutine must be provided" + raise ValueError(msg) + return cls( + name=name, + func=func, + coroutine=coroutine, + description=description, + return_direct=return_direct, + args_schema=args_schema, + **kwargs, + ) diff --git a/venv/Lib/site-packages/langchain_core/tools/structured.py b/venv/Lib/site-packages/langchain_core/tools/structured.py new file mode 100644 index 00000000..4e202dfe --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tools/structured.py @@ -0,0 +1,236 @@ +"""Structured tool.""" + +from __future__ import annotations + +import textwrap +from collections.abc import Awaitable +from inspect import signature +from typing import ( + TYPE_CHECKING, + Annotated, + Any, + Callable, + Literal, + Optional, + Union, +) + +from pydantic import Field, SkipValidation +from typing_extensions import override + +from langchain_core.callbacks import ( + AsyncCallbackManagerForToolRun, + CallbackManagerForToolRun, +) +from langchain_core.runnables import RunnableConfig, run_in_executor +from langchain_core.tools.base import ( + FILTERED_ARGS, + ArgsSchema, + BaseTool, + _get_runnable_config_param, + create_schema_from_function, +) +from langchain_core.utils.pydantic import is_basemodel_subclass + +if TYPE_CHECKING: + from langchain_core.messages import ToolCall + + +class StructuredTool(BaseTool): + """Tool that can operate on any number of inputs.""" + + description: str = "" + args_schema: Annotated[ArgsSchema, SkipValidation()] = Field( + ..., description="The tool schema." + ) + """The input arguments' schema.""" + func: Optional[Callable[..., Any]] = None + """The function to run when the tool is called.""" + coroutine: Optional[Callable[..., Awaitable[Any]]] = None + """The asynchronous version of the function.""" + + # --- Runnable --- + + # TODO: Is this needed? + @override + async def ainvoke( + self, + input: Union[str, dict, ToolCall], + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Any: + if not self.coroutine: + # If the tool does not implement async, fall back to default implementation + return await run_in_executor(config, self.invoke, input, config, **kwargs) + + return await super().ainvoke(input, config, **kwargs) + + # --- Tool --- + + @property + def args(self) -> dict: + """The tool's input arguments.""" + if isinstance(self.args_schema, dict): + json_schema = self.args_schema + else: + input_schema = self.get_input_schema() + json_schema = input_schema.model_json_schema() + return json_schema["properties"] + + def _run( + self, + *args: Any, + config: RunnableConfig, + run_manager: Optional[CallbackManagerForToolRun] = None, + **kwargs: Any, + ) -> Any: + """Use the tool.""" + if self.func: + if run_manager and signature(self.func).parameters.get("callbacks"): + kwargs["callbacks"] = run_manager.get_child() + if config_param := _get_runnable_config_param(self.func): + kwargs[config_param] = config + return self.func(*args, **kwargs) + msg = "StructuredTool does not support sync invocation." 
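+        # No sync ``func`` was provided: async-only StructuredTools must be
+        # awaited (e.g. via ``ainvoke``) rather than called synchronously.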
+ raise NotImplementedError(msg) + + async def _arun( + self, + *args: Any, + config: RunnableConfig, + run_manager: Optional[AsyncCallbackManagerForToolRun] = None, + **kwargs: Any, + ) -> Any: + """Use the tool asynchronously.""" + if self.coroutine: + if run_manager and signature(self.coroutine).parameters.get("callbacks"): + kwargs["callbacks"] = run_manager.get_child() + if config_param := _get_runnable_config_param(self.coroutine): + kwargs[config_param] = config + return await self.coroutine(*args, **kwargs) + + # If self.coroutine is None, then this will delegate to the default + # implementation which is expected to delegate to _run on a separate thread. + return await super()._arun( + *args, config=config, run_manager=run_manager, **kwargs + ) + + @classmethod + def from_function( + cls, + func: Optional[Callable] = None, + coroutine: Optional[Callable[..., Awaitable[Any]]] = None, + name: Optional[str] = None, + description: Optional[str] = None, + return_direct: bool = False, # noqa: FBT001,FBT002 + args_schema: Optional[ArgsSchema] = None, + infer_schema: bool = True, # noqa: FBT001,FBT002 + *, + response_format: Literal["content", "content_and_artifact"] = "content", + parse_docstring: bool = False, + error_on_invalid_docstring: bool = False, + **kwargs: Any, + ) -> StructuredTool: + """Create tool from a given function. + + A classmethod that helps to create a tool from a function. + + Args: + func: The function from which to create a tool. + coroutine: The async function from which to create a tool. + name: The name of the tool. Defaults to the function name. + description: The description of the tool. + Defaults to the function docstring. + return_direct: Whether to return the result directly or as a callback. + Defaults to False. + args_schema: The schema of the tool's input arguments. Defaults to None. + infer_schema: Whether to infer the schema from the function's signature. + Defaults to True. + response_format: The tool response format. If "content" then the output of + the tool is interpreted as the contents of a ToolMessage. If + "content_and_artifact" then the output is expected to be a two-tuple + corresponding to the (content, artifact) of a ToolMessage. + Defaults to "content". + parse_docstring: if ``infer_schema`` and ``parse_docstring``, will attempt + to parse parameter descriptions from Google Style function docstrings. + Defaults to False. + error_on_invalid_docstring: if ``parse_docstring`` is provided, configure + whether to raise ValueError on invalid Google Style docstrings. + Defaults to False. + kwargs: Additional arguments to pass to the tool + + Returns: + The tool. + + Raises: + ValueError: If the function is not provided. + + Examples: + + .. 
code-block:: python + + def add(a: int, b: int) -> int: + \"\"\"Add two numbers\"\"\" + return a + b + tool = StructuredTool.from_function(add) + tool.run(1, 2) # 3 + """ + if func is not None: + source_function = func + elif coroutine is not None: + source_function = coroutine + else: + msg = "Function and/or coroutine must be provided" + raise ValueError(msg) + name = name or source_function.__name__ + if args_schema is None and infer_schema: + # schema name is appended within function + args_schema = create_schema_from_function( + name, + source_function, + parse_docstring=parse_docstring, + error_on_invalid_docstring=error_on_invalid_docstring, + filter_args=_filter_schema_args(source_function), + ) + description_ = description + if description is None and not parse_docstring: + description_ = source_function.__doc__ or None + if description_ is None and args_schema: + if isinstance(args_schema, type) and is_basemodel_subclass(args_schema): + description_ = args_schema.__doc__ or None + elif isinstance(args_schema, dict): + description_ = args_schema.get("description") + else: + msg = ( + "Invalid args_schema: expected BaseModel or dict, " + f"got {args_schema}" + ) + raise TypeError(msg) + if description_ is None: + msg = "Function must have a docstring if description not provided." + raise ValueError(msg) + if description is None: + # Only apply if using the function's docstring + description_ = textwrap.dedent(description_).strip() + + # Description example: + # search_api(query: str) - Searches the API for the query. + description_ = f"{description_.strip()}" + return cls( + name=name, + func=func, + coroutine=coroutine, + args_schema=args_schema, # type: ignore[arg-type] + description=description_, + return_direct=return_direct, + response_format=response_format, + **kwargs, + ) + + +def _filter_schema_args(func: Callable) -> list[str]: + filter_args = list(FILTERED_ARGS) + if config_param := _get_runnable_config_param(func): + filter_args.append(config_param) + # filter_args.extend(_get_non_model_params(type_hints)) + return filter_args diff --git a/venv/Lib/site-packages/langchain_core/tracers/__init__.py b/venv/Lib/site-packages/langchain_core/tracers/__init__.py new file mode 100644 index 00000000..db8e828c --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/__init__.py @@ -0,0 +1,58 @@ +"""**Tracers** are classes for tracing runs. + +**Class hierarchy:** + +.. 
code-block:: + + BaseCallbackHandler --> BaseTracer --> Tracer # Examples: LangChainTracer, RootListenersTracer + --> # Examples: LogStreamCallbackHandler +""" # noqa: E501 + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.tracers.base import BaseTracer + from langchain_core.tracers.evaluation import EvaluatorCallbackHandler + from langchain_core.tracers.langchain import LangChainTracer + from langchain_core.tracers.log_stream import ( + LogStreamCallbackHandler, + RunLog, + RunLogPatch, + ) + from langchain_core.tracers.schemas import Run + from langchain_core.tracers.stdout import ConsoleCallbackHandler + +__all__ = ( + "BaseTracer", + "EvaluatorCallbackHandler", + "LangChainTracer", + "ConsoleCallbackHandler", + "Run", + "RunLog", + "RunLogPatch", + "LogStreamCallbackHandler", +) + +_dynamic_imports = { + "BaseTracer": "base", + "EvaluatorCallbackHandler": "evaluation", + "LangChainTracer": "langchain", + "LogStreamCallbackHandler": "log_stream", + "RunLog": "log_stream", + "RunLogPatch": "log_stream", + "Run": "schemas", + "ConsoleCallbackHandler": "stdout", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..5a1302db Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/_streaming.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/_streaming.cpython-312.pyc new file mode 100644 index 00000000..591d1656 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/_streaming.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..a0f4f22c Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/context.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/context.cpython-312.pyc new file mode 100644 index 00000000..2878b2f3 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/context.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/core.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/core.cpython-312.pyc new file mode 100644 index 00000000..be1a0ddd Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/core.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/evaluation.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/evaluation.cpython-312.pyc new file mode 100644 index 00000000..34a41452 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/evaluation.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/event_stream.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/event_stream.cpython-312.pyc new file mode 100644 index 00000000..3fabfd84 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/event_stream.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/langchain.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/langchain.cpython-312.pyc new file mode 100644 index 00000000..1464b0bc Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/langchain.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/langchain_v1.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/langchain_v1.cpython-312.pyc new file mode 100644 index 00000000..26c8ecfc Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/langchain_v1.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/log_stream.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/log_stream.cpython-312.pyc new file mode 100644 index 00000000..812a9bd2 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/log_stream.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/memory_stream.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/memory_stream.cpython-312.pyc new file mode 100644 index 00000000..5d2dea7a Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/memory_stream.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/root_listeners.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/root_listeners.cpython-312.pyc new file mode 100644 index 00000000..95bc7035 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/root_listeners.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/run_collector.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/run_collector.cpython-312.pyc new file mode 100644 index 00000000..754c1caf Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/run_collector.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/schemas.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/schemas.cpython-312.pyc new file mode 100644 index 00000000..cff6fb94 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/schemas.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/__pycache__/stdout.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/stdout.cpython-312.pyc new file mode 100644 index 00000000..d6d720d8 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/tracers/__pycache__/stdout.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/tracers/_streaming.py b/venv/Lib/site-packages/langchain_core/tracers/_streaming.py new file mode 100644 index 00000000..ca50213d --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/_streaming.py @@ -0,0 +1,34 @@ +"""Internal tracers used for stream_log and astream events implementations.""" + +import abc +from 
collections.abc import AsyncIterator, Iterator +from typing import TypeVar +from uuid import UUID + +T = TypeVar("T") + + +class _StreamingCallbackHandler(abc.ABC): + """For internal use. + + This is a common mixin that the callback handlers + for both astream events and astream log inherit from. + + The `tap_output_aiter` method is invoked in some contexts + to produce callbacks for intermediate results. + """ + + @abc.abstractmethod + def tap_output_aiter( + self, run_id: UUID, output: AsyncIterator[T] + ) -> AsyncIterator[T]: + """Used for internal astream_log and astream events implementations.""" + + @abc.abstractmethod + def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]: + """Used for internal astream_log and astream events implementations.""" + + +__all__ = [ + "_StreamingCallbackHandler", +] diff --git a/venv/Lib/site-packages/langchain_core/tracers/base.py b/venv/Lib/site-packages/langchain_core/tracers/base.py new file mode 100644 index 00000000..ee588606 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/base.py @@ -0,0 +1,917 @@ +"""Base interfaces for tracing runs.""" + +from __future__ import annotations + +import asyncio +import logging +from abc import ABC, abstractmethod +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Union, +) + +from typing_extensions import override + +from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler +from langchain_core.exceptions import TracerException # noqa: F401 +from langchain_core.tracers.core import _TracerCore + +if TYPE_CHECKING: + from collections.abc import Sequence + from uuid import UUID + + from tenacity import RetryCallState + + from langchain_core.documents import Document + from langchain_core.messages import BaseMessage + from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult + from langchain_core.tracers.schemas import Run + +logger = logging.getLogger(__name__) + + +class BaseTracer(_TracerCore, BaseCallbackHandler, ABC): + """Base interface for tracers.""" + + @abstractmethod + def _persist_run(self, run: Run) -> None: + """Persist a run.""" + + def _start_trace(self, run: Run) -> None: + """Start a trace for a run.""" + super()._start_trace(run) + self._on_run_create(run) + + def _end_trace(self, run: Run) -> None: + """End a trace for a run.""" + if not run.parent_run_id: + self._persist_run(run) + self.run_map.pop(str(run.id)) + self._on_run_update(run) + + def on_chat_model_start( + self, + serialized: dict[str, Any], + messages: list[list[BaseMessage]], + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> Run: + """Start a trace for an LLM run. + + Args: + serialized: The serialized model. + messages: The messages to start the chat with. + run_id: The run ID. + tags: The tags for the run. Defaults to None. + parent_run_id: The parent run ID. Defaults to None. + metadata: The metadata for the run. Defaults to None. + name: The name of the run. + kwargs: Additional arguments. + + Returns: + The run. 
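+
+        Note: tracers using the legacy "original" schema format do not support
+        chat model runs; for those, ``_create_chat_model_run`` raises
+        ``NotImplementedError`` and callers fall back to ``on_llm_start``.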
+ """ + chat_model_run = self._create_chat_model_run( + serialized=serialized, + messages=messages, + run_id=run_id, + parent_run_id=parent_run_id, + tags=tags, + metadata=metadata, + name=name, + **kwargs, + ) + self._start_trace(chat_model_run) + self._on_chat_model_start(chat_model_run) + return chat_model_run + + def on_llm_start( + self, + serialized: dict[str, Any], + prompts: list[str], + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> Run: + """Start a trace for an LLM run. + + Args: + serialized: The serialized model. + prompts: The prompts to start the LLM with. + run_id: The run ID. + tags: The tags for the run. Defaults to None. + parent_run_id: The parent run ID. Defaults to None. + metadata: The metadata for the run. Defaults to None. + name: The name of the run. + kwargs: Additional arguments. + + Returns: + The run. + """ + llm_run = self._create_llm_run( + serialized=serialized, + prompts=prompts, + run_id=run_id, + parent_run_id=parent_run_id, + tags=tags, + metadata=metadata, + name=name, + **kwargs, + ) + self._start_trace(llm_run) + self._on_llm_start(llm_run) + return llm_run + + @override + def on_llm_new_token( + self, + token: str, + *, + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> Run: + """Run on new LLM token. Only available when streaming is enabled. + + Args: + token: The token. + chunk: The chunk. Defaults to None. + run_id: The run ID. + parent_run_id: The parent run ID. Defaults to None. + kwargs: Additional arguments. + + Returns: + The run. + """ + # "chat_model" is only used for the experimental new streaming_events format. + # This change should not affect any existing tracers. + llm_run = self._llm_run_with_token_event( + token=token, + run_id=run_id, + chunk=chunk, + parent_run_id=parent_run_id, + ) + self._on_llm_new_token(llm_run, token, chunk) + return llm_run + + @override + def on_retry( + self, + retry_state: RetryCallState, + *, + run_id: UUID, + **kwargs: Any, + ) -> Run: + """Run on retry. + + Args: + retry_state: The retry state. + run_id: The run ID. + kwargs: Additional arguments. + + Returns: + The run. + """ + return self._llm_run_with_retry_event( + retry_state=retry_state, + run_id=run_id, + ) + + @override + def on_llm_end(self, response: LLMResult, *, run_id: UUID, **kwargs: Any) -> Run: + """End a trace for an LLM run. + + Args: + response: The response. + run_id: The run ID. + kwargs: Additional arguments. + + Returns: + The run. + """ + # "chat_model" is only used for the experimental new streaming_events format. + # This change should not affect any existing tracers. + llm_run = self._complete_llm_run( + response=response, + run_id=run_id, + ) + self._end_trace(llm_run) + self._on_llm_end(llm_run) + return llm_run + + def on_llm_error( + self, + error: BaseException, + *, + run_id: UUID, + **kwargs: Any, + ) -> Run: + """Handle an error for an LLM run. + + Args: + error: The error. + run_id: The run ID. + kwargs: Additional arguments. + + Returns: + The run. + """ + # "chat_model" is only used for the experimental new streaming_events format. + # This change should not affect any existing tracers. 
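+        # A partial ``response`` may ride along in kwargs (output produced
+        # before the failure); it is attached to the errored run below.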
+ llm_run = self._errored_llm_run( + error=error, run_id=run_id, response=kwargs.pop("response", None) + ) + self._end_trace(llm_run) + self._on_llm_error(llm_run) + return llm_run + + @override + def on_chain_start( + self, + serialized: dict[str, Any], + inputs: dict[str, Any], + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + run_type: Optional[str] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> Run: + """Start a trace for a chain run. + + Args: + serialized: The serialized chain. + inputs: The inputs for the chain. + run_id: The run ID. + tags: The tags for the run. Defaults to None. + parent_run_id: The parent run ID. Defaults to None. + metadata: The metadata for the run. Defaults to None. + run_type: The type of the run. Defaults to None. + name: The name of the run. + kwargs: Additional arguments. + + Returns: + The run. + """ + chain_run = self._create_chain_run( + serialized=serialized, + inputs=inputs, + run_id=run_id, + tags=tags, + parent_run_id=parent_run_id, + metadata=metadata, + run_type=run_type, + name=name, + **kwargs, + ) + self._start_trace(chain_run) + self._on_chain_start(chain_run) + return chain_run + + @override + def on_chain_end( + self, + outputs: dict[str, Any], + *, + run_id: UUID, + inputs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Run: + """End a trace for a chain run. + + Args: + outputs: The outputs for the chain. + run_id: The run ID. + inputs: The inputs for the chain. Defaults to None. + kwargs: Additional arguments. + + Returns: + The run. + """ + chain_run = self._complete_chain_run( + outputs=outputs, + run_id=run_id, + inputs=inputs, + ) + self._end_trace(chain_run) + self._on_chain_end(chain_run) + return chain_run + + @override + def on_chain_error( + self, + error: BaseException, + *, + inputs: Optional[dict[str, Any]] = None, + run_id: UUID, + **kwargs: Any, + ) -> Run: + """Handle an error for a chain run. + + Args: + error: The error. + inputs: The inputs for the chain. Defaults to None. + run_id: The run ID. + kwargs: Additional arguments. + + Returns: + The run. + """ + chain_run = self._errored_chain_run( + error=error, + run_id=run_id, + inputs=inputs, + ) + self._end_trace(chain_run) + self._on_chain_error(chain_run) + return chain_run + + def on_tool_start( + self, + serialized: dict[str, Any], + input_str: str, + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + inputs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Run: + """Start a trace for a tool run. + + Args: + serialized: The serialized tool. + input_str: The input string. + run_id: The run ID. + tags: The tags for the run. Defaults to None. + parent_run_id: The parent run ID. Defaults to None. + metadata: The metadata for the run. Defaults to None. + name: The name of the run. + inputs: The inputs for the tool. + kwargs: Additional arguments. + + Returns: + The run. + """ + tool_run = self._create_tool_run( + serialized=serialized, + input_str=input_str, + run_id=run_id, + tags=tags, + parent_run_id=parent_run_id, + metadata=metadata, + name=name, + inputs=inputs, + **kwargs, + ) + self._start_trace(tool_run) + self._on_tool_start(tool_run) + return tool_run + + @override + def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any) -> Run: + """End a trace for a tool run. + + Args: + output: The output for the tool. 
+ run_id: The run ID. + kwargs: Additional arguments. + + Returns: + The run. + """ + tool_run = self._complete_tool_run( + output=output, + run_id=run_id, + ) + self._end_trace(tool_run) + self._on_tool_end(tool_run) + return tool_run + + @override + def on_tool_error( + self, + error: BaseException, + *, + run_id: UUID, + **kwargs: Any, + ) -> Run: + """Handle an error for a tool run. + + Args: + error: The error. + run_id: The run ID. + kwargs: Additional arguments. + + Returns: + The run. + """ + tool_run = self._errored_tool_run( + error=error, + run_id=run_id, + ) + self._end_trace(tool_run) + self._on_tool_error(tool_run) + return tool_run + + def on_retriever_start( + self, + serialized: dict[str, Any], + query: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> Run: + """Run when the Retriever starts running. + + Args: + serialized: The serialized retriever. + query: The query. + run_id: The run ID. + parent_run_id: The parent run ID. Defaults to None. + tags: The tags for the run. Defaults to None. + metadata: The metadata for the run. Defaults to None. + name: The name of the run. + kwargs: Additional arguments. + + Returns: + The run. + """ + retrieval_run = self._create_retrieval_run( + serialized=serialized, + query=query, + run_id=run_id, + parent_run_id=parent_run_id, + tags=tags, + metadata=metadata, + name=name, + **kwargs, + ) + self._start_trace(retrieval_run) + self._on_retriever_start(retrieval_run) + return retrieval_run + + @override + def on_retriever_error( + self, + error: BaseException, + *, + run_id: UUID, + **kwargs: Any, + ) -> Run: + """Run when Retriever errors. + + Args: + error: The error. + run_id: The run ID. + kwargs: Additional arguments. + + Returns: + The run. + """ + retrieval_run = self._errored_retrieval_run( + error=error, + run_id=run_id, + ) + self._end_trace(retrieval_run) + self._on_retriever_error(retrieval_run) + return retrieval_run + + @override + def on_retriever_end( + self, documents: Sequence[Document], *, run_id: UUID, **kwargs: Any + ) -> Run: + """Run when the Retriever ends running. + + Args: + documents: The documents. + run_id: The run ID. + kwargs: Additional arguments. + + Returns: + The run. + """ + retrieval_run = self._complete_retrieval_run( + documents=documents, + run_id=run_id, + ) + self._end_trace(retrieval_run) + self._on_retriever_end(retrieval_run) + return retrieval_run + + def __deepcopy__(self, memo: dict) -> BaseTracer: + """Deepcopy the tracer.""" + return self + + def __copy__(self) -> BaseTracer: + """Copy the tracer.""" + return self + + +class AsyncBaseTracer(_TracerCore, AsyncCallbackHandler, ABC): + """Async Base interface for tracers.""" + + @abstractmethod + @override + async def _persist_run(self, run: Run) -> None: + """Persist a run.""" + + @override + async def _start_trace(self, run: Run) -> None: + """Start a trace for a run. + + Starting a trace will run concurrently with each _on_[run_type]_start method. + No _on_[run_type]_start callback should depend on operations in _start_trace. + """ + super()._start_trace(run) + await self._on_run_create(run) + + @override + async def _end_trace(self, run: Run) -> None: + """End a trace for a run. + + Ending a trace will run concurrently with each _on_[run_type]_end method. + No _on_[run_type]_end callback should depend on operations in _end_trace. 
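+        (Both are scheduled concurrently with ``asyncio.gather`` by the
+        ``on_*`` handlers below.)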
+ """ + if not run.parent_run_id: + await self._persist_run(run) + self.run_map.pop(str(run.id)) + await self._on_run_update(run) + + @override + async def on_chat_model_start( + self, + serialized: dict[str, Any], + messages: list[list[BaseMessage]], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> Any: + chat_model_run = self._create_chat_model_run( + serialized=serialized, + messages=messages, + run_id=run_id, + parent_run_id=parent_run_id, + tags=tags, + metadata=metadata, + name=name, + **kwargs, + ) + tasks = [ + self._start_trace(chat_model_run), + self._on_chat_model_start(chat_model_run), + ] + await asyncio.gather(*tasks) + return chat_model_run + + @override + async def on_llm_start( + self, + serialized: dict[str, Any], + prompts: list[str], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + llm_run = self._create_llm_run( + serialized=serialized, + prompts=prompts, + run_id=run_id, + parent_run_id=parent_run_id, + tags=tags, + metadata=metadata, + **kwargs, + ) + tasks = [self._start_trace(llm_run), self._on_llm_start(llm_run)] + await asyncio.gather(*tasks) + + @override + async def on_llm_new_token( + self, + token: str, + *, + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> None: + llm_run = self._llm_run_with_token_event( + token=token, + run_id=run_id, + chunk=chunk, + parent_run_id=parent_run_id, + ) + await self._on_llm_new_token(llm_run, token, chunk) + + @override + async def on_retry( + self, + retry_state: RetryCallState, + *, + run_id: UUID, + **kwargs: Any, + ) -> None: + self._llm_run_with_retry_event( + retry_state=retry_state, + run_id=run_id, + ) + + @override + async def on_llm_end( + self, + response: LLMResult, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + llm_run = self._complete_llm_run( + response=response, + run_id=run_id, + ) + tasks = [self._on_llm_end(llm_run), self._end_trace(llm_run)] + await asyncio.gather(*tasks) + + @override + async def on_llm_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + llm_run = self._errored_llm_run( + error=error, + run_id=run_id, + ) + tasks = [self._on_llm_error(llm_run), self._end_trace(llm_run)] + await asyncio.gather(*tasks) + + @override + async def on_chain_start( + self, + serialized: dict[str, Any], + inputs: dict[str, Any], + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + run_type: Optional[str] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> None: + chain_run = self._create_chain_run( + serialized=serialized, + inputs=inputs, + run_id=run_id, + tags=tags, + parent_run_id=parent_run_id, + metadata=metadata, + run_type=run_type, + name=name, + **kwargs, + ) + tasks = [self._start_trace(chain_run), self._on_chain_start(chain_run)] + await asyncio.gather(*tasks) + + @override + async def on_chain_end( + self, + outputs: dict[str, Any], + *, + run_id: UUID, + inputs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + chain_run = 
self._complete_chain_run( + outputs=outputs, + run_id=run_id, + inputs=inputs, + ) + tasks = [self._end_trace(chain_run), self._on_chain_end(chain_run)] + await asyncio.gather(*tasks) + + @override + async def on_chain_error( + self, + error: BaseException, + *, + inputs: Optional[dict[str, Any]] = None, + run_id: UUID, + **kwargs: Any, + ) -> None: + chain_run = self._errored_chain_run( + error=error, + inputs=inputs, + run_id=run_id, + ) + tasks = [self._end_trace(chain_run), self._on_chain_error(chain_run)] + await asyncio.gather(*tasks) + + @override + async def on_tool_start( + self, + serialized: dict[str, Any], + input_str: str, + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + inputs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + tool_run = self._create_tool_run( + serialized=serialized, + input_str=input_str, + run_id=run_id, + tags=tags, + parent_run_id=parent_run_id, + metadata=metadata, + inputs=inputs, + **kwargs, + ) + tasks = [self._start_trace(tool_run), self._on_tool_start(tool_run)] + await asyncio.gather(*tasks) + + @override + async def on_tool_end( + self, + output: Any, + *, + run_id: UUID, + **kwargs: Any, + ) -> None: + tool_run = self._complete_tool_run( + output=output, + run_id=run_id, + ) + tasks = [self._end_trace(tool_run), self._on_tool_end(tool_run)] + await asyncio.gather(*tasks) + + @override + async def on_tool_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + tool_run = self._errored_tool_run( + error=error, + run_id=run_id, + ) + tasks = [self._end_trace(tool_run), self._on_tool_error(tool_run)] + await asyncio.gather(*tasks) + + @override + async def on_retriever_start( + self, + serialized: dict[str, Any], + query: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> None: + retriever_run = self._create_retrieval_run( + serialized=serialized, + query=query, + run_id=run_id, + parent_run_id=parent_run_id, + tags=tags, + metadata=metadata, + name=name, + ) + tasks = [ + self._start_trace(retriever_run), + self._on_retriever_start(retriever_run), + ] + await asyncio.gather(*tasks) + + @override + async def on_retriever_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + retrieval_run = self._errored_retrieval_run( + error=error, + run_id=run_id, + ) + tasks = [ + self._end_trace(retrieval_run), + self._on_retriever_error(retrieval_run), + ] + await asyncio.gather(*tasks) + + @override + async def on_retriever_end( + self, + documents: Sequence[Document], + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + retrieval_run = self._complete_retrieval_run( + documents=documents, + run_id=run_id, + ) + tasks = [self._end_trace(retrieval_run), self._on_retriever_end(retrieval_run)] + await asyncio.gather(*tasks) + + async def _on_run_create(self, run: Run) -> None: + """Process a run upon creation.""" + + async def _on_run_update(self, run: Run) -> None: + """Process a run upon update.""" + + async def _on_llm_start(self, run: Run) -> None: + """Process the LLM Run upon 
start.""" + + async def _on_llm_end(self, run: Run) -> None: + """Process the LLM Run.""" + + async def _on_llm_error(self, run: Run) -> None: + """Process the LLM Run upon error.""" + + async def _on_llm_new_token( + self, + run: Run, + token: str, + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]], + ) -> None: + """Process new LLM token.""" + + async def _on_chain_start(self, run: Run) -> None: + """Process the Chain Run upon start.""" + + async def _on_chain_end(self, run: Run) -> None: + """Process the Chain Run.""" + + async def _on_chain_error(self, run: Run) -> None: + """Process the Chain Run upon error.""" + + async def _on_tool_start(self, run: Run) -> None: + """Process the Tool Run upon start.""" + + async def _on_tool_end(self, run: Run) -> None: + """Process the Tool Run.""" + + async def _on_tool_error(self, run: Run) -> None: + """Process the Tool Run upon error.""" + + async def _on_chat_model_start(self, run: Run) -> None: + """Process the Chat Model Run upon start.""" + + async def _on_retriever_start(self, run: Run) -> None: + """Process the Retriever Run upon start.""" + + async def _on_retriever_end(self, run: Run) -> None: + """Process the Retriever Run.""" + + async def _on_retriever_error(self, run: Run) -> None: + """Process the Retriever Run upon error.""" diff --git a/venv/Lib/site-packages/langchain_core/tracers/context.py b/venv/Lib/site-packages/langchain_core/tracers/context.py new file mode 100644 index 00000000..466bc2f9 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/context.py @@ -0,0 +1,222 @@ +"""Context management for tracers.""" + +from __future__ import annotations + +from contextlib import contextmanager +from contextvars import ContextVar +from typing import ( + TYPE_CHECKING, + Any, + Literal, + Optional, + Union, + cast, +) +from uuid import UUID + +from langsmith import run_helpers as ls_rh +from langsmith import utils as ls_utils + +from langchain_core.tracers.langchain import LangChainTracer +from langchain_core.tracers.run_collector import RunCollectorCallbackHandler + +if TYPE_CHECKING: + from collections.abc import Generator + + from langsmith import Client as LangSmithClient + + from langchain_core.callbacks.base import BaseCallbackHandler, Callbacks + from langchain_core.callbacks.manager import AsyncCallbackManager, CallbackManager + from langchain_core.tracers.schemas import TracerSessionV1 + +# for backwards partial compatibility if this is imported by users but unused +tracing_callback_var: Any = None +tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( + "tracing_callback_v2", default=None +) +run_collector_var: ContextVar[Optional[RunCollectorCallbackHandler]] = ContextVar( + "run_collector", default=None +) + + +@contextmanager +def tracing_enabled( + session_name: str = "default", # noqa: ARG001 +) -> Generator[TracerSessionV1, None, None]: + """Throw an error because this has been replaced by tracing_v2_enabled.""" + msg = ( + "tracing_enabled is no longer supported. Please use tracing_enabled_v2 instead." + ) + raise RuntimeError(msg) + + +@contextmanager +def tracing_v2_enabled( + project_name: Optional[str] = None, + *, + example_id: Optional[Union[str, UUID]] = None, + tags: Optional[list[str]] = None, + client: Optional[LangSmithClient] = None, +) -> Generator[LangChainTracer, None, None]: + """Instruct LangChain to log all runs in context to LangSmith. + + Args: + project_name (str, optional): The name of the project. + Defaults to "default". 
+ example_id (str or UUID, optional): The ID of the example. + Defaults to None. + tags (list[str], optional): The tags to add to the run. + Defaults to None. + client (LangSmithClient, optional): The client of the langsmith. + Defaults to None. + + Yields: + LangChainTracer: The LangChain tracer. + + Example: + >>> with tracing_v2_enabled(): + ... # LangChain code will automatically be traced + + You can use this to fetch the LangSmith run URL: + + >>> with tracing_v2_enabled() as cb: + ... chain.invoke("foo") + ... run_url = cb.get_run_url() + """ + if isinstance(example_id, str): + example_id = UUID(example_id) + cb = LangChainTracer( + example_id=example_id, + project_name=project_name, + tags=tags, + client=client, + ) + token = tracing_v2_callback_var.set(cb) + try: + yield cb + finally: + tracing_v2_callback_var.reset(token) + + +@contextmanager +def collect_runs() -> Generator[RunCollectorCallbackHandler, None, None]: + """Collect all run traces in context. + + Yields: + run_collector.RunCollectorCallbackHandler: The run collector callback handler. + + Example: + >>> with collect_runs() as runs_cb: + chain.invoke("foo") + run_id = runs_cb.traced_runs[0].id + """ + cb = RunCollectorCallbackHandler() + token = run_collector_var.set(cb) + try: + yield cb + finally: + run_collector_var.reset(token) + + +def _get_trace_callbacks( + project_name: Optional[str] = None, + example_id: Optional[Union[str, UUID]] = None, + callback_manager: Optional[Union[CallbackManager, AsyncCallbackManager]] = None, +) -> Callbacks: + if _tracing_v2_is_enabled(): + project_name_ = project_name or _get_tracer_project() + tracer = tracing_v2_callback_var.get() or LangChainTracer( + project_name=project_name_, + example_id=example_id, + ) + if callback_manager is None: + cb = cast("Callbacks", [tracer]) + else: + if not any( + isinstance(handler, LangChainTracer) + for handler in callback_manager.handlers + ): + callback_manager.add_handler(tracer) + # If it already has a LangChainTracer, we don't need to add another one. + # this would likely mess up the trace hierarchy. + cb = callback_manager + else: + cb = None + return cb + + +def _tracing_v2_is_enabled() -> Union[bool, Literal["local"]]: + if tracing_v2_callback_var.get() is not None: + return True + return ls_utils.tracing_is_enabled() + + +def _get_tracer_project() -> str: + tracing_context = ls_rh.get_tracing_context() + run_tree = tracing_context["parent"] + if run_tree is None and tracing_context["project_name"] is not None: + return tracing_context["project_name"] + return getattr( + run_tree, + "session_name", + getattr( + # Note, if people are trying to nest @traceable functions and the + # tracing_v2_enabled context manager, this will likely mess up the + # tree structure. + tracing_v2_callback_var.get(), + "project", + # Have to set this to a string even though it always will return + # a string because `get_tracer_project` technically can return + # None, but only when a specific argument is supplied. + # Therefore, this just tricks the mypy type checker + str(ls_utils.get_tracer_project()), + ), + ) + + +_configure_hooks: list[ + tuple[ + ContextVar[Optional[BaseCallbackHandler]], + bool, + Optional[type[BaseCallbackHandler]], + Optional[str], + ] +] = [] + + +def register_configure_hook( + context_var: ContextVar[Optional[Any]], + inheritable: bool, # noqa: FBT001 + handle_class: Optional[type[BaseCallbackHandler]] = None, + env_var: Optional[str] = None, +) -> None: + """Register a configure hook. 
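+
+    Registered hooks are consulted when callback managers are configured, so a
+    handler can be enabled per context (via the context variable) or per
+    process (via the environment variable).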
+ + Args: + context_var (ContextVar[Optional[Any]]): The context variable. + inheritable (bool): Whether the context variable is inheritable. + handle_class (Optional[Type[BaseCallbackHandler]], optional): + The callback handler class. Defaults to None. + env_var (Optional[str], optional): The environment variable. Defaults to None. + + Raises: + ValueError: If env_var is set, handle_class must also be set + to a non-None value. + """ + if env_var is not None and handle_class is None: + msg = "If env_var is set, handle_class must also be set to a non-None value." + raise ValueError(msg) + + _configure_hooks.append( + ( + # the typings of ContextVar do not have the generic arg set as covariant + # so we have to cast it + cast("ContextVar[Optional[BaseCallbackHandler]]", context_var), + inheritable, + handle_class, + env_var, + ) + ) + + +register_configure_hook(run_collector_var, inheritable=False) diff --git a/venv/Lib/site-packages/langchain_core/tracers/core.py b/venv/Lib/site-packages/langchain_core/tracers/core.py new file mode 100644 index 00000000..8d1287ef --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/core.py @@ -0,0 +1,612 @@ +"""Utilities for the root listener.""" + +from __future__ import annotations + +import logging +import sys +import traceback +from abc import ABC, abstractmethod +from datetime import datetime, timezone +from typing import ( + TYPE_CHECKING, + Any, + Literal, + Optional, + Union, + cast, +) + +from langchain_core.exceptions import TracerException +from langchain_core.load import dumpd +from langchain_core.outputs import ( + ChatGeneration, + ChatGenerationChunk, + GenerationChunk, + LLMResult, +) +from langchain_core.tracers.schemas import Run + +if TYPE_CHECKING: + from collections.abc import Coroutine, Sequence + from uuid import UUID + + from tenacity import RetryCallState + + from langchain_core.documents import Document + from langchain_core.messages import BaseMessage + +logger = logging.getLogger(__name__) + +SCHEMA_FORMAT_TYPE = Literal["original", "streaming_events"] + + +class _TracerCore(ABC): + """Abstract base class for tracers. + + This class provides common methods, and reusable methods for tracers. + """ + + log_missing_parent: bool = True + + def __init__( + self, + *, + _schema_format: Literal[ + "original", "streaming_events", "original+chat" + ] = "original", + **kwargs: Any, + ) -> None: + """Initialize the tracer. + + Args: + _schema_format: Primarily changes how the inputs and outputs are + handled. For internal use only. This API will change. + + - 'original' is the format used by all current tracers. + This format is slightly inconsistent with respect to inputs + and outputs. + - 'streaming_events' is used for supporting streaming events, + for internal usage. It will likely change in the future, or + be deprecated entirely in favor of a dedicated async tracer + for streaming events. + - 'original+chat' is a format that is the same as 'original' + except it does NOT raise an attribute error on_chat_model_start + kwargs: Additional keyword arguments that will be passed to + the superclass. + """ + super().__init__(**kwargs) + self._schema_format = _schema_format # For internal use only API will change. + self.run_map: dict[str, Run] = {} + """Map of run ID to run. Cleared on run end.""" + self.order_map: dict[UUID, tuple[UUID, str]] = {} + """Map of run ID to (trace_id, dotted_order). 
Cleared when tracer GCed.""" + + @abstractmethod + def _persist_run(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: + """Persist a run.""" + + @staticmethod + def _add_child_run( + parent_run: Run, + child_run: Run, + ) -> None: + """Add child run to a chain run or tool run.""" + parent_run.child_runs.append(child_run) + + @staticmethod + def _get_stacktrace(error: BaseException) -> str: + """Get the stacktrace of the parent error.""" + msg = repr(error) + try: + if sys.version_info < (3, 10): + tb = traceback.format_exception( + error.__class__, error, error.__traceback__ + ) + else: + tb = traceback.format_exception(error) + return (msg + "\n\n".join(tb)).strip() + except: # noqa: E722 + return msg + + def _start_trace(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # type: ignore[return] + current_dotted_order = run.start_time.strftime("%Y%m%dT%H%M%S%fZ") + str(run.id) + if run.parent_run_id: + if parent := self.order_map.get(run.parent_run_id): + run.trace_id, run.dotted_order = parent + run.dotted_order += "." + current_dotted_order + if parent_run := self.run_map.get(str(run.parent_run_id)): + self._add_child_run(parent_run, run) + else: + if self.log_missing_parent: + logger.debug( + "Parent run %s not found for run %s. Treating as a root run.", + run.parent_run_id, + run.id, + ) + run.parent_run_id = None + run.trace_id = run.id + run.dotted_order = current_dotted_order + else: + run.trace_id = run.id + run.dotted_order = current_dotted_order + self.order_map[run.id] = (run.trace_id, run.dotted_order) + self.run_map[str(run.id)] = run + + def _get_run( + self, run_id: UUID, run_type: Union[str, set[str], None] = None + ) -> Run: + try: + run = self.run_map[str(run_id)] + except KeyError as exc: + msg = f"No indexed run ID {run_id}." + raise TracerException(msg) from exc + + if isinstance(run_type, str): + run_types: Union[set[str], None] = {run_type} + else: + run_types = run_type + if run_types is not None and run.run_type not in run_types: + msg = ( + f"Found {run.run_type} run at ID {run_id}, " + f"but expected {run_types} run." + ) + raise TracerException(msg) + return run + + def _create_chat_model_run( + self, + serialized: dict[str, Any], + messages: list[list[BaseMessage]], + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> Run: + """Create a chat model run.""" + if self._schema_format not in ("streaming_events", "original+chat"): + # Please keep this un-implemented for backwards compatibility. + # When it's unimplemented old tracers that use the "original" format + # fallback on the on_llm_start method implementation if they + # find that the on_chat_model_start method is not implemented. + # This can eventually be cleaned up by writing a "modern" tracer + # that has all the updated schema changes corresponding to + # the "streaming_events" format. + msg = ( + f"Chat model tracing is not supported in " + f"for {self._schema_format} format." + ) + raise NotImplementedError(msg) + start_time = datetime.now(timezone.utc) + if metadata: + kwargs.update({"metadata": metadata}) + return Run( + id=run_id, + parent_run_id=parent_run_id, + serialized=serialized, + inputs={"messages": [[dumpd(msg) for msg in batch] for batch in messages]}, + extra=kwargs, + events=[{"name": "start", "time": start_time}], + start_time=start_time, + # WARNING: This is valid ONLY for streaming_events. 
+ # run_type="llm" is what's used by virtually all tracers. + # Changing this to "chat_model" may break triggering on_llm_start + run_type="chat_model", + tags=tags, + name=name, # type: ignore[arg-type] + ) + + def _create_llm_run( + self, + serialized: dict[str, Any], + prompts: list[str], + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> Run: + """Create a llm run.""" + start_time = datetime.now(timezone.utc) + if metadata: + kwargs.update({"metadata": metadata}) + return Run( + id=run_id, + parent_run_id=parent_run_id, + serialized=serialized, + # TODO: Figure out how to expose kwargs here + inputs={"prompts": prompts}, + extra=kwargs, + events=[{"name": "start", "time": start_time}], + start_time=start_time, + run_type="llm", + tags=tags or [], + name=name, # type: ignore[arg-type] + ) + + def _llm_run_with_token_event( + self, + token: str, + run_id: UUID, + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + parent_run_id: Optional[UUID] = None, # noqa: ARG002 + ) -> Run: + """Append token event to LLM run and return the run.""" + llm_run = self._get_run(run_id, run_type={"llm", "chat_model"}) + event_kwargs: dict[str, Any] = {"token": token} + if chunk: + event_kwargs["chunk"] = chunk + llm_run.events.append( + { + "name": "new_token", + "time": datetime.now(timezone.utc), + "kwargs": event_kwargs, + }, + ) + return llm_run + + def _llm_run_with_retry_event( + self, + retry_state: RetryCallState, + run_id: UUID, + ) -> Run: + llm_run = self._get_run(run_id) + retry_d: dict[str, Any] = { + "slept": retry_state.idle_for, + "attempt": retry_state.attempt_number, + } + if retry_state.outcome is None: + retry_d["outcome"] = "N/A" + elif retry_state.outcome.failed: + retry_d["outcome"] = "failed" + exception = retry_state.outcome.exception() + retry_d["exception"] = str(exception) + retry_d["exception_type"] = exception.__class__.__name__ + else: + retry_d["outcome"] = "success" + retry_d["result"] = str(retry_state.outcome.result()) + llm_run.events.append( + { + "name": "retry", + "time": datetime.now(timezone.utc), + "kwargs": retry_d, + }, + ) + return llm_run + + def _complete_llm_run(self, response: LLMResult, run_id: UUID) -> Run: + llm_run = self._get_run(run_id, run_type={"llm", "chat_model"}) + if getattr(llm_run, "outputs", None) is None: + llm_run.outputs = {} + else: + llm_run.outputs = cast("dict[str, Any]", llm_run.outputs) + if not llm_run.extra.get("__omit_auto_outputs", False): + llm_run.outputs.update(response.model_dump()) + for i, generations in enumerate(response.generations): + for j, generation in enumerate(generations): + output_generation = llm_run.outputs["generations"][i][j] + if "message" in output_generation: + output_generation["message"] = dumpd( + cast("ChatGeneration", generation).message + ) + llm_run.end_time = datetime.now(timezone.utc) + llm_run.events.append({"name": "end", "time": llm_run.end_time}) + + return llm_run + + def _errored_llm_run( + self, error: BaseException, run_id: UUID, response: Optional[LLMResult] = None + ) -> Run: + llm_run = self._get_run(run_id, run_type={"llm", "chat_model"}) + llm_run.error = self._get_stacktrace(error) + if response: + if getattr(llm_run, "outputs", None) is None: + llm_run.outputs = {} + else: + llm_run.outputs = cast("dict[str, Any]", llm_run.outputs) + if not llm_run.extra.get("__omit_auto_outputs", False): + 
llm_run.outputs.update(response.model_dump()) + for i, generations in enumerate(response.generations): + for j, generation in enumerate(generations): + output_generation = llm_run.outputs["generations"][i][j] + if "message" in output_generation: + output_generation["message"] = dumpd( + cast("ChatGeneration", generation).message + ) + llm_run.end_time = datetime.now(timezone.utc) + llm_run.events.append({"name": "error", "time": llm_run.end_time}) + + return llm_run + + def _create_chain_run( + self, + serialized: dict[str, Any], + inputs: dict[str, Any], + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + run_type: Optional[str] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> Run: + """Create a chain Run.""" + start_time = datetime.now(timezone.utc) + if metadata: + kwargs.update({"metadata": metadata}) + return Run( + id=run_id, + parent_run_id=parent_run_id, + serialized=serialized, + inputs=self._get_chain_inputs(inputs), + extra=kwargs, + events=[{"name": "start", "time": start_time}], + start_time=start_time, + child_runs=[], + run_type=run_type or "chain", + name=name, # type: ignore[arg-type] + tags=tags or [], + ) + + def _get_chain_inputs(self, inputs: Any) -> Any: + """Get the inputs for a chain run.""" + if self._schema_format in ("original", "original+chat"): + return inputs if isinstance(inputs, dict) else {"input": inputs} + if self._schema_format == "streaming_events": + return { + "input": inputs, + } + msg = f"Invalid format: {self._schema_format}" + raise ValueError(msg) + + def _get_chain_outputs(self, outputs: Any) -> Any: + """Get the outputs for a chain run.""" + if self._schema_format in ("original", "original+chat"): + return outputs if isinstance(outputs, dict) else {"output": outputs} + if self._schema_format == "streaming_events": + return { + "output": outputs, + } + msg = f"Invalid format: {self._schema_format}" + raise ValueError(msg) + + def _complete_chain_run( + self, + outputs: dict[str, Any], + run_id: UUID, + inputs: Optional[dict[str, Any]] = None, + ) -> Run: + """Update a chain run with outputs and end time.""" + chain_run = self._get_run(run_id) + if getattr(chain_run, "outputs", None) is None: + chain_run.outputs = {} + if not chain_run.extra.get("__omit_auto_outputs", False): + cast("dict[str, Any]", chain_run.outputs).update( + self._get_chain_outputs(outputs) + ) + chain_run.end_time = datetime.now(timezone.utc) + chain_run.events.append({"name": "end", "time": chain_run.end_time}) + if inputs is not None: + chain_run.inputs = self._get_chain_inputs(inputs) + return chain_run + + def _errored_chain_run( + self, + error: BaseException, + inputs: Optional[dict[str, Any]], + run_id: UUID, + ) -> Run: + chain_run = self._get_run(run_id) + chain_run.error = self._get_stacktrace(error) + chain_run.end_time = datetime.now(timezone.utc) + chain_run.events.append({"name": "error", "time": chain_run.end_time}) + if inputs is not None: + chain_run.inputs = self._get_chain_inputs(inputs) + return chain_run + + def _create_tool_run( + self, + serialized: dict[str, Any], + input_str: str, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + inputs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> Run: + """Create a tool run.""" + start_time = datetime.now(timezone.utc) + if metadata: + kwargs.update({"metadata": metadata}) + + if 
self._schema_format in ("original", "original+chat"): + inputs = {"input": input_str} + elif self._schema_format == "streaming_events": + inputs = {"input": inputs} + else: + msg = f"Invalid format: {self._schema_format}" + raise AssertionError(msg) + + return Run( + id=run_id, + parent_run_id=parent_run_id, + serialized=serialized, + # Wrapping in dict since Run requires a dict object. + inputs=inputs, + extra=kwargs, + events=[{"name": "start", "time": start_time}], + start_time=start_time, + child_runs=[], + run_type="tool", + tags=tags or [], + name=name, # type: ignore[arg-type] + ) + + def _complete_tool_run( + self, + output: dict[str, Any], + run_id: UUID, + ) -> Run: + """Update a tool run with outputs and end time.""" + tool_run = self._get_run(run_id, run_type="tool") + if getattr(tool_run, "outputs", None) is None: + tool_run.outputs = {} + if not tool_run.extra.get("__omit_auto_outputs", False): + cast("dict[str, Any]", tool_run.outputs).update({"output": output}) + tool_run.end_time = datetime.now(timezone.utc) + tool_run.events.append({"name": "end", "time": tool_run.end_time}) + return tool_run + + def _errored_tool_run( + self, + error: BaseException, + run_id: UUID, + ) -> Run: + """Update a tool run with error and end time.""" + tool_run = self._get_run(run_id, run_type="tool") + tool_run.error = self._get_stacktrace(error) + tool_run.end_time = datetime.now(timezone.utc) + tool_run.events.append({"name": "error", "time": tool_run.end_time}) + return tool_run + + def _create_retrieval_run( + self, + serialized: dict[str, Any], + query: str, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> Run: + """Create a retrieval run.""" + start_time = datetime.now(timezone.utc) + if metadata: + kwargs.update({"metadata": metadata}) + return Run( + id=run_id, + name=name or "Retriever", + parent_run_id=parent_run_id, + serialized=serialized, + inputs={"query": query}, + extra=kwargs, + events=[{"name": "start", "time": start_time}], + start_time=start_time, + tags=tags, + child_runs=[], + run_type="retriever", + ) + + def _complete_retrieval_run( + self, + documents: Sequence[Document], + run_id: UUID, + ) -> Run: + """Update a retrieval run with outputs and end time.""" + retrieval_run = self._get_run(run_id, run_type="retriever") + if getattr(retrieval_run, "outputs", None) is None: + retrieval_run.outputs = {} + if not retrieval_run.extra.get("__omit_auto_outputs", False): + cast("dict[str, Any]", retrieval_run.outputs).update( + {"documents": documents} + ) + retrieval_run.end_time = datetime.now(timezone.utc) + retrieval_run.events.append({"name": "end", "time": retrieval_run.end_time}) + return retrieval_run + + def _errored_retrieval_run( + self, + error: BaseException, + run_id: UUID, + ) -> Run: + retrieval_run = self._get_run(run_id, run_type="retriever") + retrieval_run.error = self._get_stacktrace(error) + retrieval_run.end_time = datetime.now(timezone.utc) + retrieval_run.events.append({"name": "error", "time": retrieval_run.end_time}) + return retrieval_run + + def __deepcopy__(self, memo: dict) -> _TracerCore: + """Deepcopy the tracer.""" + return self + + def __copy__(self) -> _TracerCore: + """Copy the tracer.""" + return self + + def _end_trace(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """End a trace for a run.""" + return None + + def _on_run_create(self, run: Run) -> Union[None, 
Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process a run upon creation.""" + return None + + def _on_run_update(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process a run upon update.""" + return None + + def _on_llm_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the LLM Run upon start.""" + return None + + def _on_llm_new_token( + self, + run: Run, # noqa: ARG002 + token: str, # noqa: ARG002 + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]], # noqa: ARG002 + ) -> Union[None, Coroutine[Any, Any, None]]: + """Process new LLM token.""" + return None + + def _on_llm_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the LLM Run.""" + return None + + def _on_llm_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the LLM Run upon error.""" + return None + + def _on_chain_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the Chain Run upon start.""" + return None + + def _on_chain_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the Chain Run.""" + return None + + def _on_chain_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the Chain Run upon error.""" + return None + + def _on_tool_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the Tool Run upon start.""" + return None + + def _on_tool_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the Tool Run.""" + return None + + def _on_tool_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the Tool Run upon error.""" + return None + + def _on_chat_model_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the Chat Model Run upon start.""" + return None + + def _on_retriever_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the Retriever Run upon start.""" + return None + + def _on_retriever_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the Retriever Run.""" + return None + + def _on_retriever_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002 + """Process the Retriever Run upon error.""" + return None diff --git a/venv/Lib/site-packages/langchain_core/tracers/evaluation.py b/venv/Lib/site-packages/langchain_core/tracers/evaluation.py new file mode 100644 index 00000000..c7447ad4 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/evaluation.py @@ -0,0 +1,225 @@ +"""A tracer that runs evaluators over completed runs.""" + +from __future__ import annotations + +import logging +import threading +import weakref +from concurrent.futures import Future, ThreadPoolExecutor, wait +from typing import TYPE_CHECKING, Any, Optional, Union, cast +from uuid import UUID + +import langsmith +from langsmith.evaluation.evaluator import EvaluationResult, EvaluationResults + +from langchain_core.tracers import langchain as langchain_tracer +from langchain_core.tracers.base import BaseTracer +from langchain_core.tracers.context import tracing_v2_enabled +from langchain_core.tracers.langchain import _get_executor + +if TYPE_CHECKING: + from collections.abc import Sequence + + from langchain_core.tracers.schemas import Run + +logger = logging.getLogger(__name__) + +_TRACERS: 
weakref.WeakSet[EvaluatorCallbackHandler] = weakref.WeakSet() + + +def wait_for_all_evaluators() -> None: + """Wait for all tracers to finish.""" + for tracer in list(_TRACERS): + if tracer is not None: + tracer.wait_for_futures() + + +class EvaluatorCallbackHandler(BaseTracer): + """Tracer that runs a run evaluator whenever a run is persisted. + + Attributes: + example_id : Union[UUID, None] + The example ID associated with the runs. + client : Client + The LangSmith client instance used for evaluating the runs. + evaluators : Sequence[RunEvaluator] + The sequence of run evaluators to be executed. + executor : ThreadPoolExecutor + The thread pool executor used for running the evaluators. + futures : set[Future] + The set of futures representing the running evaluators. + skip_unfinished : bool + Whether to skip runs that are not finished or raised + an error. + project_name : Optional[str] + The LangSmith project name to be organize eval chain runs under. + """ + + name: str = "evaluator_callback_handler" + + def __init__( + self, + evaluators: Sequence[langsmith.RunEvaluator], + client: Optional[langsmith.Client] = None, + example_id: Optional[Union[UUID, str]] = None, + skip_unfinished: bool = True, # noqa: FBT001,FBT002 + project_name: Optional[str] = "evaluators", + max_concurrency: Optional[int] = None, + **kwargs: Any, + ) -> None: + """Create an EvaluatorCallbackHandler. + + Args: + evaluators : Sequence[RunEvaluator] + The run evaluators to apply to all top level runs. + client : LangSmith Client, optional + The LangSmith client instance to use for evaluating the runs. + If not specified, a new instance will be created. + example_id : Union[UUID, str], optional + The example ID to be associated with the runs. + skip_unfinished: bool, optional + Whether to skip unfinished runs. + project_name : str, optional + The LangSmith project name to be organize eval chain runs under. + max_concurrency : int, optional + The maximum number of concurrent evaluators to run. + """ + super().__init__(**kwargs) + self.example_id = ( + UUID(example_id) if isinstance(example_id, str) else example_id + ) + self.client = client or langchain_tracer.get_client() + self.evaluators = evaluators + if max_concurrency is None: + self.executor: Optional[ThreadPoolExecutor] = _get_executor() + elif max_concurrency > 0: + self.executor = ThreadPoolExecutor(max_workers=max_concurrency) + weakref.finalize( + self, + lambda: cast("ThreadPoolExecutor", self.executor).shutdown(wait=True), + ) + else: + self.executor = None + self.futures: weakref.WeakSet[Future] = weakref.WeakSet() + self.skip_unfinished = skip_unfinished + self.project_name = project_name + self.logged_eval_results: dict[tuple[str, str], list[EvaluationResult]] = {} + self.lock = threading.Lock() + _TRACERS.add(self) + + def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> None: + """Evaluate the run in the project. + + Args: + ---------- + run : Run + The run to be evaluated. + evaluator : RunEvaluator + The evaluator to use for evaluating the run. 
+ + """ + try: + if self.project_name is None: + eval_result = self.client.evaluate_run(run, evaluator) + eval_results = [eval_result] + with tracing_v2_enabled( + project_name=self.project_name, tags=["eval"], client=self.client + ) as cb: + reference_example = ( + self.client.read_example(run.reference_example_id) + if run.reference_example_id + else None + ) + evaluation_result = evaluator.evaluate_run( + # This is subclass, but getting errors for some reason + run, # type: ignore[arg-type] + example=reference_example, + ) + eval_results = self._log_evaluation_feedback( + evaluation_result, + run, + source_run_id=cb.latest_run.id if cb.latest_run else None, + ) + except Exception: + logger.exception( + "Error evaluating run %s with %s", + run.id, + evaluator.__class__.__name__, + ) + raise + example_id = str(run.reference_example_id) + with self.lock: + for res in eval_results: + run_id = str(getattr(res, "target_run_id", run.id)) + self.logged_eval_results.setdefault((run_id, example_id), []).append( + res + ) + + def _select_eval_results( + self, + results: Union[EvaluationResult, EvaluationResults], + ) -> list[EvaluationResult]: + if isinstance(results, EvaluationResult): + results_ = [results] + elif isinstance(results, dict) and "results" in results: + results_ = results["results"] + else: + msg = ( + f"Invalid evaluation result type {type(results)}." + " Expected EvaluationResult or EvaluationResults." + ) + raise TypeError(msg) + return results_ + + def _log_evaluation_feedback( + self, + evaluator_response: Union[EvaluationResult, EvaluationResults], + run: Run, + source_run_id: Optional[UUID] = None, + ) -> list[EvaluationResult]: + results = self._select_eval_results(evaluator_response) + for res in results: + source_info_: dict[str, Any] = {} + if res.evaluator_info: + source_info_ = {**res.evaluator_info, **source_info_} + run_id_ = getattr(res, "target_run_id", None) + if run_id_ is None: + run_id_ = run.id + self.client.create_feedback( + run_id_, + res.key, + score=res.score, + value=res.value, + comment=res.comment, + correction=res.correction, + source_info=source_info_, + source_run_id=res.source_run_id or source_run_id, + feedback_source_type=langsmith.schemas.FeedbackSourceType.MODEL, + ) + return results + + def _persist_run(self, run: Run) -> None: + """Run the evaluator on the run. + + Args: + ---------- + run : Run + The run to be evaluated. 
+ + """ + if self.skip_unfinished and not run.outputs: + logger.debug("Skipping unfinished run %s", run.id) + return + run_ = run.copy() + run_.reference_example_id = self.example_id + for evaluator in self.evaluators: + if self.executor is None: + self._evaluate_in_project(run_, evaluator) + else: + self.futures.add( + self.executor.submit(self._evaluate_in_project, run_, evaluator) + ) + + def wait_for_futures(self) -> None: + """Wait for all futures to complete.""" + wait(self.futures) diff --git a/venv/Lib/site-packages/langchain_core/tracers/event_stream.py b/venv/Lib/site-packages/langchain_core/tracers/event_stream.py new file mode 100644 index 00000000..6cc9a654 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/event_stream.py @@ -0,0 +1,1021 @@ +"""Internal tracer to power the event stream API.""" + +from __future__ import annotations + +import asyncio +import contextlib +import logging +from typing import ( + TYPE_CHECKING, + Any, + Optional, + TypeVar, + Union, + cast, +) +from uuid import UUID, uuid4 + +from typing_extensions import NotRequired, TypedDict, override + +from langchain_core.callbacks.base import AsyncCallbackHandler +from langchain_core.messages import AIMessageChunk, BaseMessage, BaseMessageChunk +from langchain_core.outputs import ( + ChatGenerationChunk, + GenerationChunk, + LLMResult, +) +from langchain_core.runnables.schema import ( + CustomStreamEvent, + EventData, + StandardStreamEvent, + StreamEvent, +) +from langchain_core.runnables.utils import ( + Input, + Output, + _RootEventFilter, +) +from langchain_core.tracers._streaming import _StreamingCallbackHandler +from langchain_core.tracers.memory_stream import _MemoryStream +from langchain_core.utils.aiter import aclosing, py_anext + +if TYPE_CHECKING: + from collections.abc import AsyncIterator, Iterator, Sequence + + from langchain_core.documents import Document + from langchain_core.runnables import Runnable, RunnableConfig + from langchain_core.tracers.log_stream import LogEntry + +logger = logging.getLogger(__name__) + + +class RunInfo(TypedDict): + """Information about a run. + + This is used to keep track of the metadata associated with a run. + + Parameters: + name: The name of the run. + tags: The tags associated with the run. + metadata: The metadata associated with the run. + run_type: The type of the run. + inputs: The inputs to the run. + parent_run_id: The ID of the parent run. + """ + + name: str + tags: list[str] + metadata: dict[str, Any] + run_type: str + inputs: NotRequired[Any] + parent_run_id: Optional[UUID] + + +def _assign_name(name: Optional[str], serialized: Optional[dict[str, Any]]) -> str: + """Assign a name to a run.""" + if name is not None: + return name + if serialized is not None: + if "name" in serialized: + return serialized["name"] + if "id" in serialized: + return serialized["id"][-1] + return "Unnamed" + + +T = TypeVar("T") + + +class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHandler): + """An implementation of an async callback handler for astream events.""" + + def __init__( + self, + *args: Any, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> None: + """Initialize the tracer.""" + super().__init__(*args, **kwargs) + # Map of run ID to run info. 
+ # the entry corresponding to a given run id is cleaned + # up when each corresponding run ends. + self.run_map: dict[UUID, RunInfo] = {} + # The callback event that corresponds to the end of a parent run + # may be invoked BEFORE the callback event that corresponds to the end + # of a child run, which results in clean up of run_map. + # So we keep track of the mapping between children and parent run IDs + # in a separate container. This container is GCed when the tracer is GCed. + self.parent_map: dict[UUID, Optional[UUID]] = {} + + self.is_tapped: dict[UUID, Any] = {} + + # Filter which events will be sent over the queue. + self.root_event_filter = _RootEventFilter( + include_names=include_names, + include_types=include_types, + include_tags=include_tags, + exclude_names=exclude_names, + exclude_types=exclude_types, + exclude_tags=exclude_tags, + ) + + loop = asyncio.get_event_loop() + memory_stream = _MemoryStream[StreamEvent](loop) + self.send_stream = memory_stream.get_send_stream() + self.receive_stream = memory_stream.get_receive_stream() + + def _get_parent_ids(self, run_id: UUID) -> list[str]: + """Get the parent IDs of a run (non-recursively) cast to strings.""" + parent_ids = [] + + while parent_id := self.parent_map.get(run_id): + str_parent_id = str(parent_id) + if str_parent_id in parent_ids: + msg = ( + f"Parent ID {parent_id} is already in the parent_ids list. " + f"This should never happen." + ) + raise AssertionError(msg) + parent_ids.append(str_parent_id) + run_id = parent_id + + # Return the parent IDs in reverse order, so that the first + # parent ID is the root and the last ID is the immediate parent. + return parent_ids[::-1] + + def _send(self, event: StreamEvent, event_type: str) -> None: + """Send an event to the stream.""" + if self.root_event_filter.include_event(event, event_type): + self.send_stream.send_nowait(event) + + def __aiter__(self) -> AsyncIterator[Any]: + """Iterate over the receive stream.""" + return self.receive_stream.__aiter__() + + async def tap_output_aiter( + self, run_id: UUID, output: AsyncIterator[T] + ) -> AsyncIterator[T]: + """Tap the output aiter. + + This method is used to tap the output of a Runnable that produces + an async iterator. It is used to generate stream events for the + output of the Runnable. + + Args: + run_id: The ID of the run. + output: The output of the Runnable. + + Yields: + T: The output of the Runnable. 
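+ Note: only the first consumer to tap a given run_id emits stream events; subsequent taps pass chunks through unchanged.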
+ """ + sentinel = object() + # atomic check and set + tap = self.is_tapped.setdefault(run_id, sentinel) + # wait for first chunk + first = await py_anext(output, default=sentinel) + if first is sentinel: + return + # get run info + run_info = self.run_map.get(run_id) + if run_info is None: + # run has finished, don't issue any stream events + yield cast("T", first) + return + if tap is sentinel: + # if we are the first to tap, issue stream events + event: StandardStreamEvent = { + "event": f"on_{run_info['run_type']}_stream", + "run_id": str(run_id), + "name": run_info["name"], + "tags": run_info["tags"], + "metadata": run_info["metadata"], + "data": {}, + "parent_ids": self._get_parent_ids(run_id), + } + self._send({**event, "data": {"chunk": first}}, run_info["run_type"]) + yield cast("T", first) + # consume the rest of the output + async for chunk in output: + self._send( + {**event, "data": {"chunk": chunk}}, + run_info["run_type"], + ) + yield chunk + else: + # otherwise just pass through + yield cast("T", first) + # consume the rest of the output + async for chunk in output: + yield chunk + + def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]: + """Tap the output aiter. + + Args: + run_id: The ID of the run. + output: The output of the Runnable. + + Yields: + T: The output of the Runnable. + """ + sentinel = object() + # atomic check and set + tap = self.is_tapped.setdefault(run_id, sentinel) + # wait for first chunk + first = next(output, sentinel) + if first is sentinel: + return + # get run info + run_info = self.run_map.get(run_id) + if run_info is None: + # run has finished, don't issue any stream events + yield cast("T", first) + return + if tap is sentinel: + # if we are the first to tap, issue stream events + event: StandardStreamEvent = { + "event": f"on_{run_info['run_type']}_stream", + "run_id": str(run_id), + "name": run_info["name"], + "tags": run_info["tags"], + "metadata": run_info["metadata"], + "data": {}, + "parent_ids": self._get_parent_ids(run_id), + } + self._send({**event, "data": {"chunk": first}}, run_info["run_type"]) + yield cast("T", first) + # consume the rest of the output + for chunk in output: + self._send( + {**event, "data": {"chunk": chunk}}, + run_info["run_type"], + ) + yield chunk + else: + # otherwise just pass through + yield cast("T", first) + # consume the rest of the output + for chunk in output: + yield chunk + + def _write_run_start_info( + self, + run_id: UUID, + *, + tags: Optional[list[str]], + metadata: Optional[dict[str, Any]], + parent_run_id: Optional[UUID], + name_: str, + run_type: str, + **kwargs: Any, + ) -> None: + """Update the run info.""" + info: RunInfo = { + "tags": tags or [], + "metadata": metadata or {}, + "name": name_, + "run_type": run_type, + "parent_run_id": parent_run_id, + } + + if "inputs" in kwargs: + # Handle inputs in a special case to allow inputs to be an + # optionally provided and distinguish between missing value + # vs. None value. 
+ info["inputs"] = kwargs["inputs"] + + self.run_map[run_id] = info + self.parent_map[run_id] = parent_run_id + + @override + async def on_chat_model_start( + self, + serialized: dict[str, Any], + messages: list[list[BaseMessage]], + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Start a trace for an LLM run.""" + name_ = _assign_name(name, serialized) + run_type = "chat_model" + + self._write_run_start_info( + run_id, + tags=tags, + metadata=metadata, + parent_run_id=parent_run_id, + name_=name_, + run_type=run_type, + inputs={"messages": messages}, + ) + + self._send( + { + "event": "on_chat_model_start", + "data": { + "input": {"messages": messages}, + }, + "name": name_, + "tags": tags or [], + "run_id": str(run_id), + "metadata": metadata or {}, + "parent_ids": self._get_parent_ids(run_id), + }, + run_type, + ) + + @override + async def on_llm_start( + self, + serialized: dict[str, Any], + prompts: list[str], + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Start a trace for an LLM run.""" + name_ = _assign_name(name, serialized) + run_type = "llm" + + self._write_run_start_info( + run_id, + tags=tags, + metadata=metadata, + parent_run_id=parent_run_id, + name_=name_, + run_type=run_type, + inputs={"prompts": prompts}, + ) + + self._send( + { + "event": "on_llm_start", + "data": { + "input": { + "prompts": prompts, + } + }, + "name": name_, + "tags": tags or [], + "run_id": str(run_id), + "metadata": metadata or {}, + "parent_ids": self._get_parent_ids(run_id), + }, + run_type, + ) + + @override + async def on_custom_event( + self, + name: str, + data: Any, + *, + run_id: UUID, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """Generate a custom astream event.""" + event = CustomStreamEvent( + event="on_custom_event", + run_id=str(run_id), + name=name, + tags=tags or [], + metadata=metadata or {}, + data=data, + parent_ids=self._get_parent_ids(run_id), + ) + self._send(event, name) + + @override + async def on_llm_new_token( + self, + token: str, + *, + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> None: + """Run on new LLM token. Only available when streaming is enabled.""" + run_info = self.run_map.get(run_id) + chunk_: Union[GenerationChunk, BaseMessageChunk] + + if run_info is None: + msg = f"Run ID {run_id} not found in run map." 
+ raise AssertionError(msg) + if self.is_tapped.get(run_id): + return + if run_info["run_type"] == "chat_model": + event = "on_chat_model_stream" + + if chunk is None: + chunk_ = AIMessageChunk(content=token) + else: + chunk_ = cast("ChatGenerationChunk", chunk).message + + elif run_info["run_type"] == "llm": + event = "on_llm_stream" + if chunk is None: + chunk_ = GenerationChunk(text=token) + else: + chunk_ = cast("GenerationChunk", chunk) + else: + msg = f"Unexpected run type: {run_info['run_type']}" + raise ValueError(msg) + + self._send( + { + "event": event, + "data": { + "chunk": chunk_, + }, + "run_id": str(run_id), + "name": run_info["name"], + "tags": run_info["tags"], + "metadata": run_info["metadata"], + "parent_ids": self._get_parent_ids(run_id), + }, + run_info["run_type"], + ) + + @override + async def on_llm_end( + self, response: LLMResult, *, run_id: UUID, **kwargs: Any + ) -> None: + """End a trace for an LLM run.""" + run_info = self.run_map.pop(run_id) + inputs_ = run_info["inputs"] + + generations: Union[list[list[GenerationChunk]], list[list[ChatGenerationChunk]]] + output: Union[dict, BaseMessage] = {} + + if run_info["run_type"] == "chat_model": + generations = cast("list[list[ChatGenerationChunk]]", response.generations) + for gen in generations: + if output != {}: + break + for chunk in gen: + output = chunk.message + break + + event = "on_chat_model_end" + elif run_info["run_type"] == "llm": + generations = cast("list[list[GenerationChunk]]", response.generations) + output = { + "generations": [ + [ + { + "text": chunk.text, + "generation_info": chunk.generation_info, + "type": chunk.type, + } + for chunk in gen + ] + for gen in generations + ], + "llm_output": response.llm_output, + } + event = "on_llm_end" + else: + msg = f"Unexpected run type: {run_info['run_type']}" + raise ValueError(msg) + + self._send( + { + "event": event, + "data": {"output": output, "input": inputs_}, + "run_id": str(run_id), + "name": run_info["name"], + "tags": run_info["tags"], + "metadata": run_info["metadata"], + "parent_ids": self._get_parent_ids(run_id), + }, + run_info["run_type"], + ) + + async def on_chain_start( + self, + serialized: dict[str, Any], + inputs: dict[str, Any], + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + run_type: Optional[str] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Start a trace for a chain run.""" + name_ = _assign_name(name, serialized) + run_type_ = run_type or "chain" + + data: EventData = {} + + # Work-around Runnable core code not sending input in some + # cases. 
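+ # A missing input currently surfaces as {"input": ""}; skip recording it so the matching end event (which may receive the final inputs) can supply it instead.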
+ if inputs != {"input": ""}: + data["input"] = inputs + kwargs["inputs"] = inputs + + self._write_run_start_info( + run_id, + tags=tags, + metadata=metadata, + parent_run_id=parent_run_id, + name_=name_, + run_type=run_type_, + **kwargs, + ) + + self._send( + { + "event": f"on_{run_type_}_start", + "data": data, + "name": name_, + "tags": tags or [], + "run_id": str(run_id), + "metadata": metadata or {}, + "parent_ids": self._get_parent_ids(run_id), + }, + run_type_, + ) + + @override + async def on_chain_end( + self, + outputs: dict[str, Any], + *, + run_id: UUID, + inputs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """End a trace for a chain run.""" + run_info = self.run_map.pop(run_id) + run_type = run_info["run_type"] + + event = f"on_{run_type}_end" + + inputs = inputs or run_info.get("inputs") or {} + + data: EventData = { + "output": outputs, + "input": inputs, + } + + self._send( + { + "event": event, + "data": data, + "run_id": str(run_id), + "name": run_info["name"], + "tags": run_info["tags"], + "metadata": run_info["metadata"], + "parent_ids": self._get_parent_ids(run_id), + }, + run_type, + ) + + @override + async def on_tool_start( + self, + serialized: dict[str, Any], + input_str: str, + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + inputs: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """Start a trace for a tool run.""" + name_ = _assign_name(name, serialized) + + self._write_run_start_info( + run_id, + tags=tags, + metadata=metadata, + parent_run_id=parent_run_id, + name_=name_, + run_type="tool", + inputs=inputs, + ) + + self._send( + { + "event": "on_tool_start", + "data": { + "input": inputs or {}, + }, + "name": name_, + "tags": tags or [], + "run_id": str(run_id), + "metadata": metadata or {}, + "parent_ids": self._get_parent_ids(run_id), + }, + "tool", + ) + + @override + async def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any) -> None: + """End a trace for a tool run.""" + run_info = self.run_map.pop(run_id) + if "inputs" not in run_info: + msg = ( + f"Run ID {run_id} is a tool call and is expected to have " + f"inputs associated with it." 
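+ # "inputs" is always recorded by on_tool_start, so its absence means the start callback never ran for this run id.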
+ ) + raise AssertionError(msg) + inputs = run_info["inputs"] + + self._send( + { + "event": "on_tool_end", + "data": { + "output": output, + "input": inputs, + }, + "run_id": str(run_id), + "name": run_info["name"], + "tags": run_info["tags"], + "metadata": run_info["metadata"], + "parent_ids": self._get_parent_ids(run_id), + }, + "tool", + ) + + @override + async def on_retriever_start( + self, + serialized: dict[str, Any], + query: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Run when Retriever starts running.""" + name_ = _assign_name(name, serialized) + run_type = "retriever" + + self._write_run_start_info( + run_id, + tags=tags, + metadata=metadata, + parent_run_id=parent_run_id, + name_=name_, + run_type=run_type, + inputs={"query": query}, + ) + + self._send( + { + "event": "on_retriever_start", + "data": { + "input": { + "query": query, + } + }, + "name": name_, + "tags": tags or [], + "run_id": str(run_id), + "metadata": metadata or {}, + "parent_ids": self._get_parent_ids(run_id), + }, + run_type, + ) + + @override + async def on_retriever_end( + self, documents: Sequence[Document], *, run_id: UUID, **kwargs: Any + ) -> None: + """Run when Retriever ends running.""" + run_info = self.run_map.pop(run_id) + + self._send( + { + "event": "on_retriever_end", + "data": { + "output": documents, + "input": run_info["inputs"], + }, + "run_id": str(run_id), + "name": run_info["name"], + "tags": run_info["tags"], + "metadata": run_info["metadata"], + "parent_ids": self._get_parent_ids(run_id), + }, + run_info["run_type"], + ) + + def __deepcopy__(self, memo: dict) -> _AstreamEventsCallbackHandler: + """Deepcopy the tracer.""" + return self + + def __copy__(self) -> _AstreamEventsCallbackHandler: + """Copy the tracer.""" + return self + + +async def _astream_events_implementation_v1( + runnable: Runnable[Input, Output], + input: Any, + config: Optional[RunnableConfig] = None, + *, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, +) -> AsyncIterator[StandardStreamEvent]: + from langchain_core.runnables import ensure_config + from langchain_core.runnables.utils import _RootEventFilter + from langchain_core.tracers.log_stream import ( + LogStreamCallbackHandler, + RunLog, + _astream_log_implementation, + ) + + stream = LogStreamCallbackHandler( + auto_close=False, + include_names=include_names, + include_types=include_types, + include_tags=include_tags, + exclude_names=exclude_names, + exclude_types=exclude_types, + exclude_tags=exclude_tags, + _schema_format="streaming_events", + ) + + run_log = RunLog(state=None) # type: ignore[arg-type] + encountered_start_event = False + + _root_event_filter = _RootEventFilter( + include_names=include_names, + include_types=include_types, + include_tags=include_tags, + exclude_names=exclude_names, + exclude_types=exclude_types, + exclude_tags=exclude_tags, + ) + + config = ensure_config(config) + root_tags = config.get("tags", []) + root_metadata = config.get("metadata", {}) + root_name = config.get("run_name", runnable.get_name()) + + async for log in _astream_log_implementation( + runnable, + input, + config=config, + stream=stream, + 
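+ # diff=True streams RunLogPatch objects, which are folded into run_log below.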
diff=True, + with_streamed_output_list=True, + **kwargs, + ): + run_log = run_log + log + + if not encountered_start_event: + # Yield the start event for the root runnable. + encountered_start_event = True + state = run_log.state.copy() + + event = StandardStreamEvent( + event=f"on_{state['type']}_start", + run_id=state["id"], + name=root_name, + tags=root_tags, + metadata=root_metadata, + data={ + "input": input, + }, + parent_ids=[], # Not supported in v1 + ) + + if _root_event_filter.include_event(event, state["type"]): + yield event + + paths = { + op["path"].split("/")[2] + for op in log.ops + if op["path"].startswith("/logs/") + } + # Elements in a set should be iterated in the same order + # as they were inserted in modern python versions. + for path in paths: + data: EventData = {} + log_entry: LogEntry = run_log.state["logs"][path] + if log_entry["end_time"] is None: + event_type = "stream" if log_entry["streamed_output"] else "start" + else: + event_type = "end" + + if event_type == "start": + # Include the inputs with the start event if they are available. + # Usually they will NOT be available for components that operate + # on streams, since those components stream the input and + # don't know its final value until the end of the stream. + inputs = log_entry["inputs"] + if inputs is not None: + data["input"] = inputs + + if event_type == "end": + inputs = log_entry["inputs"] + if inputs is not None: + data["input"] = inputs + + # None is a VALID output for an end event + data["output"] = log_entry["final_output"] + + if event_type == "stream": + num_chunks = len(log_entry["streamed_output"]) + if num_chunks != 1: + msg = ( + f"Expected exactly one chunk of streamed output, " + f"got {num_chunks} instead. This is impossible. " + f"Encountered in: {log_entry['name']}" + ) + raise AssertionError(msg) + + data = {"chunk": log_entry["streamed_output"][0]} + # Clean up the stream, we don't need it anymore. + # And this avoids duplicates as well! + log_entry["streamed_output"] = [] + + yield StandardStreamEvent( + event=f"on_{log_entry['type']}_{event_type}", + name=log_entry["name"], + run_id=log_entry["id"], + tags=log_entry["tags"], + metadata=log_entry["metadata"], + data=data, + parent_ids=[], # Not supported in v1 + ) + + # Finally, we take care of the streaming output from the root chain + # if there is any. + state = run_log.state + if state["streamed_output"]: + num_chunks = len(state["streamed_output"]) + if num_chunks != 1: + msg = ( + f"Expected exactly one chunk of streamed output, " + f"got {num_chunks} instead. This is impossible. " + f"Encountered in: {state['name']}" + ) + raise AssertionError(msg) + + data = {"chunk": state["streamed_output"][0]} + # Clean up the stream, we don't need it anymore. + state["streamed_output"] = [] + + event = StandardStreamEvent( + event=f"on_{state['type']}_stream", + run_id=state["id"], + tags=root_tags, + metadata=root_metadata, + name=root_name, + data=data, + parent_ids=[], # Not supported in v1 + ) + if _root_event_filter.include_event(event, state["type"]): + yield event + + state = run_log.state + + # Finally yield the end event for the root runnable. 
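+ # state["final_output"] holds the aggregated output collected by the log stream (typically the "+"-sum of the streamed chunks).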
+ event = StandardStreamEvent( + event=f"on_{state['type']}_end", + name=root_name, + run_id=state["id"], + tags=root_tags, + metadata=root_metadata, + data={ + "output": state["final_output"], + }, + parent_ids=[], # Not supported in v1 + ) + if _root_event_filter.include_event(event, state["type"]): + yield event + + +async def _astream_events_implementation_v2( + runnable: Runnable[Input, Output], + input: Any, + config: Optional[RunnableConfig] = None, + *, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + **kwargs: Any, +) -> AsyncIterator[StandardStreamEvent]: + """Implementation of the astream events API for V2 runnables.""" + from langchain_core.callbacks.base import BaseCallbackManager + from langchain_core.runnables import ensure_config + + event_streamer = _AstreamEventsCallbackHandler( + include_names=include_names, + include_types=include_types, + include_tags=include_tags, + exclude_names=exclude_names, + exclude_types=exclude_types, + exclude_tags=exclude_tags, + ) + + # Assign the stream handler to the config + config = ensure_config(config) + run_id = cast("UUID", config.setdefault("run_id", uuid4())) + callbacks = config.get("callbacks") + if callbacks is None: + config["callbacks"] = [event_streamer] + elif isinstance(callbacks, list): + config["callbacks"] = callbacks + [event_streamer] + elif isinstance(callbacks, BaseCallbackManager): + callbacks = callbacks.copy() + callbacks.add_handler(event_streamer, inherit=True) + config["callbacks"] = callbacks + else: + msg = ( + f"Unexpected type for callbacks: {callbacks}." + "Expected None, list or AsyncCallbackManager." + ) + raise ValueError(msg) + + # Call the runnable in streaming mode, + # add each chunk to the output stream + async def consume_astream() -> None: + try: + # if astream also calls tap_output_aiter this will be a no-op + async with aclosing(runnable.astream(input, config, **kwargs)) as stream: + async for _ in event_streamer.tap_output_aiter(run_id, stream): + # All the content will be picked up + pass + finally: + await event_streamer.send_stream.aclose() + + # Start the runnable in a task, so we can start consuming output + task = asyncio.create_task(consume_astream()) + + first_event_sent = False + first_event_run_id = None + + try: + async for event in event_streamer: + if not first_event_sent: + first_event_sent = True + # This is a work-around an issue where the inputs into the + # chain are not available until the entire input is consumed. + # As a temporary solution, we'll modify the input to be the input + # that was passed into the chain. + event["data"]["input"] = input + first_event_run_id = event["run_id"] + yield event + continue + + # If it's the end event corresponding to the root runnable + # we dont include the input in the event since it's guaranteed + # to be included in the first event. 
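+ # Matching on the recorded first_event_run_id ensures only the root run's end event is trimmed.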
+ if ( + event["run_id"] == first_event_run_id + and event["event"].endswith("_end") + and "input" in event["data"] + ): + del event["data"]["input"] + + yield event + except asyncio.CancelledError as exc: + # Cancel the task if it's still running + task.cancel(exc.args[0] if exc.args else None) + raise + finally: + # Cancel the task if it's still running + task.cancel() + # Await it anyway, to run any cleanup code, and propagate any exceptions + with contextlib.suppress(asyncio.CancelledError): + await task diff --git a/venv/Lib/site-packages/langchain_core/tracers/langchain.py b/venv/Lib/site-packages/langchain_core/tracers/langchain.py new file mode 100644 index 00000000..81029726 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/langchain.py @@ -0,0 +1,332 @@ +"""A Tracer implementation that records to LangChain endpoint.""" + +from __future__ import annotations + +import logging +import warnings +from concurrent.futures import ThreadPoolExecutor +from datetime import datetime, timezone +from typing import TYPE_CHECKING, Any, Optional, Union +from uuid import UUID + +from langsmith import Client +from langsmith import run_trees as rt +from langsmith import utils as ls_utils +from pydantic import PydanticDeprecationWarning +from tenacity import ( + Retrying, + retry_if_exception_type, + stop_after_attempt, + wait_exponential_jitter, +) +from typing_extensions import override + +from langchain_core.env import get_runtime_environment +from langchain_core.load import dumpd +from langchain_core.tracers.base import BaseTracer +from langchain_core.tracers.schemas import Run + +if TYPE_CHECKING: + from langchain_core.messages import BaseMessage + from langchain_core.outputs import ChatGenerationChunk, GenerationChunk + +logger = logging.getLogger(__name__) +_LOGGED = set() +_EXECUTOR: Optional[ThreadPoolExecutor] = None + + +def log_error_once(method: str, exception: Exception) -> None: + """Log an error once. + + Args: + method: The method that raised the exception. + exception: The exception that was raised. + """ + if (method, type(exception)) in _LOGGED: + return + _LOGGED.add((method, type(exception))) + logger.error(exception) + + +def wait_for_all_tracers() -> None: + """Wait for all tracers to finish.""" + if rt._CLIENT is not None: + rt._CLIENT.flush() + + +def get_client() -> Client: + """Get the client.""" + return rt.get_cached_client() + + +def _get_executor() -> ThreadPoolExecutor: + """Get the executor.""" + global _EXECUTOR # noqa: PLW0603 + if _EXECUTOR is None: + _EXECUTOR = ThreadPoolExecutor() + return _EXECUTOR + + +def _run_to_dict(run: Run, *, exclude_inputs: bool = False) -> dict: + # TODO: Update once langsmith moves to Pydantic V2 and we can swap run.dict for + # run.model_dump + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=PydanticDeprecationWarning) + + res = { + **run.dict(exclude={"child_runs", "inputs", "outputs"}), + "outputs": run.outputs, + } + if not exclude_inputs: + res["inputs"] = run.inputs + return res + + +class LangChainTracer(BaseTracer): + """Implementation of the SharedTracer that POSTS to the LangChain endpoint.""" + + run_inline = True + + def __init__( + self, + example_id: Optional[Union[UUID, str]] = None, + project_name: Optional[str] = None, + client: Optional[Client] = None, + tags: Optional[list[str]] = None, + **kwargs: Any, + ) -> None: + """Initialize the LangChain tracer. + + Args: + example_id: The example ID. + project_name: The project name. Defaults to the tracer project. 
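+ If unset, falls back to ls_utils.get_tracer_project().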
+ client: The client. Defaults to the global client. + tags: The tags. Defaults to an empty list. + kwargs: Additional keyword arguments. + """ + super().__init__(**kwargs) + self.example_id = ( + UUID(example_id) if isinstance(example_id, str) else example_id + ) + self.project_name = project_name or ls_utils.get_tracer_project() + self.client = client or get_client() + self.tags = tags or [] + self.latest_run: Optional[Run] = None + + def _start_trace(self, run: Run) -> None: + if self.project_name: + run.session_name = self.project_name + if self.tags is not None: + if run.tags: + run.tags = sorted(set(run.tags + self.tags)) + else: + run.tags = self.tags.copy() + + super()._start_trace(run) + if run._client is None: + run._client = self.client # type: ignore[misc] + + def on_chat_model_start( + self, + serialized: dict[str, Any], + messages: list[list[BaseMessage]], + *, + run_id: UUID, + tags: Optional[list[str]] = None, + parent_run_id: Optional[UUID] = None, + metadata: Optional[dict[str, Any]] = None, + name: Optional[str] = None, + **kwargs: Any, + ) -> Run: + """Start a trace for an LLM run. + + Args: + serialized: The serialized model. + messages: The messages. + run_id: The run ID. + tags: The tags. Defaults to None. + parent_run_id: The parent run ID. Defaults to None. + metadata: The metadata. Defaults to None. + name: The name. Defaults to None. + kwargs: Additional keyword arguments. + + Returns: + Run: The run. + """ + start_time = datetime.now(timezone.utc) + if metadata: + kwargs.update({"metadata": metadata}) + chat_model_run = Run( + id=run_id, + parent_run_id=parent_run_id, + serialized=serialized, + inputs={"messages": [[dumpd(msg) for msg in batch] for batch in messages]}, + extra=kwargs, + events=[{"name": "start", "time": start_time}], + start_time=start_time, + run_type="llm", + tags=tags, + name=name, # type: ignore[arg-type] + ) + self._start_trace(chat_model_run) + self._on_chat_model_start(chat_model_run) + return chat_model_run + + def _persist_run(self, run: Run) -> None: + # We want to free up more memory by avoiding keeping a reference to the + # whole nested run tree. + self.latest_run = Run.construct( + **run.dict(exclude={"child_runs", "inputs", "outputs"}), + inputs=run.inputs, + outputs=run.outputs, + ) + + def get_run_url(self) -> str: + """Get the LangSmith root run URL. + + Returns: + str: The LangSmith root run URL. + + Raises: + ValueError: If no traced run is found. + ValueError: If the run URL cannot be found. + """ + if not self.latest_run: + msg = "No traced run found." + raise ValueError(msg) + # If this is the first run in a project, the project may not yet be created. + # This method is only really useful for debugging flows, so we will assume + # there is some tolerace for latency. + for attempt in Retrying( + stop=stop_after_attempt(5), + wait=wait_exponential_jitter(), + retry=retry_if_exception_type(ls_utils.LangSmithError), + ): + with attempt: + return self.client.get_run_url( + run=self.latest_run, project_name=self.project_name + ) + msg = "Failed to get run URL." 
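+ # Retrying either returns from inside the loop or raises once its attempts are exhausted, so this trailing raise is effectively unreachable and mainly satisfies static analysis.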
+ raise ValueError(msg) + + def _get_tags(self, run: Run) -> list[str]: + """Get combined tags for a run.""" + tags = set(run.tags or []) + tags.update(self.tags or []) + return list(tags) + + def _persist_run_single(self, run: Run) -> None: + """Persist a run.""" + try: + run_dict = _run_to_dict(run) + run_dict["tags"] = self._get_tags(run) + extra = run_dict.get("extra", {}) + extra["runtime"] = get_runtime_environment() + run_dict["extra"] = extra + inputs_ = run_dict.get("inputs") + if inputs_ and (len(inputs_) > 1 or bool(next(iter(inputs_.values())))): + inputs_is_truthy = True + else: + inputs_is_truthy = False + run.extra["inputs_is_truthy"] = inputs_is_truthy + self.client.create_run(**run_dict, project_name=self.project_name) + except Exception as e: + # Errors are swallowed by the thread executor so we need to log them here + log_error_once("post", e) + raise + + def _update_run_single(self, run: Run) -> None: + """Update a run.""" + try: + exclude_inputs = run.extra.get("inputs_is_truthy", False) + run_dict = _run_to_dict(run, exclude_inputs=exclude_inputs) + run_dict["tags"] = self._get_tags(run) + self.client.update_run(run.id, **run_dict) + except Exception as e: + # Errors are swallowed by the thread executor so we need to log them here + log_error_once("patch", e) + raise + + def _on_llm_start(self, run: Run) -> None: + """Persist an LLM run.""" + if run.parent_run_id is None: + run.reference_example_id = self.example_id + self._persist_run_single(run) + + @override + def _llm_run_with_token_event( + self, + token: str, + run_id: UUID, + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + parent_run_id: Optional[UUID] = None, + ) -> Run: + """Append token event to LLM run and return the run.""" + return super()._llm_run_with_token_event( + # Drop the chunk; we don't need to save it + token, + run_id, + chunk=None, + parent_run_id=parent_run_id, + ) + + def _on_chat_model_start(self, run: Run) -> None: + """Persist an LLM run.""" + if run.parent_run_id is None: + run.reference_example_id = self.example_id + self._persist_run_single(run) + + def _on_llm_end(self, run: Run) -> None: + """Process the LLM Run.""" + self._update_run_single(run) + + def _on_llm_error(self, run: Run) -> None: + """Process the LLM Run upon error.""" + self._update_run_single(run) + + def _on_chain_start(self, run: Run) -> None: + """Process the Chain Run upon start.""" + if run.parent_run_id is None: + run.reference_example_id = self.example_id + self._persist_run_single(run) + + def _on_chain_end(self, run: Run) -> None: + """Process the Chain Run.""" + self._update_run_single(run) + + def _on_chain_error(self, run: Run) -> None: + """Process the Chain Run upon error.""" + self._update_run_single(run) + + def _on_tool_start(self, run: Run) -> None: + """Process the Tool Run upon start.""" + if run.parent_run_id is None: + run.reference_example_id = self.example_id + self._persist_run_single(run) + + def _on_tool_end(self, run: Run) -> None: + """Process the Tool Run.""" + self._update_run_single(run) + + def _on_tool_error(self, run: Run) -> None: + """Process the Tool Run upon error.""" + self._update_run_single(run) + + def _on_retriever_start(self, run: Run) -> None: + """Process the Retriever Run upon start.""" + if run.parent_run_id is None: + run.reference_example_id = self.example_id + self._persist_run_single(run) + + def _on_retriever_end(self, run: Run) -> None: + """Process the Retriever Run.""" + self._update_run_single(run) + + def _on_retriever_error(self, run: 
Run) -> None: + """Process the Retriever Run upon error.""" + self._update_run_single(run) + + def wait_for_futures(self) -> None: + """Wait for the given futures to complete.""" + if self.client is not None: + self.client.flush() diff --git a/venv/Lib/site-packages/langchain_core/tracers/langchain_v1.py b/venv/Lib/site-packages/langchain_core/tracers/langchain_v1.py new file mode 100644 index 00000000..63aab51e --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/langchain_v1.py @@ -0,0 +1,23 @@ +"""This module is deprecated and will be removed in a future release. + +Please use LangChainTracer instead. +""" + +from typing import Any + + +def get_headers(*args: Any, **kwargs: Any) -> Any: # noqa: ARG001 + """Throw an error because this has been replaced by get_headers.""" + msg = ( + "get_headers for LangChainTracerV1 is no longer supported. " + "Please use LangChainTracer instead." + ) + raise RuntimeError(msg) + + +def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any: # noqa: N802,ARG001 + """Throw an error because this has been replaced by LangChainTracer.""" + msg = ( + "LangChainTracerV1 is no longer supported. Please use LangChainTracer instead." + ) + raise RuntimeError(msg) diff --git a/venv/Lib/site-packages/langchain_core/tracers/log_stream.py b/venv/Lib/site-packages/langchain_core/tracers/log_stream.py new file mode 100644 index 00000000..311fc841 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/log_stream.py @@ -0,0 +1,702 @@ +"""Tracer that streams run logs to a stream.""" + +from __future__ import annotations + +import asyncio +import contextlib +import copy +import threading +from collections import defaultdict +from typing import ( + TYPE_CHECKING, + Any, + Literal, + Optional, + TypeVar, + Union, + overload, +) + +import jsonpatch # type: ignore[import-untyped] +from typing_extensions import NotRequired, TypedDict, override + +from langchain_core.load import dumps +from langchain_core.load.load import load +from langchain_core.outputs import ChatGenerationChunk, GenerationChunk +from langchain_core.runnables import Runnable, RunnableConfig, ensure_config +from langchain_core.tracers._streaming import _StreamingCallbackHandler +from langchain_core.tracers.base import BaseTracer +from langchain_core.tracers.memory_stream import _MemoryStream + +if TYPE_CHECKING: + from collections.abc import AsyncIterator, Iterator, Sequence + from uuid import UUID + + from langchain_core.runnables.utils import Input, Output + from langchain_core.tracers.schemas import Run + + +class LogEntry(TypedDict): + """A single entry in the run log.""" + + id: str + """ID of the sub-run.""" + name: str + """Name of the object being run.""" + type: str + """Type of the object being run, eg. prompt, chain, llm, etc.""" + tags: list[str] + """List of tags for the run.""" + metadata: dict[str, Any] + """Key-value pairs of metadata for the run.""" + start_time: str + """ISO-8601 timestamp of when the run started.""" + + streamed_output_str: list[str] + """List of LLM tokens streamed by this run, if applicable.""" + streamed_output: list[Any] + """List of output chunks streamed by this run, if available.""" + inputs: NotRequired[Optional[Any]] + """Inputs to this run. Not available currently via astream_log.""" + final_output: Optional[Any] + """Final output of this run. + + Only available after the run has finished successfully.""" + end_time: Optional[str] + """ISO-8601 timestamp of when the run ended. 
+ Only available after the run has finished.""" + + +class RunState(TypedDict): + """State of the run.""" + + id: str + """ID of the run.""" + streamed_output: list[Any] + """List of output chunks streamed by Runnable.stream()""" + final_output: Optional[Any] + """Final output of the run, usually the result of aggregating (`+`) streamed_output. + Updated throughout the run when supported by the Runnable.""" + + name: str + """Name of the object being run.""" + type: str + """Type of the object being run, eg. prompt, chain, llm, etc.""" + + # Do we want tags/metadata on the root run? Client kinda knows it in most situations + # tags: list[str] + + logs: dict[str, LogEntry] + """Map of run names to sub-runs. If filters were supplied, this list will + contain only the runs that matched the filters.""" + + +class RunLogPatch: + """Patch to the run log.""" + + ops: list[dict[str, Any]] + """List of jsonpatch operations, which describe how to create the run state + from an empty dict. This is the minimal representation of the log, designed to + be serialized as JSON and sent over the wire to reconstruct the log on the other + side. Reconstruction of the state can be done with any jsonpatch-compliant library, + see https://jsonpatch.com for more information.""" + + def __init__(self, *ops: dict[str, Any]) -> None: + """Create a RunLogPatch. + + Args: + *ops: The operations to apply to the state. + """ + self.ops = list(ops) + + def __add__(self, other: Union[RunLogPatch, Any]) -> RunLog: + """Combine two RunLogPatch instances.""" + if type(other) is RunLogPatch: + ops = self.ops + other.ops + state = jsonpatch.apply_patch(None, copy.deepcopy(ops)) + return RunLog(*ops, state=state) + + msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'" + raise TypeError(msg) + + @override + def __repr__(self) -> str: + from pprint import pformat + + # 1:-1 to get rid of the [] around the list + return f"RunLogPatch({pformat(self.ops)[1:-1]})" + + @override + def __eq__(self, other: object) -> bool: + return isinstance(other, RunLogPatch) and self.ops == other.ops + + +class RunLog(RunLogPatch): + """Run log.""" + + state: RunState + """Current state of the log, obtained from applying all ops in sequence.""" + + def __init__(self, *ops: dict[str, Any], state: RunState) -> None: + """Create a RunLog. + + Args: + *ops: The operations to apply to the state. + state: The initial state of the run log. 
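+ Unlike a bare RunLogPatch, a RunLog carries the materialized state alongside the ops that produced it.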
+ """ + super().__init__(*ops) + self.state = state + + def __add__(self, other: Union[RunLogPatch, Any]) -> RunLog: + """Combine two RunLogs.""" + if type(other) is RunLogPatch: + ops = self.ops + other.ops + state = jsonpatch.apply_patch(self.state, other.ops) + return RunLog(*ops, state=state) + + msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'" + raise TypeError(msg) + + @override + def __repr__(self) -> str: + from pprint import pformat + + return f"RunLog({pformat(self.state)})" + + @override + def __eq__(self, other: object) -> bool: + """Check if two RunLogs are equal.""" + # First compare that the state is the same + if not isinstance(other, RunLog): + return False + if self.state != other.state: + return False + # Then compare that the ops are the same + return super().__eq__(other) + + +T = TypeVar("T") + + +class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler): + """Tracer that streams run logs to a stream.""" + + def __init__( + self, + *, + auto_close: bool = True, + include_names: Optional[Sequence[str]] = None, + include_types: Optional[Sequence[str]] = None, + include_tags: Optional[Sequence[str]] = None, + exclude_names: Optional[Sequence[str]] = None, + exclude_types: Optional[Sequence[str]] = None, + exclude_tags: Optional[Sequence[str]] = None, + # Schema format is for internal use only. + _schema_format: Literal["original", "streaming_events"] = "streaming_events", + ) -> None: + """A tracer that streams run logs to a stream. + + Args: + auto_close: Whether to close the stream when the root run finishes. + include_names: Only include runs from Runnables with matching names. + include_types: Only include runs from Runnables with matching types. + include_tags: Only include runs from Runnables with matching tags. + exclude_names: Exclude runs from Runnables with matching names. + exclude_types: Exclude runs from Runnables with matching types. + exclude_tags: Exclude runs from Runnables with matching tags. + _schema_format: Primarily changes how the inputs and outputs are + handled. + **For internal use only. This API will change.** + - 'original' is the format used by all current tracers. + This format is slightly inconsistent with respect to inputs + and outputs. + - 'streaming_events' is used for supporting streaming events, + for internal usage. It will likely change in the future, or + be deprecated entirely in favor of a dedicated async tracer + for streaming events. + + Raises: + ValueError: If an invalid schema format is provided (internal use only). + """ + if _schema_format not in {"original", "streaming_events"}: + msg = ( + f"Invalid schema format: {_schema_format}. " + f"Expected one of 'original', 'streaming_events'." 
+ ) + raise ValueError(msg) + super().__init__(_schema_format=_schema_format) + + self.auto_close = auto_close + self.include_names = include_names + self.include_types = include_types + self.include_tags = include_tags + self.exclude_names = exclude_names + self.exclude_types = exclude_types + self.exclude_tags = exclude_tags + + loop = asyncio.get_event_loop() + memory_stream = _MemoryStream[RunLogPatch](loop) + self.lock = threading.Lock() + self.send_stream = memory_stream.get_send_stream() + self.receive_stream = memory_stream.get_receive_stream() + self._key_map_by_run_id: dict[UUID, str] = {} + self._counter_map_by_name: dict[str, int] = defaultdict(int) + self.root_id: Optional[UUID] = None + + def __aiter__(self) -> AsyncIterator[RunLogPatch]: + """Iterate over the stream of run logs.""" + return self.receive_stream.__aiter__() + + def send(self, *ops: dict[str, Any]) -> bool: + """Send a patch to the stream, return False if the stream is closed. + + Args: + *ops: The operations to send to the stream. + + Returns: + bool: True if the patch was sent successfully, False if the stream + is closed. + """ + # We will likely want to wrap this in try / except at some point + # to handle exceptions that might arise at run time. + # For now we'll let the exception bubble up, and always return + # True on the happy path. + self.send_stream.send_nowait(RunLogPatch(*ops)) + return True + + async def tap_output_aiter( + self, run_id: UUID, output: AsyncIterator[T] + ) -> AsyncIterator[T]: + """Tap an output async iterator to stream its values to the log. + + Args: + run_id: The ID of the run. + output: The output async iterator. + + Yields: + T: The output value. + """ + async for chunk in output: + # root run is handled in .astream_log() + # if we can't find the run silently ignore + # eg. because this run wasn't included in the log + if ( + run_id != self.root_id + and (key := self._key_map_by_run_id.get(run_id)) + and ( + not self.send( + { + "op": "add", + "path": f"/logs/{key}/streamed_output/-", + "value": chunk, + } + ) + ) + ): + break + + yield chunk + + def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]: + """Tap an output async iterator to stream its values to the log. + + Args: + run_id: The ID of the run. + output: The output iterator. + + Yields: + T: The output value. + """ + for chunk in output: + # root run is handled in .astream_log() + # if we can't find the run silently ignore + # eg. because this run wasn't included in the log + if ( + run_id != self.root_id + and (key := self._key_map_by_run_id.get(run_id)) + and ( + not self.send( + { + "op": "add", + "path": f"/logs/{key}/streamed_output/-", + "value": chunk, + } + ) + ) + ): + break + + yield chunk + + def include_run(self, run: Run) -> bool: + """Check if a Run should be included in the log. + + Args: + run: The Run to check. + + Returns: + bool: True if the run should be included, False otherwise. 
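+
+        Note:
+            The ``include_*`` filters are OR'd together (matching any one of
+            them suffices), after which the ``exclude_*`` filters are AND'd
+            in, so an exclusion always overrides a matching inclusion. The
+            root run itself is never included.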
+ """ + if run.id == self.root_id: + return False + + run_tags = run.tags or [] + + if ( + self.include_names is None + and self.include_types is None + and self.include_tags is None + ): + include = True + else: + include = False + + if self.include_names is not None: + include = include or run.name in self.include_names + if self.include_types is not None: + include = include or run.run_type in self.include_types + if self.include_tags is not None: + include = include or any(tag in self.include_tags for tag in run_tags) + + if self.exclude_names is not None: + include = include and run.name not in self.exclude_names + if self.exclude_types is not None: + include = include and run.run_type not in self.exclude_types + if self.exclude_tags is not None: + include = include and all(tag not in self.exclude_tags for tag in run_tags) + + return include + + def _persist_run(self, run: Run) -> None: + # This is a legacy method only called once for an entire run tree + # therefore not useful here + pass + + def _on_run_create(self, run: Run) -> None: + """Start a run.""" + if self.root_id is None: + self.root_id = run.id + if not self.send( + { + "op": "replace", + "path": "", + "value": RunState( + id=str(run.id), + streamed_output=[], + final_output=None, + logs={}, + name=run.name, + type=run.run_type, + ), + } + ): + return + + if not self.include_run(run): + return + + # Determine previous index, increment by 1 + with self.lock: + self._counter_map_by_name[run.name] += 1 + count = self._counter_map_by_name[run.name] + self._key_map_by_run_id[run.id] = ( + run.name if count == 1 else f"{run.name}:{count}" + ) + + entry = LogEntry( + id=str(run.id), + name=run.name, + type=run.run_type, + tags=run.tags or [], + metadata=(run.extra or {}).get("metadata", {}), + start_time=run.start_time.isoformat(timespec="milliseconds"), + streamed_output=[], + streamed_output_str=[], + final_output=None, + end_time=None, + ) + + if self._schema_format == "streaming_events": + # If using streaming events let's add inputs as well + entry["inputs"] = _get_standardized_inputs(run, self._schema_format) + + # Add the run to the stream + self.send( + { + "op": "add", + "path": f"/logs/{self._key_map_by_run_id[run.id]}", + "value": entry, + } + ) + + def _on_run_update(self, run: Run) -> None: + """Finish a run.""" + try: + index = self._key_map_by_run_id.get(run.id) + + if index is None: + return + + ops = [] + + if self._schema_format == "streaming_events": + ops.append( + { + "op": "replace", + "path": f"/logs/{index}/inputs", + "value": _get_standardized_inputs(run, self._schema_format), + } + ) + + ops.extend( + [ + # Replace 'inputs' with final inputs + # This is needed because in many cases the inputs are not + # known until after the run is finished and the entire + # input stream has been processed by the runnable. 
+                    {
+                        "op": "add",
+                        "path": f"/logs/{index}/final_output",
+                        # to undo the dumpd done by some runnables / tracers / etc.
+                        "value": _get_standardized_outputs(run, self._schema_format),
+                    },
+                    {
+                        "op": "add",
+                        "path": f"/logs/{index}/end_time",
+                        "value": run.end_time.isoformat(timespec="milliseconds")
+                        if run.end_time is not None
+                        else None,
+                    },
+                ]
+            )
+
+            self.send(*ops)
+        finally:
+            if run.id == self.root_id and self.auto_close:
+                self.send_stream.close()
+
+    def _on_llm_new_token(
+        self,
+        run: Run,
+        token: str,
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]],
+    ) -> None:
+        """Process new LLM token."""
+        index = self._key_map_by_run_id.get(run.id)
+
+        if index is None:
+            return
+
+        self.send(
+            {
+                "op": "add",
+                "path": f"/logs/{index}/streamed_output_str/-",
+                "value": token,
+            },
+            {
+                "op": "add",
+                "path": f"/logs/{index}/streamed_output/-",
+                "value": chunk.message
+                if isinstance(chunk, ChatGenerationChunk)
+                else token,
+            },
+        )
+
+
+def _get_standardized_inputs(
+    run: Run, schema_format: Literal["original", "streaming_events"]
+) -> Optional[dict[str, Any]]:
+    """Extract standardized inputs from a run.
+
+    Standardizes the inputs based on the type of the runnable used.
+
+    Args:
+        run: The run object.
+        schema_format: The schema format to use.
+
+    Returns:
+        A dict of inputs if known. By convention, inputs always represent an
+        invocation using named arguments.
+        None means that the input is not yet known.
+    """
+    if schema_format == "original":
+        msg = (
+            "Inputs are not assigned for the 'original' schema; the key is "
+            "dropped for now. When inputs are added to astream_log, they "
+            "should use the standardized schema for streaming events."
+        )
+        raise NotImplementedError(msg)
+
+    inputs = load(run.inputs)
+
+    if run.run_type in {"retriever", "llm", "chat_model"}:
+        return inputs
+
+    # new style chains
+    # These nest an additional 'input' key inside the 'inputs' to make sure
+    # the input is always a dict. We need to unpack and use the inner value.
+    inputs = inputs["input"]
+    # We should try to fix this in Runnables and callbacks/tracers
+    # Runnables should be using a None type here, not a placeholder
+    # dict.
+    if inputs == {"input": ""}:  # Workaround for Runnables not using None
+        # The input is not known, so we don't assign data['input']
+        return None
+    return inputs
+
+
+def _get_standardized_outputs(
+    run: Run, schema_format: Literal["original", "streaming_events", "original+chat"]
+) -> Optional[Any]:
+    """Extract standardized output from a run.
+
+    Standardizes the outputs based on the type of the runnable used.
+
+    Args:
+        run: The run object.
+        schema_format: The schema format to use.
+
+    Returns:
+        The output if one was returned, otherwise None.
+    """
+    outputs = load(run.outputs)
+    if schema_format == "original":
+        if run.run_type == "prompt" and "output" in outputs:
+            # These were previously dumped before the tracer.
+            # Now we needn't do anything to them.
+            return outputs["output"]
+        # Return the old schema, without standardizing anything
+        return outputs
+
+    if run.run_type in {"retriever", "llm", "chat_model"}:
+        return outputs
+
+    if isinstance(outputs, dict):
+        return outputs.get("output", None)
+
+    return None
+
+
+@overload
+def _astream_log_implementation(
+    runnable: Runnable[Input, Output],
+    input: Any,
+    config: Optional[RunnableConfig] = None,
+    *,
+    stream: LogStreamCallbackHandler,
+    diff: Literal[True] = True,
+    with_streamed_output_list: bool = True,
+    **kwargs: Any,
+) -> AsyncIterator[RunLogPatch]: ...
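+
+
+# The overload above and the one below encode the two streaming modes:
+# diff=True yields minimal RunLogPatch objects, while diff=False yields
+# cumulative RunLog snapshots.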
+
+
+@overload
+def _astream_log_implementation(
+    runnable: Runnable[Input, Output],
+    input: Any,
+    config: Optional[RunnableConfig] = None,
+    *,
+    stream: LogStreamCallbackHandler,
+    diff: Literal[False],
+    with_streamed_output_list: bool = True,
+    **kwargs: Any,
+) -> AsyncIterator[RunLog]: ...
+
+
+async def _astream_log_implementation(
+    runnable: Runnable[Input, Output],
+    input: Any,
+    config: Optional[RunnableConfig] = None,
+    *,
+    stream: LogStreamCallbackHandler,
+    diff: bool = True,
+    with_streamed_output_list: bool = True,
+    **kwargs: Any,
+) -> Union[AsyncIterator[RunLogPatch], AsyncIterator[RunLog]]:
+    """Implementation of astream_log for a given runnable.
+
+    The implementation has been factored out (at least temporarily) as both
+    astream_log and astream_events rely on it.
+    """
+    import jsonpatch
+
+    from langchain_core.callbacks.base import BaseCallbackManager
+    from langchain_core.tracers.log_stream import (
+        RunLog,
+        RunLogPatch,
+    )
+
+    # Assign the stream handler to the config
+    config = ensure_config(config)
+    callbacks = config.get("callbacks")
+    if callbacks is None:
+        config["callbacks"] = [stream]
+    elif isinstance(callbacks, list):
+        config["callbacks"] = callbacks + [stream]
+    elif isinstance(callbacks, BaseCallbackManager):
+        callbacks = callbacks.copy()
+        callbacks.add_handler(stream, inherit=True)
+        config["callbacks"] = callbacks
+    else:
+        msg = (
+            f"Unexpected type for callbacks: {callbacks}."
+            " Expected None, a list, or a BaseCallbackManager."
+        )
+        raise ValueError(msg)
+
+    # Call the runnable in streaming mode,
+    # add each chunk to the output stream
+    async def consume_astream() -> None:
+        try:
+            prev_final_output: Optional[Output] = None
+            final_output: Optional[Output] = None
+
+            async for chunk in runnable.astream(input, config, **kwargs):
+                prev_final_output = final_output
+                if final_output is None:
+                    final_output = chunk
+                else:
+                    try:
+                        final_output = final_output + chunk  # type: ignore[operator]
+                    except TypeError:
+                        prev_final_output = None
+                        final_output = chunk
+                patches: list[dict[str, Any]] = []
+                if with_streamed_output_list:
+                    patches.append(
+                        {
+                            "op": "add",
+                            "path": "/streamed_output/-",
+                            # chunk cannot be shared between
+                            # streamed_output and final_output
+                            # otherwise jsonpatch.apply will
+                            # modify both
+                            "value": copy.deepcopy(chunk),
+                        }
+                    )
+                patches.extend(
+                    {**op, "path": f"/final_output{op['path']}"}
+                    for op in jsonpatch.JsonPatch.from_diff(
+                        prev_final_output, final_output, dumps=dumps
+                    )
+                )
+                await stream.send_stream.send(RunLogPatch(*patches))
+        finally:
+            await stream.send_stream.aclose()
+
+    # Start the runnable in a task, so we can start consuming output
+    task = asyncio.create_task(consume_astream())
+    try:
+        # Yield each chunk from the output stream
+        if diff:
+            async for log in stream:
+                yield log
+        else:
+            state = RunLog(state=None)  # type: ignore[arg-type]
+            async for log in stream:
+                state = state + log
+                yield state
+    finally:
+        # Wait for the runnable to finish, if not cancelled (e.g. by break)
+        with contextlib.suppress(asyncio.CancelledError):
+            await task
diff --git a/venv/Lib/site-packages/langchain_core/tracers/memory_stream.py b/venv/Lib/site-packages/langchain_core/tracers/memory_stream.py
new file mode 100644
index 00000000..4a3d09a4
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/tracers/memory_stream.py
@@ -0,0 +1,145 @@
+"""Module implements a memory stream for communication between two co-routines.
+
+This module provides a way to communicate between two co-routines using a memory
+channel. The writer and reader can be in the same event loop or in different event
+loops. When they're in different event loops, they will also be in different
+threads.
+
+This is useful in situations where there's a mix of synchronous and asynchronous
+code.
+"""
+
+import asyncio
+from asyncio import AbstractEventLoop, Queue
+from collections.abc import AsyncIterator
+from typing import Generic, TypeVar
+
+T = TypeVar("T")
+
+
+class _SendStream(Generic[T]):
+    def __init__(
+        self, reader_loop: AbstractEventLoop, queue: Queue, done: object
+    ) -> None:
+        """Create a writer for the queue and done object.
+
+        Args:
+            reader_loop: The event loop to use for the writer. This loop will be used
+                to schedule the writes to the queue.
+            queue: The queue to write to. This is an asyncio queue.
+            done: Special sentinel object to indicate that the writer is done.
+        """
+        self._reader_loop = reader_loop
+        self._queue = queue
+        self._done = done
+
+    async def send(self, item: T) -> None:
+        """Schedule the item to be written to the queue using the original loop.
+
+        This is a coroutine that can be awaited.
+
+        Args:
+            item: The item to write to the queue.
+        """
+        return self.send_nowait(item)
+
+    def send_nowait(self, item: T) -> None:
+        """Schedule the item to be written to the queue using the original loop.
+
+        This is a non-blocking call.
+
+        Args:
+            item: The item to write to the queue.
+
+        Raises:
+            RuntimeError: If the event loop is already closed when trying to write
+                to the queue.
+        """
+        try:
+            self._reader_loop.call_soon_threadsafe(self._queue.put_nowait, item)
+        except RuntimeError:
+            if not self._reader_loop.is_closed():
+                raise  # Raise the exception if the loop is not closed
+
+    async def aclose(self) -> None:
+        """Asynchronously schedule the done sentinel to be written to the queue
+        using the original loop."""
+        return self.close()
+
+    def close(self) -> None:
+        """Schedule the done sentinel to be written to the queue using the
+        original loop.
+
+        This is a non-blocking call.
+
+        Raises:
+            RuntimeError: If the event loop is already closed when trying to write
+                to the queue.
+        """
+        try:
+            self._reader_loop.call_soon_threadsafe(self._queue.put_nowait, self._done)
+        except RuntimeError:
+            if not self._reader_loop.is_closed():
+                raise  # Raise the exception if the loop is not closed
+
+
+class _ReceiveStream(Generic[T]):
+    def __init__(self, queue: Queue, done: object) -> None:
+        """Create a reader for the queue and done object.
+
+        This reader should be used in the same loop as the loop that was passed
+        to the channel.
+        """
+        self._queue = queue
+        self._done = done
+        self._is_closed = False
+
+    async def __aiter__(self) -> AsyncIterator[T]:
+        while True:
+            item = await self._queue.get()
+            if item is self._done:
+                self._is_closed = True
+                break
+            yield item
+
+
+class _MemoryStream(Generic[T]):
+    """Stream data from a writer to a reader even if they are in different threads.
+
+    Uses asyncio queues to communicate between two co-routines. This implementation
+    should work even if the writer and reader co-routines belong to two different
+    event loops (e.g. one running from an event loop in the main thread
+    and the other running in an event loop in a background thread).
+
+    This implementation is meant to be used with a single writer and a single reader.
+
+    This is an internal implementation to LangChain. Please do not use it directly.
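+
+    A minimal illustrative sketch (internal API; for orientation only):
+
+    .. code-block:: python
+
+        loop = asyncio.get_event_loop()
+        channel = _MemoryStream[str](loop)
+        writer = channel.get_send_stream()
+        reader = channel.get_receive_stream()
+
+        writer.send_nowait("hello")
+        writer.close()
+
+        async def consume() -> None:
+            async for item in reader:  # terminates once close() is seen
+                print(item)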
+ """ + + def __init__(self, loop: AbstractEventLoop) -> None: + """Create a channel for the given loop. + + Args: + loop: The event loop to use for the channel. The reader is assumed + to be running in the same loop as the one passed to this constructor. + This will NOT be validated at run time. + """ + self._loop = loop + self._queue: asyncio.Queue = asyncio.Queue(maxsize=0) + self._done = object() + + def get_send_stream(self) -> _SendStream[T]: + """Get a writer for the channel. + + Returns: + _SendStream: The writer for the channel. + """ + return _SendStream[T]( + reader_loop=self._loop, queue=self._queue, done=self._done + ) + + def get_receive_stream(self) -> _ReceiveStream[T]: + """Get a reader for the channel. + + Returns: + _ReceiveStream: The reader for the channel. + """ + return _ReceiveStream[T](queue=self._queue, done=self._done) diff --git a/venv/Lib/site-packages/langchain_core/tracers/root_listeners.py b/venv/Lib/site-packages/langchain_core/tracers/root_listeners.py new file mode 100644 index 00000000..efe1a56e --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/root_listeners.py @@ -0,0 +1,146 @@ +"""Tracers that call listeners.""" + +from collections.abc import Awaitable +from typing import TYPE_CHECKING, Callable, Optional, Union + +from langchain_core.runnables.config import ( + RunnableConfig, + acall_func_with_variable_args, + call_func_with_variable_args, +) +from langchain_core.tracers.base import AsyncBaseTracer, BaseTracer +from langchain_core.tracers.schemas import Run + +if TYPE_CHECKING: + from uuid import UUID + +Listener = Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]] +AsyncListener = Union[ + Callable[[Run], Awaitable[None]], Callable[[Run, RunnableConfig], Awaitable[None]] +] + + +class RootListenersTracer(BaseTracer): + """Tracer that calls listeners on run start, end, and error. + + Parameters: + log_missing_parent: Whether to log a warning if the parent is missing. + Default is False. + config: The runnable config. + on_start: The listener to call on run start. + on_end: The listener to call on run end. + on_error: The listener to call on run error. + """ + + log_missing_parent = False + + def __init__( + self, + *, + config: RunnableConfig, + on_start: Optional[Listener], + on_end: Optional[Listener], + on_error: Optional[Listener], + ) -> None: + """Initialize the tracer. + + Args: + config: The runnable config. + on_start: The listener to call on run start. + on_end: The listener to call on run end. 
+ on_error: The listener to call on run error + """ + super().__init__(_schema_format="original+chat") + + self.config = config + self._arg_on_start = on_start + self._arg_on_end = on_end + self._arg_on_error = on_error + self.root_id: Optional[UUID] = None + + def _persist_run(self, run: Run) -> None: + # This is a legacy method only called once for an entire run tree + # therefore not useful here + pass + + def _on_run_create(self, run: Run) -> None: + if self.root_id is not None: + return + + self.root_id = run.id + + if self._arg_on_start is not None: + call_func_with_variable_args(self._arg_on_start, run, self.config) + + def _on_run_update(self, run: Run) -> None: + if run.id != self.root_id: + return + + if run.error is None: + if self._arg_on_end is not None: + call_func_with_variable_args(self._arg_on_end, run, self.config) + elif self._arg_on_error is not None: + call_func_with_variable_args(self._arg_on_error, run, self.config) + + +class AsyncRootListenersTracer(AsyncBaseTracer): + """Async Tracer that calls listeners on run start, end, and error. + + Parameters: + log_missing_parent: Whether to log a warning if the parent is missing. + Default is False. + config: The runnable config. + on_start: The listener to call on run start. + on_end: The listener to call on run end. + on_error: The listener to call on run error. + """ + + log_missing_parent = False + + def __init__( + self, + *, + config: RunnableConfig, + on_start: Optional[AsyncListener], + on_end: Optional[AsyncListener], + on_error: Optional[AsyncListener], + ) -> None: + """Initialize the tracer. + + Args: + config: The runnable config. + on_start: The listener to call on run start. + on_end: The listener to call on run end. + on_error: The listener to call on run error + """ + super().__init__(_schema_format="original+chat") + + self.config = config + self._arg_on_start = on_start + self._arg_on_end = on_end + self._arg_on_error = on_error + self.root_id: Optional[UUID] = None + + async def _persist_run(self, run: Run) -> None: + # This is a legacy method only called once for an entire run tree + # therefore not useful here + pass + + async def _on_run_create(self, run: Run) -> None: + if self.root_id is not None: + return + + self.root_id = run.id + + if self._arg_on_start is not None: + await acall_func_with_variable_args(self._arg_on_start, run, self.config) + + async def _on_run_update(self, run: Run) -> None: + if run.id != self.root_id: + return + + if run.error is None: + if self._arg_on_end is not None: + await acall_func_with_variable_args(self._arg_on_end, run, self.config) + elif self._arg_on_error is not None: + await acall_func_with_variable_args(self._arg_on_error, run, self.config) diff --git a/venv/Lib/site-packages/langchain_core/tracers/run_collector.py b/venv/Lib/site-packages/langchain_core/tracers/run_collector.py new file mode 100644 index 00000000..e7a7dfc7 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/run_collector.py @@ -0,0 +1,52 @@ +"""A tracer that collects all nested runs in a list.""" + +from typing import Any, Optional, Union +from uuid import UUID + +from langchain_core.tracers.base import BaseTracer +from langchain_core.tracers.schemas import Run + + +class RunCollectorCallbackHandler(BaseTracer): + """Tracer that collects all nested runs in a list. + + This tracer is useful for inspection and evaluation purposes. 
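+
+    A typical usage (illustrative sketch; ``chain`` and ``inputs`` are
+    placeholders) is to attach the handler via the ``callbacks`` of a run
+    config and read ``traced_runs`` afterwards::
+
+        collector = RunCollectorCallbackHandler()
+        chain.invoke(inputs, config={"callbacks": [collector]})
+        runs = collector.traced_runs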
+ + Parameters + ---------- + name : str, default="run-collector_callback_handler" + example_id : Optional[Union[UUID, str]], default=None + The ID of the example being traced. It can be either a UUID or a string. + """ + + name: str = "run-collector_callback_handler" + + def __init__( + self, example_id: Optional[Union[UUID, str]] = None, **kwargs: Any + ) -> None: + """Initialize the RunCollectorCallbackHandler. + + Parameters + ---------- + example_id : Optional[Union[UUID, str]], default=None + The ID of the example being traced. It can be either a UUID or a string. + **kwargs : Any + Additional keyword arguments + """ + super().__init__(**kwargs) + self.example_id = ( + UUID(example_id) if isinstance(example_id, str) else example_id + ) + self.traced_runs: list[Run] = [] + + def _persist_run(self, run: Run) -> None: + """Persist a run by adding it to the traced_runs list. + + Parameters + ---------- + run : Run + The run to be persisted. + """ + run_ = run.copy() + run_.reference_example_id = self.example_id + self.traced_runs.append(run_) diff --git a/venv/Lib/site-packages/langchain_core/tracers/schemas.py b/venv/Lib/site-packages/langchain_core/tracers/schemas.py new file mode 100644 index 00000000..e7ebad1d --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/schemas.py @@ -0,0 +1,139 @@ +"""Schemas for tracers.""" + +from __future__ import annotations + +import datetime +import warnings +from typing import Any, Optional +from uuid import UUID + +from langsmith import RunTree +from langsmith.schemas import RunTypeEnum as RunTypeEnumDep +from pydantic import PydanticDeprecationWarning +from pydantic.v1 import BaseModel as BaseModelV1 +from pydantic.v1 import Field as FieldV1 + +from langchain_core._api import deprecated + + +@deprecated("0.1.0", alternative="Use string instead.", removal="1.0") +def RunTypeEnum() -> type[RunTypeEnumDep]: # noqa: N802 + """RunTypeEnum.""" + warnings.warn( + "RunTypeEnum is deprecated. Please directly use a string instead" + " (e.g. 
'llm', 'chain', 'tool').", + DeprecationWarning, + stacklevel=2, + ) + return RunTypeEnumDep + + +@deprecated("0.1.0", removal="1.0") +class TracerSessionV1Base(BaseModelV1): + """Base class for TracerSessionV1.""" + + start_time: datetime.datetime = FieldV1(default_factory=datetime.datetime.utcnow) + name: Optional[str] = None + extra: Optional[dict[str, Any]] = None + + +@deprecated("0.1.0", removal="1.0") +class TracerSessionV1Create(TracerSessionV1Base): + """Create class for TracerSessionV1.""" + + +@deprecated("0.1.0", removal="1.0") +class TracerSessionV1(TracerSessionV1Base): + """TracerSessionV1 schema.""" + + id: int + + +@deprecated("0.1.0", removal="1.0") +class TracerSessionBase(TracerSessionV1Base): + """Base class for TracerSession.""" + + tenant_id: UUID + + +@deprecated("0.1.0", removal="1.0") +class TracerSession(TracerSessionBase): + """TracerSessionV1 schema for the V2 API.""" + + id: UUID + + +@deprecated("0.1.0", alternative="Run", removal="1.0") +class BaseRun(BaseModelV1): + """Base class for Run.""" + + uuid: str + parent_uuid: Optional[str] = None + start_time: datetime.datetime = FieldV1(default_factory=datetime.datetime.utcnow) + end_time: datetime.datetime = FieldV1(default_factory=datetime.datetime.utcnow) + extra: Optional[dict[str, Any]] = None + execution_order: int + child_execution_order: int + serialized: dict[str, Any] + session_id: int + error: Optional[str] = None + + +@deprecated("0.1.0", alternative="Run", removal="1.0") +class LLMRun(BaseRun): + """Class for LLMRun.""" + + prompts: list[str] + # Temporarily, remove but we will completely remove LLMRun + # response: Optional[LLMResult] = None + + +@deprecated("0.1.0", alternative="Run", removal="1.0") +class ChainRun(BaseRun): + """Class for ChainRun.""" + + inputs: dict[str, Any] + outputs: Optional[dict[str, Any]] = None + child_llm_runs: list[LLMRun] = FieldV1(default_factory=list) + child_chain_runs: list[ChainRun] = FieldV1(default_factory=list) + child_tool_runs: list[ToolRun] = FieldV1(default_factory=list) + + +@deprecated("0.1.0", alternative="Run", removal="1.0") +class ToolRun(BaseRun): + """Class for ToolRun.""" + + tool_input: str + output: Optional[str] = None + action: str + child_llm_runs: list[LLMRun] = FieldV1(default_factory=list) + child_chain_runs: list[ChainRun] = FieldV1(default_factory=list) + child_tool_runs: list[ToolRun] = FieldV1(default_factory=list) + + +# Begin V2 API Schemas + + +Run = RunTree # For backwards compatibility + +# TODO: Update once langsmith moves to Pydantic V2 and we can swap Run.model_rebuild +# for Run.update_forward_refs +with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=PydanticDeprecationWarning) + + ChainRun.update_forward_refs() + ToolRun.update_forward_refs() + +__all__ = [ + "BaseRun", + "ChainRun", + "LLMRun", + "Run", + "RunTypeEnum", + "ToolRun", + "TracerSession", + "TracerSessionBase", + "TracerSessionV1", + "TracerSessionV1Base", + "TracerSessionV1Create", +] diff --git a/venv/Lib/site-packages/langchain_core/tracers/stdout.py b/venv/Lib/site-packages/langchain_core/tracers/stdout.py new file mode 100644 index 00000000..2fe70ea3 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/tracers/stdout.py @@ -0,0 +1,204 @@ +"""Tracers that print to the console.""" + +import json +from typing import Any, Callable + +from langchain_core.tracers.base import BaseTracer +from langchain_core.tracers.schemas import Run +from langchain_core.utils.input import get_bolded_text, get_colored_text + +MILLISECONDS_IN_SECOND = 
1000 + + +def try_json_stringify(obj: Any, fallback: str) -> str: + """Try to stringify an object to JSON. + + Args: + obj: Object to stringify. + fallback: Fallback string to return if the object cannot be stringified. + + Returns: + A JSON string if the object can be stringified, otherwise the fallback string. + """ + try: + return json.dumps(obj, indent=2, ensure_ascii=False) + except Exception: + return fallback + + +def elapsed(run: Any) -> str: + """Get the elapsed time of a run. + + Args: + run: any object with a start_time and end_time attribute. + + Returns: + A string with the elapsed time in seconds or + milliseconds if time is less than a second. + + """ + elapsed_time = run.end_time - run.start_time + seconds = elapsed_time.total_seconds() + if seconds < 1: + return f"{seconds * MILLISECONDS_IN_SECOND:.0f}ms" + return f"{seconds:.2f}s" + + +class FunctionCallbackHandler(BaseTracer): + """Tracer that calls a function with a single str parameter.""" + + name: str = "function_callback_handler" + """The name of the tracer. This is used to identify the tracer in the logs. + Default is "function_callback_handler".""" + + def __init__(self, function: Callable[[str], None], **kwargs: Any) -> None: + """Create a FunctionCallbackHandler. + + Args: + function: The callback function to call. + """ + super().__init__(**kwargs) + self.function_callback = function + + def _persist_run(self, run: Run) -> None: + pass + + def get_parents(self, run: Run) -> list[Run]: + """Get the parents of a run. + + Args: + run: The run to get the parents of. + + Returns: + A list of parent runs. + """ + parents = [] + current_run = run + while current_run.parent_run_id: + parent = self.run_map.get(str(current_run.parent_run_id)) + if parent: + parents.append(parent) + current_run = parent + else: + break + return parents + + def get_breadcrumbs(self, run: Run) -> str: + """Get the breadcrumbs of a run. + + Args: + run: The run to get the breadcrumbs of. + + Returns: + A string with the breadcrumbs of the run. 
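+
+        For example, a chat-model run nested inside a chain might render as
+        ``"chain:RunnableSequence > llm:ChatOpenAI"`` (names illustrative).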
+ """ + parents = self.get_parents(run)[::-1] + return " > ".join( + f"{parent.run_type}:{parent.name}" + if i != len(parents) - 1 + else f"{parent.run_type}:{parent.name}" + for i, parent in enumerate(parents + [run]) + ) + + # logging methods + def _on_chain_start(self, run: Run) -> None: + crumbs = self.get_breadcrumbs(run) + run_type = run.run_type.capitalize() + self.function_callback( + f"{get_colored_text('[chain/start]', color='green')} " + + get_bolded_text(f"[{crumbs}] Entering {run_type} run with input:\n") + + f"{try_json_stringify(run.inputs, '[inputs]')}" + ) + + def _on_chain_end(self, run: Run) -> None: + crumbs = self.get_breadcrumbs(run) + run_type = run.run_type.capitalize() + self.function_callback( + f"{get_colored_text('[chain/end]', color='blue')} " + + get_bolded_text( + f"[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output:\n" + ) + + f"{try_json_stringify(run.outputs, '[outputs]')}" + ) + + def _on_chain_error(self, run: Run) -> None: + crumbs = self.get_breadcrumbs(run) + run_type = run.run_type.capitalize() + self.function_callback( + f"{get_colored_text('[chain/error]', color='red')} " + + get_bolded_text( + f"[{crumbs}] [{elapsed(run)}] {run_type} run errored with error:\n" + ) + + f"{try_json_stringify(run.error, '[error]')}" + ) + + def _on_llm_start(self, run: Run) -> None: + crumbs = self.get_breadcrumbs(run) + inputs = ( + {"prompts": [p.strip() for p in run.inputs["prompts"]]} + if "prompts" in run.inputs + else run.inputs + ) + self.function_callback( + f"{get_colored_text('[llm/start]', color='green')} " + + get_bolded_text(f"[{crumbs}] Entering LLM run with input:\n") + + f"{try_json_stringify(inputs, '[inputs]')}" + ) + + def _on_llm_end(self, run: Run) -> None: + crumbs = self.get_breadcrumbs(run) + self.function_callback( + f"{get_colored_text('[llm/end]', color='blue')} " + + get_bolded_text( + f"[{crumbs}] [{elapsed(run)}] Exiting LLM run with output:\n" + ) + + f"{try_json_stringify(run.outputs, '[response]')}" + ) + + def _on_llm_error(self, run: Run) -> None: + crumbs = self.get_breadcrumbs(run) + self.function_callback( + f"{get_colored_text('[llm/error]', color='red')} " + + get_bolded_text( + f"[{crumbs}] [{elapsed(run)}] LLM run errored with error:\n" + ) + + f"{try_json_stringify(run.error, '[error]')}" + ) + + def _on_tool_start(self, run: Run) -> None: + crumbs = self.get_breadcrumbs(run) + self.function_callback( + f"{get_colored_text('[tool/start]', color='green')} " + + get_bolded_text(f"[{crumbs}] Entering Tool run with input:\n") + + f'"{run.inputs["input"].strip()}"' + ) + + def _on_tool_end(self, run: Run) -> None: + crumbs = self.get_breadcrumbs(run) + if run.outputs: + self.function_callback( + f"{get_colored_text('[tool/end]', color='blue')} " + + get_bolded_text( + f"[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:\n" + ) + + f'"{str(run.outputs["output"]).strip()}"' + ) + + def _on_tool_error(self, run: Run) -> None: + crumbs = self.get_breadcrumbs(run) + self.function_callback( + f"{get_colored_text('[tool/error]', color='red')} " + + get_bolded_text(f"[{crumbs}] [{elapsed(run)}] ") + + f"Tool run errored with error:\n" + f"{run.error}" + ) + + +class ConsoleCallbackHandler(FunctionCallbackHandler): + """Tracer that prints to the console.""" + + name: str = "console_callback_handler" + + def __init__(self, **kwargs: Any) -> None: + """Create a ConsoleCallbackHandler.""" + super().__init__(function=print, **kwargs) diff --git a/venv/Lib/site-packages/langchain_core/utils/__init__.py 
b/venv/Lib/site-packages/langchain_core/utils/__init__.py new file mode 100644 index 00000000..52bd5c57 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/__init__.py @@ -0,0 +1,107 @@ +"""**Utility functions** for LangChain. + +These functions do not depend on any other LangChain module. +""" + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + # for type checking and IDE support, we include the imports here + # but we don't want to eagerly import them at runtime + from langchain_core.utils import image + from langchain_core.utils.aiter import abatch_iterate + from langchain_core.utils.env import get_from_dict_or_env, get_from_env + from langchain_core.utils.formatting import StrictFormatter, formatter + from langchain_core.utils.input import ( + get_bolded_text, + get_color_mapping, + get_colored_text, + print_text, + ) + from langchain_core.utils.iter import batch_iterate + from langchain_core.utils.loading import try_load_from_hub + from langchain_core.utils.pydantic import pre_init + from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value + from langchain_core.utils.utils import ( + build_extra_kwargs, + check_package_version, + convert_to_secret_str, + from_env, + get_pydantic_field_names, + guard_import, + mock_now, + raise_for_status_with_text, + secret_from_env, + xor_args, + ) + +__all__ = ( + "build_extra_kwargs", + "StrictFormatter", + "check_package_version", + "convert_to_secret_str", + "formatter", + "get_bolded_text", + "get_color_mapping", + "get_colored_text", + "get_pydantic_field_names", + "guard_import", + "mock_now", + "print_text", + "raise_for_status_with_text", + "xor_args", + "try_load_from_hub", + "image", + "get_from_env", + "get_from_dict_or_env", + "stringify_dict", + "comma_list", + "stringify_value", + "pre_init", + "batch_iterate", + "abatch_iterate", + "from_env", + "secret_from_env", +) + +_dynamic_imports = { + "image": "__module__", + "abatch_iterate": "aiter", + "get_from_dict_or_env": "env", + "get_from_env": "env", + "StrictFormatter": "formatting", + "formatter": "formatting", + "get_bolded_text": "input", + "get_color_mapping": "input", + "get_colored_text": "input", + "print_text": "input", + "batch_iterate": "iter", + "try_load_from_hub": "loading", + "pre_init": "pydantic", + "comma_list": "strings", + "stringify_dict": "strings", + "stringify_value": "strings", + "build_extra_kwargs": "utils", + "check_package_version": "utils", + "convert_to_secret_str": "utils", + "from_env": "utils", + "get_pydantic_field_names": "utils", + "guard_import": "utils", + "mock_now": "utils", + "secret_from_env": "utils", + "xor_args": "utils", + "raise_for_status_with_text": "utils", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..55b3fc9a Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/_merge.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/_merge.cpython-312.pyc new file 
mode 100644 index 00000000..4ccb0c2b Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/_merge.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/aiter.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/aiter.cpython-312.pyc new file mode 100644 index 00000000..923b26fd Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/aiter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/env.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/env.cpython-312.pyc new file mode 100644 index 00000000..c8425ea6 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/env.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/formatting.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/formatting.cpython-312.pyc new file mode 100644 index 00000000..1dabe33b Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/formatting.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/function_calling.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/function_calling.cpython-312.pyc new file mode 100644 index 00000000..39843881 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/function_calling.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/html.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/html.cpython-312.pyc new file mode 100644 index 00000000..690ac26e Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/html.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/image.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/image.cpython-312.pyc new file mode 100644 index 00000000..0110d0ae Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/image.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/input.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/input.cpython-312.pyc new file mode 100644 index 00000000..c757858c Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/input.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/interactive_env.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/interactive_env.cpython-312.pyc new file mode 100644 index 00000000..7d880c52 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/interactive_env.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/iter.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/iter.cpython-312.pyc new file mode 100644 index 00000000..ce320d61 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/iter.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/json.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/json.cpython-312.pyc new file mode 100644 index 00000000..50fd86cc Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/json.cpython-312.pyc differ diff --git 
a/venv/Lib/site-packages/langchain_core/utils/__pycache__/json_schema.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/json_schema.cpython-312.pyc new file mode 100644 index 00000000..1a76b3fc Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/json_schema.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/loading.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/loading.cpython-312.pyc new file mode 100644 index 00000000..8756128c Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/loading.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/mustache.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/mustache.cpython-312.pyc new file mode 100644 index 00000000..53f27573 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/mustache.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/pydantic.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/pydantic.cpython-312.pyc new file mode 100644 index 00000000..53ef5911 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/pydantic.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/strings.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/strings.cpython-312.pyc new file mode 100644 index 00000000..d6d7922f Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/strings.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/usage.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/usage.cpython-312.pyc new file mode 100644 index 00000000..9e704522 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/usage.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/utils/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..73d11d33 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/utils/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/utils/_merge.py b/venv/Lib/site-packages/langchain_core/utils/_merge.py new file mode 100644 index 00000000..09542b78 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/_merge.py @@ -0,0 +1,152 @@ +from __future__ import annotations + +from typing import Any, Optional + + +def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]: + r"""Merge dictionaries. + + Merge many dicts, handling specific scenarios where a key exists in both + dictionaries but has a value of None in 'left'. In such cases, the method uses the + value from 'right' for that key in the merged dictionary. + + Args: + left: The first dictionary to merge. + others: The other dictionaries to merge. + + Returns: + The merged dictionary. + + Raises: + TypeError: If the key exists in both dictionaries but has a different type. + TypeError: If the value has an unsupported type. 
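+
+    Merging is type-directed: string values are concatenated, dicts are
+    merged recursively, lists are combined via ``merge_lists`` (which matches
+    chunk entries by their ``"index"`` key when present), and equal values of
+    any other type are kept as-is.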
+ + Example: + If left = {"function_call": {"arguments": None}} and + right = {"function_call": {"arguments": "{\n"}} + then, after merging, for the key "function_call", + the value from 'right' is used, + resulting in merged = {"function_call": {"arguments": "{\n"}}. + """ + merged = left.copy() + for right in others: + for right_k, right_v in right.items(): + if right_k not in merged or right_v is not None and merged[right_k] is None: + merged[right_k] = right_v + elif right_v is None: + continue + elif type(merged[right_k]) is not type(right_v): + msg = ( + f'additional_kwargs["{right_k}"] already exists in this message,' + " but with a different type." + ) + raise TypeError(msg) + elif isinstance(merged[right_k], str): + # TODO: Add below special handling for 'type' key in 0.3 and remove + # merge_lists 'type' logic. + # + # if right_k == "type": + # if merged[right_k] == right_v: + # continue + # else: + # raise ValueError( + # "Unable to merge. Two different values seen for special " + # f"key 'type': {merged[right_k]} and {right_v}. 'type' " + # "should either occur once or have the same value across " + # "all dicts." + # ) + merged[right_k] += right_v + elif isinstance(merged[right_k], dict): + merged[right_k] = merge_dicts(merged[right_k], right_v) + elif isinstance(merged[right_k], list): + merged[right_k] = merge_lists(merged[right_k], right_v) + elif merged[right_k] == right_v: + continue + else: + msg = ( + f"Additional kwargs key {right_k} already exists in left dict and " + f"value has unsupported type {type(merged[right_k])}." + ) + raise TypeError(msg) + return merged + + +def merge_lists(left: Optional[list], *others: Optional[list]) -> Optional[list]: + """Add many lists, handling None. + + Args: + left: The first list to merge. + others: The other lists to merge. + + Returns: + The merged list. + """ + merged = left.copy() if left is not None else None + for other in others: + if other is None: + continue + if merged is None: + merged = other.copy() + else: + for e in other: + if isinstance(e, dict) and "index" in e and isinstance(e["index"], int): + to_merge = [ + i + for i, e_left in enumerate(merged) + if e_left["index"] == e["index"] + ] + if to_merge: + # TODO: Remove this once merge_dict is updated with special + # handling for 'type'. + new_e = ( + {k: v for k, v in e.items() if k != "type"} + if "type" in e + else e + ) + merged[to_merge[0]] = merge_dicts(merged[to_merge[0]], new_e) + else: + merged.append(e) + else: + merged.append(e) + return merged + + +def merge_obj(left: Any, right: Any) -> Any: + """Merge two objects. + + It handles specific scenarios where a key exists in both + dictionaries but has a value of None in 'left'. In such cases, the method uses the + value from 'right' for that key in the merged dictionary. + + Args: + left: The first object to merge. + right: The other object to merge. + + Returns: + The merged object. + + Raises: + TypeError: If the key exists in both dictionaries but has a different type. + ValueError: If the two objects cannot be merged. + """ + if left is None or right is None: + return left if left is not None else right + if type(left) is not type(right): + msg = ( + f"left and right are of different types. Left type: {type(left)}. Right " + f"type: {type(right)}." 
+ ) + raise TypeError(msg) + if isinstance(left, str): + return left + right + if isinstance(left, dict): + return merge_dicts(left, right) + if isinstance(left, list): + return merge_lists(left, right) + if left == right: + return left + msg = ( + f"Unable to merge {left=} and {right=}. Both must be of type str, dict, or " + f"list, or else be two equal objects." + ) + raise ValueError(msg) diff --git a/venv/Lib/site-packages/langchain_core/utils/aiter.py b/venv/Lib/site-packages/langchain_core/utils/aiter.py new file mode 100644 index 00000000..8931d207 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/aiter.py @@ -0,0 +1,329 @@ +"""Asynchronous iterator utilities. + +Adapted from +https://github.com/maxfischer2781/asyncstdlib/blob/master/asyncstdlib/itertools.py +MIT License. +""" + +from collections import deque +from collections.abc import ( + AsyncGenerator, + AsyncIterable, + AsyncIterator, + Awaitable, + Iterator, +) +from contextlib import AbstractAsyncContextManager +from types import TracebackType +from typing import ( + Any, + Callable, + Generic, + Optional, + TypeVar, + Union, + cast, + overload, +) + +from typing_extensions import override + +T = TypeVar("T") + +_no_default = object() + + +# https://github.com/python/cpython/blob/main/Lib/test/test_asyncgen.py#L54 +# before 3.10, the builtin anext() was not available +def py_anext( + iterator: AsyncIterator[T], default: Union[T, Any] = _no_default +) -> Awaitable[Union[T, None, Any]]: + """Pure-Python implementation of anext() for testing purposes. + + Closely matches the builtin anext() C implementation. + Can be used to compare the built-in implementation of the inner + coroutines machinery to C-implementation of __anext__() and send() + or throw() on the returned generator. + + Args: + iterator: The async iterator to advance. + default: The value to return if the iterator is exhausted. + If not provided, a StopAsyncIteration exception is raised. + + Returns: + The next value from the iterator, or the default value + if the iterator is exhausted. + + Raises: + TypeError: If the iterator is not an async iterator. + """ + try: + __anext__ = cast( + "Callable[[AsyncIterator[T]], Awaitable[T]]", type(iterator).__anext__ + ) + except AttributeError as e: + msg = f"{iterator!r} is not an async iterator" + raise TypeError(msg) from e + + if default is _no_default: + return __anext__(iterator) + + async def anext_impl() -> Union[T, Any]: + try: + # The C code is way more low-level than this, as it implements + # all methods of the iterator protocol. In this implementation + # we're relying on higher-level coroutine concepts, but that's + # exactly what we want -- crosstest pure-Python high-level + # implementation and low-level C anext() iterators. + return await __anext__(iterator) + except StopAsyncIteration: + return default + + return anext_impl() + + +class NoLock: + """Dummy lock that provides the proper interface but no protection.""" + + async def __aenter__(self) -> None: + """Do nothing.""" + + async def __aexit__( + self, + exc_type: Optional[type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> bool: + """Exception not handled.""" + return False + + +async def tee_peer( + iterator: AsyncIterator[T], + # the buffer specific to this peer + buffer: deque[T], + # the buffers of all peers, including our own + peers: list[deque[T]], + lock: AbstractAsyncContextManager[Any], +) -> AsyncGenerator[T, None]: + """An individual iterator of a :py:func:`~.tee`. 
+ + This function is a generator that yields items from the shared iterator + ``iterator``. It buffers items until the least advanced iterator has + yielded them as well. The buffer is shared with all other peers. + + Args: + iterator: The shared iterator. + buffer: The buffer for this peer. + peers: The buffers of all peers. + lock: The lock to synchronise access to the shared buffers. + + Yields: + The next item from the shared iterator. + """ + try: + while True: + if not buffer: + async with lock: + # Another peer produced an item while we were waiting for the lock. + # Proceed with the next loop iteration to yield the item. + if buffer: + continue + try: + item = await iterator.__anext__() + except StopAsyncIteration: + break + else: + # Append to all buffers, including our own. We'll fetch our + # item from the buffer again, instead of yielding it directly. + # This ensures the proper item ordering if any of our peers + # are fetching items concurrently. They may have buffered their + # item already. + for peer_buffer in peers: + peer_buffer.append(item) + yield buffer.popleft() + finally: + async with lock: + # this peer is done – remove its buffer + for idx, peer_buffer in enumerate(peers): # pragma: no branch + if peer_buffer is buffer: + peers.pop(idx) + break + # if we are the last peer, try and close the iterator + if not peers and hasattr(iterator, "aclose"): + await iterator.aclose() + + +class Tee(Generic[T]): + """Create ``n`` separate asynchronous iterators over ``iterable``. + + This splits a single ``iterable`` into multiple iterators, each providing + the same items in the same order. + All child iterators may advance separately but share the same items + from ``iterable`` -- when the most advanced iterator retrieves an item, + it is buffered until the least advanced iterator has yielded it as well. + A ``tee`` works lazily and can handle an infinite ``iterable``, provided + that all iterators advance. + + .. code-block:: python3 + + async def derivative(sensor_data): + previous, current = a.tee(sensor_data, n=2) + await a.anext(previous) # advance one iterator + return a.map(operator.sub, previous, current) + + Unlike :py:func:`itertools.tee`, :py:func:`~.tee` returns a custom type instead + of a :py:class:`tuple`. Like a tuple, it can be indexed, iterated and unpacked + to get the child iterators. In addition, its :py:meth:`~.tee.aclose` method + immediately closes all children, and it can be used in an ``async with`` context + for the same effect. + + If ``iterable`` is an iterator and read elsewhere, ``tee`` will *not* + provide these items. Also, ``tee`` must internally buffer each item until the + last iterator has yielded it; if the most and least advanced iterator differ + by most data, using a :py:class:`list` is more efficient (but not lazy). + + If the underlying iterable is concurrency safe (``anext`` may be awaited + concurrently) the resulting iterators are concurrency safe as well. Otherwise, + the iterators are safe if there is only ever one single "most advanced" iterator. + To enforce sequential use of ``anext``, provide a ``lock`` + - e.g. an :py:class:`asyncio.Lock` instance in an :py:mod:`asyncio` application - + and access is automatically synchronised. + """ + + def __init__( + self, + iterable: AsyncIterator[T], + n: int = 2, + *, + lock: Optional[AbstractAsyncContextManager[Any]] = None, + ): + """Create a ``tee``. + + Args: + iterable: The iterable to split. + n: The number of iterators to create. Defaults to 2. 
+            lock: The lock to synchronise access to the shared buffers.
+                Defaults to None.
+        """
+        self._iterator = iterable.__aiter__()  # before 3.10 aiter() doesn't exist
+        self._buffers: list[deque[T]] = [deque() for _ in range(n)]
+        self._children = tuple(
+            tee_peer(
+                iterator=self._iterator,
+                buffer=buffer,
+                peers=self._buffers,
+                lock=lock if lock is not None else NoLock(),
+            )
+            for buffer in self._buffers
+        )
+
+    def __len__(self) -> int:
+        """Return the number of child iterators."""
+        return len(self._children)
+
+    @overload
+    def __getitem__(self, item: int) -> AsyncIterator[T]: ...
+
+    @overload
+    def __getitem__(self, item: slice) -> tuple[AsyncIterator[T], ...]: ...
+
+    def __getitem__(
+        self, item: Union[int, slice]
+    ) -> Union[AsyncIterator[T], tuple[AsyncIterator[T], ...]]:
+        """Return the child iterator(s) for the given index or slice."""
+        return self._children[item]
+
+    def __iter__(self) -> Iterator[AsyncIterator[T]]:
+        """Iterate over the child iterators."""
+        yield from self._children
+
+    async def __aenter__(self) -> "Tee[T]":
+        """Return the tee instance."""
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: Optional[type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> bool:
+        """Close all child iterators."""
+        await self.aclose()
+        return False
+
+    async def aclose(self) -> None:
+        """Async close all child iterators."""
+        for child in self._children:
+            await child.aclose()
+
+
+atee = Tee
+
+
+class aclosing(AbstractAsyncContextManager):  # noqa: N801
+    """Async context manager to wrap an AsyncGenerator that has an ``aclose()``
+    method.
+
+    Code like this::
+
+        async with aclosing(<module>.fetch(<arguments>)) as agen:
+            <block>
+
+    is equivalent to this::
+
+        agen = <module>.fetch(<arguments>)
+        try:
+            <block>
+        finally:
+            await agen.aclose()
+
+    """
+
+    def __init__(
+        self, thing: Union[AsyncGenerator[Any, Any], AsyncIterator[Any]]
+    ) -> None:
+        """Create the context manager.
+
+        Args:
+            thing: The resource to wrap.
+        """
+        self.thing = thing
+
+    @override
+    async def __aenter__(self) -> Union[AsyncGenerator[Any, Any], AsyncIterator[Any]]:
+        return self.thing
+
+    @override
+    async def __aexit__(
+        self,
+        exc_type: Optional[type[BaseException]],
+        exc_value: Optional[BaseException],
+        traceback: Optional[TracebackType],
+    ) -> None:
+        if hasattr(self.thing, "aclose"):
+            await self.thing.aclose()
+
+
+async def abatch_iterate(
+    size: int, iterable: AsyncIterable[T]
+) -> AsyncIterator[list[T]]:
+    """Utility batching function for async iterables.
+
+    Args:
+        size: The size of the batch.
+        iterable: The async iterable to batch.
+
+    Yields:
+        Batches of the requested size (the final batch may be smaller).
+    """
+    batch: list[T] = []
+    async for element in iterable:
+        if len(batch) < size:
+            batch.append(element)
+
+        if len(batch) >= size:
+            yield batch
+            batch = []
+
+    if batch:
+        yield batch
diff --git a/venv/Lib/site-packages/langchain_core/utils/env.py b/venv/Lib/site-packages/langchain_core/utils/env.py
new file mode 100644
index 00000000..51c5b9f2
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/utils/env.py
@@ -0,0 +1,82 @@
+"""Utilities for environment variables."""
+
+from __future__ import annotations
+
+import os
+from typing import Any, Optional, Union
+
+
+def env_var_is_set(env_var: str) -> bool:
+    """Check if an environment variable is set.
+
+    Args:
+        env_var (str): The name of the environment variable.
+
+    Returns:
+        bool: True if the environment variable is set, False otherwise.
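+
+    Note that the values ``""``, ``"0"``, ``"false"`` and ``"False"`` count as
+    unset: for example, with ``FOO=0`` in the environment,
+    ``env_var_is_set("FOO")`` returns ``False``.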
+ """ + return env_var in os.environ and os.environ[env_var] not in ( + "", + "0", + "false", + "False", + ) + + +def get_from_dict_or_env( + data: dict[str, Any], + key: Union[str, list[str]], + env_key: str, + default: Optional[str] = None, +) -> str: + """Get a value from a dictionary or an environment variable. + + Args: + data: The dictionary to look up the key in. + key: The key to look up in the dictionary. This can be a list of keys to try + in order. + env_key: The environment variable to look up if the key is not + in the dictionary. + default: The default value to return if the key is not in the dictionary + or the environment. Defaults to None. + """ + if isinstance(key, (list, tuple)): + for k in key: + if k in data and data[k]: + return data[k] + + if isinstance(key, str) and key in data and data[key]: + return data[key] + + key_for_err = key[0] if isinstance(key, (list, tuple)) else key + + return get_from_env(key_for_err, env_key, default=default) + + +def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str: + """Get a value from a dictionary or an environment variable. + + Args: + key: The key to look up in the dictionary. + env_key: The environment variable to look up if the key is not + in the dictionary. + default: The default value to return if the key is not in the dictionary + or the environment. Defaults to None. + + Returns: + str: The value of the key. + + Raises: + ValueError: If the key is not in the dictionary and no default value is + provided or if the environment variable is not set. + """ + if env_key in os.environ and os.environ[env_key]: + return os.environ[env_key] + if default is not None: + return default + msg = ( + f"Did not find {key}, please add an environment variable" + f" `{env_key}` which contains it, or pass" + f" `{key}` as a named parameter." + ) + raise ValueError(msg) diff --git a/venv/Lib/site-packages/langchain_core/utils/formatting.py b/venv/Lib/site-packages/langchain_core/utils/formatting.py new file mode 100644 index 00000000..d2313c5c --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/formatting.py @@ -0,0 +1,51 @@ +"""Utilities for formatting strings.""" + +from collections.abc import Mapping, Sequence +from string import Formatter +from typing import Any + + +class StrictFormatter(Formatter): + """Formatter that checks for extra keys.""" + + def vformat( + self, format_string: str, args: Sequence, kwargs: Mapping[str, Any] + ) -> str: + """Check that no arguments are provided. + + Args: + format_string: The format string. + args: The arguments. + kwargs: The keyword arguments. + + Returns: + The formatted string. + + Raises: + ValueError: If any arguments are provided. + """ + if len(args) > 0: + msg = ( + "No arguments should be provided, " + "everything should be passed as keyword arguments." + ) + raise ValueError(msg) + return super().vformat(format_string, args, kwargs) + + def validate_input_variables( + self, format_string: str, input_variables: list[str] + ) -> None: + """Check that all input variables are used in the format string. + + Args: + format_string: The format string. + input_variables: The input variables. + + Raises: + ValueError: If any input variables are not used in the format string. 
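+
+        Example:
+            A sketch with illustrative placeholder names:
+
+            .. code-block:: python
+
+                formatter = StrictFormatter()
+                # Passes silently: every placeholder is covered.
+                formatter.validate_input_variables(
+                    "{adjective} {noun}", ["adjective", "noun"]
+                )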
+ """ + dummy_inputs = dict.fromkeys(input_variables, "foo") + super().format(format_string, **dummy_inputs) + + +formatter = StrictFormatter() diff --git a/venv/Lib/site-packages/langchain_core/utils/function_calling.py b/venv/Lib/site-packages/langchain_core/utils/function_calling.py new file mode 100644 index 00000000..bfc70df1 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/function_calling.py @@ -0,0 +1,797 @@ +"""Methods for creating function specs in the style of OpenAI Functions.""" + +from __future__ import annotations + +import collections +import inspect +import logging +import types +import typing +import uuid +from typing import ( + TYPE_CHECKING, + Annotated, + Any, + Callable, + Literal, + Optional, + Union, + cast, +) + +from pydantic import BaseModel +from pydantic.v1 import BaseModel as BaseModelV1 +from typing_extensions import TypedDict, get_args, get_origin, is_typeddict + +from langchain_core._api import beta, deprecated +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage +from langchain_core.utils.json_schema import dereference_refs +from langchain_core.utils.pydantic import is_basemodel_subclass + +if TYPE_CHECKING: + from langchain_core.tools import BaseTool + +logger = logging.getLogger(__name__) + +PYTHON_TO_JSON_TYPES = { + "str": "string", + "int": "integer", + "float": "number", + "bool": "boolean", +} + + +class FunctionDescription(TypedDict): + """Representation of a callable function to send to an LLM.""" + + name: str + """The name of the function.""" + description: str + """A description of the function.""" + parameters: dict + """The parameters of the function.""" + + +class ToolDescription(TypedDict): + """Representation of a callable function to the OpenAI API.""" + + type: Literal["function"] + """The type of the tool.""" + function: FunctionDescription + """The function description.""" + + +def _rm_titles(kv: dict, prev_key: str = "") -> dict: + """Recursively removes "title" fields from a JSON schema dictionary. + + Remove "title" fields from the input JSON schema dictionary, + except when a "title" appears within a property definition under "properties". + + Args: + kv (dict): The input JSON schema as a dictionary. + prev_key (str): The key from the parent dictionary, used to identify context. + + Returns: + dict: A new dictionary with appropriate "title" fields removed. + """ + new_kv = {} + + for k, v in kv.items(): + if k == "title": + # If the value is a nested dict and part of a property under "properties", + # preserve the title but continue recursion + if isinstance(v, dict) and prev_key == "properties": + new_kv[k] = _rm_titles(v, k) + else: + # Otherwise, remove this "title" key + continue + elif isinstance(v, dict): + # Recurse into nested dictionaries + new_kv[k] = _rm_titles(v, k) + else: + # Leave non-dict values untouched + new_kv[k] = v + + return new_kv + + +def _convert_json_schema_to_openai_function( + schema: dict, + *, + name: Optional[str] = None, + description: Optional[str] = None, + rm_titles: bool = True, +) -> FunctionDescription: + """Converts a Pydantic model to a function description for the OpenAI API. + + Args: + schema: The JSON schema to convert. + name: The name of the function. If not provided, the title of the schema will be + used. + description: The description of the function. If not provided, the description + of the schema will be used. + rm_titles: Whether to remove titles from the schema. Defaults to True. + + Returns: + The function description. 
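+
+    Example:
+        A sketch with a hand-written schema (names are illustrative):
+
+        .. code-block:: python
+
+            schema = {
+                "title": "add",
+                "description": "Add two integers.",
+                "type": "object",
+                "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
+                "required": ["a", "b"],
+            }
+            fn = _convert_json_schema_to_openai_function(schema)
+            # fn["name"] == "add"; the remaining schema (titles removed)
+            # becomes fn["parameters"].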
+ """ + schema = dereference_refs(schema) + if "definitions" in schema: # pydantic 1 + schema.pop("definitions", None) + if "$defs" in schema: # pydantic 2 + schema.pop("$defs", None) + title = schema.pop("title", "") + default_description = schema.pop("description", "") + return { + "name": name or title, + "description": description or default_description, + "parameters": _rm_titles(schema) if rm_titles else schema, + } + + +def _convert_pydantic_to_openai_function( + model: type, + *, + name: Optional[str] = None, + description: Optional[str] = None, + rm_titles: bool = True, +) -> FunctionDescription: + """Converts a Pydantic model to a function description for the OpenAI API. + + Args: + model: The Pydantic model to convert. + name: The name of the function. If not provided, the title of the schema will be + used. + description: The description of the function. If not provided, the description + of the schema will be used. + rm_titles: Whether to remove titles from the schema. Defaults to True. + + Returns: + The function description. + """ + if hasattr(model, "model_json_schema"): + schema = model.model_json_schema() # Pydantic 2 + elif hasattr(model, "schema"): + schema = model.schema() # Pydantic 1 + else: + msg = "Model must be a Pydantic model." + raise TypeError(msg) + return _convert_json_schema_to_openai_function( + schema, name=name, description=description, rm_titles=rm_titles + ) + + +convert_pydantic_to_openai_function = deprecated( + "0.1.16", + alternative="langchain_core.utils.function_calling.convert_to_openai_function()", + removal="1.0", +)(_convert_pydantic_to_openai_function) + + +@deprecated( + "0.1.16", + alternative="langchain_core.utils.function_calling.convert_to_openai_tool()", + removal="1.0", +) +def convert_pydantic_to_openai_tool( + model: type[BaseModel], + *, + name: Optional[str] = None, + description: Optional[str] = None, +) -> ToolDescription: + """Converts a Pydantic model to a function description for the OpenAI API. + + Args: + model: The Pydantic model to convert. + name: The name of the function. If not provided, the title of the schema will be + used. + description: The description of the function. If not provided, the description + of the schema will be used. + + Returns: + The tool description. + """ + function = _convert_pydantic_to_openai_function( + model, name=name, description=description + ) + return {"type": "function", "function": function} + + +def _get_python_function_name(function: Callable) -> str: + """Get the name of a Python function.""" + return function.__name__ + + +def _convert_python_function_to_openai_function( + function: Callable, +) -> FunctionDescription: + """Convert a Python function to an OpenAI function-calling API compatible dict. + + Assumes the Python function has type hints and a docstring with a description. If + the docstring has Google Python style argument descriptions, these will be + included as well. + + Args: + function: The Python function to convert. + + Returns: + The OpenAI function description. 
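+
+    Example:
+        A sketch with a hypothetical function:
+
+        .. code-block:: python
+
+            def multiply(a: int, b: int) -> int:
+                '''Multiply two integers.
+
+                Args:
+                    a: The first factor.
+                    b: The second factor.
+                '''
+                return a * b
+
+            fn = _convert_python_function_to_openai_function(multiply)
+            # fn["name"] == "multiply"; the Google-style argument
+            # descriptions are carried into fn["parameters"].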
+ """ + from langchain_core.tools.base import create_schema_from_function + + func_name = _get_python_function_name(function) + model = create_schema_from_function( + func_name, + function, + filter_args=(), + parse_docstring=True, + error_on_invalid_docstring=False, + include_injected=False, + ) + return _convert_pydantic_to_openai_function( + model, + name=func_name, + description=model.__doc__, + ) + + +convert_python_function_to_openai_function = deprecated( + "0.1.16", + alternative="langchain_core.utils.function_calling.convert_to_openai_function()", + removal="1.0", +)(_convert_python_function_to_openai_function) + + +def _convert_typed_dict_to_openai_function(typed_dict: type) -> FunctionDescription: + visited: dict = {} + + model = cast( + "type[BaseModel]", + _convert_any_typed_dicts_to_pydantic(typed_dict, visited=visited), + ) + return _convert_pydantic_to_openai_function(model) + + +_MAX_TYPED_DICT_RECURSION = 25 + + +def _convert_any_typed_dicts_to_pydantic( + type_: type, + *, + visited: dict, + depth: int = 0, +) -> type: + from pydantic.v1 import Field as Field_v1 + from pydantic.v1 import create_model as create_model_v1 + + if type_ in visited: + return visited[type_] + if depth >= _MAX_TYPED_DICT_RECURSION: + return type_ + if is_typeddict(type_): + typed_dict = type_ + docstring = inspect.getdoc(typed_dict) + annotations_ = typed_dict.__annotations__ + description, arg_descriptions = _parse_google_docstring( + docstring, list(annotations_) + ) + fields: dict = {} + for arg, arg_type in annotations_.items(): + if get_origin(arg_type) is Annotated: + annotated_args = get_args(arg_type) + new_arg_type = _convert_any_typed_dicts_to_pydantic( + annotated_args[0], depth=depth + 1, visited=visited + ) + field_kwargs = dict(zip(("default", "description"), annotated_args[1:])) + if (field_desc := field_kwargs.get("description")) and not isinstance( + field_desc, str + ): + msg = ( + f"Invalid annotation for field {arg}. Third argument to " + f"Annotated must be a string description, received value of " + f"type {type(field_desc)}." + ) + raise ValueError(msg) + if arg_desc := arg_descriptions.get(arg): + field_kwargs["description"] = arg_desc + else: + pass + fields[arg] = (new_arg_type, Field_v1(**field_kwargs)) + else: + new_arg_type = _convert_any_typed_dicts_to_pydantic( + arg_type, depth=depth + 1, visited=visited + ) + field_kwargs = {"default": ...} + if arg_desc := arg_descriptions.get(arg): + field_kwargs["description"] = arg_desc + fields[arg] = (new_arg_type, Field_v1(**field_kwargs)) + model = create_model_v1(typed_dict.__name__, **fields) + model.__doc__ = description + visited[typed_dict] = model + return model + if (origin := get_origin(type_)) and (type_args := get_args(type_)): + subscriptable_origin = _py_38_safe_origin(origin) + type_args = tuple( + _convert_any_typed_dicts_to_pydantic(arg, depth=depth + 1, visited=visited) + for arg in type_args + ) + return subscriptable_origin[type_args] # type: ignore[index] + return type_ + + +def _format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription: + """Format tool into the OpenAI function API. + + Args: + tool: The tool to format. + + Returns: + The function description. 
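+
+    Example:
+        A sketch using the ``@tool`` decorator from ``langchain_core.tools``
+        (the tool name and body are illustrative):
+
+        .. code-block:: python
+
+            from langchain_core.tools import tool
+
+            @tool
+            def search(query: str) -> str:
+                '''Look up a query.'''
+                return "result"
+
+            fn = _format_tool_to_openai_function(search)
+            # fn["parameters"] is derived from the tool's call schema.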
+ """ + from langchain_core.tools import simple + + is_simple_oai_tool = isinstance(tool, simple.Tool) and not tool.args_schema + if tool.tool_call_schema and not is_simple_oai_tool: + if isinstance(tool.tool_call_schema, dict): + return _convert_json_schema_to_openai_function( + tool.tool_call_schema, name=tool.name, description=tool.description + ) + if issubclass(tool.tool_call_schema, (BaseModel, BaseModelV1)): + return _convert_pydantic_to_openai_function( + tool.tool_call_schema, name=tool.name, description=tool.description + ) + error_msg = ( + f"Unsupported tool call schema: {tool.tool_call_schema}. " + "Tool call schema must be a JSON schema dict or a Pydantic model." + ) + raise ValueError(error_msg) + return { + "name": tool.name, + "description": tool.description, + "parameters": { + # This is a hack to get around the fact that some tools + # do not expose an args_schema, and expect an argument + # which is a string. + # And Open AI does not support an array type for the + # parameters. + "properties": { + "__arg1": {"title": "__arg1", "type": "string"}, + }, + "required": ["__arg1"], + "type": "object", + }, + } + + +format_tool_to_openai_function = deprecated( + "0.1.16", + alternative="langchain_core.utils.function_calling.convert_to_openai_function()", + removal="1.0", +)(_format_tool_to_openai_function) + + +@deprecated( + "0.1.16", + alternative="langchain_core.utils.function_calling.convert_to_openai_tool()", + removal="1.0", +) +def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription: + """Format tool into the OpenAI function API. + + Args: + tool: The tool to format. + + Returns: + The tool description. + """ + function = _format_tool_to_openai_function(tool) + return {"type": "function", "function": function} + + +def convert_to_openai_function( + function: Union[dict[str, Any], type, Callable, BaseTool], + *, + strict: Optional[bool] = None, +) -> dict[str, Any]: + """Convert a raw function/class to an OpenAI function. + + Args: + function: + A dictionary, Pydantic BaseModel class, TypedDict class, a LangChain + Tool object, or a Python function. If a dictionary is passed in, it is + assumed to already be a valid OpenAI function, a JSON schema with + top-level 'title' key specified, an Anthropic format + tool, or an Amazon Bedrock Converse format tool. + strict: + If True, model output is guaranteed to exactly match the JSON Schema + provided in the function definition. If None, ``strict`` argument will not + be included in function definition. + + Returns: + A dict version of the passed in function which is compatible with the OpenAI + function-calling API. + + Raises: + ValueError: If function is not in a supported format. + + .. versionchanged:: 0.2.29 + + ``strict`` arg added. + + .. versionchanged:: 0.3.13 + + Support for Anthropic format tools added. + + .. versionchanged:: 0.3.14 + + Support for Amazon Bedrock Converse format tools added. + + .. versionchanged:: 0.3.16 + + 'description' and 'parameters' keys are now optional. Only 'name' is + required and guaranteed to be part of the output. 
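+
+    Example:
+        A sketch with a hypothetical Pydantic model:
+
+        .. code-block:: python
+
+            from pydantic import BaseModel, Field
+
+            class GetWeather(BaseModel):
+                '''Get the current weather for a location.'''
+
+                location: str = Field(..., description="City and country")
+
+            oai_fn = convert_to_openai_function(GetWeather)
+            # {'name': 'GetWeather',
+            #  'description': 'Get the current weather for a location.',
+            #  'parameters': {'type': 'object', 'properties': {...},
+            #                 'required': ['location']}}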
+    """
+    from langchain_core.tools import BaseTool
+
+    # an Anthropic format tool
+    if isinstance(function, dict) and all(
+        k in function for k in ("name", "input_schema")
+    ):
+        oai_function = {
+            "name": function["name"],
+            "parameters": function["input_schema"],
+        }
+        if "description" in function:
+            oai_function["description"] = function["description"]
+    # an Amazon Bedrock Converse format tool
+    elif isinstance(function, dict) and "toolSpec" in function:
+        oai_function = {
+            "name": function["toolSpec"]["name"],
+            "parameters": function["toolSpec"]["inputSchema"]["json"],
+        }
+        if "description" in function["toolSpec"]:
+            oai_function["description"] = function["toolSpec"]["description"]
+    # already in OpenAI function format
+    elif isinstance(function, dict) and "name" in function:
+        oai_function = {
+            k: v
+            for k, v in function.items()
+            if k in ("name", "description", "parameters", "strict")
+        }
+    # a JSON schema with title and description
+    elif isinstance(function, dict) and "title" in function:
+        function_copy = function.copy()
+        oai_function = {"name": function_copy.pop("title")}
+        if "description" in function_copy:
+            oai_function["description"] = function_copy.pop("description")
+        if function_copy and "properties" in function_copy:
+            oai_function["parameters"] = function_copy
+    elif isinstance(function, type) and is_basemodel_subclass(function):
+        oai_function = cast("dict", _convert_pydantic_to_openai_function(function))
+    elif is_typeddict(function):
+        oai_function = cast(
+            "dict", _convert_typed_dict_to_openai_function(cast("type", function))
+        )
+    elif isinstance(function, BaseTool):
+        oai_function = cast("dict", _format_tool_to_openai_function(function))
+    elif callable(function):
+        oai_function = cast(
+            "dict", _convert_python_function_to_openai_function(function)
+        )
+    else:
+        msg = (
+            f"Unsupported function\n\n{function}\n\nFunctions must be passed in"
+            " as Dict, pydantic.BaseModel, or Callable. If they're a dict they must"
+            " either be in OpenAI function format or valid JSON schema with top-level"
+            " 'title' and 'description' keys."
+        )
+        raise ValueError(msg)
+
+    if strict is not None:
+        if "strict" in oai_function and oai_function["strict"] != strict:
+            msg = (
+                f"Tool/function already has a 'strict' key with value "
+                f"{oai_function['strict']} which is different from the explicit "
+                f"`strict` arg received {strict=}."
+            )
+            raise ValueError(msg)
+        oai_function["strict"] = strict
+        if strict:
+            # As of 08/06/24, OpenAI requires that additionalProperties be supplied
+            # and set to False if strict is True.
+            # Every 'properties' layer needs 'additionalProperties=False'.
+            oai_function["parameters"] = _recursive_set_additional_properties_false(
+                oai_function["parameters"]
+            )
+    return oai_function
+
+
+def convert_to_openai_tool(
+    tool: Union[dict[str, Any], type[BaseModel], Callable, BaseTool],
+    *,
+    strict: Optional[bool] = None,
+) -> dict[str, Any]:
+    """Convert a tool-like object to an OpenAI tool schema.
+
+    OpenAI tool schema reference:
+    https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools
+
+    Args:
+        tool:
+            Either a dictionary, a pydantic.BaseModel class, Python function, or
+            BaseTool. If a dictionary is passed in, it is
+            assumed to already be a valid OpenAI function, a JSON schema with
+            top-level 'title' key specified, an Anthropic format
+            tool, or an Amazon Bedrock Converse format tool.
+        strict:
+            If True, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition.
If None, ``strict`` argument will not + be included in tool definition. + + Returns: + A dict version of the passed in tool which is compatible with the + OpenAI tool-calling API. + + .. versionchanged:: 0.2.29 + + ``strict`` arg added. + + .. versionchanged:: 0.3.13 + + Support for Anthropic format tools added. + + .. versionchanged:: 0.3.14 + + Support for Amazon Bedrock Converse format tools added. + + .. versionchanged:: 0.3.16 + + 'description' and 'parameters' keys are now optional. Only 'name' is + required and guaranteed to be part of the output. + + .. versionchanged:: 0.3.44 + + Return OpenAI Responses API-style tools unchanged. This includes + any dict with "type" in "file_search", "function", "computer_use_preview", + "web_search_preview". + """ + if isinstance(tool, dict): + if tool.get("type") in ("function", "file_search", "computer_use_preview"): + return tool + # As of 03.12.25 can be "web_search_preview" or "web_search_preview_2025_03_11" + if (tool.get("type") or "").startswith("web_search_preview"): + return tool + oai_function = convert_to_openai_function(tool, strict=strict) + return {"type": "function", "function": oai_function} + + +def convert_to_json_schema( + schema: Union[dict[str, Any], type[BaseModel], Callable, BaseTool], + *, + strict: Optional[bool] = None, +) -> dict[str, Any]: + """Convert a schema representation to a JSON schema.""" + openai_tool = convert_to_openai_tool(schema, strict=strict) + if ( + not isinstance(openai_tool, dict) + or "function" not in openai_tool + or "name" not in openai_tool["function"] + ): + error_message = "Input must be a valid OpenAI-format tool." + raise ValueError(error_message) + + openai_function = openai_tool["function"] + json_schema = {} + json_schema["title"] = openai_function["name"] + + if "description" in openai_function: + json_schema["description"] = openai_function["description"] + + if "parameters" in openai_function: + parameters = openai_function["parameters"].copy() + json_schema.update(parameters) + + return json_schema + + +@beta() +def tool_example_to_messages( + input: str, + tool_calls: list[BaseModel], + tool_outputs: Optional[list[str]] = None, + *, + ai_response: Optional[str] = None, +) -> list[BaseMessage]: + """Convert an example into a list of messages that can be fed into an LLM. + + This code is an adapter that converts a single example to a list of messages + that can be fed into a chat model. + + The list of messages per example by default corresponds to: + + 1) HumanMessage: contains the content from which content should be extracted. + 2) AIMessage: contains the extracted information from the model + 3) ToolMessage: contains confirmation to the model that the model requested a tool + correctly. + + If `ai_response` is specified, there will be a final AIMessage with that response. + + The ToolMessage is required because some chat models are hyper-optimized for agents + rather than for an extraction use case. + + Arguments: + input: string, the user input + tool_calls: list[BaseModel], a list of tool calls represented as Pydantic + BaseModels + tool_outputs: Optional[list[str]], a list of tool call outputs. + Does not need to be provided. If not provided, a placeholder value + will be inserted. Defaults to None. + ai_response: Optional[str], if provided, content for a final AIMessage. + + Returns: + A list of messages + + Examples: + + .. 
code-block:: python + + from typing import Optional + from pydantic import BaseModel, Field + from langchain_openai import ChatOpenAI + + class Person(BaseModel): + '''Information about a person.''' + name: Optional[str] = Field(..., description="The name of the person") + hair_color: Optional[str] = Field( + ..., description="The color of the person's hair if known" + ) + height_in_meters: Optional[str] = Field( + ..., description="Height in METERs" + ) + + examples = [ + ( + "The ocean is vast and blue. It's more than 20,000 feet deep.", + Person(name=None, height_in_meters=None, hair_color=None), + ), + ( + "Fiona traveled far from France to Spain.", + Person(name="Fiona", height_in_meters=None, hair_color=None), + ), + ] + + + messages = [] + + for txt, tool_call in examples: + messages.extend( + tool_example_to_messages(txt, [tool_call]) + ) + """ + messages: list[BaseMessage] = [HumanMessage(content=input)] + openai_tool_calls = [ + { + "id": str(uuid.uuid4()), + "type": "function", + "function": { + # The name of the function right now corresponds to the name + # of the pydantic model. This is implicit in the API right now, + # and will be improved over time. + "name": tool_call.__class__.__name__, + "arguments": tool_call.model_dump_json(), + }, + } + for tool_call in tool_calls + ] + + messages.append( + AIMessage(content="", additional_kwargs={"tool_calls": openai_tool_calls}) + ) + tool_outputs = tool_outputs or ["You have correctly called this tool."] * len( + openai_tool_calls + ) + for output, tool_call_dict in zip(tool_outputs, openai_tool_calls): + messages.append(ToolMessage(content=output, tool_call_id=tool_call_dict["id"])) + + if ai_response: + messages.append(AIMessage(content=ai_response)) + return messages + + +def _parse_google_docstring( + docstring: Optional[str], + args: list[str], + *, + error_on_invalid_docstring: bool = False, +) -> tuple[str, dict]: + """Parse the function and argument descriptions from the docstring of a function. + + Assumes the function docstring follows Google Python style guide. + """ + if docstring: + docstring_blocks = docstring.split("\n\n") + if error_on_invalid_docstring: + filtered_annotations = { + arg for arg in args if arg not in ("run_manager", "callbacks", "return") + } + if filtered_annotations and ( + len(docstring_blocks) < 2 + or not any(block.startswith("Args:") for block in docstring_blocks[1:]) + ): + msg = "Found invalid Google-Style docstring." + raise ValueError(msg) + descriptors = [] + args_block = None + past_descriptors = False + for block in docstring_blocks: + if block.startswith("Args:"): + args_block = block + break + if block.startswith(("Returns:", "Example:")): + # Don't break in case Args come after + past_descriptors = True + elif not past_descriptors: + descriptors.append(block) + else: + continue + description = " ".join(descriptors) + else: + if error_on_invalid_docstring: + msg = "Found invalid Google-Style docstring." 
+ raise ValueError(msg) + description = "" + args_block = None + arg_descriptions = {} + if args_block: + arg = None + for line in args_block.split("\n")[1:]: + if ":" in line: + arg, desc = line.split(":", maxsplit=1) + arg = arg.strip() + arg_name, _, _annotations = arg.partition(" ") + if _annotations.startswith("(") and _annotations.endswith(")"): + arg = arg_name + arg_descriptions[arg] = desc.strip() + elif arg: + arg_descriptions[arg] += " " + line.strip() + return description, arg_descriptions + + +def _py_38_safe_origin(origin: type) -> type: + origin_union_type_map: dict[type, Any] = ( + {types.UnionType: Union} if hasattr(types, "UnionType") else {} + ) + + origin_map: dict[type, Any] = { + dict: dict, + list: list, + tuple: tuple, + set: set, + collections.abc.Iterable: typing.Iterable, + collections.abc.Mapping: typing.Mapping, + collections.abc.Sequence: typing.Sequence, + collections.abc.MutableMapping: typing.MutableMapping, + **origin_union_type_map, + } + return cast("type", origin_map.get(origin, origin)) + + +def _recursive_set_additional_properties_false( + schema: dict[str, Any], +) -> dict[str, Any]: + if isinstance(schema, dict): + # Check if 'required' is a key at the current level or if the schema is empty, + # in which case additionalProperties still needs to be specified. + if "required" in schema or ( + "properties" in schema and not schema["properties"] + ): + schema["additionalProperties"] = False + + # Recursively check 'properties' and 'items' if they exist + if "properties" in schema: + for value in schema["properties"].values(): + _recursive_set_additional_properties_false(value) + if "items" in schema: + _recursive_set_additional_properties_false(schema["items"]) + + return schema diff --git a/venv/Lib/site-packages/langchain_core/utils/html.py b/venv/Lib/site-packages/langchain_core/utils/html.py new file mode 100644 index 00000000..b0a9880c --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/html.py @@ -0,0 +1,123 @@ +"""Utilities for working with HTML.""" + +import logging +import re +from collections.abc import Sequence +from typing import Optional, Union +from urllib.parse import urljoin, urlparse + +logger = logging.getLogger(__name__) + +PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#") +SUFFIXES_TO_IGNORE = ( + ".css", + ".js", + ".ico", + ".png", + ".jpg", + ".jpeg", + ".gif", + ".svg", + ".csv", + ".bz2", + ".zip", + ".epub", +) +SUFFIXES_TO_IGNORE_REGEX = ( + "(?!" + "|".join([re.escape(s) + r"[\#'\"]" for s in SUFFIXES_TO_IGNORE]) + ")" +) +PREFIXES_TO_IGNORE_REGEX = ( + "(?!" + "|".join([re.escape(s) for s in PREFIXES_TO_IGNORE]) + ")" +) +DEFAULT_LINK_REGEX = ( + rf"href=[\"']{PREFIXES_TO_IGNORE_REGEX}((?:{SUFFIXES_TO_IGNORE_REGEX}.)*?)[\#'\"]" +) + + +def find_all_links( + raw_html: str, *, pattern: Union[str, re.Pattern, None] = None +) -> list[str]: + """Extract all links from a raw HTML string. + + Args: + raw_html: original HTML. + pattern: Regex to use for extracting links from raw HTML. + + Returns: + list[str]: all links + """ + pattern = pattern or DEFAULT_LINK_REGEX + return list(set(re.findall(pattern, raw_html))) + + +def extract_sub_links( + raw_html: str, + url: str, + *, + base_url: Optional[str] = None, + pattern: Union[str, re.Pattern, None] = None, + prevent_outside: bool = True, + exclude_prefixes: Sequence[str] = (), + continue_on_failure: bool = False, +) -> list[str]: + """Extract all links from a raw HTML string and convert into absolute paths. + + Args: + raw_html: original HTML. 
+ url: the url of the HTML. + base_url: the base URL to check for outside links against. + pattern: Regex to use for extracting links from raw HTML. + prevent_outside: If True, ignore external links which are not children + of the base URL. + exclude_prefixes: Exclude any URLs that start with one of these prefixes. + continue_on_failure: If True, continue if parsing a specific link raises an + exception. Otherwise, raise the exception. + + Returns: + list[str]: sub links. + """ + base_url_to_use = base_url if base_url is not None else url + parsed_base_url = urlparse(base_url_to_use) + parsed_url = urlparse(url) + all_links = find_all_links(raw_html, pattern=pattern) + absolute_paths = set() + for link in all_links: + try: + parsed_link = urlparse(link) + # Some may be absolute links like https://to/path + if parsed_link.scheme in {"http", "https"}: + absolute_path = link + # Some may have omitted the protocol like //to/path + elif link.startswith("//"): + absolute_path = f"{parsed_url.scheme}:{link}" + else: + absolute_path = urljoin(url, parsed_link.path) + if parsed_link.query: + absolute_path += f"?{parsed_link.query}" + absolute_paths.add(absolute_path) + except Exception as e: + if continue_on_failure: + logger.warning( + "Unable to load link %s. Raised exception:\n\n%s", link, e + ) + continue + raise + + results = [] + for path in absolute_paths: + if any(path.startswith(exclude_prefix) for exclude_prefix in exclude_prefixes): + continue + + if prevent_outside: + parsed_path = urlparse(path) + + if parsed_base_url.netloc != parsed_path.netloc: + continue + + # Will take care of verifying rest of path after netloc + # if it's more specific + if not path.startswith(base_url_to_use): + continue + + results.append(path) + return results diff --git a/venv/Lib/site-packages/langchain_core/utils/image.py b/venv/Lib/site-packages/langchain_core/utils/image.py new file mode 100644 index 00000000..8931f0bc --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/image.py @@ -0,0 +1,15 @@ +"""Utilities for image processing.""" + +from typing import Any + + +def __getattr__(name: str) -> Any: + if name in ("encode_image", "image_to_data_url"): + msg = ( + f"'{name}' has been removed for security reasons.\n\n" + f"Usage of this utility in environments with user-input paths is a " + f"security vulnerability. Out of an abundance of caution, the utility " + f"has been removed to prevent possible misuse." + ) + raise ValueError(msg) + raise AttributeError(name) diff --git a/venv/Lib/site-packages/langchain_core/utils/input.py b/venv/Lib/site-packages/langchain_core/utils/input.py new file mode 100644 index 00000000..afa3bf75 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/input.py @@ -0,0 +1,75 @@ +"""Handle chained inputs.""" + +from typing import Optional, TextIO + +_TEXT_COLOR_MAPPING = { + "blue": "36;1", + "yellow": "33;1", + "pink": "38;5;200", + "green": "32;1", + "red": "31;1", +} + + +def get_color_mapping( + items: list[str], excluded_colors: Optional[list] = None +) -> dict[str, str]: + """Get mapping for items to a support color. + + Args: + items: The items to map to colors. + excluded_colors: The colors to exclude. + + Returns: + The mapping of items to colors. + """ + colors = list(_TEXT_COLOR_MAPPING.keys()) + if excluded_colors is not None: + colors = [c for c in colors if c not in excluded_colors] + return {item: colors[i % len(colors)] for i, item in enumerate(items)} + + +def get_colored_text(text: str, color: str) -> str: + """Get colored text. 
+ + Args: + text: The text to color. + color: The color to use. + + Returns: + The colored text. + """ + color_str = _TEXT_COLOR_MAPPING[color] + return f"\u001b[{color_str}m\033[1;3m{text}\u001b[0m" + + +def get_bolded_text(text: str) -> str: + """Get bolded text. + + Args: + text: The text to bold. + + Returns: + The bolded text. + """ + return f"\033[1m{text}\033[0m" + + +def print_text( + text: str, color: Optional[str] = None, end: str = "", file: Optional[TextIO] = None +) -> None: + """Print text with highlighting and no end characters. + + If a color is provided, the text will be printed in that color. + If a file is provided, the text will be written to that file. + + Args: + text: The text to print. + color: The color to use. Defaults to None. + end: The end character to use. Defaults to "". + file: The file to write to. Defaults to None. + """ + text_to_print = get_colored_text(text, color) if color else text + print(text_to_print, end=end, file=file) + if file: + file.flush() # ensure all printed content are written to file diff --git a/venv/Lib/site-packages/langchain_core/utils/interactive_env.py b/venv/Lib/site-packages/langchain_core/utils/interactive_env.py new file mode 100644 index 00000000..12676d0b --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/interactive_env.py @@ -0,0 +1,8 @@ +"""Utilities for working with interactive environments.""" + + +def is_interactive_env() -> bool: + """Determine if running within IPython or Jupyter.""" + import sys + + return hasattr(sys, "ps2") diff --git a/venv/Lib/site-packages/langchain_core/utils/iter.py b/venv/Lib/site-packages/langchain_core/utils/iter.py new file mode 100644 index 00000000..15c203a5 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/iter.py @@ -0,0 +1,217 @@ +"""Utilities for working with iterators.""" + +from collections import deque +from collections.abc import Generator, Iterable, Iterator +from contextlib import AbstractContextManager +from itertools import islice +from types import TracebackType +from typing import ( + Any, + Generic, + Optional, + TypeVar, + Union, + overload, +) + +from typing_extensions import Literal + +T = TypeVar("T") + + +class NoLock: + """Dummy lock that provides the proper interface but no protection.""" + + def __enter__(self) -> None: + """Do nothing.""" + + def __exit__( + self, + exc_type: Optional[type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Literal[False]: + """Exception not handled.""" + return False + + +def tee_peer( + iterator: Iterator[T], + # the buffer specific to this peer + buffer: deque[T], + # the buffers of all peers, including our own + peers: list[deque[T]], + lock: AbstractContextManager[Any], +) -> Generator[T, None, None]: + """An individual iterator of a :py:func:`~.tee`. + + This function is a generator that yields items from the shared iterator + ``iterator``. It buffers items until the least advanced iterator has + yielded them as well. The buffer is shared with all other peers. + + Args: + iterator: The shared iterator. + buffer: The buffer for this peer. + peers: The buffers of all peers. + lock: The lock to synchronise access to the shared buffers. + + Yields: + The next item from the shared iterator. + """ + try: + while True: + if not buffer: + with lock: + # Another peer produced an item while we were waiting for the lock. + # Proceed with the next loop iteration to yield the item. 
+                    if buffer:
+                        continue
+                    try:
+                        item = next(iterator)
+                    except StopIteration:
+                        break
+                    else:
+                        # Append to all buffers, including our own. We'll fetch our
+                        # item from the buffer again, instead of yielding it directly.
+                        # This ensures the proper item ordering if any of our peers
+                        # are fetching items concurrently. They may have buffered their
+                        # item already.
+                        for peer_buffer in peers:
+                            peer_buffer.append(item)
+            yield buffer.popleft()
+    finally:
+        with lock:
+            # this peer is done – remove its buffer
+            for idx, peer_buffer in enumerate(peers):  # pragma: no branch
+                if peer_buffer is buffer:
+                    peers.pop(idx)
+                    break
+            # if we are the last peer, try and close the iterator
+            if not peers and hasattr(iterator, "close"):
+                iterator.close()
+
+
+class Tee(Generic[T]):
+    """Create ``n`` separate iterators over ``iterable``.
+
+    This splits a single ``iterable`` into multiple iterators, each providing
+    the same items in the same order.
+    All child iterators may advance separately but share the same items
+    from ``iterable`` -- when the most advanced iterator retrieves an item,
+    it is buffered until the least advanced iterator has yielded it as well.
+    A ``tee`` works lazily and can handle an infinite ``iterable``, provided
+    that all iterators advance.
+
+    .. code-block:: python3
+
+        def derivative(sensor_data):
+            previous, current = Tee(sensor_data, n=2)
+            next(previous)  # advance one iterator
+            return map(operator.sub, previous, current)
+
+    Unlike :py:func:`itertools.tee`, ``Tee`` returns a custom type instead
+    of a :py:class:`tuple`. Like a tuple, it can be indexed, iterated and unpacked
+    to get the child iterators. In addition, its :py:meth:`~.Tee.close` method
+    immediately closes all children, and it can be used in a ``with`` context
+    for the same effect.
+
+    If ``iterable`` is an iterator and read elsewhere, ``tee`` will *not*
+    provide these items. Also, ``tee`` must internally buffer each item until the
+    last iterator has yielded it; if the most and least advanced iterators differ
+    by most of the data, using a :py:class:`list` is more efficient (but not lazy).
+
+    If the underlying iterable is concurrency safe (``next`` may be called
+    concurrently) the resulting iterators are concurrency safe as well. Otherwise,
+    the iterators are safe if there is only ever one single "most advanced" iterator.
+    To enforce sequential use of ``next``, provide a ``lock``
+    - e.g. a :py:class:`threading.Lock` instance in a multi-threaded application -
+    and access is automatically synchronised.
+    """
+
+    def __init__(
+        self,
+        iterable: Iterator[T],
+        n: int = 2,
+        *,
+        lock: Optional[AbstractContextManager[Any]] = None,
+    ):
+        """Create a ``tee``.
+
+        Args:
+            iterable: The iterable to split.
+            n: The number of iterators to create. Defaults to 2.
+            lock: The lock to synchronise access to the shared buffers.
+                Defaults to None.
+        """
+        self._iterator = iter(iterable)
+        self._buffers: list[deque[T]] = [deque() for _ in range(n)]
+        self._children = tuple(
+            tee_peer(
+                iterator=self._iterator,
+                buffer=buffer,
+                peers=self._buffers,
+                lock=lock if lock is not None else NoLock(),
+            )
+            for buffer in self._buffers
+        )
+
+    def __len__(self) -> int:
+        """Return the number of child iterators."""
+        return len(self._children)
+
+    @overload
+    def __getitem__(self, item: int) -> Iterator[T]: ...
+
+    @overload
+    def __getitem__(self, item: slice) -> tuple[Iterator[T], ...]: ...
+
+    def __getitem__(
+        self, item: Union[int, slice]
+    ) -> Union[Iterator[T], tuple[Iterator[T], ...]]:
+        """Return the child iterator(s) at the given index or slice."""
+        return self._children[item]
+
+    def __iter__(self) -> Iterator[Iterator[T]]:
+        """Return an iterator over the child iterators."""
+        yield from self._children
+
+    def __enter__(self) -> "Tee[T]":
+        """Return Tee instance."""
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> Literal[False]:
+        """Close all child iterators."""
+        self.close()
+        return False
+
+    def close(self) -> None:
+        """Close all child iterators."""
+        for child in self._children:
+            child.close()
+
+
+# Why this is needed https://stackoverflow.com/a/44638570
+safetee = Tee
+
+
+def batch_iterate(size: Optional[int], iterable: Iterable[T]) -> Iterator[list[T]]:
+    """Utility batching function.
+
+    Args:
+        size: The size of the batch. If None, returns a single batch.
+        iterable: The iterable to batch.
+
+    Yields:
+        The batches of the iterable.
+    """
+    it = iter(iterable)
+    while True:
+        chunk = list(islice(it, size))
+        if not chunk:
+            return
+        yield chunk
diff --git a/venv/Lib/site-packages/langchain_core/utils/json.py b/venv/Lib/site-packages/langchain_core/utils/json.py
new file mode 100644
index 00000000..4b12e85f
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/utils/json.py
@@ -0,0 +1,197 @@
+"""Utilities for JSON."""
+
+from __future__ import annotations
+
+import json
+import re
+from typing import Any, Callable
+
+from langchain_core.exceptions import OutputParserException
+
+
+def _replace_new_line(match: re.Match[str]) -> str:
+    value = match.group(2)
+    value = re.sub(r"\n", r"\\n", value)
+    value = re.sub(r"\r", r"\\r", value)
+    value = re.sub(r"\t", r"\\t", value)
+    value = re.sub(r'(?<!\\)"', r"\"", value)
+
+    return match.group(1) + value + match.group(3)
+
+
+def _custom_parser(multiline_string: str) -> str:
+    r"""Custom parser for multiline strings.
+
+    The LLM response for `action_input` may be a multiline
+    string containing unescaped newlines, tabs or quotes. This function
+    replaces those characters with their escaped counterparts.
+    (newlines in JSON must be double-escaped: `\\n`).
+    """
+    if isinstance(multiline_string, (bytes, bytearray)):
+        multiline_string = multiline_string.decode()
+
+    return re.sub(
+        r'("action_input"\:\s*")(.*?)(")',
+        _replace_new_line,
+        multiline_string,
+        flags=re.DOTALL,
+    )
+
+
+# Adapted from https://github.com/KillianLucas/open-interpreter/blob/5b6080fae1f8c68938a1e4fa8667e3744084ee21/interpreter/utils/parse_partial_json.py
+# MIT License
+
+
+def parse_partial_json(s: str, *, strict: bool = False) -> Any:
+    """Parse a JSON string that may be missing closing braces.
+
+    Args:
+        s: The JSON string to parse.
+        strict: Whether to use strict parsing. Defaults to False.
+
+    Returns:
+        The parsed JSON object as a Python dictionary.
+    """
+    # Attempt to parse the string as-is.
+    try:
+        return json.loads(s, strict=strict)
+    except json.JSONDecodeError:
+        pass
+
+    # Initialize variables.
+    new_chars = []
+    stack = []
+    is_inside_string = False
+    escaped = False
+
+    # Process each character in the string one at a time.
+    for char in s:
+        new_char = char
+        if is_inside_string:
+            if char == '"' and not escaped:
+                is_inside_string = False
+            elif char == "\n" and not escaped:
+                new_char = (
+                    "\\n"  # Replace the newline character with the escape sequence.
+                )
+            elif char == "\\":
+                escaped = not escaped
+            else:
+                escaped = False
+        elif char == '"':
+            is_inside_string = True
+            escaped = False
+        elif char == "{":
+            stack.append("}")
+        elif char == "[":
+            stack.append("]")
+        elif char in {"}", "]"}:
+            if stack and stack[-1] == char:
+                stack.pop()
+            else:
+                # Mismatched closing character; the input is malformed.
+                return None
+
+        # Append the processed character to the new string.
+        new_chars.append(new_char)
+
+    # If we're still inside a string at the end of processing,
+    # we need to close the string.
+    if is_inside_string:
+        if escaped:  # Remove unterminated escape character
+            new_chars.pop()
+        new_chars.append('"')
+
+    # Reverse the stack to get the closing characters.
+    stack.reverse()
+
+    # Try to parse mods of string until we succeed or run out of characters.
+    while new_chars:
+        # Close any remaining open structures in the reverse
+        # order that they were opened.
+        # Attempt to parse the modified string as JSON.
+        try:
+            return json.loads("".join(new_chars + stack), strict=strict)
+        except json.JSONDecodeError:
+            # If we still can't parse the string as JSON,
+            # try removing the last character
+            new_chars.pop()
+
+    # If we got here, we ran out of characters to remove
+    # and still couldn't parse the string as JSON, so return the parse error
+    # for the original string.
+    return json.loads(s, strict=strict)
+
+
+_json_markdown_re = re.compile(r"```(json)?(.*)", re.DOTALL)
+
+
+def parse_json_markdown(
+    json_string: str, *, parser: Callable[[str], Any] = parse_partial_json
+) -> dict:
+    """Parse a JSON string from a Markdown string.
+
+    Args:
+        json_string: The Markdown string.
+        parser: The parser to use. Defaults to `parse_partial_json`.
+
+    Returns:
+        The parsed JSON object as a Python dictionary.
+    """
+    try:
+        return _parse_json(json_string, parser=parser)
+    except json.JSONDecodeError:
+        # Try to find JSON string within triple backticks
+        match = _json_markdown_re.search(json_string)
+
+        # If no match found, assume the entire string is a JSON string
+        # Else, use the content within the backticks
+        json_str = json_string if match is None else match.group(2)
+        return _parse_json(json_str, parser=parser)
+
+
+_json_strip_chars = " \n\r\t`"
+
+
+def _parse_json(
+    json_str: str, *, parser: Callable[[str], Any] = parse_partial_json
+) -> dict:
+    # Strip whitespace, newlines, and backticks from the start and end
+    json_str = json_str.strip(_json_strip_chars)
+
+    # handle newlines and other special characters inside the returned value
+    json_str = _custom_parser(json_str)
+
+    # Parse the JSON string into a Python dictionary
+    return parser(json_str)
+
+
+def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict:
+    """Parse and check a JSON string from a Markdown string.
+
+    Checks that it contains the expected keys.
+
+    Args:
+        text: The Markdown string.
+        expected_keys: The expected keys in the JSON string.
+
+    Returns:
+        The parsed JSON object as a Python dictionary.
+
+    Raises:
+        OutputParserException: If the JSON string is invalid or does not contain
+            the expected keys.
+    """
+    try:
+        json_obj = parse_json_markdown(text)
+    except json.JSONDecodeError as e:
+        msg = f"Got invalid JSON object. Error: {e}"
+        raise OutputParserException(msg) from e
+    for key in expected_keys:
+        if key not in json_obj:
+            msg = (
+                f"Got invalid return object.
Expected key `{key}` " + f"to be present, but got {json_obj}" + ) + raise OutputParserException(msg) + return json_obj diff --git a/venv/Lib/site-packages/langchain_core/utils/json_schema.py b/venv/Lib/site-packages/langchain_core/utils/json_schema.py new file mode 100644 index 00000000..e5b1770c --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/json_schema.py @@ -0,0 +1,117 @@ +"""Utilities for JSON Schema.""" + +from __future__ import annotations + +from copy import deepcopy +from typing import TYPE_CHECKING, Any, Optional + +if TYPE_CHECKING: + from collections.abc import Sequence + + +def _retrieve_ref(path: str, schema: dict) -> dict: + components = path.split("/") + if components[0] != "#": + msg = ( + "ref paths are expected to be URI fragments, meaning they should start " + "with #." + ) + raise ValueError(msg) + out = schema + for component in components[1:]: + if component in out: + out = out[component] + elif component.isdigit() and int(component) in out: + out = out[int(component)] + else: + msg = f"Reference '{path}' not found." + raise KeyError(msg) + return deepcopy(out) + + +def _dereference_refs_helper( + obj: Any, + full_schema: dict[str, Any], + skip_keys: Sequence[str], + processed_refs: Optional[set[str]] = None, +) -> Any: + if processed_refs is None: + processed_refs = set() + + if isinstance(obj, dict): + obj_out = {} + for k, v in obj.items(): + if k in skip_keys: + obj_out[k] = v + elif k == "$ref": + if v in processed_refs: + continue + processed_refs.add(v) + ref = _retrieve_ref(v, full_schema) + full_ref = _dereference_refs_helper( + ref, full_schema, skip_keys, processed_refs + ) + processed_refs.remove(v) + return full_ref + elif isinstance(v, (list, dict)): + obj_out[k] = _dereference_refs_helper( + v, full_schema, skip_keys, processed_refs + ) + else: + obj_out[k] = v + return obj_out + if isinstance(obj, list): + return [ + _dereference_refs_helper(el, full_schema, skip_keys, processed_refs) + for el in obj + ] + return obj + + +def _infer_skip_keys( + obj: Any, full_schema: dict, processed_refs: Optional[set[str]] = None +) -> list[str]: + if processed_refs is None: + processed_refs = set() + + keys = [] + if isinstance(obj, dict): + for k, v in obj.items(): + if k == "$ref": + if v in processed_refs: + continue + processed_refs.add(v) + ref = _retrieve_ref(v, full_schema) + keys.append(v.split("/")[1]) + keys += _infer_skip_keys(ref, full_schema, processed_refs) + elif isinstance(v, (list, dict)): + keys += _infer_skip_keys(v, full_schema, processed_refs) + elif isinstance(obj, list): + for el in obj: + keys += _infer_skip_keys(el, full_schema, processed_refs) + return keys + + +def dereference_refs( + schema_obj: dict, + *, + full_schema: Optional[dict] = None, + skip_keys: Optional[Sequence[str]] = None, +) -> dict: + """Try to substitute $refs in JSON Schema. + + Args: + schema_obj: The schema object to dereference. + full_schema: The full schema object. Defaults to None. + skip_keys: The keys to skip. Defaults to None. + + Returns: + The dereferenced schema object. 
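+
+    Example:
+        A sketch with a hand-written schema (names are illustrative):
+
+        .. code-block:: python
+
+            schema = {
+                "type": "object",
+                "properties": {"user": {"$ref": "#/$defs/User"}},
+                "$defs": {
+                    "User": {
+                        "type": "object",
+                        "properties": {"name": {"type": "string"}},
+                    }
+                },
+            }
+            resolved = dereference_refs(schema)
+            # resolved["properties"]["user"] is now the inlined User schema.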
+    """
+    full_schema = full_schema or schema_obj
+    skip_keys = (
+        skip_keys
+        if skip_keys is not None
+        else _infer_skip_keys(schema_obj, full_schema)
+    )
+    return _dereference_refs_helper(schema_obj, full_schema, skip_keys)
diff --git a/venv/Lib/site-packages/langchain_core/utils/loading.py b/venv/Lib/site-packages/langchain_core/utils/loading.py
new file mode 100644
index 00000000..7921f6a0
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/utils/loading.py
@@ -0,0 +1,31 @@
+"""Utilities for loading configurations from langchain_core-hub."""
+
+import warnings
+from typing import Any
+
+from langchain_core._api.deprecation import deprecated
+
+
+@deprecated(
+    since="0.1.30",
+    removal="1.0",
+    message=(
+        "Using the hwchase17/langchain-hub "
+        "repo for prompts is deprecated. Please use "
+        "<https://smith.langchain.com/hub> instead."
+    ),
+)
+def try_load_from_hub(
+    *args: Any,  # noqa: ARG001
+    **kwargs: Any,  # noqa: ARG001
+) -> Any:
+    """[DEPRECATED] Try to load from the old Hub."""
+    warnings.warn(
+        "Loading from the deprecated github-based Hub is no longer supported. "
+        "Please use the new LangChain Hub at https://smith.langchain.com/hub instead.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    # return None, which indicates that we shouldn't load from old hub
+    # and might just be a filepath for e.g. load_chain
+    return None
diff --git a/venv/Lib/site-packages/langchain_core/utils/mustache.py b/venv/Lib/site-packages/langchain_core/utils/mustache.py
new file mode 100644
index 00000000..a7ba8883
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/utils/mustache.py
@@ -0,0 +1,661 @@
+"""Adapted from https://github.com/noahmorrison/chevron.
+
+MIT License.
+"""
+
+from __future__ import annotations
+
+import logging
+from collections.abc import Iterator, Mapping, Sequence
+from types import MappingProxyType
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Literal,
+    Optional,
+    Union,
+    cast,
+)
+
+if TYPE_CHECKING:
+    from typing_extensions import TypeAlias
+
+logger = logging.getLogger(__name__)
+
+
+Scopes: TypeAlias = list[Union[Literal[False, 0], Mapping[str, Any]]]
+
+
+# Globals
+_CURRENT_LINE = 1
+_LAST_TAG_LINE = None
+
+
+class ChevronError(SyntaxError):
+    """Custom exception for Chevron errors."""
+
+
+#
+# Helper functions
+#
+
+
+def grab_literal(template: str, l_del: str) -> tuple[str, str]:
+    """Parse a literal from the template.
+
+    Args:
+        template: The template to parse.
+        l_del: The left delimiter.
+
+    Returns:
+        tuple[str, str]: The literal and the template.
+    """
+    global _CURRENT_LINE
+
+    try:
+        # Look for the next tag and move the template to it
+        literal, template = template.split(l_del, 1)
+        _CURRENT_LINE += literal.count("\n")
+
+    # There are no more tags in the template?
+    except ValueError:
+        # Then the rest of the template is a literal
+        return (template, "")
+
+    return (literal, template)
+
+
+def l_sa_check(
+    template: str,  # noqa: ARG001
+    literal: str,
+    is_standalone: bool,  # noqa: FBT001
+) -> bool:
+    """Do a preliminary check to see if a tag could be a standalone.
+
+    Args:
+        template: The template. (Not used.)
+        literal: The literal.
+        is_standalone: Whether the tag is standalone.
+
+    Returns:
+        bool: Whether the tag could be a standalone.
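+
+    Example:
+        A sketch with illustrative literals:
+
+        .. code-block:: python
+
+            # Only whitespace since the last newline, so the next tag
+            # may still be a standalone.
+            l_sa_check("", "some text\n   ", False)  # True
+            # Non-whitespace right before the tag rules it out.
+            l_sa_check("", "some text", False)  # False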
+ """ + # If there is a newline, or the previous tag was a standalone + if literal.find("\n") != -1 or is_standalone: + padding = literal.split("\n")[-1] + + # If all the characters since the last newline are spaces + # Then the next tag could be a standalone + # Otherwise it can't be + return padding.isspace() or padding == "" + return False + + +def r_sa_check( + template: str, + tag_type: str, + is_standalone: bool, # noqa: FBT001 +) -> bool: + """Do a final check to see if a tag could be a standalone. + + Args: + template: The template. + tag_type: The type of the tag. + is_standalone: Whether the tag is standalone. + + Returns: + bool: Whether the tag could be a standalone. + """ + # Check right side if we might be a standalone + if is_standalone and tag_type not in ["variable", "no escape"]: + on_newline = template.split("\n", 1) + + # If the stuff to the right of us are spaces we're a standalone + return on_newline[0].isspace() or not on_newline[0] + + # If we're a tag can't be a standalone + return False + + +def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], str]: + """Parse a tag from a template. + + Args: + template: The template. + l_del: The left delimiter. + r_del: The right delimiter. + + Returns: + tuple[tuple[str, str], str]: The tag and the template. + + Raises: + ChevronError: If the tag is unclosed. + ChevronError: If the set delimiter tag is unclosed. + """ + tag_types = { + "!": "comment", + "#": "section", + "^": "inverted section", + "/": "end", + ">": "partial", + "=": "set delimiter?", + "{": "no escape?", + "&": "no escape", + } + + # Get the tag + try: + tag, template = template.split(r_del, 1) + except ValueError as e: + msg = f"unclosed tag at line {_CURRENT_LINE}" + raise ChevronError(msg) from e + + # Find the type meaning of the first character + tag_type = tag_types.get(tag[0], "variable") + + # If the type is not a variable + if tag_type != "variable": + # Then that first character is not needed + tag = tag[1:] + + # If we might be a set delimiter tag + if tag_type == "set delimiter?": + # Double check to make sure we are + if tag.endswith("="): + tag_type = "set delimiter" + # Remove the equal sign + tag = tag[:-1] + + # Otherwise we should complain + else: + msg = f"unclosed set delimiter tag\nat line {_CURRENT_LINE}" + raise ChevronError(msg) + + elif ( + # If we might be a no html escape tag + tag_type == "no escape?" + # And we have a third curly brace + # (And are using curly braces as delimiters) + and l_del == "{{" + and r_del == "}}" + and template.startswith("}") + ): + # Then we are a no html escape tag + template = template[1:] + tag_type = "no escape" + + # Strip the whitespace off the key and return + return ((tag_type, tag.strip()), template) + + +# +# The main tokenizing function +# + + +def tokenize( + template: str, def_ldel: str = "{{", def_rdel: str = "}}" +) -> Iterator[tuple[str, str]]: + """Tokenize a mustache template. + + Tokenizes a mustache template in a generator fashion, + using file-like objects. It also accepts a string containing + the template. 
+ + Args: + template: a file-like object, or a string of a mustache template + def_ldel: The default left delimiter + ("{{" by default, as in spec compliant mustache) + def_rdel: The default right delimiter + ("}}" by default, as in spec compliant mustache) + + Returns: + A generator of mustache tags in the form of a tuple (tag_type, tag_key) + Where tag_type is one of: + * literal + * section + * inverted section + * end + * partial + * no escape + And tag_key is either the key or in the case of a literal tag, + the literal itself. + """ + global _CURRENT_LINE, _LAST_TAG_LINE + _CURRENT_LINE = 1 + _LAST_TAG_LINE = None + + is_standalone = True + open_sections = [] + l_del = def_ldel + r_del = def_rdel + + while template: + literal, template = grab_literal(template, l_del) + + # If the template is completed + if not template: + # Then yield the literal and leave + yield ("literal", literal) + break + + # Do the first check to see if we could be a standalone + is_standalone = l_sa_check(template, literal, is_standalone) + + # Parse the tag + tag, template = parse_tag(template, l_del, r_del) + tag_type, tag_key = tag + + # Special tag logic + + # If we are a set delimiter tag + if tag_type == "set delimiter": + # Then get and set the delimiters + dels = tag_key.strip().split(" ") + l_del, r_del = dels[0], dels[-1] + + # If we are a section tag + elif tag_type in ["section", "inverted section"]: + # Then open a new section + open_sections.append(tag_key) + _LAST_TAG_LINE = _CURRENT_LINE + + # If we are an end tag + elif tag_type == "end": + # Then check to see if the last opened section + # is the same as us + try: + last_section = open_sections.pop() + except IndexError as e: + msg = ( + f'Trying to close tag "{tag_key}"\n' + "Looks like it was not opened.\n" + f"line {_CURRENT_LINE + 1}" + ) + raise ChevronError(msg) from e + if tag_key != last_section: + # Otherwise we need to complain + msg = ( + f'Trying to close tag "{tag_key}"\n' + f'last open tag is "{last_section}"\n' + f"line {_CURRENT_LINE + 1}" + ) + raise ChevronError(msg) + + # Do the second check to see if we're a standalone + is_standalone = r_sa_check(template, tag_type, is_standalone) + + # Which if we are + if is_standalone: + # Remove the stuff before the newline + template = template.split("\n", 1)[-1] + + # Partials need to keep the spaces on their left + if tag_type != "partial": + # But other tags don't + literal = literal.rstrip(" ") + + # Start yielding + # Ignore literals that are empty + if literal != "": + yield ("literal", literal) + + # Ignore comments and set delimiters + if tag_type not in ["comment", "set delimiter?"]: + yield (tag_type, tag_key) + + # If there are any open sections when we're done + if open_sections: + # Then we need to complain + msg = ( + "Unexpected EOF\n" + f'the tag "{open_sections[-1]}" was never closed\n' + f"was opened at line {_LAST_TAG_LINE}" + ) + raise ChevronError(msg) + + +# +# Helper functions +# + + +def _html_escape(string: str) -> str: + """HTML escape all of these " & < >.""" + html_codes = { + '"': """, + "<": "<", + ">": ">", + } + + # & must be handled first + string = string.replace("&", "&") + for char, code in html_codes.items(): + string = string.replace(char, code) + return string + + +def _get_key( + key: str, + scopes: Scopes, + *, + warn: bool, + keep: bool, + def_ldel: str, + def_rdel: str, +) -> Any: + """Get a key from the current scope.""" + # If the key is a dot + if key == ".": + # Then just return the current scope + return scopes[0] + + # Loop through 
+    for scope in scopes:
+        try:
+            # Return an empty string if falsy, with two exceptions
+            # 0 should return 0, and False should return False
+            if scope in (0, False):
+                return scope
+
+            resolved_scope = scope
+            # For every dot separated key
+            for child in key.split("."):
+                # Return an empty string if falsy, with two exceptions
+                # 0 should return 0, and False should return False
+                if resolved_scope in (0, False):
+                    return resolved_scope
+                # Move into the scope
+                try:
+                    # Try subscripting (Normal dictionaries)
+                    resolved_scope = cast("dict[str, Any]", resolved_scope)[child]
+                except (TypeError, AttributeError):
+                    try:
+                        resolved_scope = getattr(resolved_scope, child)
+                    except (TypeError, AttributeError):
+                        # Try as a list
+                        resolved_scope = resolved_scope[int(child)]  # type: ignore[index]
+
+            try:
+                # This allows for custom falsy data types
+                # https://github.com/noahmorrison/chevron/issues/35
+                if resolved_scope._CHEVRON_return_scope_when_falsy:  # type: ignore[union-attr]
+                    return resolved_scope
+            except AttributeError:
+                if resolved_scope in (0, False):
+                    return resolved_scope
+                return resolved_scope or ""
+        except (AttributeError, KeyError, IndexError, ValueError):
+            # We couldn't find the key in the current scope
+            # We'll try again on the next pass
+            pass
+
+    # We couldn't find the key in any of the scopes
+
+    if warn:
+        logger.warning("Could not find key '%s'", key)
+
+    if keep:
+        return f"{def_ldel} {key} {def_rdel}"
+
+    return ""
+
+
+def _get_partial(name: str, partials_dict: Mapping[str, str]) -> str:
+    """Load a partial."""
+    try:
+        # Maybe the partial is in the dictionary
+        return partials_dict[name]
+    except KeyError:
+        return ""
+
+
+#
+# The main rendering function
+#
+g_token_cache: dict[str, list[tuple[str, str]]] = {}
+
+EMPTY_DICT: MappingProxyType[str, str] = MappingProxyType({})
+
+
+def render(
+    template: Union[str, list[tuple[str, str]]] = "",
+    data: Mapping[str, Any] = EMPTY_DICT,
+    partials_dict: Mapping[str, str] = EMPTY_DICT,
+    padding: str = "",
+    def_ldel: str = "{{",
+    def_rdel: str = "}}",
+    scopes: Optional[Scopes] = None,
+    warn: bool = False,  # noqa: FBT001,FBT002
+    keep: bool = False,  # noqa: FBT001,FBT002
+) -> str:
+    """Render a mustache template.
+
+    Renders a mustache template with a data scope and inline partial capability.
+
+    Args:
+        template: A string containing the template, or a pre-tokenized
+            list of (tag_type, tag_key) tuples.
+        data: A python dictionary with your data scope.
+        partials_dict: A python dictionary which will be searched for partials.
+            {'include': 'foo'} is the same as a file called include.mustache
+            (defaults to {}).
+        padding: This is for padding partials, and shouldn't be used
+            (but can be if you really want to).
+        def_ldel: The default left delimiter
+            ("{{" by default, as in spec compliant mustache).
+        def_rdel: The default right delimiter
+            ("}}" by default, as in spec compliant mustache).
+        scopes: The list of scopes that get_key will look through.
+        warn: Log a warning when a template substitution isn't found in the data.
+        keep: Keep unreplaced tags when a substitution isn't found in the data.
+
+    Returns:
+        A string containing the rendered template.
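+
+    Example (illustrative; standard mustache semantics):
+        render("Hello, {{name}}!", {"name": "World"})
+        # -> 'Hello, World!'
+
+        render("{{#items}}{{.}} {{/items}}", {"items": [1, 2]})
+        # -> '1 2 '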
+ """ + # If the template is a sequence but not derived from a string + if isinstance(template, Sequence) and not isinstance(template, str): + # Then we don't need to tokenize it + # But it does need to be a generator + tokens: Iterator[tuple[str, str]] = (token for token in template) + elif template in g_token_cache: + tokens = (token for token in g_token_cache[template]) + else: + # Otherwise make a generator + tokens = tokenize(template, def_ldel, def_rdel) + + output = "" + + if scopes is None: + scopes = [data] + + # Run through the tokens + for tag, key in tokens: + # Set the current scope + current_scope = scopes[0] + + # If we're an end tag + if tag == "end": + # Pop out of the latest scope + del scopes[0] + + # If the current scope is falsy and not the only scope + elif not current_scope and len(scopes) != 1: + if tag in ["section", "inverted section"]: + # Set the most recent scope to a falsy value + scopes.insert(0, False) + + # If we're a literal tag + elif tag == "literal": + # Add padding to the key and add it to the output + output += key.replace("\n", "\n" + padding) + + # If we're a variable tag + elif tag == "variable": + # Add the html escaped key to the output + thing = _get_key( + key, scopes, warn=warn, keep=keep, def_ldel=def_ldel, def_rdel=def_rdel + ) + if thing is True and key == ".": + # if we've coerced into a boolean by accident + # (inverted tags do this) + # then get the un-coerced object (next in the stack) + thing = scopes[1] + if not isinstance(thing, str): + thing = str(thing) + output += _html_escape(thing) + + # If we're a no html escape tag + elif tag == "no escape": + # Just lookup the key and add it + thing = _get_key( + key, scopes, warn=warn, keep=keep, def_ldel=def_ldel, def_rdel=def_rdel + ) + if not isinstance(thing, str): + thing = str(thing) + output += thing + + # If we're a section tag + elif tag == "section": + # Get the sections scope + scope = _get_key( + key, scopes, warn=warn, keep=keep, def_ldel=def_ldel, def_rdel=def_rdel + ) + + # If the scope is a callable (as described in + # https://mustache.github.io/mustache.5.html) + if callable(scope): + # Generate template text from tags + text = "" + tags: list[tuple[str, str]] = [] + for token in tokens: + if token == ("end", key): + break + + tags.append(token) + tag_type, tag_key = token + if tag_type == "literal": + text += tag_key + elif tag_type == "no escape": + text += f"{def_ldel}& {tag_key} {def_rdel}" + else: + text += "{}{} {}{}".format( + def_ldel, + { + "comment": "!", + "section": "#", + "inverted section": "^", + "end": "/", + "partial": ">", + "set delimiter": "=", + "no escape": "&", + "variable": "", + }[tag_type], + tag_key, + def_rdel, + ) + + g_token_cache[text] = tags + + rend = scope( + text, + lambda template, data=None: render( + template, + data={}, + partials_dict=partials_dict, + padding=padding, + def_ldel=def_ldel, + def_rdel=def_rdel, + scopes=data and [data] + scopes or scopes, + warn=warn, + keep=keep, + ), + ) + + output += rend + + # If the scope is a sequence, an iterator or generator but not + # derived from a string + elif isinstance(scope, (Sequence, Iterator)) and not isinstance(scope, str): + # Then we need to do some looping + + # Gather up all the tags inside the section + # (And don't be tricked by nested end tags with the same key) + # TODO: This feels like it still has edge cases, no? 
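+                # A depth counter tracks nested sections that reuse this key:
+                # each ("section", key) increments it and each ("end", key)
+                # decrements it, so we stop only at the end tag that closes
+                # *this* section (e.g. {{#a}}{{#a}}{{/a}}{{/a}}).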
+ tags = [] + tags_with_same_key = 0 + for token in tokens: + if token == ("section", key): + tags_with_same_key += 1 + if token == ("end", key): + tags_with_same_key -= 1 + if tags_with_same_key < 0: + break + tags.append(token) + + # For every item in the scope + for thing in scope: + # Append it as the most recent scope and render + new_scope = [thing] + scopes + rend = render( + template=tags, + scopes=new_scope, + padding=padding, + partials_dict=partials_dict, + def_ldel=def_ldel, + def_rdel=def_rdel, + warn=warn, + keep=keep, + ) + + output += rend + + else: + # Otherwise we're just a scope section + scopes.insert(0, scope) + + # If we're an inverted section + elif tag == "inverted section": + # Add the flipped scope to the scopes + scope = _get_key( + key, scopes, warn=warn, keep=keep, def_ldel=def_ldel, def_rdel=def_rdel + ) + scopes.insert(0, cast("Literal[False]", not scope)) + + # If we're a partial + elif tag == "partial": + # Load the partial + partial = _get_partial(key, partials_dict) + + # Find what to pad the partial with + left = output.rpartition("\n")[2] + part_padding = padding + if left.isspace(): + part_padding += left + + # Render the partial + part_out = render( + template=partial, + partials_dict=partials_dict, + def_ldel=def_ldel, + def_rdel=def_rdel, + padding=part_padding, + scopes=scopes, + warn=warn, + keep=keep, + ) + + # If the partial was indented + if left.isspace(): + # then remove the spaces from the end + part_out = part_out.rstrip(" \t") + + # Add the partials output to the output + output += part_out + + return output diff --git a/venv/Lib/site-packages/langchain_core/utils/pydantic.py b/venv/Lib/site-packages/langchain_core/utils/pydantic.py new file mode 100644 index 00000000..ea987741 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/pydantic.py @@ -0,0 +1,649 @@ +"""Utilities for pydantic.""" + +from __future__ import annotations + +import inspect +import textwrap +import warnings +from contextlib import nullcontext +from functools import lru_cache, wraps +from types import GenericAlias +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + TypeVar, + Union, + cast, + overload, +) + +import pydantic +from packaging import version +from pydantic import ( + BaseModel, + ConfigDict, + PydanticDeprecationWarning, + RootModel, + root_validator, +) +from pydantic import ( + create_model as _create_model_base, +) +from pydantic.fields import FieldInfo as FieldInfoV2 +from pydantic.json_schema import ( + DEFAULT_REF_TEMPLATE, + GenerateJsonSchema, + JsonSchemaMode, + JsonSchemaValue, +) +from typing_extensions import override + +if TYPE_CHECKING: + from pydantic_core import core_schema + +try: + import pydantic + + PYDANTIC_VERSION = version.parse(pydantic.__version__) +except ImportError: + PYDANTIC_VERSION = version.parse("0.0.0") + + +def get_pydantic_major_version() -> int: + """DEPRECATED - Get the major version of Pydantic. + + Use PYDANTIC_VERSION.major instead. + """ + warnings.warn( + "get_pydantic_major_version is deprecated. 
Use PYDANTIC_VERSION.major instead.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return PYDANTIC_VERSION.major
+
+
+PYDANTIC_MAJOR_VERSION = PYDANTIC_VERSION.major
+PYDANTIC_MINOR_VERSION = PYDANTIC_VERSION.minor
+
+IS_PYDANTIC_V1 = PYDANTIC_VERSION.major == 1
+IS_PYDANTIC_V2 = PYDANTIC_VERSION.major == 2
+
+if IS_PYDANTIC_V1:
+    from pydantic.fields import FieldInfo as FieldInfoV1
+
+    PydanticBaseModel = pydantic.BaseModel
+    TypeBaseModel = type[BaseModel]
+elif IS_PYDANTIC_V2:
+    from pydantic.v1.fields import FieldInfo as FieldInfoV1  # type: ignore[assignment]
+
+    # Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
+    PydanticBaseModel = Union[BaseModel, pydantic.BaseModel]  # type: ignore[assignment,misc]
+    TypeBaseModel = Union[type[BaseModel], type[pydantic.BaseModel]]  # type: ignore[misc]
+else:
+    msg = f"Unsupported Pydantic version: {PYDANTIC_VERSION.major}"
+    raise ValueError(msg)
+
+
+TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
+
+
+def is_pydantic_v1_subclass(cls: type) -> bool:
+    """Check if the given class should be treated as a Pydantic v1 model."""
+    if IS_PYDANTIC_V1:
+        return True
+    if IS_PYDANTIC_V2:
+        from pydantic.v1 import BaseModel as BaseModelV1
+
+        if issubclass(cls, BaseModelV1):
+            return True
+    return False
+
+
+def is_pydantic_v2_subclass(cls: type) -> bool:
+    """Check if the given class is a Pydantic v2 BaseModel subclass."""
+    from pydantic import BaseModel
+
+    return IS_PYDANTIC_V2 and issubclass(cls, BaseModel)
+
+
+def is_basemodel_subclass(cls: type) -> bool:
+    """Check if the given class is a subclass of Pydantic BaseModel.
+
+    Check if the given class is a subclass of any of the following:
+
+    * pydantic.BaseModel in Pydantic 1.x
+    * pydantic.BaseModel in Pydantic 2.x
+    * pydantic.v1.BaseModel in Pydantic 2.x
+    """
+    # Before we can use issubclass on the cls we need to check if it is a class
+    if not inspect.isclass(cls) or isinstance(cls, GenericAlias):
+        return False
+
+    if IS_PYDANTIC_V1:
+        from pydantic import BaseModel as BaseModelV1Proper
+
+        if issubclass(cls, BaseModelV1Proper):
+            return True
+    elif IS_PYDANTIC_V2:
+        from pydantic import BaseModel as BaseModelV2
+        from pydantic.v1 import BaseModel as BaseModelV1
+
+        if issubclass(cls, BaseModelV2):
+            return True
+
+        if issubclass(cls, BaseModelV1):
+            return True
+    else:
+        msg = f"Unsupported Pydantic version: {PYDANTIC_VERSION.major}"
+        raise ValueError(msg)
+    return False
+
+
+def is_basemodel_instance(obj: Any) -> bool:
+    """Check if the given object is an instance of Pydantic BaseModel.
+
+    Check if the given object is an instance of any of the following:
+
+    * pydantic.BaseModel in Pydantic 1.x
+    * pydantic.BaseModel in Pydantic 2.x
+    * pydantic.v1.BaseModel in Pydantic 2.x
+    """
+    if IS_PYDANTIC_V1:
+        from pydantic import BaseModel as BaseModelV1Proper
+
+        if isinstance(obj, BaseModelV1Proper):
+            return True
+    elif IS_PYDANTIC_V2:
+        from pydantic import BaseModel as BaseModelV2
+        from pydantic.v1 import BaseModel as BaseModelV1
+
+        if isinstance(obj, BaseModelV2):
+            return True
+
+        if isinstance(obj, BaseModelV1):
+            return True
+    else:
+        msg = f"Unsupported Pydantic version: {PYDANTIC_VERSION.major}"
+        raise ValueError(msg)
+    return False
+
+
+# How to type hint this?
+def pre_init(func: Callable) -> Any:
+    """Decorator to run a function before model initialization.
+
+    Args:
+        func (Callable): The function to run before model initialization.
+
+    Returns:
+        Any: The decorated function.
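+
+    Example (an illustrative sketch of typical usage):
+        class MyModel(BaseModel):
+            name: str = ""
+
+            @pre_init
+            def fill_defaults(cls, values: dict) -> dict:
+                values["name"] = values.get("name") or "unnamed"
+                return values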
+ """ + with warnings.catch_warnings(): + warnings.filterwarnings(action="ignore", category=PydanticDeprecationWarning) + + @root_validator(pre=True) + @wraps(func) + def wrapper(cls: type[BaseModel], values: dict[str, Any]) -> dict[str, Any]: + """Decorator to run a function before model initialization. + + Args: + cls (Type[BaseModel]): The model class. + values (dict[str, Any]): The values to initialize the model with. + + Returns: + dict[str, Any]: The values to initialize the model with. + """ + # Insert default values + fields = cls.model_fields + for name, field_info in fields.items(): + # Check if allow_population_by_field_name is enabled + # If yes, then set the field name to the alias + if ( + hasattr(cls, "Config") + and hasattr(cls.Config, "allow_population_by_field_name") + and cls.Config.allow_population_by_field_name + and field_info.alias in values + ): + values[name] = values.pop(field_info.alias) + if ( + hasattr(cls, "model_config") + and cls.model_config.get("populate_by_name") + and field_info.alias in values + ): + values[name] = values.pop(field_info.alias) + + if ( + name not in values or values[name] is None + ) and not field_info.is_required(): + if field_info.default_factory is not None: + values[name] = field_info.default_factory() # type: ignore[call-arg] + else: + values[name] = field_info.default + + # Call the decorated function + return func(cls, values) + + return wrapper + + +class _IgnoreUnserializable(GenerateJsonSchema): + """A JSON schema generator that ignores unknown types. + + https://docs.pydantic.dev/latest/concepts/json_schema/#customizing-the-json-schema-generation-process + """ + + @override + def handle_invalid_for_json_schema( + self, schema: core_schema.CoreSchema, error_info: str + ) -> JsonSchemaValue: + return {} + + +def _create_subset_model_v1( + name: str, + model: type[BaseModel], + field_names: list, + *, + descriptions: Optional[dict] = None, + fn_description: Optional[str] = None, +) -> type[BaseModel]: + """Create a pydantic model with only a subset of model's fields.""" + if IS_PYDANTIC_V1: + from pydantic import create_model + elif IS_PYDANTIC_V2: + from pydantic.v1 import create_model # type: ignore[no-redef] + else: + msg = f"Unsupported pydantic version: {PYDANTIC_VERSION.major}" + raise NotImplementedError(msg) + + fields = {} + + for field_name in field_names: + # Using pydantic v1 so can access __fields__ as a dict. 
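+        # Note: required, non-nullable fields keep their annotation as-is;
+        # all other fields are wrapped in Optional[...] below, so the subset
+        # model will accept None for them.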
+ field = model.__fields__[field_name] # type: ignore[index] + t = ( + # this isn't perfect but should work for most functions + field.outer_type_ + if field.required and not field.allow_none + else Optional[field.outer_type_] + ) + if descriptions and field_name in descriptions: + field.field_info.description = descriptions[field_name] + fields[field_name] = (t, field.field_info) + + rtn = create_model(name, **fields) # type: ignore[call-overload] + rtn.__doc__ = textwrap.dedent(fn_description or model.__doc__ or "") + return rtn + + +def _create_subset_model_v2( + name: str, + model: type[pydantic.BaseModel], + field_names: list[str], + *, + descriptions: Optional[dict] = None, + fn_description: Optional[str] = None, +) -> type[pydantic.BaseModel]: + """Create a pydantic model with a subset of the model fields.""" + from pydantic import create_model + from pydantic.fields import FieldInfo + + descriptions_ = descriptions or {} + fields = {} + for field_name in field_names: + field = model.model_fields[field_name] + description = descriptions_.get(field_name, field.description) + field_info = FieldInfo(description=description, default=field.default) + if field.metadata: + field_info.metadata = field.metadata + fields[field_name] = (field.annotation, field_info) + + rtn = create_model( # type: ignore[call-overload] + name, **fields, __config__=ConfigDict(arbitrary_types_allowed=True) + ) + + # TODO(0.3): Determine if there is a more "pydantic" way to preserve annotations. + # This is done to preserve __annotations__ when working with pydantic 2.x + # and using the Annotated type with TypedDict. + # Comment out the following line, to trigger the relevant test case. + selected_annotations = [ + (name, annotation) + for name, annotation in model.__annotations__.items() + if name in field_names + ] + + rtn.__annotations__ = dict(selected_annotations) + rtn.__doc__ = textwrap.dedent(fn_description or model.__doc__ or "") + return rtn + + +# Private functionality to create a subset model that's compatible across +# different versions of pydantic. +# Handles pydantic versions 1.x and 2.x. including v1 of pydantic in 2.x. +# However, can't find a way to type hint this. +def _create_subset_model( + name: str, + model: TypeBaseModel, + field_names: list[str], + *, + descriptions: Optional[dict] = None, + fn_description: Optional[str] = None, +) -> type[BaseModel]: + """Create subset model using the same pydantic version as the input model.""" + if IS_PYDANTIC_V1: + return _create_subset_model_v1( + name, + model, + field_names, + descriptions=descriptions, + fn_description=fn_description, + ) + if IS_PYDANTIC_V2: + from pydantic.v1 import BaseModel as BaseModelV1 + + if issubclass(model, BaseModelV1): + return _create_subset_model_v1( + name, + model, + field_names, + descriptions=descriptions, + fn_description=fn_description, + ) + return _create_subset_model_v2( + name, + model, + field_names, + descriptions=descriptions, + fn_description=fn_description, + ) + msg = f"Unsupported pydantic version: {PYDANTIC_VERSION.major}" + raise NotImplementedError(msg) + + +if IS_PYDANTIC_V2: + from pydantic import BaseModel as BaseModelV2 + from pydantic.v1 import BaseModel as BaseModelV1 + + @overload + def get_fields(model: type[BaseModelV2]) -> dict[str, FieldInfoV2]: ... + + @overload + def get_fields(model: BaseModelV2) -> dict[str, FieldInfoV2]: ... + + @overload + def get_fields(model: type[BaseModelV1]) -> dict[str, FieldInfoV1]: ... 
+ + @overload + def get_fields(model: BaseModelV1) -> dict[str, FieldInfoV1]: ... + + def get_fields( + model: Union[type[Union[BaseModelV2, BaseModelV1]], BaseModelV2, BaseModelV1], + ) -> Union[dict[str, FieldInfoV2], dict[str, FieldInfoV1]]: + """Get the field names of a Pydantic model.""" + if hasattr(model, "model_fields"): + return model.model_fields + + if hasattr(model, "__fields__"): + return model.__fields__ # type: ignore[return-value] + msg = f"Expected a Pydantic model. Got {type(model)}" + raise TypeError(msg) + +elif IS_PYDANTIC_V1: + from pydantic import BaseModel as BaseModelV1_ + + def get_fields( # type: ignore[no-redef] + model: Union[type[BaseModelV1_], BaseModelV1_], + ) -> dict[str, FieldInfoV1]: + """Get the field names of a Pydantic model.""" + return model.__fields__ # type: ignore[return-value] + +else: + msg = f"Unsupported Pydantic version: {PYDANTIC_VERSION.major}" + raise ValueError(msg) + +_SchemaConfig = ConfigDict( + arbitrary_types_allowed=True, frozen=True, protected_namespaces=() +) + +NO_DEFAULT = object() + + +def _create_root_model( + name: str, + type_: Any, + module_name: Optional[str] = None, + default_: object = NO_DEFAULT, +) -> type[BaseModel]: + """Create a base class.""" + + def schema( + cls: type[BaseModel], + by_alias: bool = True, # noqa: FBT001,FBT002 + ref_template: str = DEFAULT_REF_TEMPLATE, + ) -> dict[str, Any]: + # Complains about schema not being defined in superclass + schema_ = super(cls, cls).schema( # type: ignore[misc] + by_alias=by_alias, ref_template=ref_template + ) + schema_["title"] = name + return schema_ + + def model_json_schema( + cls: type[BaseModel], + by_alias: bool = True, # noqa: FBT001,FBT002 + ref_template: str = DEFAULT_REF_TEMPLATE, + schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema, + mode: JsonSchemaMode = "validation", + ) -> dict[str, Any]: + # Complains about model_json_schema not being defined in superclass + schema_ = super(cls, cls).model_json_schema( # type: ignore[misc] + by_alias=by_alias, + ref_template=ref_template, + schema_generator=schema_generator, + mode=mode, + ) + schema_["title"] = name + return schema_ + + base_class_attributes = { + "__annotations__": {"root": type_}, + "model_config": ConfigDict(arbitrary_types_allowed=True), + "schema": classmethod(schema), + "model_json_schema": classmethod(model_json_schema), + "__module__": module_name or "langchain_core.runnables.utils", + } + + if default_ is not NO_DEFAULT: + base_class_attributes["root"] = default_ + with warnings.catch_warnings(): + try: + if ( + isinstance(type_, type) + and not isinstance(type_, GenericAlias) + and issubclass(type_, BaseModelV1) + ): + warnings.filterwarnings( + action="ignore", category=PydanticDeprecationWarning + ) + except TypeError: + pass + custom_root_type = type(name, (RootModel,), base_class_attributes) + return cast("type[BaseModel]", custom_root_type) + + +@lru_cache(maxsize=256) +def _create_root_model_cached( + model_name: str, + type_: Any, + *, + module_name: Optional[str] = None, + default_: object = NO_DEFAULT, +) -> type[BaseModel]: + return _create_root_model( + model_name, type_, default_=default_, module_name=module_name + ) + + +@lru_cache(maxsize=256) +def _create_model_cached( + model_name: str, + /, + **field_definitions: Any, +) -> type[BaseModel]: + return _create_model_base( + model_name, + __config__=_SchemaConfig, + **_remap_field_definitions(field_definitions), + ) + + +def create_model( + model_name: str, + module_name: Optional[str] = None, + /, + 
**field_definitions: Any, +) -> type[BaseModel]: + """Create a pydantic model with the given field definitions. + + Please use create_model_v2 instead of this function. + + Args: + model_name: The name of the model. + module_name: The name of the module where the model is defined. + This is used by Pydantic to resolve any forward references. + **field_definitions: The field definitions for the model. + + Returns: + Type[BaseModel]: The created model. + """ + kwargs = {} + if "__root__" in field_definitions: + kwargs["root"] = field_definitions.pop("__root__") + + return create_model_v2( + model_name, + module_name=module_name, + field_definitions=field_definitions, + **kwargs, + ) + + +# Reserved names should capture all the `public` names / methods that are +# used by BaseModel internally. This will keep the reserved names up-to-date. +# For reference, the reserved names are: +# "construct", "copy", "dict", "from_orm", "json", "parse_file", "parse_obj", +# "parse_raw", "schema", "schema_json", "update_forward_refs", "validate", +# "model_computed_fields", "model_config", "model_construct", "model_copy", +# "model_dump", "model_dump_json", "model_extra", "model_fields", +# "model_fields_set", "model_json_schema", "model_parametrized_name", +# "model_post_init", "model_rebuild", "model_validate", "model_validate_json", +# "model_validate_strings" +_RESERVED_NAMES = {key for key in dir(BaseModel) if not key.startswith("_")} + + +def _remap_field_definitions(field_definitions: dict[str, Any]) -> dict[str, Any]: + """This remaps fields to avoid colliding with internal pydantic fields.""" + from pydantic import Field + from pydantic.fields import FieldInfo + + remapped = {} + for key, value in field_definitions.items(): + if key.startswith("_") or key in _RESERVED_NAMES: + # Let's add a prefix to avoid colliding with internal pydantic fields + if isinstance(value, FieldInfo): + msg = ( + f"Remapping for fields starting with '_' or fields with a name " + f"matching a reserved name {_RESERVED_NAMES} is not supported if " + f" the field is a pydantic Field instance. Got {key}." + ) + raise NotImplementedError(msg) + type_, default_ = value + remapped[f"private_{key}"] = ( + type_, + Field( + default=default_, + alias=key, + serialization_alias=key, + title=key.lstrip("_").replace("_", " ").title(), + ), + ) + else: + remapped[key] = value + return remapped + + +def create_model_v2( + model_name: str, + *, + module_name: Optional[str] = None, + field_definitions: Optional[dict[str, Any]] = None, + root: Optional[Any] = None, +) -> type[BaseModel]: + """Create a pydantic model with the given field definitions. + + Attention: + Please do not use outside of langchain packages. This API + is subject to change at any time. + + Args: + model_name: The name of the model. + module_name: The name of the module where the model is defined. + This is used by Pydantic to resolve any forward references. + field_definitions: The field definitions for the model. + root: Type for a root model (RootModel) + + Returns: + Type[BaseModel]: The created model. + """ + field_definitions = field_definitions or {} + + if root: + if field_definitions: + msg = ( + "When specifying __root__ no other " + f"fields should be provided. 
Got {field_definitions}" + ) + raise NotImplementedError(msg) + + if isinstance(root, tuple): + kwargs = {"type_": root[0], "default_": root[1]} + else: + kwargs = {"type_": root} + + try: + named_root_model = _create_root_model_cached( + model_name, module_name=module_name, **kwargs + ) + except TypeError: + # something in the arguments into _create_root_model_cached is not hashable + named_root_model = _create_root_model( + model_name, + module_name=module_name, + **kwargs, + ) + return named_root_model + + # No root, just field definitions + names = set(field_definitions.keys()) + + capture_warnings = False + + for name in names: + # Also if any non-reserved name is used (e.g., model_id or model_name) + if name.startswith("model"): + capture_warnings = True + + with warnings.catch_warnings() if capture_warnings else nullcontext(): + if capture_warnings: + warnings.filterwarnings(action="ignore") + try: + return _create_model_cached(model_name, **field_definitions) + except TypeError: + # something in field definitions is not hashable + return _create_model_base( + model_name, + __config__=_SchemaConfig, + **_remap_field_definitions(field_definitions), + ) diff --git a/venv/Lib/site-packages/langchain_core/utils/strings.py b/venv/Lib/site-packages/langchain_core/utils/strings.py new file mode 100644 index 00000000..4eeb7ed5 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/strings.py @@ -0,0 +1,48 @@ +"""String utilities.""" + +from typing import Any + + +def stringify_value(val: Any) -> str: + """Stringify a value. + + Args: + val: The value to stringify. + + Returns: + str: The stringified value. + """ + if isinstance(val, str): + return val + if isinstance(val, dict): + return "\n" + stringify_dict(val) + if isinstance(val, list): + return "\n".join(stringify_value(v) for v in val) + return str(val) + + +def stringify_dict(data: dict) -> str: + """Stringify a dictionary. + + Args: + data: The dictionary to stringify. + + Returns: + str: The stringified dictionary. + """ + text = "" + for key, value in data.items(): + text += key + ": " + stringify_value(value) + "\n" + return text + + +def comma_list(items: list[Any]) -> str: + """Convert a list to a comma-separated string. + + Args: + items: The list to convert. + + Returns: + str: The comma-separated string. + """ + return ", ".join(str(item) for item in items) diff --git a/venv/Lib/site-packages/langchain_core/utils/usage.py b/venv/Lib/site-packages/langchain_core/utils/usage.py new file mode 100644 index 00000000..95ee2ca0 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/utils/usage.py @@ -0,0 +1,39 @@ +"""Usage utilities.""" + +from typing import Callable + + +def _dict_int_op( + left: dict, + right: dict, + op: Callable[[int, int], int], + *, + default: int = 0, + depth: int = 0, + max_depth: int = 100, +) -> dict: + if depth >= max_depth: + msg = f"{max_depth=} exceeded, unable to combine dicts." + raise ValueError(msg) + combined: dict = {} + for k in set(left).union(right): + if isinstance(left.get(k, default), int) and isinstance( + right.get(k, default), int + ): + combined[k] = op(left.get(k, default), right.get(k, default)) + elif isinstance(left.get(k, {}), dict) and isinstance(right.get(k, {}), dict): + combined[k] = _dict_int_op( + left.get(k, {}), + right.get(k, {}), + op, + default=default, + depth=depth + 1, + max_depth=max_depth, + ) + else: + types = [type(d[k]) for d in (left, right) if k in d] + msg = ( + f"Unknown value types: {types}. Only dict and int values are supported." 
+            )
+            raise ValueError(msg)  # noqa: TRY004
+    return combined
diff --git a/venv/Lib/site-packages/langchain_core/utils/utils.py b/venv/Lib/site-packages/langchain_core/utils/utils.py
new file mode 100644
index 00000000..a7467ec5
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/utils/utils.py
@@ -0,0 +1,468 @@
+"""Generic utility functions."""
+
+import contextlib
+import datetime
+import functools
+import importlib
+import os
+import warnings
+from collections.abc import Iterator, Sequence
+from importlib.metadata import version
+from typing import Any, Callable, Optional, Union, overload
+
+from packaging.version import parse
+from pydantic import SecretStr
+from requests import HTTPError, Response
+from typing_extensions import override
+
+from langchain_core.utils.pydantic import (
+    is_pydantic_v1_subclass,
+)
+
+
+def xor_args(*arg_groups: tuple[str, ...]) -> Callable:
+    """Validate that the specified keyword args are mutually exclusive.
+
+    Args:
+        *arg_groups (tuple[str, ...]): Groups of mutually exclusive keyword args.
+
+    Returns:
+        Callable: Decorator that validates the specified keyword args
+        are mutually exclusive
+
+    Raises:
+        ValueError: If more than one arg in a group is defined.
+    """
+
+    def decorator(func: Callable) -> Callable:
+        @functools.wraps(func)
+        def wrapper(*args: Any, **kwargs: Any) -> Any:
+            """Validate exactly one arg in each group is not None."""
+            counts = [
+                sum(1 for arg in arg_group if kwargs.get(arg) is not None)
+                for arg_group in arg_groups
+            ]
+            invalid_groups = [i for i, count in enumerate(counts) if count != 1]
+            if invalid_groups:
+                invalid_group_names = [", ".join(arg_groups[i]) for i in invalid_groups]
+                msg = (
+                    "Exactly one argument in each of the following"
+                    " groups must be defined:"
+                    f" {', '.join(invalid_group_names)}"
+                )
+                raise ValueError(msg)
+            return func(*args, **kwargs)
+
+        return wrapper
+
+    return decorator
+
+
+def raise_for_status_with_text(response: Response) -> None:
+    """Raise an error with the response text.
+
+    Args:
+        response (Response): The response to check for errors.
+
+    Raises:
+        ValueError: If the response has an error status code.
+    """
+    try:
+        response.raise_for_status()
+    except HTTPError as e:
+        raise ValueError(response.text) from e
+
+
+@contextlib.contextmanager
+def mock_now(dt_value: datetime.datetime) -> Iterator[type]:
+    """Context manager for mocking out datetime.now() in unit tests.
+
+    Args:
+        dt_value: The datetime value to use for datetime.now().
+
+    Yields:
+        datetime.datetime: The mocked datetime class.
+
+    Example:
+        with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
+            assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11)
+    """
+
+    class MockDateTime(datetime.datetime):
+        """Mock datetime.datetime.now() with a fixed datetime."""
+
+        @classmethod
+        @override
+        def now(cls, tz: Union[datetime.tzinfo, None] = None) -> "MockDateTime":
+            # Create a copy of dt_value.
+            return MockDateTime(
+                dt_value.year,
+                dt_value.month,
+                dt_value.day,
+                dt_value.hour,
+                dt_value.minute,
+                dt_value.second,
+                dt_value.microsecond,
+                dt_value.tzinfo,
+            )
+
+    real_datetime = datetime.datetime
+    datetime.datetime = MockDateTime  # type: ignore[misc]
+    try:
+        yield datetime.datetime
+    finally:
+        datetime.datetime = real_datetime  # type: ignore[misc]
+
+
+def guard_import(
+    module_name: str, *, pip_name: Optional[str] = None, package: Optional[str] = None
+) -> Any:
+    """Dynamically import a module.
+
+    Raise an exception if the module is not installed.
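+
+    If ``pip_name`` is not given, the install hint in the error message falls
+    back to the top-level module name with underscores replaced by dashes.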
+
+    Args:
+        module_name (str): The name of the module to import.
+        pip_name (str, optional): The name of the module to install with pip.
+            Defaults to None.
+        package (str, optional): The package to import the module from.
+            Defaults to None.
+
+    Returns:
+        Any: The imported module.
+
+    Raises:
+        ImportError: If the module is not installed.
+    """
+    try:
+        module = importlib.import_module(module_name, package)
+    except (ImportError, ModuleNotFoundError) as e:
+        pip_name = pip_name or module_name.split(".")[0].replace("_", "-")
+        msg = (
+            f"Could not import {module_name} python package. "
+            f"Please install it with `pip install {pip_name}`."
+        )
+        raise ImportError(msg) from e
+    return module
+
+
+def check_package_version(
+    package: str,
+    lt_version: Optional[str] = None,
+    lte_version: Optional[str] = None,
+    gt_version: Optional[str] = None,
+    gte_version: Optional[str] = None,
+) -> None:
+    """Check the version of a package.
+
+    Args:
+        package (str): The name of the package.
+        lt_version (str, optional): The version must be less than this.
+            Defaults to None.
+        lte_version (str, optional): The version must be less than or equal to this.
+            Defaults to None.
+        gt_version (str, optional): The version must be greater than this.
+            Defaults to None.
+        gte_version (str, optional): The version must be greater than or equal to this.
+            Defaults to None.
+
+    Raises:
+        ValueError: If the package version does not meet the requirements.
+    """
+    imported_version = parse(version(package))
+    if lt_version is not None and imported_version >= parse(lt_version):
+        msg = (
+            f"Expected {package} version to be < {lt_version}. Received "
+            f"{imported_version}."
+        )
+        raise ValueError(msg)
+    if lte_version is not None and imported_version > parse(lte_version):
+        msg = (
+            f"Expected {package} version to be <= {lte_version}. Received "
+            f"{imported_version}."
+        )
+        raise ValueError(msg)
+    if gt_version is not None and imported_version <= parse(gt_version):
+        msg = (
+            f"Expected {package} version to be > {gt_version}. Received "
+            f"{imported_version}."
+        )
+        raise ValueError(msg)
+    if gte_version is not None and imported_version < parse(gte_version):
+        msg = (
+            f"Expected {package} version to be >= {gte_version}. Received "
+            f"{imported_version}."
+        )
+        raise ValueError(msg)
+
+
+def get_pydantic_field_names(pydantic_cls: Any) -> set[str]:
+    """Get field names, including aliases, for a pydantic class.
+
+    Args:
+        pydantic_cls: Pydantic class.
+
+    Returns:
+        set[str]: Field names.
+    """
+    all_required_field_names = set()
+    if is_pydantic_v1_subclass(pydantic_cls):
+        for field in pydantic_cls.__fields__.values():
+            all_required_field_names.add(field.name)
+            if field.has_alias:
+                all_required_field_names.add(field.alias)
+    else:  # Assuming pydantic 2 for now
+        for name, field in pydantic_cls.model_fields.items():
+            all_required_field_names.add(name)
+            if field.alias:
+                all_required_field_names.add(field.alias)
+    return all_required_field_names
+
+
+def _build_model_kwargs(
+    values: dict[str, Any],
+    all_required_field_names: set[str],
+) -> dict[str, Any]:
+    """Build the "model_kwargs" param from Pydantic constructor values.
+
+    Args:
+        values: All init args passed in by user.
+        all_required_field_names: All required field names for the pydantic class.
+
+    Returns:
+        dict[str, Any]: Extra kwargs.
+
+    Raises:
+        ValueError: If a field is specified in both values and extra_kwargs.
+        ValueError: If a field is specified in model_kwargs.
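+
+    Example (illustrative; assumes "model_kwargs" itself is one of the known
+    field names, as it is on model classes that use this helper):
+        _build_model_kwargs(
+            {"temperature": 0, "foo": "bar"},
+            {"temperature", "model_kwargs"},
+        )
+        # -> {"temperature": 0, "model_kwargs": {"foo": "bar"}}
+        # (a warning is emitted that "foo" was moved to model_kwargs)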
+    """
+    extra_kwargs = values.get("model_kwargs", {})
+    for field_name in list(values):
+        if field_name in extra_kwargs:
+            msg = f"Found {field_name} supplied twice."
+            raise ValueError(msg)
+        if field_name not in all_required_field_names:
+            warnings.warn(
+                f"""WARNING! {field_name} is not a default parameter.
+                {field_name} was transferred to model_kwargs.
+                Please confirm that {field_name} is what you intended.""",
+                stacklevel=7,
+            )
+            extra_kwargs[field_name] = values.pop(field_name)
+
+    invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs.keys())
+    if invalid_model_kwargs:
+        warnings.warn(
+            f"Parameters {invalid_model_kwargs} should be specified explicitly. "
+            f"Instead they were passed in as part of `model_kwargs` parameter.",
+            stacklevel=7,
+        )
+        for k in invalid_model_kwargs:
+            values[k] = extra_kwargs.pop(k)
+
+    values["model_kwargs"] = extra_kwargs
+    return values
+
+
+# DON'T USE! Kept for backwards-compatibility but should never have been public.
+def build_extra_kwargs(
+    extra_kwargs: dict[str, Any],
+    values: dict[str, Any],
+    all_required_field_names: set[str],
+) -> dict[str, Any]:
+    """Build extra kwargs from values and extra_kwargs.
+
+    Args:
+        extra_kwargs: Extra kwargs passed in by user.
+        values: Values passed in by user.
+        all_required_field_names: All required field names for the pydantic class.
+
+    Returns:
+        dict[str, Any]: Extra kwargs.
+
+    Raises:
+        ValueError: If a field is specified in both values and extra_kwargs.
+        ValueError: If a field is specified in model_kwargs.
+    """
+    for field_name in list(values):
+        if field_name in extra_kwargs:
+            msg = f"Found {field_name} supplied twice."
+            raise ValueError(msg)
+        if field_name not in all_required_field_names:
+            warnings.warn(
+                f"""WARNING! {field_name} is not a default parameter.
+                {field_name} was transferred to model_kwargs.
+                Please confirm that {field_name} is what you intended.""",
+                stacklevel=7,
+            )
+            extra_kwargs[field_name] = values.pop(field_name)
+
+    invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs.keys())
+    if invalid_model_kwargs:
+        msg = (
+            f"Parameters {invalid_model_kwargs} should be specified explicitly. "
+            f"Instead they were passed in as part of `model_kwargs` parameter."
+        )
+        raise ValueError(msg)
+
+    return extra_kwargs
+
+
+def convert_to_secret_str(value: Union[SecretStr, str]) -> SecretStr:
+    """Convert a string to a SecretStr if needed.
+
+    Args:
+        value (Union[SecretStr, str]): The value to convert.
+
+    Returns:
+        SecretStr: The SecretStr value.
+    """
+    if isinstance(value, SecretStr):
+        return value
+    return SecretStr(value)
+
+
+class _NoDefaultType:
+    """Type to indicate no default value is provided."""
+
+
+_NoDefault = _NoDefaultType()
+
+
+@overload
+def from_env(key: str, /) -> Callable[[], str]: ...
+
+
+@overload
+def from_env(key: str, /, *, default: str) -> Callable[[], str]: ...
+
+
+@overload
+def from_env(key: Sequence[str], /, *, default: str) -> Callable[[], str]: ...
+
+
+@overload
+def from_env(key: str, /, *, error_message: str) -> Callable[[], str]: ...
+
+
+@overload
+def from_env(
+    key: Union[str, Sequence[str]], /, *, default: str, error_message: Optional[str]
+) -> Callable[[], str]: ...
+
+
+@overload
+def from_env(
+    key: str, /, *, default: None, error_message: Optional[str]
+) -> Callable[[], Optional[str]]: ...
+
+
+@overload
+def from_env(
+    key: Union[str, Sequence[str]], /, *, default: None
+) -> Callable[[], Optional[str]]: ...
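+
+
+# Illustrative usage sketch (hypothetical field name): the zero-argument
+# factory returned by `from_env` is typically wired up as a pydantic
+# default_factory:
+#
+#     api_base: Optional[str] = Field(default_factory=from_env("MY_API_BASE", default=None))
+#
+# so the environment is read when the model is instantiated, not at import time.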
+ + +def from_env( + key: Union[str, Sequence[str]], + /, + *, + default: Union[str, _NoDefaultType, None] = _NoDefault, + error_message: Optional[str] = None, +) -> Union[Callable[[], str], Callable[[], Optional[str]]]: + """Create a factory method that gets a value from an environment variable. + + Args: + key: The environment variable to look up. If a list of keys is provided, + the first key found in the environment will be used. + If no key is found, the default value will be used if set, + otherwise an error will be raised. + default: The default value to return if the environment variable is not set. + error_message: the error message which will be raised if the key is not found + and no default value is provided. + This will be raised as a ValueError. + """ + + def get_from_env_fn() -> Optional[str]: + """Get a value from an environment variable.""" + if isinstance(key, (list, tuple)): + for k in key: + if k in os.environ: + return os.environ[k] + if isinstance(key, str) and key in os.environ: + return os.environ[key] + + if isinstance(default, (str, type(None))): + return default + if error_message: + raise ValueError(error_message) + msg = ( + f"Did not find {key}, please add an environment variable" + f" `{key}` which contains it, or pass" + f" `{key}` as a named parameter." + ) + raise ValueError(msg) + + return get_from_env_fn + + +@overload +def secret_from_env(key: Union[str, Sequence[str]], /) -> Callable[[], SecretStr]: ... + + +@overload +def secret_from_env(key: str, /, *, default: str) -> Callable[[], SecretStr]: ... + + +@overload +def secret_from_env( + key: Union[str, Sequence[str]], /, *, default: None +) -> Callable[[], Optional[SecretStr]]: ... + + +@overload +def secret_from_env(key: str, /, *, error_message: str) -> Callable[[], SecretStr]: ... + + +def secret_from_env( + key: Union[str, Sequence[str]], + /, + *, + default: Union[str, _NoDefaultType, None] = _NoDefault, + error_message: Optional[str] = None, +) -> Union[Callable[[], Optional[SecretStr]], Callable[[], SecretStr]]: + """Secret from env. + + Args: + key: The environment variable to look up. + default: The default value to return if the environment variable is not set. + error_message: the error message which will be raised if the key is not found + and no default value is provided. + This will be raised as a ValueError. + + Returns: + factory method that will look up the secret from the environment. + """ + + def get_secret_from_env() -> Optional[SecretStr]: + """Get a value from an environment variable.""" + if isinstance(key, (list, tuple)): + for k in key: + if k in os.environ: + return SecretStr(os.environ[k]) + if isinstance(key, str) and key in os.environ: + return SecretStr(os.environ[key]) + if isinstance(default, str): + return SecretStr(default) + if default is None: + return None + if error_message: + raise ValueError(error_message) + msg = ( + f"Did not find {key}, please add an environment variable" + f" `{key}` which contains it, or pass" + f" `{key}` as a named parameter." 
+ ) + raise ValueError(msg) + + return get_secret_from_env diff --git a/venv/Lib/site-packages/langchain_core/vectorstores/__init__.py b/venv/Lib/site-packages/langchain_core/vectorstores/__init__.py new file mode 100644 index 00000000..3881feb9 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/vectorstores/__init__.py @@ -0,0 +1,34 @@ +"""Vector stores.""" + +from typing import TYPE_CHECKING + +from langchain_core._import_utils import import_attr + +if TYPE_CHECKING: + from langchain_core.vectorstores.base import VST, VectorStore, VectorStoreRetriever + from langchain_core.vectorstores.in_memory import InMemoryVectorStore + +__all__ = ( + "VectorStore", + "VST", + "VectorStoreRetriever", + "InMemoryVectorStore", +) + +_dynamic_imports = { + "VectorStore": "base", + "VST": "base", + "VectorStoreRetriever": "base", + "InMemoryVectorStore": "in_memory", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + result = import_attr(attr_name, module_name, __spec__.parent) + globals()[attr_name] = result + return result + + +def __dir__() -> list[str]: + return list(__all__) diff --git a/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..3677e8b6 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..954eaa78 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/in_memory.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/in_memory.cpython-312.pyc new file mode 100644 index 00000000..30f6ac8d Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/in_memory.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/utils.cpython-312.pyc b/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/utils.cpython-312.pyc new file mode 100644 index 00000000..65a4e6b2 Binary files /dev/null and b/venv/Lib/site-packages/langchain_core/vectorstores/__pycache__/utils.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_core/vectorstores/base.py b/venv/Lib/site-packages/langchain_core/vectorstores/base.py new file mode 100644 index 00000000..173bb6ca --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/vectorstores/base.py @@ -0,0 +1,1145 @@ +"""**Vector store** stores embedded data and performs vector search. + +One of the most common ways to store and search over unstructured data is to +embed it and store the resulting embedding vectors, and then query the store +and retrieve the data that are 'most similar' to the embedded query. + +**Class hierarchy:** + +.. code-block:: + + VectorStore --> # Examples: Annoy, FAISS, Milvus + + BaseRetriever --> VectorStoreRetriever --> Retriever # Example: VespaRetriever + +**Main helpers:** + +.. 
code-block::
+
+    Embeddings, Document
+"""  # noqa: E501
+
+from __future__ import annotations
+
+import logging
+import math
+import warnings
+from abc import ABC, abstractmethod
+from itertools import cycle
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    ClassVar,
+    Optional,
+    TypeVar,
+)
+
+from pydantic import ConfigDict, Field, model_validator
+from typing_extensions import Self, override
+
+from langchain_core.embeddings import Embeddings
+from langchain_core.retrievers import BaseRetriever, LangSmithRetrieverParams
+from langchain_core.runnables.config import run_in_executor
+
+if TYPE_CHECKING:
+    from collections.abc import Collection, Iterable, Iterator, Sequence
+
+    from langchain_core.callbacks.manager import (
+        AsyncCallbackManagerForRetrieverRun,
+        CallbackManagerForRetrieverRun,
+    )
+    from langchain_core.documents import Document
+
+logger = logging.getLogger(__name__)
+
+VST = TypeVar("VST", bound="VectorStore")
+
+
+class VectorStore(ABC):
+    """Interface for vector store."""
+
+    def add_texts(
+        self,
+        texts: Iterable[str],
+        metadatas: Optional[list[dict]] = None,
+        *,
+        ids: Optional[list[str]] = None,
+        **kwargs: Any,
+    ) -> list[str]:
+        """Run more texts through the embeddings and add to the vectorstore.
+
+        Args:
+            texts: Iterable of strings to add to the vectorstore.
+            metadatas: Optional list of metadatas associated with the texts.
+            ids: Optional list of IDs associated with the texts.
+            **kwargs: vectorstore specific parameters.
+                One of the kwargs should be `ids` which is a list of ids
+                associated with the texts.
+
+        Returns:
+            List of ids from adding the texts into the vectorstore.
+
+        Raises:
+            ValueError: If the number of metadatas does not match the number of texts.
+            ValueError: If the number of ids does not match the number of texts.
+        """
+        if type(self).add_documents != VectorStore.add_documents:
+            # Import document in local scope to avoid circular imports
+            from langchain_core.documents import Document
+
+            # This condition is triggered if the subclass has provided its own
+            # add_documents implementation, so build Documents here and
+            # delegate to that implementation.
+            texts_: Sequence[str] = (
+                texts if isinstance(texts, (list, tuple)) else list(texts)
+            )
+            if metadatas and len(metadatas) != len(texts_):
+                msg = (
+                    "The number of metadatas must match the number of texts. "
+                    f"Got {len(metadatas)} metadatas and {len(texts_)} texts."
+                )
+                raise ValueError(msg)
+            metadatas_ = iter(metadatas) if metadatas else cycle([{}])
+            ids_: Iterator[Optional[str]] = iter(ids) if ids else cycle([None])
+            docs = [
+                Document(id=id_, page_content=text, metadata=metadata_)
+                for text, metadata_, id_ in zip(texts, metadatas_, ids_)
+            ]
+            if ids is not None:
+                # For backward compatibility
+                kwargs["ids"] = ids
+
+            return self.add_documents(docs, **kwargs)
+        msg = f"`add_texts` has not been implemented for {self.__class__.__name__}"
+        raise NotImplementedError(msg)
+
+    @property
+    def embeddings(self) -> Optional[Embeddings]:
+        """Access the query embedding object if available."""
+        logger.debug(
+            "The embeddings property has not been implemented for %s",
+            self.__class__.__name__,
+        )
+        return None
+
+    def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> Optional[bool]:
+        """Delete by vector ID or other criteria.
+
+        Args:
+            ids: List of ids to delete. If None, delete all. Default is None.
+            **kwargs: Other keyword arguments that subclasses might use.
+
+        Returns:
+            Optional[bool]: True if deletion is successful,
+            False otherwise, None if not implemented.
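+
+        The base implementation always raises ``NotImplementedError``;
+        vector stores that support deletion should override this method.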
+ """ + msg = "delete method must be implemented by subclass." + raise NotImplementedError(msg) + + def get_by_ids(self, ids: Sequence[str], /) -> list[Document]: + """Get documents by their IDs. + + The returned documents are expected to have the ID field set to the ID of the + document in the vector store. + + Fewer documents may be returned than requested if some IDs are not found or + if there are duplicated IDs. + + Users should not assume that the order of the returned documents matches + the order of the input IDs. Instead, users should rely on the ID field of the + returned documents. + + This method should **NOT** raise exceptions if no documents are found for + some IDs. + + Args: + ids: List of ids to retrieve. + + Returns: + List of Documents. + + .. versionadded:: 0.2.11 + """ + msg = f"{self.__class__.__name__} does not yet support get_by_ids." + raise NotImplementedError(msg) + + # Implementations should override this method to provide an async native version. + async def aget_by_ids(self, ids: Sequence[str], /) -> list[Document]: + """Async get documents by their IDs. + + The returned documents are expected to have the ID field set to the ID of the + document in the vector store. + + Fewer documents may be returned than requested if some IDs are not found or + if there are duplicated IDs. + + Users should not assume that the order of the returned documents matches + the order of the input IDs. Instead, users should rely on the ID field of the + returned documents. + + This method should **NOT** raise exceptions if no documents are found for + some IDs. + + Args: + ids: List of ids to retrieve. + + Returns: + List of Documents. + + .. versionadded:: 0.2.11 + """ + return await run_in_executor(None, self.get_by_ids, ids) + + async def adelete( + self, ids: Optional[list[str]] = None, **kwargs: Any + ) -> Optional[bool]: + """Async delete by vector ID or other criteria. + + Args: + ids: List of ids to delete. If None, delete all. Default is None. + **kwargs: Other keyword arguments that subclasses might use. + + Returns: + Optional[bool]: True if deletion is successful, + False otherwise, None if not implemented. + """ + return await run_in_executor(None, self.delete, ids, **kwargs) + + async def aadd_texts( + self, + texts: Iterable[str], + metadatas: Optional[list[dict]] = None, + *, + ids: Optional[list[str]] = None, + **kwargs: Any, + ) -> list[str]: + """Async run more texts through the embeddings and add to the vectorstore. + + Args: + texts: Iterable of strings to add to the vectorstore. + metadatas: Optional list of metadatas associated with the texts. + Default is None. + ids: Optional list + **kwargs: vectorstore specific parameters. + + Returns: + List of ids from adding the texts into the vectorstore. + + Raises: + ValueError: If the number of metadatas does not match the number of texts. + ValueError: If the number of ids does not match the number of texts. + """ + if ids is not None: + # For backward compatibility + kwargs["ids"] = ids + if type(self).aadd_documents != VectorStore.aadd_documents: + # Import document in local scope to avoid circular imports + from langchain_core.documents import Document + + # This condition is triggered if the subclass has provided + # an implementation of the upsert method. + # The existing add_texts + texts_: Sequence[str] = ( + texts if isinstance(texts, (list, tuple)) else list(texts) + ) + if metadatas and len(metadatas) != len(texts_): + msg = ( + "The number of metadatas must match the number of texts." 
+ f"Got {len(metadatas)} metadatas and {len(texts_)} texts." + ) + raise ValueError(msg) + metadatas_ = iter(metadatas) if metadatas else cycle([{}]) + ids_: Iterator[Optional[str]] = iter(ids) if ids else cycle([None]) + + docs = [ + Document(id=id_, page_content=text, metadata=metadata_) + for text, metadata_, id_ in zip(texts, metadatas_, ids_) + ] + return await self.aadd_documents(docs, **kwargs) + return await run_in_executor(None, self.add_texts, texts, metadatas, **kwargs) + + def add_documents(self, documents: list[Document], **kwargs: Any) -> list[str]: + """Add or update documents in the vectorstore. + + Args: + documents: Documents to add to the vectorstore. + kwargs: Additional keyword arguments. + if kwargs contains ids and documents contain ids, + the ids in the kwargs will receive precedence. + + Returns: + List of IDs of the added texts. + + Raises: + ValueError: If the number of ids does not match the number of documents. + """ + if type(self).add_texts != VectorStore.add_texts: + if "ids" not in kwargs: + ids = [doc.id for doc in documents] + + # If there's at least one valid ID, we'll assume that IDs + # should be used. + if any(ids): + kwargs["ids"] = ids + + texts = [doc.page_content for doc in documents] + metadatas = [doc.metadata for doc in documents] + return self.add_texts(texts, metadatas, **kwargs) + msg = ( + f"`add_documents` and `add_texts` has not been implemented " + f"for {self.__class__.__name__} " + ) + raise NotImplementedError(msg) + + async def aadd_documents( + self, documents: list[Document], **kwargs: Any + ) -> list[str]: + """Async run more documents through the embeddings and add to the vectorstore. + + Args: + documents: Documents to add to the vectorstore. + kwargs: Additional keyword arguments. + + Returns: + List of IDs of the added texts. + + Raises: + ValueError: If the number of IDs does not match the number of documents. + """ + # If the async method has been overridden, we'll use that. + if type(self).aadd_texts != VectorStore.aadd_texts: + if "ids" not in kwargs: + ids = [doc.id for doc in documents] + + # If there's at least one valid ID, we'll assume that IDs + # should be used. + if any(ids): + kwargs["ids"] = ids + + texts = [doc.page_content for doc in documents] + metadatas = [doc.metadata for doc in documents] + return await self.aadd_texts(texts, metadatas, **kwargs) + + return await run_in_executor(None, self.add_documents, documents, **kwargs) + + def search(self, query: str, search_type: str, **kwargs: Any) -> list[Document]: + """Return docs most similar to query using a specified search type. + + Args: + query: Input text + search_type: Type of search to perform. Can be "similarity", + "mmr", or "similarity_score_threshold". + **kwargs: Arguments to pass to the search method. + + Returns: + List of Documents most similar to the query. + + Raises: + ValueError: If search_type is not one of "similarity", + "mmr", or "similarity_score_threshold". + """ + if search_type == "similarity": + return self.similarity_search(query, **kwargs) + if search_type == "similarity_score_threshold": + docs_and_similarities = self.similarity_search_with_relevance_scores( + query, **kwargs + ) + return [doc for doc, _ in docs_and_similarities] + if search_type == "mmr": + return self.max_marginal_relevance_search(query, **kwargs) + msg = ( + f"search_type of {search_type} not allowed. Expected " + "search_type to be 'similarity', 'similarity_score_threshold'" + " or 'mmr'." 
+        )
+        raise ValueError(msg)
+
+    async def asearch(
+        self, query: str, search_type: str, **kwargs: Any
+    ) -> list[Document]:
+        """Async return docs most similar to query using a specified search type.
+
+        Args:
+            query: Input text.
+            search_type: Type of search to perform. Can be "similarity",
+                "mmr", or "similarity_score_threshold".
+            **kwargs: Arguments to pass to the search method.
+
+        Returns:
+            List of Documents most similar to the query.
+
+        Raises:
+            ValueError: If search_type is not one of "similarity",
+                "mmr", or "similarity_score_threshold".
+        """
+        if search_type == "similarity":
+            return await self.asimilarity_search(query, **kwargs)
+        if search_type == "similarity_score_threshold":
+            docs_and_similarities = await self.asimilarity_search_with_relevance_scores(
+                query, **kwargs
+            )
+            return [doc for doc, _ in docs_and_similarities]
+        if search_type == "mmr":
+            return await self.amax_marginal_relevance_search(query, **kwargs)
+        msg = (
+            f"search_type of {search_type} not allowed. Expected "
+            "search_type to be 'similarity', 'similarity_score_threshold' or 'mmr'."
+        )
+        raise ValueError(msg)
+
+    @abstractmethod
+    def similarity_search(
+        self, query: str, k: int = 4, **kwargs: Any
+    ) -> list[Document]:
+        """Return docs most similar to query.
+
+        Args:
+            query: Input text.
+            k: Number of Documents to return. Defaults to 4.
+            **kwargs: Arguments to pass to the search method.
+
+        Returns:
+            List of Documents most similar to the query.
+        """
+
+    @staticmethod
+    def _euclidean_relevance_score_fn(distance: float) -> float:
+        """Return a similarity score on a scale [0, 1]."""
+        # The 'correct' relevance function
+        # may differ depending on a few things, including:
+        # - the distance / similarity metric used by the VectorStore
+        # - the scale of your embeddings (OpenAI's are unit normed. Many
+        #   others are not!)
+        # - embedding dimensionality
+        # - etc.
+        # This function converts the Euclidean norm of normalized embeddings
+        # (0 is most similar, sqrt(2) most dissimilar)
+        # to a similarity function (0 to 1)
+        return 1.0 - distance / math.sqrt(2)
+
+    @staticmethod
+    def _cosine_relevance_score_fn(distance: float) -> float:
+        """Normalize the distance to a score on a scale [0, 1]."""
+        return 1.0 - distance
+
+    @staticmethod
+    def _max_inner_product_relevance_score_fn(distance: float) -> float:
+        """Normalize the distance to a score on a scale [0, 1]."""
+        if distance > 0:
+            return 1.0 - distance
+
+        return -1.0 * distance
+
+    def _select_relevance_score_fn(self) -> Callable[[float], float]:
+        """Select the relevance score function for this vector store.
+
+        The 'correct' relevance function may differ depending on a few things,
+        including:
+        - the distance / similarity metric used by the VectorStore
+        - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
+        - embedding dimensionality
+        - etc.
+
+        Vectorstores should define their own method for selecting the relevance
+        function.
+        """
+        raise NotImplementedError
+
+    def similarity_search_with_score(
+        self, *args: Any, **kwargs: Any
+    ) -> list[tuple[Document, float]]:
+        """Run similarity search with distance.
+
+        Args:
+            *args: Arguments to pass to the search method.
+            **kwargs: Arguments to pass to the search method.
+
+        Returns:
+            List of Tuples of (doc, similarity_score).
+        """
+        raise NotImplementedError
+
+    async def asimilarity_search_with_score(
+        self, *args: Any, **kwargs: Any
+    ) -> list[tuple[Document, float]]:
+        """Async run similarity search with distance.
+
+        Args:
+            *args: Arguments to pass to the search method.
+            **kwargs: Arguments to pass to the search method.
+
+        Returns:
+            List of Tuples of (doc, similarity_score).
+        """
+        # This is a temporary workaround to make the similarity search
+        # asynchronous. The proper solution is to make the similarity search
+        # asynchronous in the vector store implementations.
+        return await run_in_executor(
+            None, self.similarity_search_with_score, *args, **kwargs
+        )
+
+    def _similarity_search_with_relevance_scores(
+        self,
+        query: str,
+        k: int = 4,
+        **kwargs: Any,
+    ) -> list[tuple[Document, float]]:
+        """Default similarity search with relevance scores.
+
+        Modify if necessary in subclass.
+        Return docs and relevance scores in the range [0, 1].
+
+        0 is dissimilar, 1 is most similar.
+
+        Args:
+            query: Input text.
+            k: Number of Documents to return. Defaults to 4.
+            **kwargs: kwargs to be passed to similarity search. Should include:
+                score_threshold: Optional, a floating point value between 0 and 1 to
+                    filter the resulting set of retrieved docs
+
+        Returns:
+            List of Tuples of (doc, similarity_score)
+        """
+        relevance_score_fn = self._select_relevance_score_fn()
+        docs_and_scores = self.similarity_search_with_score(query, k, **kwargs)
+        return [(doc, relevance_score_fn(score)) for doc, score in docs_and_scores]
+
+    async def _asimilarity_search_with_relevance_scores(
+        self,
+        query: str,
+        k: int = 4,
+        **kwargs: Any,
+    ) -> list[tuple[Document, float]]:
+        """Default similarity search with relevance scores.
+
+        Modify if necessary in subclass.
+        Return docs and relevance scores in the range [0, 1].
+
+        0 is dissimilar, 1 is most similar.
+
+        Args:
+            query: Input text.
+            k: Number of Documents to return. Defaults to 4.
+            **kwargs: kwargs to be passed to similarity search. Should include:
+                score_threshold: Optional, a floating point value between 0 and 1 to
+                    filter the resulting set of retrieved docs
+
+        Returns:
+            List of Tuples of (doc, similarity_score)
+        """
+        relevance_score_fn = self._select_relevance_score_fn()
+        docs_and_scores = await self.asimilarity_search_with_score(query, k, **kwargs)
+        return [(doc, relevance_score_fn(score)) for doc, score in docs_and_scores]
+
+    def similarity_search_with_relevance_scores(
+        self,
+        query: str,
+        k: int = 4,
+        **kwargs: Any,
+    ) -> list[tuple[Document, float]]:
+        """Return docs and relevance scores in the range [0, 1].
+
+        0 is dissimilar, 1 is most similar.
+
+        Args:
+            query: Input text.
+            k: Number of Documents to return. Defaults to 4.
+            **kwargs: kwargs to be passed to similarity search. Should include:
+                score_threshold: Optional, a floating point value between 0 and 1 to
+                    filter the resulting set of retrieved docs.
+
+        Returns:
+            List of Tuples of (doc, similarity_score).
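+
+        Example:
+            An illustrative sketch only (``vector_store`` stands in for any
+            populated concrete implementation):
+
+            .. code-block:: python
+
+                # Keep only hits whose normalized relevance is at least 0.8.
+                results = vector_store.similarity_search_with_relevance_scores(
+                    "what did the fox do?", k=4, score_threshold=0.8
+                )
+                for doc, score in results:
+                    print(f"{score:.2f} -> {doc.page_content}")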
+        """
+        score_threshold = kwargs.pop("score_threshold", None)
+
+        docs_and_similarities = self._similarity_search_with_relevance_scores(
+            query, k=k, **kwargs
+        )
+        if any(
+            similarity < 0.0 or similarity > 1.0
+            for _, similarity in docs_and_similarities
+        ):
+            warnings.warn(
+                "Relevance scores must be between"
+                f" 0 and 1, got {docs_and_similarities}",
+                stacklevel=2,
+            )
+
+        if score_threshold is not None:
+            docs_and_similarities = [
+                (doc, similarity)
+                for doc, similarity in docs_and_similarities
+                if similarity >= score_threshold
+            ]
+            if len(docs_and_similarities) == 0:
+                logger.warning(
+                    "No relevant docs were retrieved using the "
+                    "relevance score threshold %s",
+                    score_threshold,
+                )
+        return docs_and_similarities
+
+    async def asimilarity_search_with_relevance_scores(
+        self,
+        query: str,
+        k: int = 4,
+        **kwargs: Any,
+    ) -> list[tuple[Document, float]]:
+        """Async return docs and relevance scores in the range [0, 1].
+
+        0 is dissimilar, 1 is most similar.
+
+        Args:
+            query: Input text.
+            k: Number of Documents to return. Defaults to 4.
+            **kwargs: kwargs to be passed to similarity search. Should include:
+                score_threshold: Optional, a floating point value between 0 and 1 to
+                    filter the resulting set of retrieved docs
+
+        Returns:
+            List of Tuples of (doc, similarity_score)
+        """
+        score_threshold = kwargs.pop("score_threshold", None)
+
+        docs_and_similarities = await self._asimilarity_search_with_relevance_scores(
+            query, k=k, **kwargs
+        )
+        if any(
+            similarity < 0.0 or similarity > 1.0
+            for _, similarity in docs_and_similarities
+        ):
+            warnings.warn(
+                "Relevance scores must be between"
+                f" 0 and 1, got {docs_and_similarities}",
+                stacklevel=2,
+            )
+
+        if score_threshold is not None:
+            docs_and_similarities = [
+                (doc, similarity)
+                for doc, similarity in docs_and_similarities
+                if similarity >= score_threshold
+            ]
+            if len(docs_and_similarities) == 0:
+                logger.warning(
+                    "No relevant docs were retrieved using the "
+                    "relevance score threshold %s",
+                    score_threshold,
+                )
+        return docs_and_similarities
+
+    async def asimilarity_search(
+        self, query: str, k: int = 4, **kwargs: Any
+    ) -> list[Document]:
+        """Async return docs most similar to query.
+
+        Args:
+            query: Input text.
+            k: Number of Documents to return. Defaults to 4.
+            **kwargs: Arguments to pass to the search method.
+
+        Returns:
+            List of Documents most similar to the query.
+        """
+        # This is a temporary workaround to make the similarity search
+        # asynchronous. The proper solution is to make the similarity search
+        # asynchronous in the vector store implementations.
+        return await run_in_executor(None, self.similarity_search, query, k=k, **kwargs)
+
+    def similarity_search_by_vector(
+        self, embedding: list[float], k: int = 4, **kwargs: Any
+    ) -> list[Document]:
+        """Return docs most similar to embedding vector.
+
+        Args:
+            embedding: Embedding to look up documents similar to.
+            k: Number of Documents to return. Defaults to 4.
+            **kwargs: Arguments to pass to the search method.
+
+        Returns:
+            List of Documents most similar to the query vector.
+        """
+        raise NotImplementedError
+
+    async def asimilarity_search_by_vector(
+        self, embedding: list[float], k: int = 4, **kwargs: Any
+    ) -> list[Document]:
+        """Async return docs most similar to embedding vector.
+
+        Args:
+            embedding: Embedding to look up documents similar to.
+            k: Number of Documents to return. Defaults to 4.
+            **kwargs: Arguments to pass to the search method.
+
+        Returns:
+            List of Documents most similar to the query vector.
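+
+        Example:
+            Illustrative only (``vector_store`` is any populated concrete
+            implementation and ``embeddings`` is its embedding model):
+
+            .. code-block:: python
+
+                query_vector = await embeddings.aembed_query("fox")
+                docs = await vector_store.asimilarity_search_by_vector(
+                    query_vector, k=2
+                )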
+ """ + # This is a temporary workaround to make the similarity search + # asynchronous. The proper solution is to make the similarity search + # asynchronous in the vector store implementations. + return await run_in_executor( + None, self.similarity_search_by_vector, embedding, k=k, **kwargs + ) + + def max_marginal_relevance_search( + self, + query: str, + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + **kwargs: Any, + ) -> list[Document]: + """Return docs selected using the maximal marginal relevance. + + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + query: Text to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + Default is 20. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + **kwargs: Arguments to pass to the search method. + + Returns: + List of Documents selected by maximal marginal relevance. + """ + raise NotImplementedError + + async def amax_marginal_relevance_search( + self, + query: str, + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + **kwargs: Any, + ) -> list[Document]: + """Async return docs selected using the maximal marginal relevance. + + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + query: Text to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + Default is 20. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + **kwargs: Arguments to pass to the search method. + + Returns: + List of Documents selected by maximal marginal relevance. + """ + # This is a temporary workaround to make the similarity search + # asynchronous. The proper solution is to make the similarity search + # asynchronous in the vector store implementations. + return await run_in_executor( + None, + self.max_marginal_relevance_search, + query, + k=k, + fetch_k=fetch_k, + lambda_mult=lambda_mult, + **kwargs, + ) + + def max_marginal_relevance_search_by_vector( + self, + embedding: list[float], + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + **kwargs: Any, + ) -> list[Document]: + """Return docs selected using the maximal marginal relevance. + + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + embedding: Embedding to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + Default is 20. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + **kwargs: Arguments to pass to the search method. + + Returns: + List of Documents selected by maximal marginal relevance. + """ + raise NotImplementedError + + async def amax_marginal_relevance_search_by_vector( + self, + embedding: list[float], + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + **kwargs: Any, + ) -> list[Document]: + """Async return docs selected using the maximal marginal relevance. 
+ + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + embedding: Embedding to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + Default is 20. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + **kwargs: Arguments to pass to the search method. + + Returns: + List of Documents selected by maximal marginal relevance. + """ + return await run_in_executor( + None, + self.max_marginal_relevance_search_by_vector, + embedding, + k=k, + fetch_k=fetch_k, + lambda_mult=lambda_mult, + **kwargs, + ) + + @classmethod + def from_documents( + cls, + documents: list[Document], + embedding: Embeddings, + **kwargs: Any, + ) -> Self: + """Return VectorStore initialized from documents and embeddings. + + Args: + documents: List of Documents to add to the vectorstore. + embedding: Embedding function to use. + kwargs: Additional keyword arguments. + + Returns: + VectorStore: VectorStore initialized from documents and embeddings. + """ + texts = [d.page_content for d in documents] + metadatas = [d.metadata for d in documents] + + if "ids" not in kwargs: + ids = [doc.id for doc in documents] + + # If there's at least one valid ID, we'll assume that IDs + # should be used. + if any(ids): + kwargs["ids"] = ids + + return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs) + + @classmethod + async def afrom_documents( + cls, + documents: list[Document], + embedding: Embeddings, + **kwargs: Any, + ) -> Self: + """Async return VectorStore initialized from documents and embeddings. + + Args: + documents: List of Documents to add to the vectorstore. + embedding: Embedding function to use. + kwargs: Additional keyword arguments. + + Returns: + VectorStore: VectorStore initialized from documents and embeddings. + """ + texts = [d.page_content for d in documents] + metadatas = [d.metadata for d in documents] + + if "ids" not in kwargs: + ids = [doc.id for doc in documents] + + # If there's at least one valid ID, we'll assume that IDs + # should be used. + if any(ids): + kwargs["ids"] = ids + + return await cls.afrom_texts(texts, embedding, metadatas=metadatas, **kwargs) + + @classmethod + @abstractmethod + def from_texts( + cls: type[VST], + texts: list[str], + embedding: Embeddings, + metadatas: Optional[list[dict]] = None, + *, + ids: Optional[list[str]] = None, + **kwargs: Any, + ) -> VST: + """Return VectorStore initialized from texts and embeddings. + + Args: + texts: Texts to add to the vectorstore. + embedding: Embedding function to use. + metadatas: Optional list of metadatas associated with the texts. + Default is None. + ids: Optional list of IDs associated with the texts. + kwargs: Additional keyword arguments. + + Returns: + VectorStore: VectorStore initialized from texts and embeddings. + """ + + @classmethod + async def afrom_texts( + cls, + texts: list[str], + embedding: Embeddings, + metadatas: Optional[list[dict]] = None, + *, + ids: Optional[list[str]] = None, + **kwargs: Any, + ) -> Self: + """Async return VectorStore initialized from texts and embeddings. + + Args: + texts: Texts to add to the vectorstore. + embedding: Embedding function to use. + metadatas: Optional list of metadatas associated with the texts. + Default is None. + ids: Optional list of IDs associated with the texts. 
+ kwargs: Additional keyword arguments. + + Returns: + VectorStore: VectorStore initialized from texts and embeddings. + """ + if ids is not None: + kwargs["ids"] = ids + return await run_in_executor( + None, cls.from_texts, texts, embedding, metadatas, **kwargs + ) + + def _get_retriever_tags(self) -> list[str]: + """Get tags for retriever.""" + tags = [self.__class__.__name__] + if self.embeddings: + tags.append(self.embeddings.__class__.__name__) + return tags + + def as_retriever(self, **kwargs: Any) -> VectorStoreRetriever: + """Return VectorStoreRetriever initialized from this VectorStore. + + Args: + **kwargs: Keyword arguments to pass to the search function. + Can include: + search_type (Optional[str]): Defines the type of search that + the Retriever should perform. + Can be "similarity" (default), "mmr", or + "similarity_score_threshold". + search_kwargs (Optional[Dict]): Keyword arguments to pass to the + search function. Can include things like: + k: Amount of documents to return (Default: 4) + score_threshold: Minimum relevance threshold + for similarity_score_threshold + fetch_k: Amount of documents to pass to MMR algorithm + (Default: 20) + lambda_mult: Diversity of results returned by MMR; + 1 for minimum diversity and 0 for maximum. (Default: 0.5) + filter: Filter by document metadata + + Returns: + VectorStoreRetriever: Retriever class for VectorStore. + + Examples: + + .. code-block:: python + + # Retrieve more documents with higher diversity + # Useful if your dataset has many similar documents + docsearch.as_retriever( + search_type="mmr", + search_kwargs={'k': 6, 'lambda_mult': 0.25} + ) + + # Fetch more documents for the MMR algorithm to consider + # But only return the top 5 + docsearch.as_retriever( + search_type="mmr", + search_kwargs={'k': 5, 'fetch_k': 50} + ) + + # Only retrieve documents that have a relevance score + # Above a certain threshold + docsearch.as_retriever( + search_type="similarity_score_threshold", + search_kwargs={'score_threshold': 0.8} + ) + + # Only get the single most similar document from the dataset + docsearch.as_retriever(search_kwargs={'k': 1}) + + # Use a filter to only retrieve documents from a specific paper + docsearch.as_retriever( + search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}} + ) + """ + tags = kwargs.pop("tags", None) or [] + self._get_retriever_tags() + return VectorStoreRetriever(vectorstore=self, tags=tags, **kwargs) + + +class VectorStoreRetriever(BaseRetriever): + """Base Retriever class for VectorStore.""" + + vectorstore: VectorStore + """VectorStore to use for retrieval.""" + search_type: str = "similarity" + """Type of search to perform. Defaults to "similarity".""" + search_kwargs: dict = Field(default_factory=dict) + """Keyword arguments to pass to the search function.""" + allowed_search_types: ClassVar[Collection[str]] = ( + "similarity", + "similarity_score_threshold", + "mmr", + ) + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @model_validator(mode="before") + @classmethod + def validate_search_type(cls, values: dict) -> Any: + """Validate search type. + + Args: + values: Values to validate. + + Returns: + Values: Validated values. + + Raises: + ValueError: If search_type is not one of the allowed search types. + ValueError: If score_threshold is not specified with a float value(0~1) + """ + search_type = values.get("search_type", "similarity") + if search_type not in cls.allowed_search_types: + msg = ( + f"search_type of {search_type} not allowed. 
Valid values are: " + f"{cls.allowed_search_types}" + ) + raise ValueError(msg) + if search_type == "similarity_score_threshold": + score_threshold = values.get("search_kwargs", {}).get("score_threshold") + if (score_threshold is None) or (not isinstance(score_threshold, float)): + msg = ( + "`score_threshold` is not specified with a float value(0~1) " + "in `search_kwargs`." + ) + raise ValueError(msg) + return values + + def _get_ls_params(self, **kwargs: Any) -> LangSmithRetrieverParams: + """Get standard params for tracing.""" + _kwargs = self.search_kwargs | kwargs + + ls_params = super()._get_ls_params(**_kwargs) + ls_params["ls_vector_store_provider"] = self.vectorstore.__class__.__name__ + + if self.vectorstore.embeddings: + ls_params["ls_embedding_provider"] = ( + self.vectorstore.embeddings.__class__.__name__ + ) + elif hasattr(self.vectorstore, "embedding") and isinstance( + self.vectorstore.embedding, Embeddings + ): + ls_params["ls_embedding_provider"] = ( + self.vectorstore.embedding.__class__.__name__ + ) + + return ls_params + + @override + def _get_relevant_documents( + self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any + ) -> list[Document]: + _kwargs = self.search_kwargs | kwargs + if self.search_type == "similarity": + docs = self.vectorstore.similarity_search(query, **_kwargs) + elif self.search_type == "similarity_score_threshold": + docs_and_similarities = ( + self.vectorstore.similarity_search_with_relevance_scores( + query, **_kwargs + ) + ) + docs = [doc for doc, _ in docs_and_similarities] + elif self.search_type == "mmr": + docs = self.vectorstore.max_marginal_relevance_search(query, **_kwargs) + else: + msg = f"search_type of {self.search_type} not allowed." + raise ValueError(msg) + return docs + + @override + async def _aget_relevant_documents( + self, + query: str, + *, + run_manager: AsyncCallbackManagerForRetrieverRun, + **kwargs: Any, + ) -> list[Document]: + _kwargs = self.search_kwargs | kwargs + if self.search_type == "similarity": + docs = await self.vectorstore.asimilarity_search(query, **_kwargs) + elif self.search_type == "similarity_score_threshold": + docs_and_similarities = ( + await self.vectorstore.asimilarity_search_with_relevance_scores( + query, **_kwargs + ) + ) + docs = [doc for doc, _ in docs_and_similarities] + elif self.search_type == "mmr": + docs = await self.vectorstore.amax_marginal_relevance_search( + query, **_kwargs + ) + else: + msg = f"search_type of {self.search_type} not allowed." + raise ValueError(msg) + return docs + + def add_documents(self, documents: list[Document], **kwargs: Any) -> list[str]: + """Add documents to the vectorstore. + + Args: + documents: Documents to add to the vectorstore. + **kwargs: Other keyword arguments that subclasses might use. + + Returns: + List of IDs of the added texts. + """ + return self.vectorstore.add_documents(documents, **kwargs) + + async def aadd_documents( + self, documents: list[Document], **kwargs: Any + ) -> list[str]: + """Async add documents to the vectorstore. + + Args: + documents: Documents to add to the vectorstore. + **kwargs: Other keyword arguments that subclasses might use. + + Returns: + List of IDs of the added texts. 
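+
+        Example:
+            Illustrative only (assumes ``retriever`` came from
+            ``vector_store.as_retriever()``):
+
+            .. code-block:: python
+
+                from langchain_core.documents import Document
+
+                ids = await retriever.aadd_documents(
+                    [Document(page_content="hello world")]
+                )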
+ """ + return await self.vectorstore.aadd_documents(documents, **kwargs) diff --git a/venv/Lib/site-packages/langchain_core/vectorstores/in_memory.py b/venv/Lib/site-packages/langchain_core/vectorstores/in_memory.py new file mode 100644 index 00000000..ab4ce1a0 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/vectorstores/in_memory.py @@ -0,0 +1,615 @@ +"""In-memory vector store.""" + +from __future__ import annotations + +import json +import uuid +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, +) + +from typing_extensions import override + +from langchain_core._api import deprecated +from langchain_core.documents import Document +from langchain_core.load import dumpd, load +from langchain_core.vectorstores import VectorStore +from langchain_core.vectorstores.utils import _cosine_similarity as cosine_similarity +from langchain_core.vectorstores.utils import maximal_marginal_relevance + +if TYPE_CHECKING: + from collections.abc import Iterator, Sequence + + from langchain_core.embeddings import Embeddings + from langchain_core.indexing import UpsertResponse + + +class InMemoryVectorStore(VectorStore): + """In-memory vector store implementation. + + Uses a dictionary, and computes cosine similarity for search using numpy. + + Setup: + Install ``langchain-core``. + + .. code-block:: bash + + pip install -U langchain-core + + Key init args — indexing params: + embedding_function: Embeddings + Embedding function to use. + + Instantiate: + .. code-block:: python + + from langchain_core.vectorstores import InMemoryVectorStore + from langchain_openai import OpenAIEmbeddings + + vector_store = InMemoryVectorStore(OpenAIEmbeddings()) + + Add Documents: + .. code-block:: python + + from langchain_core.documents import Document + + document_1 = Document(id="1", page_content="foo", metadata={"baz": "bar"}) + document_2 = Document(id="2", page_content="thud", metadata={"bar": "baz"}) + document_3 = Document(id="3", page_content="i will be deleted :(") + + documents = [document_1, document_2, document_3] + vector_store.add_documents(documents=documents) + + Inspect documents: + .. code-block:: python + + top_n = 10 + for index, (id, doc) in enumerate(vector_store.store.items()): + if index < top_n: + # docs have keys 'id', 'vector', 'text', 'metadata' + print(f"{id}: {doc['text']}") + else: + break + + Delete Documents: + .. code-block:: python + + vector_store.delete(ids=["3"]) + + Search: + .. code-block:: python + + results = vector_store.similarity_search(query="thud",k=1) + for doc in results: + print(f"* {doc.page_content} [{doc.metadata}]") + + .. code-block:: none + + * thud [{'bar': 'baz'}] + + Search with filter: + .. code-block:: python + + def _filter_function(doc: Document) -> bool: + return doc.metadata.get("bar") == "baz" + + results = vector_store.similarity_search( + query="thud", k=1, filter=_filter_function + ) + for doc in results: + print(f"* {doc.page_content} [{doc.metadata}]") + + .. code-block:: none + + * thud [{'bar': 'baz'}] + + + Search with score: + .. code-block:: python + + results = vector_store.similarity_search_with_score( + query="qux", k=1 + ) + for doc, score in results: + print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]") + + .. code-block:: none + + * [SIM=0.832268] foo [{'baz': 'bar'}] + + Async: + .. 
code-block:: python
+
+            # add documents
+            # await vector_store.aadd_documents(documents=documents)
+
+            # delete documents
+            # await vector_store.adelete(ids=["3"])
+
+            # search
+            # results = await vector_store.asimilarity_search(query="thud", k=1)
+
+            # search with score
+            results = await vector_store.asimilarity_search_with_score(query="qux", k=1)
+            for doc, score in results:
+                print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
+
+        .. code-block:: none
+
+            * [SIM=0.832268] foo [{'baz': 'bar'}]
+
+    Use as Retriever:
+        .. code-block:: python
+
+            retriever = vector_store.as_retriever(
+                search_type="mmr",
+                search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
+            )
+            retriever.invoke("thud")
+
+        .. code-block:: none
+
+            [Document(id='2', metadata={'bar': 'baz'}, page_content='thud')]
+
+    """  # noqa: E501
+
+    def __init__(self, embedding: Embeddings) -> None:
+        """Initialize with the given embedding function.
+
+        Args:
+            embedding: embedding function to use.
+        """
+        # TODO: would be nice to change to
+        # dict[str, Document] at some point (will be a breaking change)
+        self.store: dict[str, dict[str, Any]] = {}
+        self.embedding = embedding
+
+    @property
+    @override
+    def embeddings(self) -> Embeddings:
+        return self.embedding
+
+    @override
+    def delete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None:
+        if ids:
+            for _id in ids:
+                self.store.pop(_id, None)
+
+    @override
+    async def adelete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None:
+        self.delete(ids)
+
+    @override
+    def add_documents(
+        self,
+        documents: list[Document],
+        ids: Optional[list[str]] = None,
+        **kwargs: Any,
+    ) -> list[str]:
+        """Add documents to the store."""
+        texts = [doc.page_content for doc in documents]
+        vectors = self.embedding.embed_documents(texts)
+
+        if ids and len(ids) != len(texts):
+            msg = (
+                f"ids must be the same length as texts. "
+                f"Got {len(ids)} ids and {len(texts)} texts."
+            )
+            raise ValueError(msg)
+
+        id_iterator: Iterator[Optional[str]] = (
+            iter(ids) if ids else iter(doc.id for doc in documents)
+        )
+
+        ids_ = []
+
+        for doc, vector in zip(documents, vectors):
+            doc_id = next(id_iterator)
+            doc_id_ = doc_id or str(uuid.uuid4())
+            ids_.append(doc_id_)
+            self.store[doc_id_] = {
+                "id": doc_id_,
+                "vector": vector,
+                "text": doc.page_content,
+                "metadata": doc.metadata,
+            }
+
+        return ids_
+
+    @override
+    async def aadd_documents(
+        self, documents: list[Document], ids: Optional[list[str]] = None, **kwargs: Any
+    ) -> list[str]:
+        """Add documents to the store."""
+        texts = [doc.page_content for doc in documents]
+        vectors = await self.embedding.aembed_documents(texts)
+
+        if ids and len(ids) != len(texts):
+            msg = (
+                f"ids must be the same length as texts. "
+                f"Got {len(ids)} ids and {len(texts)} texts."
+            )
+            raise ValueError(msg)
+
+        id_iterator: Iterator[Optional[str]] = (
+            iter(ids) if ids else iter(doc.id for doc in documents)
+        )
+        ids_: list[str] = []
+
+        for doc, vector in zip(documents, vectors):
+            doc_id = next(id_iterator)
+            doc_id_ = doc_id or str(uuid.uuid4())
+            ids_.append(doc_id_)
+            self.store[doc_id_] = {
+                "id": doc_id_,
+                "vector": vector,
+                "text": doc.page_content,
+                "metadata": doc.metadata,
+            }
+
+        return ids_
+
+    @override
+    def get_by_ids(self, ids: Sequence[str], /) -> list[Document]:
+        """Get documents by their ids.
+
+        Args:
+            ids: The ids of the documents to get.
+
+        Returns:
+            A list of Document objects.
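+
+        Example:
+            A small sketch (assumes documents with ids "1" and "2" were added
+            earlier, as in the class-level examples above):
+
+            .. code-block:: python
+
+                docs = vector_store.get_by_ids(["1", "2"])
+                for doc in docs:
+                    print(doc.id, doc.page_content)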
+        """
+        documents = []
+
+        for doc_id in ids:
+            doc = self.store.get(doc_id)
+            if doc:
+                documents.append(
+                    Document(
+                        id=doc["id"],
+                        page_content=doc["text"],
+                        metadata=doc["metadata"],
+                    )
+                )
+        return documents
+
+    @deprecated(
+        alternative="VectorStore.add_documents",
+        message=(
+            "This was a beta API that was added in 0.2.11. It'll be removed in 1.0."
+        ),
+        since="0.2.29",
+        removal="1.0",
+    )
+    def upsert(self, items: Sequence[Document], /, **_kwargs: Any) -> UpsertResponse:
+        """[DEPRECATED] Upsert documents into the store.
+
+        Args:
+            items: The documents to upsert.
+
+        Returns:
+            The upsert response.
+        """
+        vectors = self.embedding.embed_documents([item.page_content for item in items])
+        ids = []
+        for item, vector in zip(items, vectors):
+            doc_id = item.id or str(uuid.uuid4())
+            ids.append(doc_id)
+            self.store[doc_id] = {
+                "id": doc_id,
+                "vector": vector,
+                "text": item.page_content,
+                "metadata": item.metadata,
+            }
+        return {
+            "succeeded": ids,
+            "failed": [],
+        }
+
+    @deprecated(
+        alternative="VectorStore.aadd_documents",
+        message=(
+            "This was a beta API that was added in 0.2.11. It'll be removed in 1.0."
+        ),
+        since="0.2.29",
+        removal="1.0",
+    )
+    async def aupsert(
+        self, items: Sequence[Document], /, **_kwargs: Any
+    ) -> UpsertResponse:
+        """[DEPRECATED] Upsert documents into the store.
+
+        Args:
+            items: The documents to upsert.
+
+        Returns:
+            The upsert response.
+        """
+        vectors = await self.embedding.aembed_documents(
+            [item.page_content for item in items]
+        )
+        ids = []
+        for item, vector in zip(items, vectors):
+            doc_id = item.id or str(uuid.uuid4())
+            ids.append(doc_id)
+            self.store[doc_id] = {
+                "id": doc_id,
+                "vector": vector,
+                "text": item.page_content,
+                "metadata": item.metadata,
+            }
+        return {
+            "succeeded": ids,
+            "failed": [],
+        }
+
+    @override
+    async def aget_by_ids(self, ids: Sequence[str], /) -> list[Document]:
+        """Async get documents by their ids.
+
+        Args:
+            ids: The ids of the documents to get.
+
+        Returns:
+            A list of Document objects.
+        """
+        return self.get_by_ids(ids)
+
+    def _similarity_search_with_score_by_vector(
+        self,
+        embedding: list[float],
+        k: int = 4,
+        filter: Optional[Callable[[Document], bool]] = None,
+    ) -> list[tuple[Document, float, list[float]]]:
+        # get all docs with fixed order in list
+        docs = list(self.store.values())
+
+        if filter is not None:
+            docs = [
+                doc
+                for doc in docs
+                if filter(Document(page_content=doc["text"], metadata=doc["metadata"]))
+            ]
+
+        if not docs:
+            return []
+
+        similarity = cosine_similarity([embedding], [doc["vector"] for doc in docs])[0]
+
+        # get the indices ordered by similarity score
+        top_k_idx = similarity.argsort()[::-1][:k]
+
+        return [
+            (
+                Document(
+                    id=doc_dict["id"],
+                    page_content=doc_dict["text"],
+                    metadata=doc_dict["metadata"],
+                ),
+                float(similarity[idx].item()),
+                doc_dict["vector"],
+            )
+            for idx in top_k_idx
+            # Assign using walrus operator to avoid multiple lookups
+            if (doc_dict := docs[idx])
+        ]
+
+    def similarity_search_with_score_by_vector(
+        self,
+        embedding: list[float],
+        k: int = 4,
+        filter: Optional[Callable[[Document], bool]] = None,
+        **_kwargs: Any,
+    ) -> list[tuple[Document, float]]:
+        """Search for the most similar documents to the given embedding.
+
+        Args:
+            embedding: The embedding to search for.
+            k: The number of documents to return.
+            filter: A function to filter the documents.
+
+        Returns:
+            A list of tuples of Document objects and their similarity scores.
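+
+        Example:
+            Illustrative only (embeds the query with the store's own embedding
+            model; the documents and metadata mirror the class-level examples
+            above):
+
+            .. code-block:: python
+
+                vec = vector_store.embedding.embed_query("thud")
+                hits = vector_store.similarity_search_with_score_by_vector(
+                    vec, k=1, filter=lambda doc: doc.metadata.get("bar") == "baz"
+                )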
+        """
+        return [
+            (doc, similarity)
+            for doc, similarity, _ in self._similarity_search_with_score_by_vector(
+                embedding=embedding, k=k, filter=filter
+            )
+        ]
+
+    @override
+    def similarity_search_with_score(
+        self,
+        query: str,
+        k: int = 4,
+        **kwargs: Any,
+    ) -> list[tuple[Document, float]]:
+        embedding = self.embedding.embed_query(query)
+        return self.similarity_search_with_score_by_vector(
+            embedding,
+            k,
+            **kwargs,
+        )
+
+    @override
+    async def asimilarity_search_with_score(
+        self, query: str, k: int = 4, **kwargs: Any
+    ) -> list[tuple[Document, float]]:
+        embedding = await self.embedding.aembed_query(query)
+        return self.similarity_search_with_score_by_vector(
+            embedding,
+            k,
+            **kwargs,
+        )
+
+    @override
+    def similarity_search_by_vector(
+        self,
+        embedding: list[float],
+        k: int = 4,
+        **kwargs: Any,
+    ) -> list[Document]:
+        docs_and_scores = self.similarity_search_with_score_by_vector(
+            embedding,
+            k,
+            **kwargs,
+        )
+        return [doc for doc, _ in docs_and_scores]
+
+    @override
+    async def asimilarity_search_by_vector(
+        self, embedding: list[float], k: int = 4, **kwargs: Any
+    ) -> list[Document]:
+        return self.similarity_search_by_vector(embedding, k, **kwargs)
+
+    @override
+    def similarity_search(
+        self, query: str, k: int = 4, **kwargs: Any
+    ) -> list[Document]:
+        return [doc for doc, _ in self.similarity_search_with_score(query, k, **kwargs)]
+
+    @override
+    async def asimilarity_search(
+        self, query: str, k: int = 4, **kwargs: Any
+    ) -> list[Document]:
+        return [
+            doc
+            for doc, _ in await self.asimilarity_search_with_score(query, k, **kwargs)
+        ]
+
+    @override
+    def max_marginal_relevance_search_by_vector(
+        self,
+        embedding: list[float],
+        k: int = 4,
+        fetch_k: int = 20,
+        lambda_mult: float = 0.5,
+        *,
+        filter: Optional[Callable[[Document], bool]] = None,
+        **kwargs: Any,
+    ) -> list[Document]:
+        prefetch_hits = self._similarity_search_with_score_by_vector(
+            embedding=embedding,
+            k=fetch_k,
+            filter=filter,
+        )
+
+        try:
+            import numpy as np
+        except ImportError as e:
+            msg = (
+                "numpy must be installed to use max_marginal_relevance_search. "
+                "Please install it with `pip install numpy`."
+            )
+            raise ImportError(msg) from e
+
+        mmr_chosen_indices = maximal_marginal_relevance(
+            np.array(embedding, dtype=np.float32),
+            [vector for _, _, vector in prefetch_hits],
+            k=k,
+            lambda_mult=lambda_mult,
+        )
+        return [prefetch_hits[idx][0] for idx in mmr_chosen_indices]
+
+    @override
+    def max_marginal_relevance_search(
+        self,
+        query: str,
+        k: int = 4,
+        fetch_k: int = 20,
+        lambda_mult: float = 0.5,
+        **kwargs: Any,
+    ) -> list[Document]:
+        embedding_vector = self.embedding.embed_query(query)
+        return self.max_marginal_relevance_search_by_vector(
+            embedding_vector,
+            k,
+            fetch_k,
+            lambda_mult=lambda_mult,
+            **kwargs,
+        )
+
+    @override
+    async def amax_marginal_relevance_search(
+        self,
+        query: str,
+        k: int = 4,
+        fetch_k: int = 20,
+        lambda_mult: float = 0.5,
+        **kwargs: Any,
+    ) -> list[Document]:
+        embedding_vector = await self.embedding.aembed_query(query)
+        return self.max_marginal_relevance_search_by_vector(
+            embedding_vector,
+            k,
+            fetch_k,
+            lambda_mult=lambda_mult,
+            **kwargs,
+        )
+
+    @classmethod
+    @override
+    def from_texts(
+        cls,
+        texts: list[str],
+        embedding: Embeddings,
+        metadatas: Optional[list[dict]] = None,
+        **kwargs: Any,
+    ) -> InMemoryVectorStore:
+        store = cls(
+            embedding=embedding,
+        )
+        store.add_texts(texts=texts, metadatas=metadatas, **kwargs)
+        return store
+
+    @classmethod
+    @override
+    async def afrom_texts(
+        cls,
+        texts: list[str],
+        embedding: Embeddings,
+        metadatas: Optional[list[dict]] = None,
+        **kwargs: Any,
+    ) -> InMemoryVectorStore:
+        store = cls(
+            embedding=embedding,
+        )
+        await store.aadd_texts(texts=texts, metadatas=metadatas, **kwargs)
+        return store
+
+    @classmethod
+    def load(
+        cls, path: str, embedding: Embeddings, **kwargs: Any
+    ) -> InMemoryVectorStore:
+        """Load a vector store from a file.
+
+        Args:
+            path: The path to load the vector store from.
+            embedding: The embedding to use.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            A VectorStore object.
+        """
+        _path: Path = Path(path)
+        with _path.open("r") as f:
+            store = load(json.load(f))
+        vectorstore = cls(embedding=embedding, **kwargs)
+        vectorstore.store = store
+        return vectorstore
+
+    def dump(self, path: str) -> None:
+        """Dump the vector store to a file.
+
+        Args:
+            path: The path to dump the vector store to.
+        """
+        _path: Path = Path(path)
+        _path.parent.mkdir(exist_ok=True, parents=True)
+        with _path.open("w") as f:
+            json.dump(dumpd(self.store), f, indent=2)
diff --git a/venv/Lib/site-packages/langchain_core/vectorstores/utils.py b/venv/Lib/site-packages/langchain_core/vectorstores/utils.py
new file mode 100644
index 00000000..5a72e44c
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_core/vectorstores/utils.py
@@ -0,0 +1,128 @@
+"""Internal utilities for the in-memory implementation of VectorStore.
+
+These are part of a private API, and users should not use them directly
+as they can change without notice.
+"""
+
+from __future__ import annotations
+
+import logging
+from typing import TYPE_CHECKING, Union
+
+if TYPE_CHECKING:
+    import numpy as np
+
+    Matrix = Union[list[list[float]], list[np.ndarray], np.ndarray]
+
+logger = logging.getLogger(__name__)
+
+
+def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
+    """Row-wise cosine similarity between two equal-width matrices.
+
+    Args:
+        x: A matrix of shape (n, m).
+        y: A matrix of shape (k, m).
+
+    Returns:
+        A matrix of shape (n, k) where each element (i, j) is the cosine similarity
+        between the ith row of X and the jth row of Y.
+
+    Raises:
+        ValueError: If the number of columns in X and Y are not the same.
+        ImportError: If numpy is not installed.
+    """
+    try:
+        import numpy as np
+    except ImportError as e:
+        msg = (
+            "cosine_similarity requires numpy to be installed. "
+            "Please install numpy with `pip install numpy`."
+        )
+        raise ImportError(msg) from e
+
+    if len(x) == 0 or len(y) == 0:
+        return np.array([])
+
+    x = np.array(x)
+    y = np.array(y)
+    if x.shape[1] != y.shape[1]:
+        msg = (
+            f"Number of columns in X and Y must be the same. X has shape {x.shape} "
+            f"and Y has shape {y.shape}."
+        )
+        raise ValueError(msg)
+    try:
+        import simsimd as simd  # type: ignore[import-not-found]
+    except ImportError:
+        logger.debug(
+            "Unable to import simsimd, defaulting to NumPy implementation. If you want "
+            "to use simsimd please install with `pip install simsimd`."
+        )
+        x_norm = np.linalg.norm(x, axis=1)
+        y_norm = np.linalg.norm(y, axis=1)
+        # Ignore divide-by-zero runtime warnings; those entries are handled below.
+ with np.errstate(divide="ignore", invalid="ignore"): + similarity = np.dot(x, y.T) / np.outer(x_norm, y_norm) + similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0 + return similarity + + x = np.array(x, dtype=np.float32) + y = np.array(y, dtype=np.float32) + return 1 - np.array(simd.cdist(x, y, metric="cosine")) + + +def maximal_marginal_relevance( + query_embedding: np.ndarray, + embedding_list: list, + lambda_mult: float = 0.5, + k: int = 4, +) -> list[int]: + """Calculate maximal marginal relevance. + + Args: + query_embedding: The query embedding. + embedding_list: A list of embeddings. + lambda_mult: The lambda parameter for MMR. Default is 0.5. + k: The number of embeddings to return. Default is 4. + + Returns: + A list of indices of the embeddings to return. + + Raises: + ImportError: If numpy is not installed. + """ + try: + import numpy as np + except ImportError as e: + msg = ( + "maximal_marginal_relevance requires numpy to be installed. " + "Please install numpy with `pip install numpy`." + ) + raise ImportError(msg) from e + + if min(k, len(embedding_list)) <= 0: + return [] + if query_embedding.ndim == 1: + query_embedding = np.expand_dims(query_embedding, axis=0) + similarity_to_query = _cosine_similarity(query_embedding, embedding_list)[0] + most_similar = int(np.argmax(similarity_to_query)) + idxs = [most_similar] + selected = np.array([embedding_list[most_similar]]) + while len(idxs) < min(k, len(embedding_list)): + best_score = -np.inf + idx_to_add = -1 + similarity_to_selected = _cosine_similarity(embedding_list, selected) + for i, query_score in enumerate(similarity_to_query): + if i in idxs: + continue + redundant_score = max(similarity_to_selected[i]) + equation_score = ( + lambda_mult * query_score - (1 - lambda_mult) * redundant_score + ) + if equation_score > best_score: + best_score = equation_score + idx_to_add = i + idxs.append(idx_to_add) + selected = np.append(selected, [embedding_list[idx_to_add]], axis=0) + return idxs diff --git a/venv/Lib/site-packages/langchain_core/version.py b/venv/Lib/site-packages/langchain_core/version.py new file mode 100644 index 00000000..29ee5ec6 --- /dev/null +++ b/venv/Lib/site-packages/langchain_core/version.py @@ -0,0 +1,3 @@ +"""langchain-core version information and utilities.""" + +VERSION = "0.3.59" diff --git a/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/INSTALLER b/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/METADATA b/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/METADATA new file mode 100644 index 00000000..900783dc --- /dev/null +++ b/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/METADATA @@ -0,0 +1,50 @@ +Metadata-Version: 2.1 +Name: langchain-text-splitters +Version: 0.3.8 +Summary: LangChain text splitting utilities +License: MIT +Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/text-splitters +Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-text-splitters%3D%3D0%22&expanded=true +Project-URL: repository, https://github.com/langchain-ai/langchain +Requires-Python: <4.0,>=3.9 +Requires-Dist: langchain-core<1.0.0,>=0.3.51 +Description-Content-Type: text/markdown + +# 🦜✂️ LangChain Text Splitters + 
+[![Downloads](https://static.pepy.tech/badge/langchain_text_splitters/month)](https://pepy.tech/project/langchain_text_splitters)
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+
+## Quick Install
+
+```bash
+pip install langchain-text-splitters
+```
+
+## What is it?
+
+LangChain Text Splitters contains utilities for splitting a wide variety of text documents into chunks.
+
+For full documentation see the [API reference](https://python.langchain.com/api_reference/text_splitters/index.html)
+and the [Text Splitters](https://python.langchain.com/docs/modules/data_connection/document_transformers/) module in the main docs.
+
+## 📕 Releases & Versioning
+
+`langchain-text-splitters` is currently on version `0.3.x`.
+
+Minor version increases will occur for:
+
+- Breaking changes for any public interfaces NOT marked `beta`
+
+Patch version increases will occur for:
+
+- Bug fixes
+- New features
+- Any changes to private interfaces
+- Any changes to `beta` features
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://python.langchain.com/docs/contributing/).
diff --git a/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/RECORD b/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/RECORD
new file mode 100644
index 00000000..ff47a67e
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/RECORD
@@ -0,0 +1,33 @@
+langchain_text_splitters-0.3.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+langchain_text_splitters-0.3.8.dist-info/METADATA,sha256=j6N3gLUzNXogLdJQ3_wFxn7B7MXdmtSNBSXFu-F81xU,1937
+langchain_text_splitters-0.3.8.dist-info/RECORD,,
+langchain_text_splitters-0.3.8.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+langchain_text_splitters-0.3.8.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+langchain_text_splitters/__init__.py,sha256=qsGh220teTx9XThFzStC61rgq5Xbj_2elz-HhjAQttM,2329
+langchain_text_splitters/__pycache__/__init__.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/base.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/character.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/html.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/json.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/jsx.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/konlpy.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/latex.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/markdown.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/nltk.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/python.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/sentence_transformers.cpython-312.pyc,,
+langchain_text_splitters/__pycache__/spacy.cpython-312.pyc,,
+langchain_text_splitters/base.py,sha256=3WMILH-1_xDG1EnHk-GZ1OQjdYYyR8VIxhuURSrTV3o,12157
+langchain_text_splitters/character.py,sha256=jOZh6zLRx81BLm-MvT2R8neH0Q0ie6lfm9UlzEAHln4,23723
+langchain_text_splitters/html.py,sha256=jSfTMgipe_zJ-Hm7ZVdVm93UkCScu0N0MZ4ZWOcBsyc,36835
+langchain_text_splitters/json.py,sha256=mXArsSUsH_kCPZUvUTxNHJsy6kBTlcb-5Jyba8LAo-w,5895
+langchain_text_splitters/jsx.py,sha256=vmwfEc5c7eagLUvjBh6BbqyEOanW0g7AIYQKGo8ed18,3247
+langchain_text_splitters/konlpy.py,sha256=6BA2oGyhN9OX2do0AzUUy6b80k9rTr3gR-6-04-KMDg,985
+langchain_text_splitters/latex.py,sha256=7WReUU7Ypbmhyr6s7zUPM2FACpMubBVOvYyLQktIQt4,546
+langchain_text_splitters/markdown.py,sha256=o4w_q1DRzv8EC_2Key51ReWKuZEu02njQA6nl8HRfNU,17126
+langchain_text_splitters/nltk.py,sha256=iqmLeBhc_y4gI04kQKccTvnLE24B_xHAts2SRaulrAU,1966
+langchain_text_splitters/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_text_splitters/python.py,sha256=J02CAKztyEN69A5wYNhRL2hw8_SWBSg33-MksjDmTHk,539
+langchain_text_splitters/sentence_transformers.py,sha256=CKhm0c6QkZIoWaVOzxsRKm1c7MIyO-1lyqphV1cgnxU,3787
+langchain_text_splitters/spacy.py,sha256=iX60ldg3TmztpiAcd88ohR_xUSyHQRcU2nFEg8F-1d4,1941
+langchain_text_splitters/xsl/converting_to_header.xslt,sha256=WesNqi4fo2d9CPv3bZdRsToLJYE12MrMZFv2ewNvWfU,1073
diff --git a/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/WHEEL b/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/WHEEL
new file mode 100644
index 00000000..64b991e8
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: pdm-backend (2.4.3)
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/entry_points.txt b/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/entry_points.txt
new file mode 100644
index 00000000..c3ad4726
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_text_splitters-0.3.8.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+[console_scripts]
+
+[gui_scripts]
+
diff --git a/venv/Lib/site-packages/langchain_text_splitters/__init__.py b/venv/Lib/site-packages/langchain_text_splitters/__init__.py
new file mode 100644
index 00000000..2bcc8d07
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_text_splitters/__init__.py
@@ -0,0 +1,81 @@
+"""**Text Splitters** are classes for splitting text.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseDocumentTransformer --> TextSplitter --> <name>TextSplitter  # Example: CharacterTextSplitter
+                                                 RecursiveCharacterTextSplitter --> <name>TextSplitter
+
+Note: **MarkdownHeaderTextSplitter** and **HTMLHeaderTextSplitter** do not derive from TextSplitter.
+
+
+**Main helpers:**
+
+..
code-block:: + + Document, Tokenizer, Language, LineType, HeaderType + +""" # noqa: E501 + +from langchain_text_splitters.base import ( + Language, + TextSplitter, + Tokenizer, + TokenTextSplitter, + split_text_on_tokens, +) +from langchain_text_splitters.character import ( + CharacterTextSplitter, + RecursiveCharacterTextSplitter, +) +from langchain_text_splitters.html import ( + ElementType, + HTMLHeaderTextSplitter, + HTMLSectionSplitter, + HTMLSemanticPreservingSplitter, +) +from langchain_text_splitters.json import RecursiveJsonSplitter +from langchain_text_splitters.jsx import JSFrameworkTextSplitter +from langchain_text_splitters.konlpy import KonlpyTextSplitter +from langchain_text_splitters.latex import LatexTextSplitter +from langchain_text_splitters.markdown import ( + ExperimentalMarkdownSyntaxTextSplitter, + HeaderType, + LineType, + MarkdownHeaderTextSplitter, + MarkdownTextSplitter, +) +from langchain_text_splitters.nltk import NLTKTextSplitter +from langchain_text_splitters.python import PythonCodeTextSplitter +from langchain_text_splitters.sentence_transformers import ( + SentenceTransformersTokenTextSplitter, +) +from langchain_text_splitters.spacy import SpacyTextSplitter + +__all__ = [ + "TokenTextSplitter", + "TextSplitter", + "Tokenizer", + "Language", + "RecursiveCharacterTextSplitter", + "RecursiveJsonSplitter", + "LatexTextSplitter", + "JSFrameworkTextSplitter", + "PythonCodeTextSplitter", + "KonlpyTextSplitter", + "SpacyTextSplitter", + "NLTKTextSplitter", + "split_text_on_tokens", + "SentenceTransformersTokenTextSplitter", + "ElementType", + "HeaderType", + "LineType", + "HTMLHeaderTextSplitter", + "HTMLSectionSplitter", + "HTMLSemanticPreservingSplitter", + "MarkdownHeaderTextSplitter", + "MarkdownTextSplitter", + "CharacterTextSplitter", + "ExperimentalMarkdownSyntaxTextSplitter", +] diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/__init__.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 00000000..cd900500 Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/__init__.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/base.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/base.cpython-312.pyc new file mode 100644 index 00000000..61a18d2b Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/base.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/character.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/character.cpython-312.pyc new file mode 100644 index 00000000..ec9885a7 Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/character.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/html.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/html.cpython-312.pyc new file mode 100644 index 00000000..2af63026 Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/html.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/json.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/json.cpython-312.pyc new file mode 100644 index 00000000..13c37233 Binary files /dev/null and 
b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/json.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/jsx.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/jsx.cpython-312.pyc new file mode 100644 index 00000000..3bbd53cf Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/jsx.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/konlpy.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/konlpy.cpython-312.pyc new file mode 100644 index 00000000..e9866296 Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/konlpy.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/latex.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/latex.cpython-312.pyc new file mode 100644 index 00000000..08992c50 Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/latex.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/markdown.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/markdown.cpython-312.pyc new file mode 100644 index 00000000..8c985144 Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/markdown.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/nltk.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/nltk.cpython-312.pyc new file mode 100644 index 00000000..11997bcb Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/nltk.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/python.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/python.cpython-312.pyc new file mode 100644 index 00000000..beb289c7 Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/python.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/sentence_transformers.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/sentence_transformers.cpython-312.pyc new file mode 100644 index 00000000..d940884a Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/sentence_transformers.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/__pycache__/spacy.cpython-312.pyc b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/spacy.cpython-312.pyc new file mode 100644 index 00000000..dadb8dda Binary files /dev/null and b/venv/Lib/site-packages/langchain_text_splitters/__pycache__/spacy.cpython-312.pyc differ diff --git a/venv/Lib/site-packages/langchain_text_splitters/base.py b/venv/Lib/site-packages/langchain_text_splitters/base.py new file mode 100644 index 00000000..c94c171c --- /dev/null +++ b/venv/Lib/site-packages/langchain_text_splitters/base.py @@ -0,0 +1,343 @@ +from __future__ import annotations + +import copy +import logging +from abc import ABC, abstractmethod +from dataclasses import dataclass +from enum import Enum +from typing import ( + AbstractSet, + Any, + Callable, + Collection, + Iterable, + List, + Literal, + Optional, + Sequence, + Type, + TypeVar, + Union, +) + +from langchain_core.documents import BaseDocumentTransformer, Document + 
+logger = logging.getLogger(__name__) + +TS = TypeVar("TS", bound="TextSplitter") + + +class TextSplitter(BaseDocumentTransformer, ABC): + """Interface for splitting text into chunks.""" + + def __init__( + self, + chunk_size: int = 4000, + chunk_overlap: int = 200, + length_function: Callable[[str], int] = len, + keep_separator: Union[bool, Literal["start", "end"]] = False, + add_start_index: bool = False, + strip_whitespace: bool = True, + ) -> None: + """Create a new TextSplitter. + + Args: + chunk_size: Maximum size of chunks to return + chunk_overlap: Overlap in characters between chunks + length_function: Function that measures the length of given chunks + keep_separator: Whether to keep the separator and where to place it + in each corresponding chunk (True='start') + add_start_index: If `True`, includes chunk's start index in metadata + strip_whitespace: If `True`, strips whitespace from the start and end of + every document + """ + if chunk_overlap > chunk_size: + raise ValueError( + f"Got a larger chunk overlap ({chunk_overlap}) than chunk size " + f"({chunk_size}), should be smaller." + ) + self._chunk_size = chunk_size + self._chunk_overlap = chunk_overlap + self._length_function = length_function + self._keep_separator = keep_separator + self._add_start_index = add_start_index + self._strip_whitespace = strip_whitespace + + @abstractmethod + def split_text(self, text: str) -> List[str]: + """Split text into multiple components.""" + + def create_documents( + self, texts: List[str], metadatas: Optional[List[dict]] = None + ) -> List[Document]: + """Create documents from a list of texts.""" + _metadatas = metadatas or [{}] * len(texts) + documents = [] + for i, text in enumerate(texts): + index = 0 + previous_chunk_len = 0 + for chunk in self.split_text(text): + metadata = copy.deepcopy(_metadatas[i]) + if self._add_start_index: + offset = index + previous_chunk_len - self._chunk_overlap + index = text.find(chunk, max(0, offset)) + metadata["start_index"] = index + previous_chunk_len = len(chunk) + new_doc = Document(page_content=chunk, metadata=metadata) + documents.append(new_doc) + return documents + + def split_documents(self, documents: Iterable[Document]) -> List[Document]: + """Split documents.""" + texts, metadatas = [], [] + for doc in documents: + texts.append(doc.page_content) + metadatas.append(doc.metadata) + return self.create_documents(texts, metadatas=metadatas) + + def _join_docs(self, docs: List[str], separator: str) -> Optional[str]: + text = separator.join(docs) + if self._strip_whitespace: + text = text.strip() + if text == "": + return None + else: + return text + + def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]: + # We now want to combine these smaller pieces into medium size + # chunks to send to the LLM. 
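+        # Greedy merge: keep appending splits until adding the next piece (plus
+        # its separator) would overflow chunk_size; then emit the buffered chunk
+        # and pop pieces from the front until at most chunk_overlap remains.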
+ separator_len = self._length_function(separator) + + docs = [] + current_doc: List[str] = [] + total = 0 + for d in splits: + _len = self._length_function(d) + if ( + total + _len + (separator_len if len(current_doc) > 0 else 0) + > self._chunk_size + ): + if total > self._chunk_size: + logger.warning( + f"Created a chunk of size {total}, " + f"which is longer than the specified {self._chunk_size}" + ) + if len(current_doc) > 0: + doc = self._join_docs(current_doc, separator) + if doc is not None: + docs.append(doc) + # Keep on popping if: + # - we have a larger chunk than in the chunk overlap + # - or if we still have any chunks and the length is long + while total > self._chunk_overlap or ( + total + _len + (separator_len if len(current_doc) > 0 else 0) + > self._chunk_size + and total > 0 + ): + total -= self._length_function(current_doc[0]) + ( + separator_len if len(current_doc) > 1 else 0 + ) + current_doc = current_doc[1:] + current_doc.append(d) + total += _len + (separator_len if len(current_doc) > 1 else 0) + doc = self._join_docs(current_doc, separator) + if doc is not None: + docs.append(doc) + return docs + + @classmethod + def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter: + """Text splitter that uses HuggingFace tokenizer to count length.""" + try: + from transformers import PreTrainedTokenizerBase + + if not isinstance(tokenizer, PreTrainedTokenizerBase): + raise ValueError( + "Tokenizer received was not an instance of PreTrainedTokenizerBase" + ) + + def _huggingface_tokenizer_length(text: str) -> int: + return len(tokenizer.tokenize(text)) + + except ImportError: + raise ValueError( + "Could not import transformers python package. " + "Please install it with `pip install transformers`." + ) + return cls(length_function=_huggingface_tokenizer_length, **kwargs) + + @classmethod + def from_tiktoken_encoder( + cls: Type[TS], + encoding_name: str = "gpt2", + model_name: Optional[str] = None, + allowed_special: Union[Literal["all"], AbstractSet[str]] = set(), + disallowed_special: Union[Literal["all"], Collection[str]] = "all", + **kwargs: Any, + ) -> TS: + """Text splitter that uses tiktoken encoder to count length.""" + try: + import tiktoken + except ImportError: + raise ImportError( + "Could not import tiktoken python package. " + "This is needed in order to calculate max_tokens_for_prompt. " + "Please install it with `pip install tiktoken`." 
+ ) + + if model_name is not None: + enc = tiktoken.encoding_for_model(model_name) + else: + enc = tiktoken.get_encoding(encoding_name) + + def _tiktoken_encoder(text: str) -> int: + return len( + enc.encode( + text, + allowed_special=allowed_special, + disallowed_special=disallowed_special, + ) + ) + + if issubclass(cls, TokenTextSplitter): + extra_kwargs = { + "encoding_name": encoding_name, + "model_name": model_name, + "allowed_special": allowed_special, + "disallowed_special": disallowed_special, + } + kwargs = {**kwargs, **extra_kwargs} + + return cls(length_function=_tiktoken_encoder, **kwargs) + + def transform_documents( + self, documents: Sequence[Document], **kwargs: Any + ) -> Sequence[Document]: + """Transform sequence of documents by splitting them.""" + return self.split_documents(list(documents)) + + +class TokenTextSplitter(TextSplitter): + """Splitting text to tokens using model tokenizer.""" + + def __init__( + self, + encoding_name: str = "gpt2", + model_name: Optional[str] = None, + allowed_special: Union[Literal["all"], AbstractSet[str]] = set(), + disallowed_special: Union[Literal["all"], Collection[str]] = "all", + **kwargs: Any, + ) -> None: + """Create a new TextSplitter.""" + super().__init__(**kwargs) + try: + import tiktoken + except ImportError: + raise ImportError( + "Could not import tiktoken python package. " + "This is needed in order to for TokenTextSplitter. " + "Please install it with `pip install tiktoken`." + ) + + if model_name is not None: + enc = tiktoken.encoding_for_model(model_name) + else: + enc = tiktoken.get_encoding(encoding_name) + self._tokenizer = enc + self._allowed_special = allowed_special + self._disallowed_special = disallowed_special + + def split_text(self, text: str) -> List[str]: + """Splits the input text into smaller chunks based on tokenization. + + This method uses a custom tokenizer configuration to encode the input text + into tokens, processes the tokens in chunks of a specified size with overlap, + and decodes them back into text chunks. The splitting is performed using the + `split_text_on_tokens` function. + + Args: + text (str): The input text to be split into smaller chunks. + + Returns: + List[str]: A list of text chunks, where each chunk is derived from a portion + of the input text based on the tokenization and chunking rules. 
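+
+        Example:
+            .. code-block:: python
+
+                splitter = TokenTextSplitter(chunk_size=10, chunk_overlap=2)
+                chunks = splitter.split_text("This is a sentence for example.")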
+ """ + + def _encode(_text: str) -> List[int]: + return self._tokenizer.encode( + _text, + allowed_special=self._allowed_special, + disallowed_special=self._disallowed_special, + ) + + tokenizer = Tokenizer( + chunk_overlap=self._chunk_overlap, + tokens_per_chunk=self._chunk_size, + decode=self._tokenizer.decode, + encode=_encode, + ) + + return split_text_on_tokens(text=text, tokenizer=tokenizer) + + +class Language(str, Enum): + """Enum of the programming languages.""" + + CPP = "cpp" + GO = "go" + JAVA = "java" + KOTLIN = "kotlin" + JS = "js" + TS = "ts" + PHP = "php" + PROTO = "proto" + PYTHON = "python" + RST = "rst" + RUBY = "ruby" + RUST = "rust" + SCALA = "scala" + SWIFT = "swift" + MARKDOWN = "markdown" + LATEX = "latex" + HTML = "html" + SOL = "sol" + CSHARP = "csharp" + COBOL = "cobol" + C = "c" + LUA = "lua" + PERL = "perl" + HASKELL = "haskell" + ELIXIR = "elixir" + POWERSHELL = "powershell" + + +@dataclass(frozen=True) +class Tokenizer: + """Tokenizer data class.""" + + chunk_overlap: int + """Overlap in tokens between chunks""" + tokens_per_chunk: int + """Maximum number of tokens per chunk""" + decode: Callable[[List[int]], str] + """ Function to decode a list of token ids to a string""" + encode: Callable[[str], List[int]] + """ Function to encode a string to a list of token ids""" + + +def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> List[str]: + """Split incoming text and return chunks using tokenizer.""" + splits: List[str] = [] + input_ids = tokenizer.encode(text) + start_idx = 0 + cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids)) + chunk_ids = input_ids[start_idx:cur_idx] + while start_idx < len(input_ids): + splits.append(tokenizer.decode(chunk_ids)) + if cur_idx == len(input_ids): + break + start_idx += tokenizer.tokens_per_chunk - tokenizer.chunk_overlap + cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids)) + chunk_ids = input_ids[start_idx:cur_idx] + return splits diff --git a/venv/Lib/site-packages/langchain_text_splitters/character.py b/venv/Lib/site-packages/langchain_text_splitters/character.py new file mode 100644 index 00000000..a2918bd2 --- /dev/null +++ b/venv/Lib/site-packages/langchain_text_splitters/character.py @@ -0,0 +1,720 @@ +from __future__ import annotations + +import re +from typing import Any, List, Literal, Optional, Union + +from langchain_text_splitters.base import Language, TextSplitter + + +class CharacterTextSplitter(TextSplitter): + """Splitting text that looks at characters.""" + + def __init__( + self, separator: str = "\n\n", is_separator_regex: bool = False, **kwargs: Any + ) -> None: + """Create a new TextSplitter.""" + super().__init__(**kwargs) + self._separator = separator + self._is_separator_regex = is_separator_regex + + def split_text(self, text: str) -> List[str]: + """Split incoming text and return chunks.""" + # First we naively split the large input into a bunch of smaller ones. + separator = ( + self._separator if self._is_separator_regex else re.escape(self._separator) + ) + splits = _split_text_with_regex(text, separator, self._keep_separator) + _separator = "" if self._keep_separator else self._separator + return self._merge_splits(splits, _separator) + + +def _split_text_with_regex( + text: str, separator: str, keep_separator: Union[bool, Literal["start", "end"]] +) -> List[str]: + # Now that we have the separator, split the text + if separator: + if keep_separator: + # The parentheses in the pattern keep the delimiters in the result. 
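+                # e.g. re.split("(\n\n)", "a\n\nb") returns ["a", "\n\n", "b"],
+                # so the separator can be glued back onto an adjacent chunk.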
+ _splits = re.split(f"({separator})", text) + splits = ( + ([_splits[i] + _splits[i + 1] for i in range(0, len(_splits) - 1, 2)]) + if keep_separator == "end" + else ([_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)]) + ) + if len(_splits) % 2 == 0: + splits += _splits[-1:] + splits = ( + (splits + [_splits[-1]]) + if keep_separator == "end" + else ([_splits[0]] + splits) + ) + else: + splits = re.split(separator, text) + else: + splits = list(text) + return [s for s in splits if s != ""] + + +class RecursiveCharacterTextSplitter(TextSplitter): + """Splitting text by recursively look at characters. + + Recursively tries to split by different characters to find one + that works. + """ + + def __init__( + self, + separators: Optional[List[str]] = None, + keep_separator: Union[bool, Literal["start", "end"]] = True, + is_separator_regex: bool = False, + **kwargs: Any, + ) -> None: + """Create a new TextSplitter.""" + super().__init__(keep_separator=keep_separator, **kwargs) + self._separators = separators or ["\n\n", "\n", " ", ""] + self._is_separator_regex = is_separator_regex + + def _split_text(self, text: str, separators: List[str]) -> List[str]: + """Split incoming text and return chunks.""" + final_chunks = [] + # Get appropriate separator to use + separator = separators[-1] + new_separators = [] + for i, _s in enumerate(separators): + _separator = _s if self._is_separator_regex else re.escape(_s) + if _s == "": + separator = _s + break + if re.search(_separator, text): + separator = _s + new_separators = separators[i + 1 :] + break + + _separator = separator if self._is_separator_regex else re.escape(separator) + splits = _split_text_with_regex(text, _separator, self._keep_separator) + + # Now go merging things, recursively splitting longer texts. + _good_splits = [] + _separator = "" if self._keep_separator else separator + for s in splits: + if self._length_function(s) < self._chunk_size: + _good_splits.append(s) + else: + if _good_splits: + merged_text = self._merge_splits(_good_splits, _separator) + final_chunks.extend(merged_text) + _good_splits = [] + if not new_separators: + final_chunks.append(s) + else: + other_info = self._split_text(s, new_separators) + final_chunks.extend(other_info) + if _good_splits: + merged_text = self._merge_splits(_good_splits, _separator) + final_chunks.extend(merged_text) + return final_chunks + + def split_text(self, text: str) -> List[str]: + """Split the input text into smaller chunks based on predefined separators. + + Args: + text (str): The input text to be split. + + Returns: + List[str]: A list of text chunks obtained after splitting. + """ + return self._split_text(text, self._separators) + + @classmethod + def from_language( + cls, language: Language, **kwargs: Any + ) -> RecursiveCharacterTextSplitter: + """Return an instance of this class based on a specific language. + + This method initializes the text splitter with language-specific separators. + + Args: + language (Language): The language to configure the text splitter for. + **kwargs (Any): Additional keyword arguments to customize the splitter. + + Returns: + RecursiveCharacterTextSplitter: An instance of the text splitter configured + for the specified language. + """ + separators = cls.get_separators_for_language(language) + return cls(separators=separators, is_separator_regex=True, **kwargs) + + @staticmethod + def get_separators_for_language(language: Language) -> List[str]: + """Retrieve a list of separators specific to the given language. 
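+
+        Separators are ordered from most specific (e.g. class and function
+        boundaries) to most generic ("", which splits between characters);
+        _split_text tries them in order and uses the first one found in the text.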
+ + Args: + language (Language): The language for which to get the separators. + + Returns: + List[str]: A list of separators appropriate for the specified language. + """ + if language == Language.C or language == Language.CPP: + return [ + # Split along class definitions + "\nclass ", + # Split along function definitions + "\nvoid ", + "\nint ", + "\nfloat ", + "\ndouble ", + # Split along control flow statements + "\nif ", + "\nfor ", + "\nwhile ", + "\nswitch ", + "\ncase ", + # Split by the normal type of lines + "\n\n", + "\n", + " ", + "", + ] + elif language == Language.GO: + return [ + # Split along function definitions + "\nfunc ", + "\nvar ", + "\nconst ", + "\ntype ", + # Split along control flow statements + "\nif ", + "\nfor ", + "\nswitch ", + "\ncase ", + # Split by the normal type of lines + "\n\n", + "\n", + " ", + "", + ] + elif language == Language.JAVA: + return [ + # Split along class definitions + "\nclass ", + # Split along method definitions + "\npublic ", + "\nprotected ", + "\nprivate ", + "\nstatic ", + # Split along control flow statements + "\nif ", + "\nfor ", + "\nwhile ", + "\nswitch ", + "\ncase ", + # Split by the normal type of lines + "\n\n", + "\n", + " ", + "", + ] + elif language == Language.KOTLIN: + return [ + # Split along class definitions + "\nclass ", + # Split along method definitions + "\npublic ", + "\nprotected ", + "\nprivate ", + "\ninternal ", + "\ncompanion ", + "\nfun ", + "\nval ", + "\nvar ", + # Split along control flow statements + "\nif ", + "\nfor ", + "\nwhile ", + "\nwhen ", + "\ncase ", + "\nelse ", + # Split by the normal type of lines + "\n\n", + "\n", + " ", + "", + ] + elif language == Language.JS: + return [ + # Split along function definitions + "\nfunction ", + "\nconst ", + "\nlet ", + "\nvar ", + "\nclass ", + # Split along control flow statements + "\nif ", + "\nfor ", + "\nwhile ", + "\nswitch ", + "\ncase ", + "\ndefault ", + # Split by the normal type of lines + "\n\n", + "\n", + " ", + "", + ] + elif language == Language.TS: + return [ + "\nenum ", + "\ninterface ", + "\nnamespace ", + "\ntype ", + # Split along class definitions + "\nclass ", + # Split along function definitions + "\nfunction ", + "\nconst ", + "\nlet ", + "\nvar ", + # Split along control flow statements + "\nif ", + "\nfor ", + "\nwhile ", + "\nswitch ", + "\ncase ", + "\ndefault ", + # Split by the normal type of lines + "\n\n", + "\n", + " ", + "", + ] + elif language == Language.PHP: + return [ + # Split along function definitions + "\nfunction ", + # Split along class definitions + "\nclass ", + # Split along control flow statements + "\nif ", + "\nforeach ", + "\nwhile ", + "\ndo ", + "\nswitch ", + "\ncase ", + # Split by the normal type of lines + "\n\n", + "\n", + " ", + "", + ] + elif language == Language.PROTO: + return [ + # Split along message definitions + "\nmessage ", + # Split along service definitions + "\nservice ", + # Split along enum definitions + "\nenum ", + # Split along option definitions + "\noption ", + # Split along import statements + "\nimport ", + # Split along syntax declarations + "\nsyntax ", + # Split by the normal type of lines + "\n\n", + "\n", + " ", + "", + ] + elif language == Language.PYTHON: + return [ + # First, try to split along class definitions + "\nclass ", + "\ndef ", + "\n\tdef ", + # Now split by the normal type of lines + "\n\n", + "\n", + " ", + "", + ] + elif language == Language.RST: + return [ + # Split along section titles + "\n=+\n", + "\n-+\n", + "\n\\*+\n", + # Split along directive 
markers
+                "\n\n.. *\n\n",
+                # Split by the normal type of lines
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.RUBY:
+            return [
+                # Split along method definitions
+                "\ndef ",
+                "\nclass ",
+                # Split along control flow statements
+                "\nif ",
+                "\nunless ",
+                "\nwhile ",
+                "\nfor ",
+                "\ndo ",
+                "\nbegin ",
+                "\nrescue ",
+                # Split by the normal type of lines
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.ELIXIR:
+            return [
+                # Split along method function and module definition
+                "\ndef ",
+                "\ndefp ",
+                "\ndefmodule ",
+                "\ndefprotocol ",
+                "\ndefmacro ",
+                "\ndefmacrop ",
+                # Split along control flow statements
+                "\nif ",
+                "\nunless ",
+                "\nwhile ",
+                "\ncase ",
+                "\ncond ",
+                "\nwith ",
+                "\nfor ",
+                "\ndo ",
+                # Split by the normal type of lines
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.RUST:
+            return [
+                # Split along function definitions
+                "\nfn ",
+                "\nconst ",
+                "\nlet ",
+                # Split along control flow statements
+                "\nif ",
+                "\nwhile ",
+                "\nfor ",
+                "\nloop ",
+                "\nmatch ",
+                "\nconst ",
+                # Split by the normal type of lines
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.SCALA:
+            return [
+                # Split along class definitions
+                "\nclass ",
+                "\nobject ",
+                # Split along method definitions
+                "\ndef ",
+                "\nval ",
+                "\nvar ",
+                # Split along control flow statements
+                "\nif ",
+                "\nfor ",
+                "\nwhile ",
+                "\nmatch ",
+                "\ncase ",
+                # Split by the normal type of lines
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.SWIFT:
+            return [
+                # Split along function definitions
+                "\nfunc ",
+                # Split along class definitions
+                "\nclass ",
+                "\nstruct ",
+                "\nenum ",
+                # Split along control flow statements
+                "\nif ",
+                "\nfor ",
+                "\nwhile ",
+                "\ndo ",
+                "\nswitch ",
+                "\ncase ",
+                # Split by the normal type of lines
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.MARKDOWN:
+            return [
+                # First, try to split along Markdown headings (starting with level 2)
+                "\n#{1,6} ",
+                # Note the alternative syntax for headings (below) is not handled here
+                # Heading level 2
+                # ---------------
+                # End of code block
+                "```\n",
+                # Horizontal lines
+                "\n\\*\\*\\*+\n",
+                "\n---+\n",
+                "\n___+\n",
+                # Note that this splitter doesn't handle horizontal lines defined
+                # by *three or more* of ***, ---, or ___, but this is not handled
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.LATEX:
+            return [
+                # First, try to split along Latex sections
+                "\n\\\\chapter{",
+                "\n\\\\section{",
+                "\n\\\\subsection{",
+                "\n\\\\subsubsection{",
+                # Now split by environments
+                "\n\\\\begin{enumerate}",
+                "\n\\\\begin{itemize}",
+                "\n\\\\begin{description}",
+                "\n\\\\begin{list}",
+                "\n\\\\begin{quote}",
+                "\n\\\\begin{quotation}",
+                "\n\\\\begin{verse}",
+                "\n\\\\begin{verbatim}",
+                # Now split by math environments
+                "\n\\\\begin{align}",
+                "$$",
+                "$",
+                # Now split by the normal type of lines
+                " ",
+                "",
+            ]
+        elif language == Language.HTML:
+            return [
+                # First, try to split along HTML tags
+                "<body",
+                "<div",
+                "<p",
+                "<br",
+                "<li",
+                "<h1",
+                "<h2",
+                "<h3",
+                "<h4",
+                "<h5",
+                "<h6",
+                "<span",
+                "<table",
+                "<tr",
+                "<td",
+                "<th",
+                "<ul",
+                "<ol",
+                "<header",
+                "<footer",
+                "<nav",
+                # Head
+                "<head",
+                "<style",
+                "<script",
+                "<meta",
+                "<title",
+                "",
+            ]
+        elif language == Language.CSHARP:
+            return [
+                "\ninterface ",
+                "\nenum ",
+                "\nimplements ",
+                "\ndelegate ",
+                "\nevent ",
+                # Split along class definitions
+                "\nclass ",
+                "\nabstract ",
+                # Split along method definitions
+                "\npublic ",
+                "\nprotected ",
+                "\nprivate ",
+                "\nstatic ",
+                "\nreturn ",
+                # Split along control flow statements
+                "\nif ",
+                "\ncontinue ",
+                "\nfor ",
+                "\nforeach ",
+                "\nwhile ",
+                "\nswitch ",
+                "\nbreak ",
+                "\ncase ",
+                "\nelse ",
+                # Split by exceptions
+                "\ntry ",
+                "\nthrow ",
+                "\nfinally ",
+                "\ncatch ",
+                # Split by the normal type of lines
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.SOL:
+            return [
+                # Split along compiler information definitions
+                "\npragma ",
+                "\nusing ",
+                # Split along contract definitions
+                "\ncontract ",
+                "\ninterface ",
+                "\nlibrary ",
+                # Split along method definitions
+                "\nconstructor ",
+                "\ntype ",
+                "\nfunction ",
+                "\nevent ",
+                "\nmodifier ",
+                "\nerror ",
+                "\nstruct ",
+                "\nenum ",
+                # Split along control flow statements
+                "\nif ",
+                "\nfor ",
+                "\nwhile ",
+                "\ndo while ",
+                "\nassembly ",
+                # Split by the normal type of lines
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.COBOL:
+            return [
+                # Split along divisions
+                "\nIDENTIFICATION DIVISION.",
+                "\nENVIRONMENT DIVISION.",
+                "\nDATA DIVISION.",
+                "\nPROCEDURE DIVISION.",
+                # Split along sections within DATA DIVISION
+                "\nWORKING-STORAGE SECTION.",
+                "\nLINKAGE SECTION.",
+                "\nFILE SECTION.",
+                # Split along sections within PROCEDURE DIVISION
+                "\nINPUT-OUTPUT SECTION.",
+                # Split along paragraphs and common statements
+                "\nOPEN ",
+                "\nCLOSE ",
+                "\nREAD ",
+                "\nWRITE ",
+                "\nIF ",
+                "\nELSE ",
+                "\nMOVE ",
+                "\nPERFORM ",
+                "\nUNTIL ",
+                "\nVARYING ",
+                "\nACCEPT ",
+                "\nDISPLAY ",
+                "\nSTOP RUN.",
+                # Split by the normal type of lines
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.LUA:
+            return [
+                # Split along variable and table definitions
+                "\nlocal ",
+                # Split along function definitions
+                "\nfunction ",
+                # Split along control flow statements
+                "\nif ",
+                "\nfor ",
+                "\nwhile ",
+                "\nrepeat ",
+                # Split by the normal type of lines
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.HASKELL:
+            return [
+                # Split along function definitions
+                "\nmain :: ",
+                "\nmain = ",
+                "\nlet ",
+                "\nin ",
+                "\ndo ",
+                "\nwhere ",
+                "\n:: ",
+                "\n= ",
+                # Split along type declarations
+                "\ndata ",
+                "\nnewtype ",
+                "\ntype ",
+                "\n:: ",
+                # Split along module declarations
+                "\nmodule ",
+                # Split along import statements
+                "\nimport ",
+                "\nqualified ",
+                "\nimport qualified ",
+                # Split along typeclass declarations
+                "\nclass ",
+                "\ninstance ",
+                # Split along case expressions
+                "\ncase ",
+                # Split along guards in function definitions
+                "\n| ",
+                # Split along record field declarations
+                "\ndata ",
+                "\n= {",
+                "\n, ",
+                # Split by the normal type of lines
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language == Language.POWERSHELL:
+            return [
+                # Split along function definitions
+                "\nfunction ",
+                # Split along parameter declarations (escape parentheses)
+                "\nparam ",
+                # Split along control flow statements
+                "\nif ",
+                "\nforeach ",
+                "\nfor ",
+                "\nwhile ",
+                "\nswitch ",
+                # Split along class definitions (for PowerShell 5.0 and above)
+                "\nclass ",
+                # Split along try-catch-finally blocks
+                "\ntry ",
+                "\ncatch ",
+                "\nfinally ",
+                # Split by normal lines and empty spaces
+                "\n\n",
+                "\n",
+                " ",
+                "",
+            ]
+        elif language in Language._value2member_map_:
+            raise ValueError(f"Language {language} is not implemented yet!")
+        else:
+            raise ValueError(
+                f"Language {language} is not supported! "
+                f"Please choose from {list(Language)}"
+            )
diff --git a/venv/Lib/site-packages/langchain_text_splitters/html.py b/venv/Lib/site-packages/langchain_text_splitters/html.py
new file mode 100644
--- /dev/null
+++ b/venv/Lib/site-packages/langchain_text_splitters/html.py
+from __future__ import annotations
+
+import copy
+import pathlib
+import re
+from io import StringIO
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Sequence,
+    Tuple,
+    TypedDict,
+    cast,
+)
+
+import requests
+from langchain_core._api import beta
+from langchain_core.documents import BaseDocumentTransformer, Document
+
+from langchain_text_splitters.character import RecursiveCharacterTextSplitter
+
+
+class ElementType(TypedDict):
+    """Element type as typed dict."""
+
+    url: str
+    xpath: str
+    content: str
+    metadata: Dict[str, str]
+
+
+class HTMLHeaderTextSplitter:
+    """Split HTML content into structured Documents based on specified headers.
+
+    Splits HTML content by detecting specified header tags (e.g., <h1>, <h2>) and
+    creating hierarchical Document objects that reflect the semantic structure
+    of the original content. For each identified section, the splitter associates
+    the extracted text with metadata corresponding to the encountered headers.
+
+    If no specified headers are found, the entire content is returned as a single
+    Document. This allows for flexible handling of HTML input, ensuring that
+    information is organized according to its semantic headers.
+
+    The splitter provides the option to return each HTML element as a separate
+    Document or aggregate them into semantically meaningful chunks. It also
+    gracefully handles multiple levels of nested headers, creating a rich,
+    hierarchical representation of the content.
+
+    Args:
+        headers_to_split_on (List[Tuple[str, str]]): A list of (header_tag,
+            header_name) pairs representing the headers that define splitting
+            boundaries. For example, [("h1", "Header 1"), ("h2", "Header 2")]
+            will split content by <h1> and <h2> tags, assigning their textual
+            content to the Document metadata.
+        return_each_element (bool): If True, every HTML element encountered
+            (including headers, paragraphs, etc.) is returned as a separate
+            Document. If False, content under the same header hierarchy is
+            aggregated into fewer Documents.
+
+    Returns:
+        List[Document]: A list of Document objects. Each Document contains
+        `page_content` holding the extracted text and `metadata` that maps
+        the header hierarchy to their corresponding titles.
+
+    Example:
+        .. code-block:: python
+
+            from langchain_text_splitters.html_header_text_splitter import (
+                HTMLHeaderTextSplitter,
+            )
+
+            # Define headers for splitting on h1 and h2 tags.
+            headers_to_split_on = [("h1", "Main Topic"), ("h2", "Sub Topic")]
+
+            splitter = HTMLHeaderTextSplitter(
+                headers_to_split_on=headers_to_split_on,
+                return_each_element=False
+            )
+
+            html_content = \"\"\"
+            <html>
+              <body>
+                <h1>Introduction</h1>
+                <p>Welcome to the introduction section.</p>
+                <h2>Background</h2>
+                <p>Some background details here.</p>
+                <h1>Conclusion</h1>
+                <p>Final thoughts.</p>
+              </body>
+            </html>
+            \"\"\"
+
+            documents = splitter.split_text(html_content)
+
+            # 'documents' now contains Document objects reflecting the hierarchy:
+            # - Document with metadata={"Main Topic": "Introduction"} and
+            #   content="Introduction"
+            # - Document with metadata={"Main Topic": "Introduction"} and
+            #   content="Welcome to the introduction section."
+            # - Document with metadata={"Main Topic": "Introduction",
+            #   "Sub Topic": "Background"} and content="Background"
+            # - Document with metadata={"Main Topic": "Introduction",
+            #   "Sub Topic": "Background"} and content="Some background details here."
+            # - Document with metadata={"Main Topic": "Conclusion"} and
+            #   content="Conclusion"
+            # - Document with metadata={"Main Topic": "Conclusion"} and
+            #   content="Final thoughts."
+    """
+
+    def __init__(
+        self,
+        headers_to_split_on: List[Tuple[str, str]],
+        return_each_element: bool = False,
+    ) -> None:
+        """Initialize with headers to split on.
+
+        Args:
+            headers_to_split_on: A list of tuples where
+                each tuple contains a header tag and its corresponding value.
+            return_each_element: Whether to return each HTML
+                element as a separate Document. Defaults to False.
+        """
+        # Sort headers by their numeric level so that h1 < h2 < h3...
+        self.headers_to_split_on = sorted(
+            headers_to_split_on, key=lambda x: int(x[0][1:])
+        )
+        self.header_mapping = dict(self.headers_to_split_on)
+        self.header_tags = [tag for tag, _ in self.headers_to_split_on]
+        self.return_each_element = return_each_element
+
+    def split_text(self, text: str) -> List[Document]:
+        """Split the given text into a list of Document objects.
+
+        Args:
+            text: The HTML text to split.
+
+        Returns:
+            A list of split Document objects.
+        """
+        return self.split_text_from_file(StringIO(text))
+
+    def split_text_from_url(
+        self, url: str, timeout: int = 10, **kwargs: Any
+    ) -> List[Document]:
+        """Fetch text content from a URL and split it into documents.
+
+        Args:
+            url: The URL to fetch content from.
+            timeout: Timeout for the request. Defaults to 10.
+            **kwargs: Additional keyword arguments for the request.
+
+        Returns:
+            A list of split Document objects.
+
+        Raises:
+            requests.RequestException: If the HTTP request fails.
+        """
+        kwargs.setdefault("timeout", timeout)
+        response = requests.get(url, **kwargs)
+        response.raise_for_status()
+        return self.split_text(response.text)
+
+    def split_text_from_file(self, file: Any) -> List[Document]:
+        """Split HTML content from a file into a list of Document objects.
+
+        Args:
+            file: A file path or a file-like object containing HTML content.
+
+        Returns:
+            A list of split Document objects.
+        """
+        if isinstance(file, str):
+            with open(file, "r", encoding="utf-8") as f:
+                html_content = f.read()
+        else:
+            html_content = file.read()
+        return list(self._generate_documents(html_content))
+
+    def _generate_documents(self, html_content: str) -> Any:
+        """Private method that performs a DFS traversal over the DOM and yields.
+
+        Document objects on-the-fly. This approach maintains the same splitting
+        logic (headers vs. non-headers, chunking, etc.) while walking the DOM
+        explicitly in code.
+
+        Args:
+            html_content: The raw HTML content.
+
+        Yields:
+            Document objects as they are created.
+        """
+        try:
+            from bs4 import BeautifulSoup
+        except ImportError as e:
+            raise ImportError(
+                "Unable to import BeautifulSoup. Please install via `pip install bs4`."
+ ) from e + + soup = BeautifulSoup(html_content, "html.parser") + body = soup.body if soup.body else soup + + # Dictionary of active headers: + # key = user-defined header name (e.g. "Header 1") + # value = (header_text, level, dom_depth) + active_headers: Dict[str, Tuple[str, int, int]] = {} + current_chunk: List[str] = [] + + def finalize_chunk() -> Optional[Document]: + """Finalize the accumulated chunk into a single Document.""" + if not current_chunk: + return None + + final_text = " \n".join(line for line in current_chunk if line.strip()) + current_chunk.clear() + if not final_text.strip(): + return None + + final_meta = {k: v[0] for k, v in active_headers.items()} + return Document(page_content=final_text, metadata=final_meta) + + # We'll use a stack for DFS traversal + stack = [body] + while stack: + node = stack.pop() + children = list(node.children) + from bs4.element import Tag + + for child in reversed(children): + if isinstance(child, Tag): + stack.append(child) + + tag = getattr(node, "name", None) + if not tag: + continue + + text_elements = [ + str(child).strip() + for child in node.find_all(string=True, recursive=False) + ] + node_text = " ".join(elem for elem in text_elements if elem) + if not node_text: + continue + + dom_depth = len(list(node.parents)) + + # If this node is one of our headers + if tag in self.header_tags: + # If we're aggregating, finalize whatever chunk we had + if not self.return_each_element: + doc = finalize_chunk() + if doc: + yield doc + + # Determine numeric level (h1->1, h2->2, etc.) + try: + level = int(tag[1:]) + except ValueError: + level = 9999 + + # Remove any active headers that are at or deeper than this new level + headers_to_remove = [ + k for k, (_, lvl, d) in active_headers.items() if lvl >= level + ] + for key in headers_to_remove: + del active_headers[key] + + # Add/Update the active header + header_name = self.header_mapping[tag] + active_headers[header_name] = (node_text, level, dom_depth) + + # Always yield a Document for the header + header_meta = {k: v[0] for k, v in active_headers.items()} + yield Document(page_content=node_text, metadata=header_meta) + + else: + headers_out_of_scope = [ + k for k, (_, _, d) in active_headers.items() if dom_depth < d + ] + for key in headers_out_of_scope: + del active_headers[key] + + if self.return_each_element: + # Yield each element's text as its own Document + meta = {k: v[0] for k, v in active_headers.items()} + yield Document(page_content=node_text, metadata=meta) + else: + # Accumulate text in our chunk + current_chunk.append(node_text) + + # If we're aggregating and have leftover chunk, yield it + if not self.return_each_element: + doc = finalize_chunk() + if doc: + yield doc + + +class HTMLSectionSplitter: + """Splitting HTML files based on specified tag and font sizes. + + Requires lxml package. + """ + + def __init__( + self, + headers_to_split_on: List[Tuple[str, str]], + xslt_path: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Create a new HTMLSectionSplitter. + + Args: + headers_to_split_on: list of tuples of headers we want to track mapped to + (arbitrary) keys for metadata. Allowed header values: h1, h2, h3, h4, + h5, h6 e.g. [("h1", "Header 1"), ("h2", "Header 2"]. + xslt_path: path to xslt file for document transformation. + Uses a default if not passed. + Needed for html contents that using different format and layouts. + **kwargs (Any): Additional optional arguments for customizations. 
+ + """ + self.headers_to_split_on = dict(headers_to_split_on) + + if xslt_path is None: + self.xslt_path = ( + pathlib.Path(__file__).parent / "xsl/converting_to_header.xslt" + ).absolute() + else: + self.xslt_path = pathlib.Path(xslt_path).absolute() + self.kwargs = kwargs + + def split_documents(self, documents: Iterable[Document]) -> List[Document]: + """Split documents.""" + texts, metadatas = [], [] + for doc in documents: + texts.append(doc.page_content) + metadatas.append(doc.metadata) + results = self.create_documents(texts, metadatas=metadatas) + + text_splitter = RecursiveCharacterTextSplitter(**self.kwargs) + + return text_splitter.split_documents(results) + + def split_text(self, text: str) -> List[Document]: + """Split HTML text string. + + Args: + text: HTML text + """ + return self.split_text_from_file(StringIO(text)) + + def create_documents( + self, texts: List[str], metadatas: Optional[List[dict]] = None + ) -> List[Document]: + """Create documents from a list of texts.""" + _metadatas = metadatas or [{}] * len(texts) + documents = [] + for i, text in enumerate(texts): + for chunk in self.split_text(text): + metadata = copy.deepcopy(_metadatas[i]) + + for key in chunk.metadata.keys(): + if chunk.metadata[key] == "#TITLE#": + chunk.metadata[key] = metadata["Title"] + metadata = {**metadata, **chunk.metadata} + new_doc = Document(page_content=chunk.page_content, metadata=metadata) + documents.append(new_doc) + return documents + + def split_html_by_headers(self, html_doc: str) -> List[Dict[str, Optional[str]]]: + """Split an HTML document into sections based on specified header tags. + + This method uses BeautifulSoup to parse the HTML content and divides it into + sections based on headers defined in `headers_to_split_on`. Each section + contains the header text, content under the header, and the tag name. + + Args: + html_doc (str): The HTML document to be split into sections. + + Returns: + List[Dict[str, Optional[str]]]: A list of dictionaries representing + sections. + Each dictionary contains: + - 'header': The header text or a default title for the first section. + - 'content': The content under the header. + - 'tag_name': The name of the header tag (e.g., "h1", "h2"). + """ + try: + from bs4 import ( + BeautifulSoup, # type: ignore[import-untyped] + PageElement, + ) + except ImportError as e: + raise ImportError( + "Unable to import BeautifulSoup/PageElement, \ + please install with `pip install \ + bs4`." 
+            ) from e
+
+        soup = BeautifulSoup(html_doc, "html.parser")
+        headers = list(self.headers_to_split_on.keys())
+        sections: list[dict[str, str | None]] = []
+
+        headers = soup.find_all(["body"] + headers)  # type: ignore[assignment]
+
+        for i, header in enumerate(headers):
+            header_element = cast(PageElement, header)
+            if i == 0:
+                current_header = "#TITLE#"
+                current_header_tag = "h1"
+                section_content: List = []
+            else:
+                current_header = header_element.text.strip()
+                current_header_tag = header_element.name  # type: ignore[attr-defined]
+                section_content = []
+            for element in header_element.next_elements:
+                if i + 1 < len(headers) and element == headers[i + 1]:
+                    break
+                if isinstance(element, str):
+                    section_content.append(element)
+            content = " ".join(section_content).strip()
+
+            if content != "":
+                sections.append(
+                    {
+                        "header": current_header,
+                        "content": content,
+                        "tag_name": current_header_tag,
+                    }
+                )
+
+        return sections
+
+    def convert_possible_tags_to_header(self, html_content: str) -> str:
+        """Convert specific HTML tags to headers using an XSLT transformation.
+
+        This method uses an XSLT file to transform the HTML content, converting
+        certain tags into headers for easier parsing. If no XSLT path is provided,
+        the HTML content is returned unchanged.
+
+        Args:
+            html_content (str): The HTML content to be transformed.
+
+        Returns:
+            str: The transformed HTML content as a string.
+        """
+        if self.xslt_path is None:
+            return html_content
+
+        try:
+            from lxml import etree
+        except ImportError as e:
+            raise ImportError(
+                "Unable to import lxml, please install with `pip install lxml`."
+            ) from e
+        # use lxml library to parse html document and return xml ElementTree
+        parser = etree.HTMLParser()
+        tree = etree.parse(StringIO(html_content), parser)
+
+        xslt_tree = etree.parse(self.xslt_path)
+        transform = etree.XSLT(xslt_tree)
+        result = transform(tree)
+        return str(result)
+
+    def split_text_from_file(self, file: Any) -> List[Document]:
+        """Split HTML content from a file into a list of Document objects.
+
+        Args:
+            file: A file path or a file-like object containing HTML content.
+
+        Returns:
+            A list of split Document objects.
+        """
+        file_content = file.getvalue()
+        file_content = self.convert_possible_tags_to_header(file_content)
+        sections = self.split_html_by_headers(file_content)
+
+        return [
+            Document(
+                cast(str, section["content"]),
+                metadata={
+                    self.headers_to_split_on[str(section["tag_name"])]: section[
+                        "header"
+                    ]
+                },
+            )
+            for section in sections
+        ]
+
+
+@beta()
+class HTMLSemanticPreservingSplitter(BaseDocumentTransformer):
+    """Split HTML content preserving semantic structure.
+
+    Splits HTML content by headers into generalized chunks, preserving semantic
+    structure. If chunks exceed the maximum chunk size, it uses
+    RecursiveCharacterTextSplitter for further splitting.
+
+    The splitter preserves full HTML elements (e.g., <table>, <ul>) and converts
+    links to Markdown-like links. It can also preserve images, videos, and audio
+    elements by converting them into Markdown format. Note that some chunks may
+    exceed the maximum size to maintain semantic integrity.
+
+    .. versionadded: 0.3.5
+
+    Args:
+        headers_to_split_on (List[Tuple[str, str]]): HTML headers (e.g., "h1", "h2")
+            that define content sections.
+        max_chunk_size (int): Maximum size for each chunk, with allowance for
+            exceeding this limit to preserve semantics.
+        chunk_overlap (int): Number of characters to overlap between chunks to ensure
+            contextual continuity.
+        separators (List[str]): Delimiters used by RecursiveCharacterTextSplitter for
+            further splitting.
+        elements_to_preserve (List[str]): HTML tags (e.g., <table>, <ul>