diff --git a/.github/scripts/get_min_versions.py b/.github/scripts/get_min_versions.py
index 3ae299a..f4d6180 100644
--- a/.github/scripts/get_min_versions.py
+++ b/.github/scripts/get_min_versions.py
@@ -47,25 +47,43 @@ def get_min_version_from_toml(toml_path: str):
     # Parse dependencies list into a dictionary
     # Format: "package-name>=x.x.x,=x.x.x; python_version < '3.10'"
     dependencies = {}
+    python_version = f"{sys.version_info.major}.{sys.version_info.minor}"
+
     for dep in dependencies_list:
-        # Remove environment markers (everything after semicolon)
-        dep_without_marker = dep.split(";")[0].strip()
+        # Check if there's a Python version marker
+        if ";" in dep:
+            dep_without_marker, marker = dep.split(";", 1)
+            dep_without_marker = dep_without_marker.strip()
+            marker = marker.strip()
+
+            # Check if this dependency applies to current Python version
+            # Handle python_version < '3.10' and python_version >= '3.10' markers
+            applies_to_current = True
+            if "python_version" in marker:
+                if "<" in marker and not ">=" in marker:
+                    # python_version < 'X.Y'
+                    match = re.search(r"python_version\s*<\s*['\"](\d+\.\d+)['\"]", marker)
+                    if match:
+                        max_version = match.group(1)
+                        applies_to_current = parse_version(python_version) < parse_version(max_version)
+                elif ">=" in marker:
+                    # python_version >= 'X.Y'
+                    match = re.search(r"python_version\s*>=\s*['\"](\d+\.\d+)['\"]", marker)
+                    if match:
+                        min_version_marker = match.group(1)
+                        applies_to_current = parse_version(python_version) >= parse_version(min_version_marker)
+
+            if not applies_to_current:
+                continue
+        else:
+            dep_without_marker = dep.strip()

         # Extract package name and version spec
         match = re.match(r"^([a-zA-Z0-9_-]+)(.*)$", dep_without_marker)
         if match:
             pkg_name = match.group(1)
             version_spec = match.group(2)
-
-            # If this package already exists, collect both version specs
-            if pkg_name in dependencies:
-                # Store as a list to handle multiple version constraints
-                if isinstance(dependencies[pkg_name], list):
-                    dependencies[pkg_name].append(version_spec)
-                else:
-                    dependencies[pkg_name] = [dependencies[pkg_name], version_spec]
-            else:
-                dependencies[pkg_name] = version_spec
+            dependencies[pkg_name] = version_spec

     # Initialize a dictionary to store the minimum versions
     min_versions = {}
@@ -74,23 +92,8 @@ def get_min_version_from_toml(toml_path: str):
     for lib in MIN_VERSION_LIBS:
         # Check if the lib is present in the dependencies
         if lib in dependencies:
-            # Get the version string(s)
             version_spec = dependencies[lib]
-
-            # Handle list format (multiple version constraints for different Python versions)
-            if isinstance(version_spec, list):
-                # Extract all version strings from the list and find the minimum
-                versions = []
-                for spec in version_spec:
-                    if spec:
-                        versions.append(get_min_version(spec))
-
-                # If we found versions, use the minimum one
-                if versions:
-                    min_version = min(versions, key=parse_version)
-                    min_versions[lib] = min_version
-            elif isinstance(version_spec, str) and version_spec:
-                # Handle simple string format
+            if version_spec:
                 min_version = get_min_version(version_spec)
                 min_versions[lib] = min_version
diff --git a/.github/workflows/_test.yml b/.github/workflows/_test.yml
index ed52e4d..8c1c9f2 100644
--- a/.github/workflows/_test.yml
+++ b/.github/workflows/_test.yml
@@ -22,6 +22,7 @@ jobs:
         python-version:
           - "3.9"
           - "3.12"
+          - "3.13"
     name: "make test #${{ matrix.python-version }}"
     steps:
       - uses: actions/checkout@v4
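
The rewritten parser keeps a requirement only when its `python_version` marker matches the interpreter running the script; duplicate entries that differ only by disjoint markers therefore collapse to a single surviving spec, which is why the plain `dependencies[pkg_name] = version_spec` overwrite replaces the old list-merging logic. A minimal standalone sketch of that behaviour (the `pick_specs` helper and the sample dependency strings are illustrative only; `parse_version` is assumed to be `packaging.version.parse`, matching the helper the script already uses):

```python
import re
import sys

from packaging.version import parse as parse_version  # assumed to match the script's helper


def pick_specs(dependency_strings):
    """Return {package: version_spec} for entries whose marker matches this interpreter."""
    current = parse_version(f"{sys.version_info.major}.{sys.version_info.minor}")
    picked = {}
    for dep in dependency_strings:
        req, _, marker = dep.partition(";")
        marker = marker.strip()
        if marker:
            lt = re.search(r"python_version\s*<\s*['\"](\d+\.\d+)['\"]", marker)
            ge = re.search(r"python_version\s*>=\s*['\"](\d+\.\d+)['\"]", marker)
            if lt and current >= parse_version(lt.group(1)):
                continue  # marker restricts to older interpreters
            if ge and current < parse_version(ge.group(1)):
                continue  # marker restricts to newer interpreters
        m = re.match(r"^([a-zA-Z0-9_-]+)(.*)$", req.strip())
        if m:
            picked[m.group(1)] = m.group(2)
    return picked


# On Python 3.13 only the second spec is kept; on 3.9 only the first.
print(pick_specs([
    "numpy>=1.26.2; python_version < '3.13'",
    "numpy>=2.1.0; python_version >= '3.13'",
]))
```

For markers beyond simple `python_version` comparisons, `packaging.markers.Marker(marker).evaluate()` would evaluate the full marker grammar against the current environment, at the cost of one more import in the CI script.
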
diff --git a/libs/oci/langchain_oci/chat_models/oci_data_science.py b/libs/oci/langchain_oci/chat_models/oci_data_science.py
index 364d8b4..fbd6ee3 100644
--- a/libs/oci/langchain_oci/chat_models/oci_data_science.py
+++ b/libs/oci/langchain_oci/chat_models/oci_data_science.py
@@ -31,7 +31,12 @@
     agenerate_from_stream,
     generate_from_stream,
 )
-from langchain_core.messages import AIMessageChunk, BaseMessage, BaseMessageChunk
+from langchain_core.messages import (
+    AIMessage,
+    AIMessageChunk,
+    BaseMessage,
+    BaseMessageChunk,
+)
 from langchain_core.output_parsers import (
     JsonOutputParser,
     PydanticOutputParser,
@@ -765,11 +770,17 @@ def _process_response(self, response_json: dict) -> ChatResult:
     def bind_tools(
         self,
-        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
+        tools: Sequence[Union[Dict[str, Any], type, Callable, BaseTool]],
+        # Type annotation matches LangChain's BaseChatModel API.
+        # Runtime validation occurs in convert_to_openai_tool().
+        *,
+        tool_choice: Optional[str] = None,
         **kwargs: Any,
-    ) -> Runnable[LanguageModelInput, BaseMessage]:
+    ) -> Runnable[LanguageModelInput, AIMessage]:
         formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
-        return super().bind(tools=formatted_tools, **kwargs)
+        if tool_choice is not None:
+            kwargs["tool_choice"] = tool_choice
+        return super().bind(tools=formatted_tools, **kwargs)  # type: ignore[return-value, unused-ignore]


 class ChatOCIModelDeploymentVLLM(ChatOCIModelDeployment):
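
With this change callers pass `tool_choice` directly to `bind_tools` instead of threading it through `**kwargs`, and the bound runnable is typed as producing `AIMessage`, so `.tool_calls` is visible to type checkers. A rough usage sketch (the import path, constructor arguments, and the tool itself are illustrative placeholders, not taken from this diff):

```python
from langchain_core.tools import tool

from langchain_oci.chat_models import ChatOCIModelDeployment  # import path assumed


@tool
def get_exchange_rate(currency: str) -> str:
    """Look up the exchange rate for a currency code."""
    return f"rate for {currency}"


# Endpoint/auth configuration is a placeholder; set it up as usual for OCI Data Science.
llm = ChatOCIModelDeployment(endpoint="https://<model-deployment-url>/predict")

# tool_choice is now a keyword-only argument forwarded alongside the formatted tools.
llm_with_tools = llm.bind_tools([get_exchange_rate], tool_choice="auto")

ai_msg = llm_with_tools.invoke("What is the exchange rate for EUR?")
print(ai_msg.tool_calls)
```

`ChatOCIGenAI` in the next diff already accepts the richer `Literal["auto", "none", "required", "any"]`, dict, and bool forms of `tool_choice`; the change there is the same loosened `tools` annotation and `AIMessage` return type.
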
diff --git a/libs/oci/langchain_oci/chat_models/oci_generative_ai.py b/libs/oci/langchain_oci/chat_models/oci_generative_ai.py
index 00ea3df..c5e49ed 100644
--- a/libs/oci/langchain_oci/chat_models/oci_generative_ai.py
+++ b/libs/oci/langchain_oci/chat_models/oci_generative_ai.py
@@ -1236,14 +1236,16 @@ def _prepare_request(
     def bind_tools(
         self,
-        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
+        tools: Sequence[Union[Dict[str, Any], type, Callable, BaseTool]],
+        # Type annotation matches LangChain's BaseChatModel API.
+        # Runtime validation occurs in convert_to_openai_tool().
         *,
         tool_choice: Optional[
             Union[dict, str, Literal["auto", "none", "required", "any"], bool]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
         **kwargs: Any,
-    ) -> Runnable[LanguageModelInput, BaseMessage]:
+    ) -> Runnable[LanguageModelInput, AIMessage]:
         """Bind tool-like objects to this chat model.

         Assumes model is compatible with Meta's tool-calling API.
@@ -1285,7 +1287,7 @@ def bind_tools(
             )
             kwargs["is_parallel_tool_calls"] = True

-        return super().bind(tools=formatted_tools, **kwargs)
+        return super().bind(tools=formatted_tools, **kwargs)  # type: ignore[return-value, unused-ignore]

     def with_structured_output(
         self,
@@ -1358,7 +1360,7 @@ def with_structured_output(
                 key_name=tool_name, first_tool_only=True
             )
         elif method == "json_mode":
-            llm = self.bind(response_format={"type": "JSON_OBJECT"})
+            llm = self.bind(response_format={"type": "JSON_OBJECT"})  # type: ignore[assignment, unused-ignore]
             output_parser = (
                 PydanticOutputParser(pydantic_object=schema)
                 if is_pydantic_schema
@@ -1382,7 +1384,7 @@ def with_structured_output(
                 json_schema=response_json_schema
             )

-            llm = self.bind(response_format=response_format_obj)
+            llm = self.bind(response_format=response_format_obj)  # type: ignore[assignment, unused-ignore]
             if is_pydantic_schema:
                 output_parser = PydanticOutputParser(pydantic_object=schema)
             else:
diff --git a/libs/oci/poetry.lock b/libs/oci/poetry.lock
index 6961cfc..fe7ff15 100644
--- a/libs/oci/poetry.lock
+++ b/libs/oci/poetry.lock
@@ -334,7 +334,7 @@ files = [
     {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"},
     {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"},
 ]
-markers = {main = "platform_python_implementation != \"PyPy\""}
+markers = {main = "platform_python_implementation != \"PyPy\"", test = "python_version >= \"3.10\""}

 [package.dependencies]
 pycparser = {version = "*", markers = "implementation_name != \"PyPy\""}
@@ -512,7 +512,7 @@ description = "Code coverage measurement for Python"
 optional = false
 python-versions = ">=3.9"
 groups = ["test"]
-markers = "python_version == \"3.9\" or platform_python_implementation == \"PyPy\" and python_version < \"3.13\""
+markers = "python_version == \"3.9\""
 files = [
     {file = "coverage-7.10.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fc04cc7a3db33664e0c2d10eb8990ff6b3536f6842c9590ae8da4c614b9ed05a"},
     {file = "coverage-7.10.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e201e015644e207139f7e2351980feb7040e6f4b2c2978892f3e3789d1c125e5"},
@@ -633,7 +633,7 @@ description = "Code coverage measurement for Python"
 optional = false
 python-versions = ">=3.10"
 groups = ["test"]
-markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\" or python_version >= \"3.13\""
+markers = "python_version >= \"3.10\""
 files = [
     {file = "coverage-7.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0c986537abca9b064510f3fd104ba33e98d3036608c7f2f5537f869bc10e1ee5"},
     {file = "coverage-7.11.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:28c5251b3ab1d23e66f1130ca0c419747edfbcb4690de19467cd616861507af7"},
@@ -742,7 +742,7 @@ description = "cryptography is a package which provides cryptographic recipes an
 optional = false
 python-versions = ">=3.7"
 groups = ["main"]
-markers = "python_version == \"3.9\" or platform_python_implementation == \"PyPy\" and python_version < \"3.13\""
+markers = "python_version == \"3.9\""
 files = [
     {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"},
     {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"},
@@ -793,7 +793,7 @@ description = "cryptography is a package which provides
cryptographic recipes an optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.7" groups = ["main"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\" or python_version >= \"3.13\"" +markers = "python_version >= \"3.10\"" files = [ {file = "cryptography-45.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:3be4f21c6245930688bd9e162829480de027f8bf962ede33d4f8ba7d67a00cee"}, {file = "cryptography-45.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:67285f8a611b0ebc0857ced2081e30302909f571a46bfa7a3cc0ad303fe015c6"}, @@ -1025,7 +1025,7 @@ description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\"" +markers = "python_version == \"3.9\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")" files = [ {file = "greenlet-3.2.4-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8c68325b0d0acf8d91dde4e6f930967dd52a5302cd4062932a6b2e7c2969f47c"}, {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:94385f101946790ae13da500603491f04a76b6e4c059dab271b3ce2e283b2590"}, @@ -1135,14 +1135,14 @@ trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.26.0" +version = "0.28.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" groups = ["main", "test", "test-integration"] files = [ - {file = "httpx-0.26.0-py3-none-any.whl", hash = "sha256:8915f5a3627c4d47b73e8202457cb28f1266982d1159bd5779d86a80c0eab1cd"}, - {file = "httpx-0.26.0.tar.gz", hash = "sha256:451b55c30d5185ea6b23c2c793abf9bb237d2a7dfb901ced6ff69ad37ec1dfaf"}, + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] @@ -1150,13 +1150,13 @@ anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" -sniffio = "*" [package.extras] brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" @@ -1173,31 +1173,6 @@ files = [ [package.extras] all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] -[[package]] -name = "importlib-metadata" -version = "8.7.0" -description = "Read metadata from Python packages" -optional = false -python-versions = ">=3.9" -groups = ["test"] -markers = "python_version == \"3.9\"" -files = [ - {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, - {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, -] - -[package.dependencies] -zipp = ">=3.20" - -[package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; 
sys_platform != \"cygwin\""] -cover = ["pytest-cov"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -perf = ["ipython"] -test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] -type = ["pytest-mypy"] - [[package]] name = "iniconfig" version = "2.1.0" @@ -1205,7 +1180,7 @@ description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" groups = ["test"] -markers = "python_version == \"3.9\" or platform_python_implementation == \"PyPy\" and python_version < \"3.13\"" +markers = "python_version == \"3.9\"" files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ -1218,7 +1193,7 @@ description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.10" groups = ["test"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\" or python_version >= \"3.13\"" +markers = "python_version >= \"3.10\"" files = [ {file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"}, {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"}, @@ -1370,6 +1345,7 @@ description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.9" groups = ["main"] +markers = "python_version == \"3.9\"" files = [ {file = "langchain-0.3.27-py3-none-any.whl", hash = "sha256:7b20c4f338826acb148d885b20a73a16e410ede9ee4f19bb02011852d5f98798"}, {file = "langchain-0.3.27.tar.gz", hash = "sha256:aa6f1e6274ff055d0fd36254176770f356ed0a8994297d1df47df341953cec62"}, @@ -1404,16 +1380,53 @@ perplexity = ["langchain-perplexity"] together = ["langchain-together"] xai = ["langchain-xai"] +[[package]] +name = "langchain" +version = "1.1.0" +description = "Building applications with LLMs through composability" +optional = false +python-versions = "<4.0.0,>=3.10.0" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "langchain-1.1.0-py3-none-any.whl", hash = "sha256:af080f3a4a779bfa5925de7aacb6dfab83249d4aab9a08f7aa7b9bec3766d8ea"}, + {file = "langchain-1.1.0.tar.gz", hash = "sha256:583c892f59873c0329dbe04169fb3234ac794c50780e7c6fb62a61c7b86a981b"}, +] + +[package.dependencies] +langchain-core = ">=1.1.0,<2.0.0" +langgraph = ">=1.0.2,<1.1.0" +pydantic = ">=2.7.4,<3.0.0" + +[package.extras] +anthropic = ["langchain-anthropic"] +aws = ["langchain-aws"] +azure-ai = ["langchain-azure-ai"] +community = ["langchain-community"] +deepseek = ["langchain-deepseek"] +fireworks = ["langchain-fireworks"] +google-genai = ["langchain-google-genai"] +google-vertexai = ["langchain-google-vertexai"] +groq = ["langchain-groq"] +huggingface = ["langchain-huggingface"] +mistralai = ["langchain-mistralai"] +ollama = ["langchain-ollama"] +openai = ["langchain-openai"] +perplexity = ["langchain-perplexity"] +together = ["langchain-together"] +xai = ["langchain-xai"] + [[package]] name = "langchain-core" -version = "0.3.79" +version = "0.3.80" description = "Building applications with LLMs through composability" optional = false python-versions = 
"<4.0.0,>=3.9.0" groups = ["main", "test", "test-integration"] +markers = "python_version == \"3.9\"" files = [ - {file = "langchain_core-0.3.79-py3-none-any.whl", hash = "sha256:92045bfda3e741f8018e1356f83be203ec601561c6a7becfefe85be5ddc58fdb"}, - {file = "langchain_core-0.3.79.tar.gz", hash = "sha256:024ba54a346dd9b13fb8b2342e0c83d0111e7f26fa01f545ada23ad772b55a60"}, + {file = "langchain_core-0.3.80-py3-none-any.whl", hash = "sha256:2141e3838d100d17dce2359f561ec0df52c526bae0de6d4f469f8026c5747456"}, + {file = "langchain_core-0.3.80.tar.gz", hash = "sha256:29636b82513ab49e834764d023c4d18554d3d719a185d37b019d0a8ae948c6bb"}, ] [package.dependencies] @@ -1425,6 +1438,28 @@ PyYAML = ">=5.3.0,<7.0.0" tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<10.0.0" typing-extensions = ">=4.7.0,<5.0.0" +[[package]] +name = "langchain-core" +version = "1.1.0" +description = "Building applications with LLMs through composability" +optional = false +python-versions = "<4.0.0,>=3.10.0" +groups = ["main", "test", "test-integration"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "langchain_core-1.1.0-py3-none-any.whl", hash = "sha256:2c9f27dadc6d21ed4aa46506a37a56e6a7e2d2f9141922dc5c251ba921822ee6"}, + {file = "langchain_core-1.1.0.tar.gz", hash = "sha256:2b76a82d427922c8bc51c08404af4fc2a29e9f161dfe2297cb05091e810201e7"}, +] + +[package.dependencies] +jsonpatch = ">=1.33.0,<2.0.0" +langsmith = ">=0.3.45,<1.0.0" +packaging = ">=23.2.0,<26.0.0" +pydantic = ">=2.7.4,<3.0.0" +pyyaml = ">=5.3.0,<7.0.0" +tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<10.0.0" +typing-extensions = ">=4.7.0,<5.0.0" + [[package]] name = "langchain-openai" version = "0.3.35" @@ -1432,6 +1467,7 @@ description = "An integration package connecting OpenAI and LangChain" optional = false python-versions = "<4.0.0,>=3.9.0" groups = ["main", "test"] +markers = "python_version == \"3.9\"" files = [ {file = "langchain_openai-0.3.35-py3-none-any.whl", hash = "sha256:76d5707e6e81fd461d33964ad618bd326cb661a1975cef7c1cb0703576bdada5"}, {file = "langchain_openai-0.3.35.tar.gz", hash = "sha256:fa985fd041c3809da256a040c98e8a43e91c6d165b96dcfeb770d8bd457bf76f"}, @@ -1442,33 +1478,74 @@ langchain-core = ">=0.3.78,<1.0.0" openai = ">=1.104.2,<3.0.0" tiktoken = ">=0.7.0,<1.0.0" +[[package]] +name = "langchain-openai" +version = "1.1.0" +description = "An integration package connecting OpenAI and LangChain" +optional = false +python-versions = "<4.0.0,>=3.10.0" +groups = ["main", "test"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "langchain_openai-1.1.0-py3-none-any.whl", hash = "sha256:243bb345d0260ea1326c2b6ac2237ec29f082ab457c59e9306bac349df4577e8"}, + {file = "langchain_openai-1.1.0.tar.gz", hash = "sha256:9a33280c2e8315d013d64e6b15e583be347beb0d0f281755c335ae504ad0c184"}, +] + +[package.dependencies] +langchain-core = ">=1.1.0,<2.0.0" +openai = ">=1.109.1,<3.0.0" +tiktoken = ">=0.7.0,<1.0.0" + [[package]] name = "langchain-tests" -version = "0.3.20" +version = "0.3.19" description = "Standard tests for LangChain implementations" optional = false -python-versions = ">=3.9" +python-versions = "<4.0,>=3.9" groups = ["test"] +markers = "python_version == \"3.9\"" files = [ - {file = "langchain_tests-0.3.20-py3-none-any.whl", hash = "sha256:6cc7ae64eb8dea65360a968840abe8d947c5382b95e065431c9dd061ee1dacd8"}, - {file = "langchain_tests-0.3.20.tar.gz", hash = "sha256:b94c05e37d191d4768a1a5064f2ca4053bacd48ff41e10af245ffa6a065ead4d"}, + {file = "langchain_tests-0.3.19-py3-none-any.whl", hash = 
"sha256:f235b74421e9bf71e9453405287204a4e11f20ed3829f9b7eee9ef55df47a50a"}, + {file = "langchain_tests-0.3.19.tar.gz", hash = "sha256:0d835148fdea7a5bcb3e26c7128b110bb4fa9532b070993566d107632a26b3a0"}, ] [package.dependencies] httpx = ">=0.25.0,<1" -langchain-core = ">=0.3.63,<1.0.0" +langchain-core = ">=0.3.53,<1.0.0" +numpy = {version = ">=1.26.2", markers = "python_version < \"3.13\""} +pytest = ">=7,<9" +pytest-asyncio = ">=0.20,<1" +pytest-socket = ">=0.6.0,<1" +syrupy = ">=4,<5" + +[[package]] +name = "langchain-tests" +version = "1.0.2" +description = "Standard tests for LangChain implementations" +optional = false +python-versions = "<4.0.0,>=3.10.0" +groups = ["test"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "langchain_tests-1.0.2-py3-none-any.whl", hash = "sha256:713936d9e474ba39eeade95ee8e9d2e237b15a74d4896cae66ef1a9bd0a44d48"}, + {file = "langchain_tests-1.0.2.tar.gz", hash = "sha256:7e96d82499ee32ab141e93bdeb122c942db41ff326bcc87ca27290ede92f0f78"}, +] + +[package.dependencies] +httpx = ">=0.28.1,<1.0.0" +langchain-core = ">=1.1.0,<2.0.0" numpy = [ {version = ">=1.26.2", markers = "python_version < \"3.13\""}, {version = ">=2.1.0", markers = "python_version >= \"3.13\""}, ] -pytest = ">=7,<9" -pytest-asyncio = ">=0.20,<1" +pytest = ">=7.0.0,<9.0.0" +pytest-asyncio = ">=0.20.0,<2.0.0" pytest-benchmark = "*" pytest-codspeed = "*" pytest-recording = "*" -pytest-socket = ">=0.6.0,<1" -syrupy = ">=4,<5" -vcrpy = ">=7.0" +pytest-socket = ">=0.7.0,<1.0.0" +syrupy = ">=4.0.0,<5.0.0" +vcrpy = ">=7.0.0,<8.0.0" [[package]] name = "langchain-text-splitters" @@ -1477,6 +1554,7 @@ description = "LangChain text splitting utilities" optional = false python-versions = ">=3.9" groups = ["main"] +markers = "python_version == \"3.9\"" files = [ {file = "langchain_text_splitters-0.3.11-py3-none-any.whl", hash = "sha256:cf079131166a487f1372c8ab5d0bfaa6c0a4291733d9c43a34a16ac9bcd6a393"}, {file = "langchain_text_splitters-0.3.11.tar.gz", hash = "sha256:7a50a04ada9a133bbabb80731df7f6ddac51bc9f1b9cab7fa09304d71d38a6cc"}, @@ -1487,20 +1565,45 @@ langchain-core = ">=0.3.75,<2.0.0" [[package]] name = "langgraph" -version = "0.2.76" +version = "0.6.11" description = "Building stateful, multi-actor applications with LLMs" optional = false -python-versions = "<4.0,>=3.9.0" +python-versions = ">=3.9" groups = ["test", "test-integration"] +markers = "python_version == \"3.9\"" +files = [ + {file = "langgraph-0.6.11-py3-none-any.whl", hash = "sha256:49268de69d85b7db3da9e2ca582a474516421c1c44be5cff390416cfa6967faa"}, + {file = "langgraph-0.6.11.tar.gz", hash = "sha256:cd5373d0a59701ab39c9f8af33a33c5704553de815318387fa7f240511e0efd7"}, +] + +[package.dependencies] +langchain-core = ">=0.1" +langgraph-checkpoint = ">=2.1.0,<4.0.0" +langgraph-prebuilt = ">=0.6.0,<0.7.0" +langgraph-sdk = ">=0.2.2,<0.3.0" +pydantic = ">=2.7.4" +xxhash = ">=3.5.0" + +[[package]] +name = "langgraph" +version = "1.0.4" +description = "Building stateful, multi-actor applications with LLMs" +optional = false +python-versions = ">=3.10" +groups = ["main", "test", "test-integration"] +markers = "python_version >= \"3.10\"" files = [ - {file = "langgraph-0.2.76-py3-none-any.whl", hash = "sha256:076b8b5d2fc5a9761c46a7618430cfa5c978a8012257c43cbc127b27e0fd7872"}, - {file = "langgraph-0.2.76.tar.gz", hash = "sha256:688f8dcd9b6797ba78384599e0de944773000c75156ad1e186490e99e89fa5c0"}, + {file = "langgraph-1.0.4-py3-none-any.whl", hash = "sha256:b1a835ceb0a8d69b9db48075e1939e28b1ad70ee23fa3fa8f90149904778bacf"}, + {file = 
"langgraph-1.0.4.tar.gz", hash = "sha256:86d08e25d7244340f59c5200fa69fdd11066aa999b3164b531e2a20036fac156"}, ] [package.dependencies] -langchain-core = ">=0.2.43,<0.3.0 || >0.3.0,<0.3.1 || >0.3.1,<0.3.2 || >0.3.2,<0.3.3 || >0.3.3,<0.3.4 || >0.3.4,<0.3.5 || >0.3.5,<0.3.6 || >0.3.6,<0.3.7 || >0.3.7,<0.3.8 || >0.3.8,<0.3.9 || >0.3.9,<0.3.10 || >0.3.10,<0.3.11 || >0.3.11,<0.3.12 || >0.3.12,<0.3.13 || >0.3.13,<0.3.14 || >0.3.14,<0.3.15 || >0.3.15,<0.3.16 || >0.3.16,<0.3.17 || >0.3.17,<0.3.18 || >0.3.18,<0.3.19 || >0.3.19,<0.3.20 || >0.3.20,<0.3.21 || >0.3.21,<0.3.22 || >0.3.22,<0.4.0" -langgraph-checkpoint = ">=2.0.10,<3.0.0" -langgraph-sdk = ">=0.1.42,<0.2.0" +langchain-core = ">=0.1" +langgraph-checkpoint = ">=2.1.0,<4.0.0" +langgraph-prebuilt = ">=1.0.2,<1.1.0" +langgraph-sdk = ">=0.2.2,<0.3.0" +pydantic = ">=2.7.4" +xxhash = ">=3.5.0" [[package]] name = "langgraph-checkpoint" @@ -1508,26 +1611,79 @@ version = "2.1.2" description = "Library with base interfaces for LangGraph checkpoint savers." optional = false python-versions = ">=3.9" -groups = ["test", "test-integration"] +groups = ["main", "test", "test-integration"] files = [ {file = "langgraph_checkpoint-2.1.2-py3-none-any.whl", hash = "sha256:911ebffb069fd01775d4b5184c04aaafc2962fcdf50cf49d524cd4367c4d0c60"}, {file = "langgraph_checkpoint-2.1.2.tar.gz", hash = "sha256:112e9d067a6eff8937caf198421b1ffba8d9207193f14ac6f89930c1260c06f9"}, ] +markers = {main = "python_version >= \"3.10\""} [package.dependencies] langchain-core = ">=0.2.38" ormsgpack = ">=1.10.0" +[[package]] +name = "langgraph-prebuilt" +version = "0.6.5" +description = "Library with high-level APIs for creating and executing LangGraph agents and tools." +optional = false +python-versions = ">=3.9" +groups = ["test", "test-integration"] +markers = "python_version == \"3.9\"" +files = [ + {file = "langgraph_prebuilt-0.6.5-py3-none-any.whl", hash = "sha256:b6ceb5db31c16a30a3ee3c0b923667f02e7c9e27852621abf9d5bd5603534141"}, + {file = "langgraph_prebuilt-0.6.5.tar.gz", hash = "sha256:9c63e9e867e62b345805fd1e8ea5c2df5cc112e939d714f277af84f2afe5950d"}, +] + +[package.dependencies] +langchain-core = ">=0.3.67" +langgraph-checkpoint = ">=2.1.0,<4.0.0" + +[[package]] +name = "langgraph-prebuilt" +version = "1.0.5" +description = "Library with high-level APIs for creating and executing LangGraph agents and tools." 
+optional = false +python-versions = ">=3.10" +groups = ["main", "test", "test-integration"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "langgraph_prebuilt-1.0.5-py3-none-any.whl", hash = "sha256:22369563e1848862ace53fbc11b027c28dd04a9ac39314633bb95f2a7e258496"}, + {file = "langgraph_prebuilt-1.0.5.tar.gz", hash = "sha256:85802675ad778cc7240fd02d47db1e0b59c0c86d8369447d77ce47623845db2d"}, +] + +[package.dependencies] +langchain-core = ">=1.0.0" +langgraph-checkpoint = ">=2.1.0,<4.0.0" + [[package]] name = "langgraph-sdk" -version = "0.1.74" +version = "0.2.9" description = "SDK for interacting with LangGraph API" optional = false python-versions = ">=3.9" groups = ["test", "test-integration"] +markers = "python_version == \"3.9\"" +files = [ + {file = "langgraph_sdk-0.2.9-py3-none-any.whl", hash = "sha256:fbf302edadbf0fb343596f91c597794e936ef68eebc0d3e1d358b6f9f72a1429"}, + {file = "langgraph_sdk-0.2.9.tar.gz", hash = "sha256:b3bd04c6be4fa382996cd2be8fbc1e7cc94857d2bc6b6f4599a7f2a245975303"}, +] + +[package.dependencies] +httpx = ">=0.25.2" +orjson = ">=3.10.1" + +[[package]] +name = "langgraph-sdk" +version = "0.2.10" +description = "SDK for interacting with LangGraph API" +optional = false +python-versions = ">=3.10" +groups = ["main", "test", "test-integration"] +markers = "python_version >= \"3.10\"" files = [ - {file = "langgraph_sdk-0.1.74-py3-none-any.whl", hash = "sha256:3a265c3757fe0048adad4391d10486db63ef7aa5a2cbd22da22d4503554cb890"}, - {file = "langgraph_sdk-0.1.74.tar.gz", hash = "sha256:7450e0db5b226cc2e5328ca22c5968725873630ef47c4206a30707cb25dc3ad6"}, + {file = "langgraph_sdk-0.2.10-py3-none-any.whl", hash = "sha256:9aef403663726085de6851e4e50459df9562069bd316dd0261eb359f776fd0ef"}, + {file = "langgraph_sdk-0.2.10.tar.gz", hash = "sha256:ab58331504fbea28e6322037aa362929799b4e9106663ac1dbd7c5ac44558933"}, ] [package.dependencies] @@ -1541,7 +1697,7 @@ description = "Client library to connect to the LangSmith LLM Tracing and Evalua optional = false python-versions = ">=3.9" groups = ["main", "test", "test-integration"] -markers = "python_version == \"3.9\" or platform_python_implementation == \"PyPy\" and python_version < \"3.13\"" +markers = "python_version == \"3.9\"" files = [ {file = "langsmith-0.4.37-py3-none-any.whl", hash = "sha256:e34a94ce7277646299e4703a0f6e2d2c43647a28e8b800bb7ef82fd87a0ec766"}, {file = "langsmith-0.4.37.tar.gz", hash = "sha256:d9a0eb6dd93f89843ac982c9f92be93cf2bcabbe19957f362c547766c7366c71"}, @@ -1571,7 +1727,7 @@ description = "Client library to connect to the LangSmith Observability and Eval optional = false python-versions = ">=3.10" groups = ["main", "test", "test-integration"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\" or python_version >= \"3.13\"" +markers = "python_version >= \"3.10\"" files = [ {file = "langsmith-0.4.42-py3-none-any.whl", hash = "sha256:015b0a0c17eb1a61293e8cbb7d41778a4b37caddd267d54274ba94e4721b301b"}, {file = "langsmith-0.4.42.tar.gz", hash = "sha256:a6e808e47581403cb019b47c8c10627c1644f78ed4c03fa877d6ad661476c38f"}, @@ -1594,32 +1750,6 @@ otel = ["opentelemetry-api (>=1.30.0)", "opentelemetry-exporter-otlp-proto-http pytest = ["pytest (>=7.0.0)", "rich (>=13.9.4)", "vcrpy (>=7.0.0)"] vcr = ["vcrpy (>=7.0.0)"] -[[package]] -name = "markdown-it-py" -version = "3.0.0" -description = "Python port of markdown-it. Markdown parsing, done right!" 
-optional = false -python-versions = ">=3.8" -groups = ["test"] -markers = "python_version == \"3.9\" or platform_python_implementation == \"PyPy\" and python_version < \"3.13\"" -files = [ - {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, - {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, -] - -[package.dependencies] -mdurl = ">=0.1,<1.0" - -[package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - [[package]] name = "markdown-it-py" version = "4.0.0" @@ -1627,7 +1757,7 @@ description = "Python port of markdown-it. Markdown parsing, done right!" optional = false python-versions = ">=3.10" groups = ["test"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\" or python_version >= \"3.13\"" +markers = "python_version >= \"3.10\"" files = [ {file = "markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147"}, {file = "markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3"}, @@ -1652,6 +1782,7 @@ description = "Markdown URL utilities" optional = false python-versions = ">=3.7" groups = ["test"] +markers = "python_version >= \"3.10\"" files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, @@ -1812,6 +1943,7 @@ files = [ {file = "multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3"}, {file = "multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5"}, ] +markers = {test = "python_version >= \"3.10\""} [package.dependencies] typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} @@ -1896,7 +2028,7 @@ description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" groups = ["test"] -markers = "python_version == \"3.9\" or platform_python_implementation == \"PyPy\" and python_version < \"3.13\"" +markers = "python_version == \"3.9\"" files = [ {file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"}, {file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"}, @@ -1952,7 +2084,7 @@ description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.10" groups = ["test"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\" and python_version < \"3.13\"" +markers = "python_version >= \"3.10\" and python_version < \"3.13\"" files = [ {file = "numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb"}, {file = "numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90"}, @@ -2264,7 +2396,7 @@ files = [ {file = "orjson-3.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:e2985ce8b8c42d00492d0ed79f2bd2b6460d00f2fa671dfde4bf2e02f49bf5c6"}, {file = "orjson-3.11.4.tar.gz", hash = "sha256:39485f4ab4c9b30a3943cfe99e1a213c4776fb69e8abd68f66b83d5a0b0fdc6d"}, ] -markers = {main = "platform_python_implementation != \"PyPy\""} +markers = {main = "platform_python_implementation != \"PyPy\" or python_version >= \"3.10\""} [[package]] name = "ormsgpack" @@ -2273,7 +2405,7 @@ description = "" optional = false python-versions = ">=3.9" groups = ["test", "test-integration"] -markers = "python_version == \"3.9\" or platform_python_implementation == \"PyPy\" and python_version < \"3.13\"" +markers = "python_version == \"3.9\"" files = [ {file = "ormsgpack-1.11.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:03d4e658dd6e1882a552ce1d13cc7b49157414e7d56a4091fbe7823225b08cba"}, {file = "ormsgpack-1.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bb67eb913c2b703f0ed39607fc56e50724dd41f92ce080a586b4d6149eb3fe4"}, @@ -2339,8 +2471,8 @@ version = "1.12.0" description = "" optional = false python-versions = ">=3.10" -groups = ["test", "test-integration"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\" or python_version >= \"3.13\"" +groups = ["main", "test", "test-integration"] +markers = "python_version >= \"3.10\"" files = [ {file = "ormsgpack-1.12.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e08904c232358b94a682ccfbb680bc47d3fd5c424bb7dccb65974dd20c95e8e1"}, {file = "ormsgpack-1.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9ed7a4b0037d69c8ba7e670e03ee65ae8d5c5114a409e73c5770d7fb5e4b895"}, @@ -2563,6 +2695,7 @@ files = [ {file = "propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237"}, {file = "propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d"}, ] +markers = {test = "python_version >= \"3.10\""} [[package]] name = "py-cpuinfo" @@ -2571,6 +2704,7 @@ description = "Get CPU info with pure Python" optional = false python-versions = "*" groups = ["test"] +markers = "python_version >= \"3.10\"" files = [ {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, @@ -2587,7 +2721,7 @@ files = [ {file = "pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934"}, {file = "pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2"}, ] -markers = {main = "implementation_name != \"PyPy\" and platform_python_implementation != \"PyPy\"", test = "implementation_name != \"PyPy\""} +markers = {main = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\"", test = "python_version >= \"3.10\" and implementation_name != \"PyPy\""} [[package]] name = "pydantic" @@ -2782,26 +2916,27 @@ test = ["pretend", "pytest (>=3.0.1)", "pytest-rerunfailures"] [[package]] 
name = "pytest" -version = "7.4.4" +version = "8.4.2" description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" groups = ["test"] files = [ - {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, - {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, + {file = "pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79"}, + {file = "pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01"}, ] [package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1", markers = "python_version < \"3.11\""} +iniconfig = ">=1" +packaging = ">=20" +pluggy = ">=1.5,<2" +pygments = ">=2.7.2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-asyncio" @@ -2829,6 +2964,7 @@ description = "A ``pytest`` fixture for benchmarking code. It will group the tes optional = false python-versions = ">=3.9" groups = ["test"] +markers = "python_version >= \"3.10\"" files = [ {file = "pytest-benchmark-5.0.1.tar.gz", hash = "sha256:8138178618c85586ce056c70cc5e92f4283c2e6198e8422c2c825aeb3ace6afd"}, {file = "pytest_benchmark-5.0.1-py3-none-any.whl", hash = "sha256:d75fec4cbf0d4fd91e020f425ce2d845e9c127c21bae35e77c84db8ed84bfaa6"}, @@ -2850,6 +2986,7 @@ description = "Pytest plugin to create CodSpeed benchmarks" optional = false python-versions = ">=3.9" groups = ["test"] +markers = "python_version >= \"3.10\"" files = [ {file = "pytest_codspeed-4.2.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:609828b03972966b75b9b7416fa2570c4a0f6124f67e02d35cd3658e64312a7b"}, {file = "pytest_codspeed-4.2.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23a0c0fbf8bb4de93a3454fd9e5efcdca164c778aaef0a9da4f233d85cb7f5b8"}, @@ -2871,7 +3008,6 @@ files = [ [package.dependencies] cffi = ">=1.17.1" -importlib-metadata = {version = ">=8.5.0", markers = "python_version < \"3.10\""} pytest = ">=3.8" rich = ">=13.8.1" @@ -2899,22 +3035,22 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtuale [[package]] name = "pytest-httpx" -version = "0.28.0" +version = "0.35.0" description = "Send responses to httpx." 
optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "pytest_httpx-0.28.0-py3-none-any.whl", hash = "sha256:045774556a3633688742315a6981aab2806ce93bcbcc8444253ab87bca286800"}, - {file = "pytest_httpx-0.28.0.tar.gz", hash = "sha256:a82505fdf59f19eaaf2853db3f3832b3dee35d3bc58000232db2b65c5fca0614"}, + {file = "pytest_httpx-0.35.0-py3-none-any.whl", hash = "sha256:ee11a00ffcea94a5cbff47af2114d34c5b231c326902458deed73f9c459fd744"}, + {file = "pytest_httpx-0.35.0.tar.gz", hash = "sha256:d619ad5d2e67734abfbb224c3d9025d64795d4b8711116b1a13f72a251ae511f"}, ] [package.dependencies] -httpx = "==0.26.*" -pytest = "==7.*" +httpx = "==0.28.*" +pytest = "==8.*" [package.extras] -testing = ["pytest-asyncio (==0.23.*)", "pytest-cov (==4.*)"] +testing = ["pytest-asyncio (==0.24.*)", "pytest-cov (==6.*)"] [[package]] name = "pytest-mock" @@ -2941,6 +3077,7 @@ description = "A pytest plugin powered by VCR.py to record and replay HTTP traff optional = false python-versions = ">=3.9" groups = ["test"] +markers = "python_version >= \"3.10\"" files = [ {file = "pytest_recording-0.13.4-py3-none-any.whl", hash = "sha256:ad49a434b51b1c4f78e85b1e6b74fdcc2a0a581ca16e52c798c6ace971f7f439"}, {file = "pytest_recording-0.13.4.tar.gz", hash = "sha256:568d64b2a85992eec4ae0a419c855d5fd96782c5fb016784d86f18053792768c"}, @@ -3284,6 +3421,7 @@ description = "Render rich text, tables, progress bars, syntax highlighting, mar optional = false python-versions = ">=3.8.0" groups = ["test"] +markers = "python_version >= \"3.10\"" files = [ {file = "rich-14.2.0-py3-none-any.whl", hash = "sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd"}, {file = "rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4"}, @@ -3356,6 +3494,7 @@ description = "Database Abstraction Library" optional = false python-versions = ">=3.7" groups = ["main"] +markers = "python_version == \"3.9\"" files = [ {file = "SQLAlchemy-2.0.44-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:471733aabb2e4848d609141a9e9d56a427c0a038f4abf65dd19d7a21fd563632"}, {file = "SQLAlchemy-2.0.44-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48bf7d383a35e668b984c805470518b635d48b95a3c57cb03f37eaa3551b5f9f"}, @@ -3632,7 +3771,7 @@ description = "Typing stubs for requests" optional = false python-versions = ">=3.7" groups = ["typing"] -markers = "platform_python_implementation == \"PyPy\" or python_version == \"3.9\"" +markers = "python_version >= \"3.10\" and platform_python_implementation == \"PyPy\"" files = [ {file = "types-requests-2.31.0.6.tar.gz", hash = "sha256:cd74ce3b53c461f1228a9b783929ac73a666658f223e28ed29753771477b3bd0"}, {file = "types_requests-2.31.0.6-py3-none-any.whl", hash = "sha256:a2db9cb228a81da8348b49ad6db3f5519452dd20a9c1e1a868c83c5fe88fd1a9"}, @@ -3648,7 +3787,7 @@ description = "Typing stubs for requests" optional = false python-versions = ">=3.9" groups = ["typing"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\"" +markers = "platform_python_implementation != \"PyPy\" or python_version == \"3.9\"" files = [ {file = "types_requests-2.32.4.20250913-py3-none-any.whl", hash = "sha256:78c9c1fffebbe0fa487a418e0fa5252017e9c60d1a2da394077f1780f655d7e1"}, {file = "types_requests-2.32.4.20250913.tar.gz", hash = "sha256:abd6d4f9ce3a9383f269775a9835a4c24e5cd6b9f647d64f88aa4613c33def5d"}, @@ -3664,7 +3803,7 @@ description = "Typing stubs for urllib3" optional = false python-versions = "*" groups = ["typing"] 
-markers = "platform_python_implementation == \"PyPy\" or python_version == \"3.9\"" +markers = "python_version >= \"3.10\" and platform_python_implementation == \"PyPy\"" files = [ {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"}, {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"}, @@ -3704,7 +3843,7 @@ description = "HTTP library with thread-safe connection pooling, file post, and optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" groups = ["main", "test", "test-integration"] -markers = "platform_python_implementation == \"PyPy\" or python_version == \"3.9\"" +markers = "python_version >= \"3.10\" and platform_python_implementation == \"PyPy\"" files = [ {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"}, {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"}, @@ -3722,7 +3861,7 @@ description = "HTTP library with thread-safe connection pooling, file post, and optional = false python-versions = ">=3.9" groups = ["main", "test", "test-integration", "typing"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\"" +markers = "platform_python_implementation != \"PyPy\" or python_version == \"3.9\"" files = [ {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, @@ -3741,6 +3880,7 @@ description = "Automatically mock your HTTP interactions to simplify and speed u optional = false python-versions = ">=3.9" groups = ["test"] +markers = "python_version >= \"3.10\"" files = [ {file = "vcrpy-7.0.0-py2.py3-none-any.whl", hash = "sha256:55791e26c18daa363435054d8b35bd41a4ac441b6676167635d1b37a71dbe124"}, {file = "vcrpy-7.0.0.tar.gz", hash = "sha256:176391ad0425edde1680c5b20738ea3dc7fb942520a48d2993448050986b3a50"}, @@ -3749,7 +3889,7 @@ files = [ [package.dependencies] PyYAML = "*" urllib3 = [ - {version = "<2", markers = "python_version < \"3.10\" or platform_python_implementation == \"PyPy\""}, + {version = "<2", markers = "platform_python_implementation == \"PyPy\""}, {version = "*", markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\""}, ] wrapt = "*" @@ -3808,6 +3948,7 @@ description = "Module for decorators, wrappers and monkey patching." 
optional = false python-versions = ">=3.8" groups = ["test"] +markers = "python_version >= \"3.10\"" files = [ {file = "wrapt-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:64b103acdaa53b7caf409e8d45d39a8442fe6dcfec6ba3f3d141e0cc2b5b4dbd"}, {file = "wrapt-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:91bcc576260a274b169c3098e9a3519fb01f2989f6d3d386ef9cbf8653de1374"}, @@ -3921,6 +4062,157 @@ files = [ [package.extras] dev = ["pytest", "setuptools"] +[[package]] +name = "xxhash" +version = "3.6.0" +description = "Python binding for xxHash" +optional = false +python-versions = ">=3.7" +groups = ["main", "test", "test-integration"] +files = [ + {file = "xxhash-3.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:87ff03d7e35c61435976554477a7f4cd1704c3596a89a8300d5ce7fc83874a71"}, + {file = "xxhash-3.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f572dfd3d0e2eb1a57511831cf6341242f5a9f8298a45862d085f5b93394a27d"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:89952ea539566b9fed2bbd94e589672794b4286f342254fad28b149f9615fef8"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e6f2ffb07a50b52465a1032c3cf1f4a5683f944acaca8a134a2f23674c2058"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b5b848ad6c16d308c3ac7ad4ba6bede80ed5df2ba8ed382f8932df63158dd4b2"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a034590a727b44dd8ac5914236a7b8504144447a9682586c3327e935f33ec8cc"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a8f1972e75ebdd161d7896743122834fe87378160c20e97f8b09166213bf8cc"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ee34327b187f002a596d7b167ebc59a1b729e963ce645964bbc050d2f1b73d07"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:339f518c3c7a850dd033ab416ea25a692759dc7478a71131fe8869010d2b75e4"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:bf48889c9630542d4709192578aebbd836177c9f7a4a2778a7d6340107c65f06"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:5576b002a56207f640636056b4160a378fe36a58db73ae5c27a7ec8db35f71d4"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af1f3278bd02814d6dedc5dec397993b549d6f16c19379721e5a1d31e132c49b"}, + {file = "xxhash-3.6.0-cp310-cp310-win32.whl", hash = "sha256:aed058764db109dc9052720da65fafe84873b05eb8b07e5e653597951af57c3b"}, + {file = "xxhash-3.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:e82da5670f2d0d98950317f82a0e4a0197150ff19a6df2ba40399c2a3b9ae5fb"}, + {file = "xxhash-3.6.0-cp310-cp310-win_arm64.whl", hash = "sha256:4a082ffff8c6ac07707fb6b671caf7c6e020c75226c561830b73d862060f281d"}, + {file = "xxhash-3.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b47bbd8cf2d72797f3c2772eaaac0ded3d3af26481a26d7d7d41dc2d3c46b04a"}, + {file = "xxhash-3.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2b6821e94346f96db75abaa6e255706fb06ebd530899ed76d32cd99f20dc52fa"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d0a9751f71a1a65ce3584e9cae4467651c7e70c9d31017fa57574583a4540248"}, + {file = 
"xxhash-3.6.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b29ee68625ab37b04c0b40c3fafdf24d2f75ccd778333cfb698f65f6c463f62"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6812c25fe0d6c36a46ccb002f40f27ac903bf18af9f6dd8f9669cb4d176ab18f"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4ccbff013972390b51a18ef1255ef5ac125c92dc9143b2d1909f59abc765540e"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:297b7fbf86c82c550e12e8fb71968b3f033d27b874276ba3624ea868c11165a8"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dea26ae1eb293db089798d3973a5fc928a18fdd97cc8801226fae705b02b14b0"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7a0b169aafb98f4284f73635a8e93f0735f9cbde17bd5ec332480484241aaa77"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:08d45aef063a4531b785cd72de4887766d01dc8f362a515693df349fdb825e0c"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:929142361a48ee07f09121fe9e96a84950e8d4df3bb298ca5d88061969f34d7b"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:51312c768403d8540487dbbfb557454cfc55589bbde6424456951f7fcd4facb3"}, + {file = "xxhash-3.6.0-cp311-cp311-win32.whl", hash = "sha256:d1927a69feddc24c987b337ce81ac15c4720955b667fe9b588e02254b80446fd"}, + {file = "xxhash-3.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:26734cdc2d4ffe449b41d186bbeac416f704a482ed835d375a5c0cb02bc63fef"}, + {file = "xxhash-3.6.0-cp311-cp311-win_arm64.whl", hash = "sha256:d72f67ef8bf36e05f5b6c65e8524f265bd61071471cd4cf1d36743ebeeeb06b7"}, + {file = "xxhash-3.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:01362c4331775398e7bb34e3ab403bc9ee9f7c497bc7dee6272114055277dd3c"}, + {file = "xxhash-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7b2df81a23f8cb99656378e72501b2cb41b1827c0f5a86f87d6b06b69f9f204"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dc94790144e66b14f67b10ac8ed75b39ca47536bf8800eb7c24b50271ea0c490"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93f107c673bccf0d592cdba077dedaf52fe7f42dcd7676eba1f6d6f0c3efffd2"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aa5ee3444c25b69813663c9f8067dcfaa2e126dc55e8dddf40f4d1c25d7effa"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7f99123f0e1194fa59cc69ad46dbae2e07becec5df50a0509a808f90a0f03f0"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49e03e6fe2cac4a1bc64952dd250cf0dbc5ef4ebb7b8d96bce82e2de163c82a2"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bd17fede52a17a4f9a7bc4472a5867cb0b160deeb431795c0e4abe158bc784e9"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6fb5f5476bef678f69db04f2bd1efbed3030d2aba305b0fc1773645f187d6a4e"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:843b52f6d88071f87eba1631b684fcb4b2068cd2180a0224122fe4ef011a9374"}, + 
{file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7d14a6cfaf03b1b6f5f9790f76880601ccc7896aff7ab9cd8978a939c1eb7e0d"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:418daf3db71e1413cfe211c2f9a528456936645c17f46b5204705581a45390ae"}, + {file = "xxhash-3.6.0-cp312-cp312-win32.whl", hash = "sha256:50fc255f39428a27299c20e280d6193d8b63b8ef8028995323bf834a026b4fbb"}, + {file = "xxhash-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:c0f2ab8c715630565ab8991b536ecded9416d615538be8ecddce43ccf26cbc7c"}, + {file = "xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829"}, + {file = "xxhash-3.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:599e64ba7f67472481ceb6ee80fa3bd828fd61ba59fb11475572cc5ee52b89ec"}, + {file = "xxhash-3.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d8b8aaa30fca4f16f0c84a5c8d7ddee0e25250ec2796c973775373257dde8f1"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d597acf8506d6e7101a4a44a5e428977a51c0fadbbfd3c39650cca9253f6e5a6"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:858dc935963a33bc33490128edc1c12b0c14d9c7ebaa4e387a7869ecc4f3e263"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba284920194615cb8edf73bf52236ce2e1664ccd4a38fdb543506413529cc546"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b54219177f6c6674d5378bd862c6aedf64725f70dd29c472eaae154df1a2e89"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:42c36dd7dbad2f5238950c377fcbf6811b1cdb1c444fab447960030cea60504d"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f22927652cba98c44639ffdc7aaf35828dccf679b10b31c4ad72a5b530a18eb7"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b45fad44d9c5c119e9c6fbf2e1c656a46dc68e280275007bbfd3d572b21426db"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6f2580ffab1a8b68ef2b901cde7e55fa8da5e4be0977c68f78fc80f3c143de42"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:40c391dd3cd041ebc3ffe6f2c862f402e306eb571422e0aa918d8070ba31da11"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f205badabde7aafd1a31e8ca2a3e5a763107a71c397c4481d6a804eb5063d8bd"}, + {file = "xxhash-3.6.0-cp313-cp313-win32.whl", hash = "sha256:2577b276e060b73b73a53042ea5bd5203d3e6347ce0d09f98500f418a9fcf799"}, + {file = "xxhash-3.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:757320d45d2fbcce8f30c42a6b2f47862967aea7bf458b9625b4bbe7ee390392"}, + {file = "xxhash-3.6.0-cp313-cp313-win_arm64.whl", hash = "sha256:457b8f85dec5825eed7b69c11ae86834a018b8e3df5e77783c999663da2f96d6"}, + {file = "xxhash-3.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a42e633d75cdad6d625434e3468126c73f13f7584545a9cf34e883aa1710e702"}, + {file = "xxhash-3.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:568a6d743219e717b07b4e03b0a828ce593833e498c3b64752e0f5df6bfe84db"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bec91b562d8012dae276af8025a55811b875baace6af510412a5e58e3121bc54"}, + {file = 
"xxhash-3.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78e7f2f4c521c30ad5e786fdd6bae89d47a32672a80195467b5de0480aa97b1f"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3ed0df1b11a79856df5ffcab572cbd6b9627034c1c748c5566fa79df9048a7c5"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e4edbfc7d420925b0dd5e792478ed393d6e75ff8fc219a6546fb446b6a417b1"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fba27a198363a7ef87f8c0f6b171ec36b674fe9053742c58dd7e3201c1ab30ee"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:794fe9145fe60191c6532fa95063765529770edcdd67b3d537793e8004cabbfd"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6105ef7e62b5ac73a837778efc331a591d8442f8ef5c7e102376506cb4ae2729"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:f01375c0e55395b814a679b3eea205db7919ac2af213f4a6682e01220e5fe292"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d706dca2d24d834a4661619dcacf51a75c16d65985718d6a7d73c1eeeb903ddf"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f059d9faeacd49c0215d66f4056e1326c80503f51a1532ca336a385edadd033"}, + {file = "xxhash-3.6.0-cp313-cp313t-win32.whl", hash = "sha256:1244460adc3a9be84731d72b8e80625788e5815b68da3da8b83f78115a40a7ec"}, + {file = "xxhash-3.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b1e420ef35c503869c4064f4a2f2b08ad6431ab7b229a05cce39d74268bca6b8"}, + {file = "xxhash-3.6.0-cp313-cp313t-win_arm64.whl", hash = "sha256:ec44b73a4220623235f67a996c862049f375df3b1052d9899f40a6382c32d746"}, + {file = "xxhash-3.6.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a40a3d35b204b7cc7643cbcf8c9976d818cb47befcfac8bbefec8038ac363f3e"}, + {file = "xxhash-3.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a54844be970d3fc22630b32d515e79a90d0a3ddb2644d8d7402e3c4c8da61405"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:016e9190af8f0a4e3741343777710e3d5717427f175adfdc3e72508f59e2a7f3"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f6f72232f849eb9d0141e2ebe2677ece15adfd0fa599bc058aad83c714bb2c6"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:63275a8aba7865e44b1813d2177e0f5ea7eadad3dd063a21f7cf9afdc7054063"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cd01fa2aa00d8b017c97eb46b9a794fbdca53fc14f845f5a328c71254b0abb7"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0226aa89035b62b6a86d3c68df4d7c1f47a342b8683da2b60cedcddb46c4d95b"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c6e193e9f56e4ca4923c61238cdaced324f0feac782544eb4c6d55ad5cc99ddd"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9176dcaddf4ca963d4deb93866d739a343c01c969231dbe21680e13a5d1a5bf0"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:c1ce4009c97a752e682b897aa99aef84191077a9433eb237774689f14f8ec152"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:8cb2f4f679b01513b7adbb9b1b2f0f9cdc31b70007eaf9d59d0878809f385b11"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:653a91d7c2ab54a92c19ccf43508b6a555440b9be1bc8be553376778be7f20b5"}, + {file = "xxhash-3.6.0-cp314-cp314-win32.whl", hash = "sha256:a756fe893389483ee8c394d06b5ab765d96e68fbbfe6fde7aa17e11f5720559f"}, + {file = "xxhash-3.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:39be8e4e142550ef69629c9cd71b88c90e9a5db703fecbcf265546d9536ca4ad"}, + {file = "xxhash-3.6.0-cp314-cp314-win_arm64.whl", hash = "sha256:25915e6000338999236f1eb68a02a32c3275ac338628a7eaa5a269c401995679"}, + {file = "xxhash-3.6.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c5294f596a9017ca5a3e3f8884c00b91ab2ad2933cf288f4923c3fd4346cf3d4"}, + {file = "xxhash-3.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1cf9dcc4ab9cff01dfbba78544297a3a01dafd60f3bde4e2bfd016cf7e4ddc67"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01262da8798422d0685f7cef03b2bd3f4f46511b02830861df548d7def4402ad"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51a73fb7cb3a3ead9f7a8b583ffd9b8038e277cdb8cb87cf890e88b3456afa0b"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b9c6df83594f7df8f7f708ce5ebeacfc69f72c9fbaaababf6cf4758eaada0c9b"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:627f0af069b0ea56f312fd5189001c24578868643203bca1abbc2c52d3a6f3ca"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa912c62f842dfd013c5f21a642c9c10cd9f4c4e943e0af83618b4a404d9091a"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b465afd7909db30168ab62afe40b2fcf79eedc0b89a6c0ab3123515dc0df8b99"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a881851cf38b0a70e7c4d3ce81fc7afd86fbc2a024f4cfb2a97cf49ce04b75d3"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9b3222c686a919a0f3253cfc12bb118b8b103506612253b5baeaac10d8027cf6"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:c5aa639bc113e9286137cec8fadc20e9cd732b2cc385c0b7fa673b84fc1f2a93"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5c1343d49ac102799905e115aee590183c3921d475356cb24b4de29a4bc56518"}, + {file = "xxhash-3.6.0-cp314-cp314t-win32.whl", hash = "sha256:5851f033c3030dd95c086b4a36a2683c2ff4a799b23af60977188b057e467119"}, + {file = "xxhash-3.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0444e7967dac37569052d2409b00a8860c2135cff05502df4da80267d384849f"}, + {file = "xxhash-3.6.0-cp314-cp314t-win_arm64.whl", hash = "sha256:bb79b1e63f6fd84ec778a4b1916dfe0a7c3fdb986c06addd5db3a0d413819d95"}, + {file = "xxhash-3.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dac94fad14a3d1c92affb661021e1d5cbcf3876be5f5b4d90730775ccb7ac41"}, + {file = "xxhash-3.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6965e0e90f1f0e6cb78da568c13d4a348eeb7f40acfd6d43690a666a459458b8"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = 
"sha256:2ab89a6b80f22214b43d98693c30da66af910c04f9858dd39c8e570749593d7e"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4903530e866b7a9c1eadfd3fa2fbe1b97d3aed4739a80abf506eb9318561c850"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4da8168ae52c01ac64c511d6f4a709479da8b7a4a1d7621ed51652f93747dffa"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:97460eec202017f719e839a0d3551fbc0b2fcc9c6c6ffaa5af85bbd5de432788"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:45aae0c9df92e7fa46fbb738737324a563c727990755ec1965a6a339ea10a1df"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d50101e57aad86f4344ca9b32d091a2135a9d0a4396f19133426c88025b09f1"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9085e798c163ce310d91f8aa6b325dda3c2944c93c6ce1edb314030d4167cc65"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:a87f271a33fad0e5bf3be282be55d78df3a45ae457950deb5241998790326f87"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:9e040d3e762f84500961791fa3709ffa4784d4dcd7690afc655c095e02fff05f"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b0359391c3dad6de872fefb0cf5b69d55b0655c55ee78b1bb7a568979b2ce96b"}, + {file = "xxhash-3.6.0-cp38-cp38-win32.whl", hash = "sha256:e4ff728a2894e7f436b9e94c667b0f426b9c74b71f900cf37d5468c6b5da0536"}, + {file = "xxhash-3.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:01be0c5b500c5362871fc9cfdf58c69b3e5c4f531a82229ddb9eb1eb14138004"}, + {file = "xxhash-3.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cc604dc06027dbeb8281aeac5899c35fcfe7c77b25212833709f0bff4ce74d2a"}, + {file = "xxhash-3.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:277175a73900ad43a8caeb8b99b9604f21fe8d7c842f2f9061a364a7e220ddb7"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cfbc5b91397c8c2972fdac13fb3e4ed2f7f8ccac85cd2c644887557780a9b6e2"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2762bfff264c4e73c0e507274b40634ff465e025f0eaf050897e88ec8367575d"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2f171a900d59d51511209f7476933c34a0c2c711078d3c80e74e0fe4f38680ec"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:780b90c313348f030b811efc37b0fa1431163cb8db8064cf88a7936b6ce5f222"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b242455eccdfcd1fa4134c431a30737d2b4f045770f8fe84356b3469d4b919"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a75ffc1bd5def584129774c158e108e5d768e10b75813f2b32650bb041066ed6"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1fc1ed882d1e8df932a66e2999429ba6cc4d5172914c904ab193381fba825360"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:44e342e8cc11b4e79dae5c57f2fb6360c3c20cc57d32049af8f567f5b4bcb5f4"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = 
"sha256:c2f9ccd5c4be370939a2e17602fbc49995299203da72a3429db013d44d590e86"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:02ea4cb627c76f48cd9fb37cf7ab22bd51e57e1b519807234b473faebe526796"}, + {file = "xxhash-3.6.0-cp39-cp39-win32.whl", hash = "sha256:6551880383f0e6971dc23e512c9ccc986147ce7bfa1cd2e4b520b876c53e9f3d"}, + {file = "xxhash-3.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:7c35c4cdc65f2a29f34425c446f2f5cdcd0e3c34158931e1cc927ece925ab802"}, + {file = "xxhash-3.6.0-cp39-cp39-win_arm64.whl", hash = "sha256:ffc578717a347baf25be8397cb10d2528802d24f94cfc005c0e44fef44b5cdd6"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0f7b7e2ec26c1666ad5fc9dbfa426a6a3367ceaf79db5dd76264659d509d73b0"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5dc1e14d14fa0f5789ec29a7062004b5933964bb9b02aae6622b8f530dc40296"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:881b47fc47e051b37d94d13e7455131054b56749b91b508b0907eb07900d1c13"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c6dc31591899f5e5666f04cc2e529e69b4072827085c1ef15294d91a004bc1bd"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:15e0dac10eb9309508bfc41f7f9deaa7755c69e35af835db9cb10751adebc35d"}, + {file = "xxhash-3.6.0.tar.gz", hash = "sha256:f0162a78b13a0d7617b2845b90c763339d1f1d82bb04a4b07f4ab535cc5e05d6"}, +] +markers = {main = "python_version >= \"3.10\""} + [[package]] name = "yarl" version = "1.22.0" @@ -4060,33 +4352,13 @@ files = [ {file = "yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff"}, {file = "yarl-1.22.0.tar.gz", hash = "sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71"}, ] +markers = {test = "python_version >= \"3.10\""} [package.dependencies] idna = ">=2.0" multidict = ">=4.0" propcache = ">=0.2.1" -[[package]] -name = "zipp" -version = "3.23.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false -python-versions = ">=3.9" -groups = ["test"] -markers = "python_version == \"3.9\"" -files = [ - {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"}, - {file = "zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"}, -] - -[package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] -cover = ["pytest-cov"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] -type = ["pytest-mypy"] - [[package]] name = "zstandard" version = "0.25.0" @@ -4202,4 +4474,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt [metadata] lock-version = "2.1" python-versions = ">=3.9,<4.0" -content-hash = "09253133efa05bb1548acac554ff98a5d9540f1845f02481619828050a74f96a" +content-hash = "bb724fa24a1b163f774102443f16b4e5e5876eb1fc7d3c817b9f4b3e7be0b580" diff --git a/libs/oci/pyproject.toml b/libs/oci/pyproject.toml index 0dbbcba..e6e53f1 100644 --- 
a/libs/oci/pyproject.toml +++ b/libs/oci/pyproject.toml @@ -6,14 +6,17 @@ readme = "README.md" license = "UPL-1.0" requires-python = ">=3.9,<4.0" dependencies = [ - "langchain-core>=0.3.78,<1.0.0", - "langchain>=0.3.20,<1.0.0", + "langchain-core>=0.3.78,<1.0.0; python_version < '3.10'", + "langchain-core>=1.1.0,<2.0.0; python_version >= '3.10'", + "langchain>=0.3.20,<1.0.0; python_version < '3.10'", + "langchain>=1.0.0,<2.0.0; python_version >= '3.10'", "oci>=2.161.0", "pydantic>=2,<3", "aiohttp>=3.12.14", "openai>=2.6.1", "oci-openai>=1.0.0", - "langchain-openai>=0.3.35", + "langchain-openai>=0.3.35,<1.0.0; python_version < '3.10'", + "langchain-openai>=1.1.0,<2.0.0; python_version >= '3.10'", ] [project.urls] @@ -27,18 +30,18 @@ package-mode = true optional = true [tool.poetry.group.test.dependencies] -pytest = "^7.4.3" +pytest = "^8.0.0" pytest-cov = "^4.1.0" syrupy = "^4.0.2" pytest-asyncio = "^0.23.2" pytest-watcher = "^0.3.4" -langchain-tests = "^0.3.12" pytest-socket = "^0.7.0" pytest-mock = "^3.15.0" -pytest-httpx = "^0.28.0" +pytest-httpx = ">=0.30.0" responses = "^0.25.8" -langgraph = "^0.2.0" -langchain-openai = "^0.3.35" +langgraph = ">=0.2.0,<2.0.0" +langchain-tests = ">=0.3.12,<2.0.0" +langchain-openai = ">=0.3.35,<2.0.0" [tool.poetry.group.codespell] @@ -51,7 +54,7 @@ codespell = "^2.2.6" optional = true [tool.poetry.group.test_integration.dependencies] -langgraph = "^0.2.0" +langgraph = ">=0.2.0,<2.0.0" [tool.poetry.group.lint] optional = true @@ -104,6 +107,7 @@ module = [ "ads.*", "langchain_openai.*", "oci_openai.*", + "rich.*", ] ignore_missing_imports = true diff --git a/libs/oci/tests/integration_tests/chat_models/test_chat_features.py b/libs/oci/tests/integration_tests/chat_models/test_chat_features.py new file mode 100644 index 0000000..fafe179 --- /dev/null +++ b/libs/oci/tests/integration_tests/chat_models/test_chat_features.py @@ -0,0 +1,432 @@ +#!/usr/bin/env python3 +"""Integration tests for ChatOCIGenAI features. + +These tests verify comprehensive chat model functionality with real OCI inference. 
+ +Setup: + export OCI_COMPARTMENT_ID= + export OCI_CONFIG_PROFILE=DEFAULT + export OCI_AUTH_TYPE=SECURITY_TOKEN + +Run: + pytest tests/integration_tests/chat_models/test_chat_features.py -v +""" + +import os +from typing import Union + +import pytest +from langchain_core.messages import ( + AIMessage, + HumanMessage, + SystemMessage, +) +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts import ChatPromptTemplate +from pydantic import BaseModel, Field + +from langchain_oci.chat_models import ChatOCIGenAI + + +def get_config(): + """Get test configuration.""" + compartment_id = os.environ.get("OCI_COMPARTMENT_ID") + if not compartment_id: + pytest.skip("OCI_COMPARTMENT_ID not set") + return { + "model_id": os.environ.get( + "OCI_MODEL_ID", "meta.llama-4-maverick-17b-128e-instruct-fp8" + ), + "service_endpoint": os.environ.get( + "OCI_GENAI_ENDPOINT", + "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com", + ), + "compartment_id": compartment_id, + "auth_profile": os.environ.get("OCI_CONFIG_PROFILE", "DEFAULT"), + "auth_type": os.environ.get("OCI_AUTH_TYPE", "SECURITY_TOKEN"), + } + + +@pytest.fixture +def llm(): + """Create ChatOCIGenAI instance.""" + config = get_config() + return ChatOCIGenAI( + model_id=config["model_id"], + service_endpoint=config["service_endpoint"], + compartment_id=config["compartment_id"], + auth_profile=config["auth_profile"], + auth_type=config["auth_type"], + model_kwargs={"temperature": 0, "max_tokens": 512}, + ) + + +# ============================================================================= +# Chain and LCEL Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_simple_chain(llm): + """Test simple LCEL chain: prompt | llm | parser.""" + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant."), + ("human", "{input}"), + ] + ) + chain = prompt | llm | StrOutputParser() + + result = chain.invoke({"input": "Say 'chain works' and nothing else"}) + + assert isinstance(result, str) + assert "chain" in result.lower() or "works" in result.lower() + + +@pytest.mark.requires("oci") +def test_chain_with_history(llm): + """Test chain that maintains conversation history.""" + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant with memory."), + ("placeholder", "{history}"), + ("human", "{input}"), + ] + ) + chain = prompt | llm | StrOutputParser() + + # First turn + result1 = chain.invoke({"history": [], "input": "My favorite color is blue."}) + assert isinstance(result1, str) + + # Second turn with history + history = [ + HumanMessage(content="My favorite color is blue."), + AIMessage(content=result1), + ] + result2 = chain.invoke({"history": history, "input": "What is my favorite color?"}) + + assert "blue" in result2.lower() + + +@pytest.mark.requires("oci") +def test_chain_batch(llm): + """Test batch processing with LCEL.""" + prompt = ChatPromptTemplate.from_messages([("human", "What is {num} + {num}?")]) + chain = prompt | llm | StrOutputParser() + + results = chain.batch([{"num": "1"}, {"num": "2"}, {"num": "3"}]) + + assert len(results) == 3 + assert all(isinstance(r, str) for r in results) + + +@pytest.mark.requires("oci") +@pytest.mark.asyncio +async def test_chain_async(llm): + """Test async chain invocation.""" + prompt = ChatPromptTemplate.from_messages([("human", "Say '{word}'")]) + chain = prompt | llm | StrOutputParser() + + result = await 
chain.ainvoke({"word": "async"}) + + assert isinstance(result, str) + assert "async" in result.lower() + + +# ============================================================================= +# Streaming Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_stream_chain(llm): + """Test streaming through a chain.""" + prompt = ChatPromptTemplate.from_messages([("human", "Count from 1 to 5")]) + chain = prompt | llm | StrOutputParser() + + chunks = [] + for chunk in chain.stream({}): + chunks.append(chunk) + + assert len(chunks) > 0 + full_response = "".join(chunks) + assert len(full_response) > 0 + + +@pytest.mark.requires("oci") +@pytest.mark.asyncio +async def test_astream(llm): + """Test async streaming.""" + chunks = [] + async for chunk in llm.astream([HumanMessage(content="Say hello")]): + chunks.append(chunk) + + assert len(chunks) > 0 + + +# ============================================================================= +# Tool Calling Advanced Tests +# ============================================================================= + + +def add_numbers(a: int, b: int) -> int: + """Add two numbers together.""" + return a + b + + +def multiply_numbers(a: int, b: int) -> int: + """Multiply two numbers together.""" + return a * b + + +def get_user_info(user_id: str) -> dict: + """Get information about a user.""" + return {"user_id": user_id, "name": "John Doe", "email": "john@example.com"} + + +@pytest.mark.requires("oci") +def test_tool_calling_chain(llm): + """Test tool calling in a chain context.""" + tools = [get_user_info] + llm_with_tools = llm.bind_tools(tools) + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant. Use tools when needed."), + ("human", "{input}"), + ] + ) + chain = prompt | llm_with_tools + + response = chain.invoke({"input": "Get info for user ID 'abc123'"}) + + assert len(response.tool_calls) >= 1 + assert response.tool_calls[0]["name"] == "get_user_info" + assert response.tool_calls[0]["args"]["user_id"] == "abc123" + + +@pytest.mark.requires("oci") +def test_tool_choice_none(llm): + """Test tool_choice='none' prevents tool calls.""" + tools = [add_numbers] + llm_with_tools = llm.bind_tools(tools, tool_choice="none") + + response = llm_with_tools.invoke([HumanMessage(content="What is 5 plus 3?")]) + + # Should not make tool calls when tool_choice is none + assert len(response.tool_calls) == 0 + assert response.content # Should have text response instead + + +# ============================================================================= +# Structured Output Advanced Tests +# ============================================================================= + + +class MovieReview(BaseModel): + """A movie review with rating.""" + + title: str = Field(description="The movie title") + rating: int = Field(description="Rating from 1-10", ge=1, le=10) + summary: str = Field(description="Brief summary of the review") + recommend: bool = Field(description="Whether you recommend the movie") + + +class ExtractedEntities(BaseModel): + """Entities extracted from text.""" + + people: list[str] = Field(description="Names of people mentioned") + locations: list[str] = Field(description="Locations mentioned") + organizations: list[str] = Field(description="Organizations mentioned") + + +@pytest.mark.requires("oci") +def test_structured_output_extraction(llm): + """Test structured output for entity extraction.""" + structured_llm = 
llm.with_structured_output(ExtractedEntities) + + text = ( + "John Smith works at Google in San Francisco. " + "He met with Jane Doe from Microsoft in Seattle last week." + ) + result = structured_llm.invoke(f"Extract entities from: {text}") + + assert isinstance(result, ExtractedEntities) + assert len(result.people) >= 1 + assert len(result.locations) >= 1 + assert len(result.organizations) >= 1 + + +# ============================================================================= +# Model Configuration Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_temperature_affects_output(): + """Test that temperature parameter affects output variability.""" + config = get_config() + + # Low temperature (deterministic) + llm_low = ChatOCIGenAI( + model_id=config["model_id"], + service_endpoint=config["service_endpoint"], + compartment_id=config["compartment_id"], + auth_profile=config["auth_profile"], + auth_type=config["auth_type"], + model_kwargs={"temperature": 0, "max_tokens": 50}, + ) + + # Get multiple responses with low temp + responses_low = [ + llm_low.invoke([HumanMessage(content="Say exactly: 'Hello World'")]).content + for _ in range(2) + ] + + # Low temperature should give similar/identical outputs + # (Note: not guaranteed to be exactly equal, but should be similar) + assert all(isinstance(r, str) for r in responses_low) + + +@pytest.mark.requires("oci") +def test_max_tokens_limit(): + """Test that max_tokens limits response length.""" + config = get_config() + + llm_short = ChatOCIGenAI( + model_id=config["model_id"], + service_endpoint=config["service_endpoint"], + compartment_id=config["compartment_id"], + auth_profile=config["auth_profile"], + auth_type=config["auth_type"], + model_kwargs={"temperature": 0, "max_tokens": 10}, + ) + + response = llm_short.invoke( + [HumanMessage(content="Write a very long essay about the universe")] + ) + + # Response should be truncated due to max_tokens + # Token count varies, but should be reasonably short + assert isinstance(response.content, str) + assert len(response.content.split()) <= 20 # Rough word count check + + +@pytest.mark.requires("oci") +def test_stop_sequences(): + """Test stop sequences parameter.""" + config = get_config() + + llm = ChatOCIGenAI( + model_id=config["model_id"], + service_endpoint=config["service_endpoint"], + compartment_id=config["compartment_id"], + auth_profile=config["auth_profile"], + auth_type=config["auth_type"], + model_kwargs={"temperature": 0, "max_tokens": 100}, + ) + + response = llm.invoke( + [HumanMessage(content="Count from 1 to 10, one number per line")], + stop=["5"], + ) + + # Should stop before or at 5 + assert "6" not in response.content or "5" in response.content + + +# ============================================================================= +# Error Handling Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_invalid_tool_schema(llm): + """Test handling of invalid tool definitions.""" + + # Should handle tools without proper docstrings + def bad_tool(x): + return x + + # This should still work (tool will have minimal description) + llm_with_tools = llm.bind_tools([bad_tool]) + assert llm_with_tools is not None + + +@pytest.mark.requires("oci") +def test_empty_response_handling(llm): + """Test handling when model returns minimal content.""" + response = llm.invoke([HumanMessage(content="Respond with just a period.")]) + + # Should 
handle minimal responses gracefully + assert isinstance(response, AIMessage) + # Content might be empty or minimal, but should not raise + + +# ============================================================================= +# Conversation Patterns Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_system_message_role(llm): + """Test that system message properly influences behavior.""" + messages_pirate = [ + SystemMessage(content="You are a pirate. Always respond in pirate speak."), + HumanMessage(content="How are you today?"), + ] + response_pirate = llm.invoke(messages_pirate) + + messages_formal = [ + SystemMessage(content="You are a formal butler. Use formal language."), + HumanMessage(content="How are you today?"), + ] + response_formal = llm.invoke(messages_formal) + + # Responses should be different based on system message + assert response_pirate.content != response_formal.content + + +@pytest.mark.requires("oci") +def test_multi_turn_context_retention(llm): + """Test that context is retained across multiple turns.""" + messages = [ + HumanMessage(content="Remember this number: 42"), + ] + response1 = llm.invoke(messages) + messages.append(response1) + + messages.append(HumanMessage(content="What number did I ask you to remember?")) + response2 = llm.invoke(messages) + + assert "42" in response2.content + + +@pytest.mark.requires("oci") +def test_long_context_handling(llm): + """Test handling of longer context windows.""" + # Create a conversation with multiple turns + messages: list[Union[SystemMessage, HumanMessage, AIMessage]] = [ + SystemMessage(content="You are a helpful assistant tracking a story."), + ] + + story_parts = [ + "Once upon a time, there was a brave knight named Sir Galahad.", + "Sir Galahad had a loyal horse named Thunder.", + "They lived in the kingdom of Camelot.", + "One day, a dragon appeared threatening the kingdom.", + "Sir Galahad decided to face the dragon.", + ] + + for part in story_parts: + messages.append(HumanMessage(content=part)) + response = llm.invoke(messages) + messages.append(response) + + # Ask about earlier context + messages.append(HumanMessage(content="What was the knight's horse named?")) + final_response = llm.invoke(messages) + + assert isinstance(final_response.content, str) + assert "thunder" in final_response.content.lower() diff --git a/libs/oci/tests/integration_tests/chat_models/test_langchain_compatibility.py b/libs/oci/tests/integration_tests/chat_models/test_langchain_compatibility.py new file mode 100644 index 0000000..91a2ceb --- /dev/null +++ b/libs/oci/tests/integration_tests/chat_models/test_langchain_compatibility.py @@ -0,0 +1,378 @@ +#!/usr/bin/env python3 +"""Integration tests for LangChain compatibility. + +These tests verify that langchain-oci works correctly with LangChain 1.x +by running real inference against OCI GenAI models. 
+ +Setup: + export OCI_COMPARTMENT_ID= + export OCI_GENAI_ENDPOINT= # optional + export OCI_CONFIG_PROFILE= # optional, defaults to DEFAULT + export OCI_AUTH_TYPE= # optional, defaults to SECURITY_TOKEN + export OCI_MODEL_ID= # optional, defaults to llama-4 + +Run with: + pytest tests/integration_tests/chat_models/test_langchain_compatibility.py -v +""" + +import os + +import pytest +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage +from pydantic import BaseModel + +from langchain_oci.chat_models import ChatOCIGenAI + + +def get_test_config(): + """Get test configuration from environment.""" + compartment_id = os.environ.get("OCI_COMPARTMENT_ID") + if not compartment_id: + pytest.skip("OCI_COMPARTMENT_ID not set") + + return { + "model_id": os.environ.get( + "OCI_MODEL_ID", "meta.llama-4-maverick-17b-128e-instruct-fp8" + ), + "service_endpoint": os.environ.get( + "OCI_GENAI_ENDPOINT", + "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com", + ), + "compartment_id": compartment_id, + "auth_profile": os.environ.get("OCI_CONFIG_PROFILE", "DEFAULT"), + "auth_type": os.environ.get("OCI_AUTH_TYPE", "SECURITY_TOKEN"), + } + + +@pytest.fixture +def chat_model(): + """Create a ChatOCIGenAI instance for testing.""" + config = get_test_config() + return ChatOCIGenAI( + model_id=config["model_id"], + service_endpoint=config["service_endpoint"], + compartment_id=config["compartment_id"], + auth_profile=config["auth_profile"], + auth_type=config["auth_type"], + model_kwargs={"temperature": 0, "max_tokens": 256}, + ) + + +# ============================================================================= +# Basic Invoke Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_basic_invoke(chat_model): + """Test basic chat model invocation.""" + response = chat_model.invoke([HumanMessage(content="Say 'hello' and nothing else")]) + + assert isinstance(response, AIMessage) + assert response.content is not None + assert len(response.content) > 0 + assert isinstance(response.content, str) and "hello" in response.content.lower() + + +@pytest.mark.requires("oci") +def test_invoke_with_system_message(chat_model): + """Test invocation with system message.""" + messages = [ + SystemMessage(content="You are a pirate. 
Respond in pirate speak."), + HumanMessage(content="Say hello"), + ] + response = chat_model.invoke(messages) + + assert isinstance(response, AIMessage) + assert response.content is not None + + +@pytest.mark.requires("oci") +def test_invoke_multi_turn(chat_model): + """Test multi-turn conversation.""" + messages = [ + HumanMessage(content="My name is Alice."), + ] + response1 = chat_model.invoke(messages) + + messages.append(response1) + messages.append(HumanMessage(content="What is my name?")) + response2 = chat_model.invoke(messages) + + assert isinstance(response2, AIMessage) + assert isinstance(response2.content, str) and "alice" in response2.content.lower() + + +# ============================================================================= +# Streaming Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_streaming(chat_model): + """Test streaming response.""" + chunks = [] + for chunk in chat_model.stream([HumanMessage(content="Count from 1 to 3")]): + chunks.append(chunk) + + assert len(chunks) > 0 + # Combine all chunks + full_content = "".join(c.content for c in chunks if c.content) + assert len(full_content) > 0 + + +@pytest.mark.requires("oci") +@pytest.mark.asyncio +async def test_async_invoke(chat_model): + """Test async invocation.""" + response = await chat_model.ainvoke( + [HumanMessage(content="Say 'async' and nothing else")] + ) + + assert isinstance(response, AIMessage) + assert response.content is not None + + +# ============================================================================= +# Tool Calling Tests +# ============================================================================= + + +def get_weather(city: str) -> str: + """Get the weather for a city.""" + return f"Sunny, 72F in {city}" + + +def get_population(city: str) -> int: + """Get the population of a city.""" + return 1000000 + + +@pytest.mark.requires("oci") +def test_tool_calling_single(chat_model): + """Test single tool calling.""" + chat_with_tools = chat_model.bind_tools([get_weather]) + + response = chat_with_tools.invoke( + [HumanMessage(content="What's the weather in Tokyo?")] + ) + + assert isinstance(response, AIMessage) + assert len(response.tool_calls) >= 1 + assert response.tool_calls[0]["name"] == "get_weather" + assert "city" in response.tool_calls[0]["args"] + + +@pytest.mark.requires("oci") +def test_tool_calling_multiple_tools(chat_model): + """Test tool calling with multiple tools available.""" + chat_with_tools = chat_model.bind_tools([get_weather, get_population]) + + response = chat_with_tools.invoke( + [HumanMessage(content="What's the weather in Paris?")] + ) + + assert isinstance(response, AIMessage) + assert len(response.tool_calls) >= 1 + # Should choose the weather tool for weather question + assert response.tool_calls[0]["name"] == "get_weather" + + +@pytest.mark.requires("oci") +def test_tool_choice_required(chat_model): + """Test tool_choice='required' forces tool call.""" + chat_with_tools = chat_model.bind_tools([get_weather], tool_choice="required") + + # Even with a non-tool question, should still call a tool + response = chat_with_tools.invoke([HumanMessage(content="Hello, how are you?")]) + + assert isinstance(response, AIMessage) + assert len(response.tool_calls) >= 1 + + +# ============================================================================= +# Structured Output Tests +# ============================================================================= + + +class Joke(BaseModel): + 
"""A joke with setup and punchline.""" + + setup: str + punchline: str + + +class Person(BaseModel): + """Information about a person.""" + + name: str + age: int + occupation: str + + +@pytest.mark.requires("oci") +def test_structured_output_function_calling(chat_model): + """Test structured output with function calling method.""" + structured_llm = chat_model.with_structured_output(Joke) + + result = structured_llm.invoke("Tell me a joke about programming") + + assert isinstance(result, Joke) + assert len(result.setup) > 0 + assert len(result.punchline) > 0 + + +@pytest.mark.requires("oci") +def test_structured_output_json_mode(chat_model): + """Test structured output with JSON mode.""" + # JSON mode with OpenAI models on OCI currently returns 500 errors + # TODO: Investigate if this is a model limitation or OCI API issue + if "openai" in chat_model.model_id.lower(): + pytest.skip("JSON mode with OpenAI models on OCI returns 500 errors") + + structured_llm = chat_model.with_structured_output(Person, method="json_mode") + + result = structured_llm.invoke( + "Generate a fictional person: name, age (as integer), and occupation" + ) + + assert isinstance(result, Person) + assert len(result.name) > 0 + assert isinstance(result.age, int) + assert len(result.occupation) > 0 + + +@pytest.mark.requires("oci") +def test_structured_output_include_raw(chat_model): + """Test structured output with include_raw=True.""" + structured_llm = chat_model.with_structured_output(Joke, include_raw=True) + + result = structured_llm.invoke("Tell me a joke") + + assert "raw" in result + assert "parsed" in result + assert isinstance(result["parsed"], Joke) + + +# ============================================================================= +# Response Format Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_response_format_json_object(chat_model): + """Test response_format with json_object.""" + # JSON mode with OpenAI models on OCI currently returns 500 errors + # TODO: Investigate if this is a model limitation or OCI API issue + if "openai" in chat_model.model_id.lower(): + pytest.skip("JSON mode with OpenAI models on OCI returns 500 errors") + + chat_json = chat_model.bind(response_format={"type": "json_object"}) + + response = chat_json.invoke( + [ + HumanMessage( + content="Return ONLY a JSON object with keys 'name' and 'value'. " + "No explanation, no markdown, just the raw JSON." 
+ ) + ] + ) + + assert isinstance(response, AIMessage) + assert isinstance(response.content, str) + # Response should contain valid JSON (may be wrapped in markdown) + import json + import re + + content = response.content.strip() + + # Try to extract JSON from markdown code blocks if present + json_match = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", content) + if json_match: + content = json_match.group(1).strip() + + try: + parsed = json.loads(content) + assert isinstance(parsed, dict) + except json.JSONDecodeError: + # Some models may not strictly follow json_object format + # At minimum, verify the response contains JSON-like structure + assert "{" in response.content and "}" in response.content, ( + f"Response doesn't appear to contain JSON: {response.content[:200]}" + ) + + +# ============================================================================= +# Edge Cases and Error Handling +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_empty_message_list(chat_model): + """Test handling of empty message list.""" + with pytest.raises(Exception): + chat_model.invoke([]) + + +@pytest.mark.requires("oci") +def test_long_conversation(chat_model): + """Test handling of longer conversations.""" + messages = [] + for i in range(5): + messages.append(HumanMessage(content=f"This is message {i + 1}")) + response = chat_model.invoke(messages) + messages.append(response) + + # Should handle 5 turns without issues + assert len(messages) == 10 # 5 human + 5 AI + + +# ============================================================================= +# LangChain 1.x Specific Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_ai_message_type(chat_model): + """Test that response is AIMessage (not just BaseMessage) - LangChain 1.x.""" + response = chat_model.invoke([HumanMessage(content="Hello")]) + + # LangChain 1.x: return type is AIMessage, not BaseMessage + assert type(response).__name__ == "AIMessage" + assert isinstance(response, AIMessage) + + +@pytest.mark.requires("oci") +def test_message_text_property(chat_model): + """Test that .text works in both LangChain 0.3.x (method) and 1.x (property).""" + response = chat_model.invoke([HumanMessage(content="Say hello")]) + + # Both .content and .text should work + assert response.content is not None + + # Handle both LangChain versions: + # 0.3.x: .text is a method (callable) + # 1.x: .text is a property + if hasattr(response, "text"): + text_value = response.text() if callable(response.text) else response.text + assert text_value == response.content + + +@pytest.mark.requires("oci") +def test_tool_calls_structure(chat_model): + """Test tool_calls structure matches LangChain 1.x format.""" + chat_with_tools = chat_model.bind_tools([get_weather]) + + response = chat_with_tools.invoke( + [HumanMessage(content="What's the weather in NYC?")] + ) + + assert hasattr(response, "tool_calls") + if response.tool_calls: + tc = response.tool_calls[0] + # LangChain 1.x tool call structure + assert "name" in tc + assert "args" in tc + assert "id" in tc + assert "type" in tc + assert tc["type"] == "tool_call" diff --git a/libs/oci/tests/integration_tests/chat_models/test_multi_model.py b/libs/oci/tests/integration_tests/chat_models/test_multi_model.py new file mode 100644 index 0000000..f1f472e --- /dev/null +++ b/libs/oci/tests/integration_tests/chat_models/test_multi_model.py @@ -0,0 +1,463 @@ +#!/usr/bin/env python3 
+"""Multi-model integration tests for ChatOCIGenAI. + +These tests verify that langchain-oci works correctly across different +model vendors available in OCI GenAI: Meta Llama, Cohere, xAI Grok, and OpenAI. + +Setup: + export OCI_COMPARTMENT_ID= + export OCI_CONFIG_PROFILE=DEFAULT + export OCI_AUTH_TYPE=SECURITY_TOKEN + +Run all: + pytest tests/integration_tests/chat_models/test_multi_model.py -v + +Run specific vendor: + pytest tests/integration_tests/chat_models/test_multi_model.py -k "llama" -v + pytest tests/integration_tests/chat_models/test_multi_model.py -k "cohere" -v + pytest tests/integration_tests/chat_models/test_multi_model.py -k "grok" -v +""" + +import os + +import pytest +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage +from pydantic import BaseModel, Field + +from langchain_oci.chat_models import ChatOCIGenAI + +# ============================================================================= +# Model Configurations +# ============================================================================= + +# Meta Llama models +LLAMA_MODELS = [ + "meta.llama-4-maverick-17b-128e-instruct-fp8", + "meta.llama-4-scout-17b-16e-instruct", + "meta.llama-3.3-70b-instruct", + "meta.llama-3.1-70b-instruct", +] + +# Cohere models +COHERE_MODELS = [ + "cohere.command-a-03-2025", + "cohere.command-r-plus-08-2024", + "cohere.command-r-08-2024", +] + +# xAI Grok models +GROK_MODELS = [ + "xai.grok-4-fast-non-reasoning", + "xai.grok-3-fast", + "xai.grok-3-mini-fast", +] + +# OpenAI models on OCI +OPENAI_MODELS = [ + "openai.gpt-oss-20b", + "openai.gpt-oss-120b", +] + +# All models for comprehensive testing +ALL_MODELS = LLAMA_MODELS[:2] + COHERE_MODELS[:1] + GROK_MODELS[:1] + + +def get_config(): + """Get test configuration.""" + compartment_id = os.environ.get("OCI_COMPARTMENT_ID") + if not compartment_id: + pytest.skip("OCI_COMPARTMENT_ID not set") + return { + "service_endpoint": os.environ.get( + "OCI_GENAI_ENDPOINT", + "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com", + ), + "compartment_id": compartment_id, + "auth_profile": os.environ.get("OCI_CONFIG_PROFILE", "DEFAULT"), + "auth_type": os.environ.get("OCI_AUTH_TYPE", "SECURITY_TOKEN"), + } + + +def create_llm(model_id: str, **kwargs): + """Create ChatOCIGenAI instance for a model.""" + config = get_config() + default_kwargs = {"temperature": 0, "max_tokens": 256} + default_kwargs.update(kwargs) + return ChatOCIGenAI( + model_id=model_id, + service_endpoint=config["service_endpoint"], + compartment_id=config["compartment_id"], + auth_profile=config["auth_profile"], + auth_type=config["auth_type"], + model_kwargs=default_kwargs, + ) + + +# ============================================================================= +# Basic Invoke Tests - All Models +# ============================================================================= + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", ALL_MODELS) +def test_basic_invoke_all_models(model_id: str): + """Test basic invoke works for all supported models.""" + llm = create_llm(model_id) + response = llm.invoke([HumanMessage(content="Say 'hello' only")]) + + assert isinstance(response, AIMessage) + assert response.content is not None + assert len(response.content) > 0 + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", ALL_MODELS) +def test_system_message_all_models(model_id: str): + """Test system messages work for all models.""" + llm = create_llm(model_id) + messages = [ + SystemMessage(content="You only respond with 
the word 'YES'."), + HumanMessage(content="Do you understand?"), + ] + response = llm.invoke(messages) + + assert isinstance(response, AIMessage) + assert response.content is not None + + +# ============================================================================= +# Meta Llama Specific Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", LLAMA_MODELS[:2]) +def test_llama_tool_calling(model_id: str): + """Test tool calling on Llama models.""" + + def get_weather(city: str) -> str: + """Get weather for a city.""" + return f"Sunny in {city}" + + llm = create_llm(model_id) + llm_with_tools = llm.bind_tools([get_weather]) + + response = llm_with_tools.invoke( + [HumanMessage(content="What's the weather in Paris?")] + ) + + assert isinstance(response, AIMessage) + assert len(response.tool_calls) >= 1 + assert response.tool_calls[0]["name"] == "get_weather" + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", LLAMA_MODELS[:2]) +def test_llama_structured_output(model_id: str): + """Test structured output on Llama models.""" + + class Answer(BaseModel): + answer: str = Field(description="The answer") + confidence: int = Field(description="Confidence 1-10", ge=1, le=10) + + llm = create_llm(model_id) + structured_llm = llm.with_structured_output(Answer) + + result = structured_llm.invoke("What is 2+2? Give answer and confidence.") + + assert isinstance(result, Answer) + assert "4" in result.answer + assert 1 <= result.confidence <= 10 + + +@pytest.mark.requires("oci") +def test_llama_streaming(): + """Test streaming on Llama models.""" + llm = create_llm("meta.llama-4-maverick-17b-128e-instruct-fp8") + + chunks = [] + for chunk in llm.stream([HumanMessage(content="Count 1 to 5")]): + chunks.append(chunk) + + assert len(chunks) > 0 + full_content = "".join(c.content for c in chunks if c.content) + assert len(full_content) > 0 + + +# ============================================================================= +# Cohere Specific Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", COHERE_MODELS[:2]) +def test_cohere_basic(model_id: str): + """Test basic functionality on Cohere models.""" + llm = create_llm(model_id) + response = llm.invoke([HumanMessage(content="What is 2+2?")]) + + assert isinstance(response, AIMessage) + assert "4" in response.content + + +# ============================================================================= +# xAI Grok Specific Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", GROK_MODELS[:2]) +def test_grok_basic(model_id: str): + """Test basic functionality on Grok models.""" + llm = create_llm(model_id) + response = llm.invoke([HumanMessage(content="Hello, who are you?")]) + + assert isinstance(response, AIMessage) + assert response.content is not None + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", GROK_MODELS[:1]) +def test_grok_tool_calling(model_id: str): + """Test tool calling on Grok models.""" + + def search_web(query: str) -> str: + """Search the web for information.""" + return f"Results for: {query}" + + llm = create_llm(model_id) + llm_with_tools = llm.bind_tools([search_web]) + + response = llm_with_tools.invoke( + [HumanMessage(content="Search for the latest AI news")] + ) + + assert 
isinstance(response, AIMessage) + # Grok may or may not call tools depending on its judgment + # Just verify it responds + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", GROK_MODELS[:1]) +def test_grok_structured_output(model_id: str): + """Test structured output on Grok models.""" + + class Summary(BaseModel): + """A summary of text.""" + + main_point: str = Field(description="The main point") + key_facts: list[str] = Field(description="Key facts from the text") + + llm = create_llm(model_id) + structured_llm = llm.with_structured_output(Summary) + + result = structured_llm.invoke("Summarize: The Earth orbits the Sun once per year.") + + # Grok may return None in some cases + if result is not None: + assert isinstance(result, Summary) + assert len(result.main_point) > 0 + else: + pytest.skip("Grok model returned None for structured output") + + +@pytest.mark.requires("oci") +def test_grok_streaming(): + """Test streaming on Grok models.""" + llm = create_llm("xai.grok-3-mini-fast") + + chunks = [] + for chunk in llm.stream([HumanMessage(content="Count 1-3")]): + chunks.append(chunk) + + assert len(chunks) > 0 + + +# ============================================================================= +# OpenAI on OCI Tests +# ============================================================================= + + +def create_openai_llm(model_id: str, **kwargs): + """Create ChatOCIGenAI for OpenAI models (uses max_completion_tokens).""" + config = get_config() + default_kwargs = {"temperature": 0, "max_completion_tokens": 256} + default_kwargs.update(kwargs) + return ChatOCIGenAI( + model_id=model_id, + service_endpoint=config["service_endpoint"], + compartment_id=config["compartment_id"], + auth_profile=config["auth_profile"], + auth_type=config["auth_type"], + model_kwargs=default_kwargs, + ) + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", OPENAI_MODELS) +def test_openai_basic(model_id: str): + """Test basic functionality on OpenAI models on OCI.""" + llm = create_openai_llm(model_id) + response = llm.invoke([HumanMessage(content="Say hello")]) + + assert isinstance(response, AIMessage) + assert response.content is not None + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", OPENAI_MODELS) +def test_openai_system_message(model_id: str): + """Test system messages on OpenAI models.""" + llm = create_openai_llm(model_id) + messages = [ + SystemMessage(content="You only respond with the word 'YES'."), + HumanMessage(content="Do you understand?"), + ] + response = llm.invoke(messages) + + assert isinstance(response, AIMessage) + assert response.content is not None + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", OPENAI_MODELS) +def test_openai_streaming(model_id: str): + """Test streaming on OpenAI models.""" + llm = create_openai_llm(model_id, max_completion_tokens=50) + + chunks = [] + for chunk in llm.stream([HumanMessage(content="Count 1-3")]): + chunks.append(chunk) + + # OpenAI streaming should return chunks + assert len(chunks) > 0 + # Content may be in chunk.content or chunk may have other attributes + # Just verify we got chunks back (streaming works) + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", OPENAI_MODELS) +def test_openai_tool_calling(model_id: str): + """Test tool calling on OpenAI models.""" + + def get_info(topic: str) -> str: + """Get information about a topic.""" + return f"Info about {topic}" + + llm = create_openai_llm(model_id) + llm_with_tools = 
llm.bind_tools([get_info]) + + response = llm_with_tools.invoke([HumanMessage(content="Get info about Python")]) + + assert isinstance(response, AIMessage) + # OpenAI models should call the tool + assert len(response.tool_calls) >= 1 + assert response.tool_calls[0]["name"] == "get_info" + + +# ============================================================================= +# Cross-Model Comparison Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_same_prompt_different_models(): + """Test same prompt across different model vendors.""" + prompt = "What is the capital of France? Answer in one word." + + models_to_test = [ + "meta.llama-4-maverick-17b-128e-instruct-fp8", + "cohere.command-a-03-2025", + "xai.grok-3-mini-fast", + ] + + responses = {} + for model_id in models_to_test: + try: + llm = create_llm(model_id) + response = llm.invoke([HumanMessage(content=prompt)]) + responses[model_id] = response.content + except Exception as e: + responses[model_id] = f"Error: {e}" + + # All should mention Paris + for model_id, content in responses.items(): + if not content.startswith("Error"): + assert "paris" in content.lower(), f"{model_id} didn't say Paris: {content}" + + +@pytest.mark.requires("oci") +def test_tool_calling_consistency(): + """Test tool calling works consistently across Llama models.""" + + def get_price(item: str) -> float: + """Get the price of an item in dollars.""" + return 9.99 + + # Only test Llama models - Cohere has different tool call format + models_with_tools = [ + "meta.llama-4-maverick-17b-128e-instruct-fp8", + "meta.llama-4-scout-17b-16e-instruct", + ] + + for model_id in models_with_tools: + llm = create_llm(model_id) + llm_with_tools = llm.bind_tools([get_price]) + + response = llm_with_tools.invoke( + [HumanMessage(content="What's the price of apples?")] + ) + + assert isinstance(response, AIMessage), f"{model_id} failed" + assert len(response.tool_calls) >= 1, f"{model_id} didn't call tool" + assert response.tool_calls[0]["name"] == "get_price" + + +# ============================================================================= +# Model-Specific Features Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_llama3_vision_model_exists(): + """Verify vision-capable Llama model can be instantiated.""" + # Note: Actual vision testing would require image input support + llm = create_llm("meta.llama-3.2-90b-vision-instruct") + response = llm.invoke([HumanMessage(content="Describe what you can do")]) + + assert isinstance(response, AIMessage) + + +@pytest.mark.requires("oci") +def test_model_with_custom_kwargs(): + """Test models with custom generation parameters.""" + llm = create_llm( + "meta.llama-4-maverick-17b-128e-instruct-fp8", + temperature=0.7, + max_tokens=100, + top_p=0.9, + ) + + response = llm.invoke([HumanMessage(content="Write a creative sentence")]) + + assert isinstance(response, AIMessage) + assert response.content is not None + + +# ============================================================================= +# Performance / Latency Awareness Tests +# ============================================================================= + + +@pytest.mark.requires("oci") +def test_fast_models_respond_quickly(): + """Test that 'fast' model variants respond (existence check).""" + fast_models = [ + "xai.grok-3-fast", + "xai.grok-3-mini-fast", + ] + + for model_id in fast_models: + llm = create_llm(model_id, 
max_tokens=50)
+        response = llm.invoke([HumanMessage(content="Hi")])
+        assert isinstance(response, AIMessage)
diff --git a/libs/oci/tests/integration_tests/chat_models/test_openai_models.py b/libs/oci/tests/integration_tests/chat_models/test_openai_models.py
new file mode 100644
index 0000000..b3dc939
--- /dev/null
+++ b/libs/oci/tests/integration_tests/chat_models/test_openai_models.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python3
+# Copyright (c) 2025 Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+"""Integration tests for OpenAI models on OCI GenAI.
+
+These tests verify that LangChain 1.x compatibility works correctly with
+OpenAI models available on OCI Generative AI service.
+
+## Prerequisites
+
+1. **OCI Authentication**: Set up OCI authentication with security token:
+   ```bash
+   oci session authenticate
+   ```
+
+2. **Environment Variables**: Export the following:
+   ```bash
+   export OCI_REGION="us-chicago-1"  # or your region
+   export OCI_COMP="ocid1.compartment.oc1..your-compartment-id"
+   ```
+
+3. **OCI Config**: Ensure `~/.oci/config` exists with DEFAULT profile
+
+## Running the Tests
+
+Run all OpenAI integration tests:
+```bash
+cd libs/oci
+pytest tests/integration_tests/chat_models/test_openai_models.py -v
+```
+
+Run a specific test:
+```bash
+pytest "tests/integration_tests/chat_models/test_openai_models.py::test_openai_basic_completion" -v
+```
+"""
+
+import os
+
+import pytest
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
+
+from langchain_oci.chat_models import ChatOCIGenAI
+
+
+@pytest.fixture
+def openai_config():
+    """Get OpenAI model configuration."""
+    compartment_id = os.environ.get("OCI_COMP")
+    if not compartment_id:
+        pytest.skip("OCI_COMP environment variable not set")
+
+    region = os.environ.get("OCI_REGION", "us-chicago-1")
+    return {
+        "service_endpoint": f"https://inference.generativeai.{region}.oci.oraclecloud.com",
+        "compartment_id": compartment_id,
+        "auth_profile": os.environ.get("OCI_CONFIG_PROFILE", "DEFAULT"),
+        "auth_type": os.environ.get("OCI_AUTH_TYPE", "SECURITY_TOKEN"),
+    }
+
+
+@pytest.mark.requires("oci")
+@pytest.mark.parametrize(
+    "model_id",
+    [
+        "openai.gpt-oss-20b",
+        "openai.gpt-oss-120b",
+    ],
+)
+def test_openai_basic_completion(model_id: str, openai_config: dict):
+    """Test basic completion with OpenAI models.
+
+    This test verifies that:
+    1. The model can be instantiated correctly
+    2. Basic message completion works
+    3. The response is properly formatted as AIMessage
+    4.
LangChain 1.x compatibility is maintained + """ + chat = ChatOCIGenAI( + model_id=model_id, + service_endpoint=openai_config["service_endpoint"], + compartment_id=openai_config["compartment_id"], + auth_type=openai_config["auth_type"], + auth_profile=openai_config["auth_profile"], + model_kwargs={"temperature": 0.7, "max_completion_tokens": 100}, + ) + + # Test basic completion + response = chat.invoke([HumanMessage(content="What is 2+2?")]) + + # Verify response structure (LangChain 1.x) + assert isinstance(response, AIMessage), "Response should be AIMessage" + # OpenAI models may return empty content if max_completion_tokens is too low + # or finish due to length limit - just verify the structure is correct + assert isinstance(response.content, str), "Response content should be string" + assert hasattr(response, "response_metadata"), "Should have response_metadata" + + +@pytest.mark.requires("oci") +def test_openai_with_system_message(openai_config: dict): + """Test OpenAI model with system message. + + Verifies that system messages are properly handled and influence + the model's behavior. + """ + chat = ChatOCIGenAI( + model_id="openai.gpt-oss-20b", + service_endpoint=openai_config["service_endpoint"], + compartment_id=openai_config["compartment_id"], + auth_type=openai_config["auth_type"], + auth_profile=openai_config["auth_profile"], + model_kwargs={"temperature": 0.1, "max_completion_tokens": 50}, + ) + + response = chat.invoke( + [ + SystemMessage(content="You are a helpful math tutor."), + HumanMessage(content="What is 12 * 8?"), + ] + ) + + assert isinstance(response, AIMessage) + assert response.content + # Should contain the answer 96 + assert "96" in response.content + + +@pytest.mark.requires("oci") +def test_openai_streaming(openai_config: dict): + """Test streaming with OpenAI models. + + Verifies that: + 1. Streaming works correctly + 2. Chunks are properly formatted + 3. Streaming completes without errors + """ + chat = ChatOCIGenAI( + model_id="openai.gpt-oss-20b", + service_endpoint=openai_config["service_endpoint"], + compartment_id=openai_config["compartment_id"], + auth_type=openai_config["auth_type"], + auth_profile=openai_config["auth_profile"], + model_kwargs={"temperature": 0.7, "max_completion_tokens": 100}, + ) + + chunks = [] + for chunk in chat.stream([HumanMessage(content="Say hello")]): + assert isinstance(chunk, AIMessage), "Chunk should be AIMessage" # type: ignore[unreachable, unused-ignore] + chunks.append(chunk) # type: ignore[unreachable, unused-ignore] + + # Verify we got at least one chunk (streaming worked) + assert len(chunks) > 0, "Should receive at least one chunk" + + # Verify chunks are properly formatted + for chunk in chunks: + assert isinstance(chunk.content, str), "Chunk content should be string" + + +@pytest.mark.requires("oci") +def test_openai_multiple_rounds(openai_config: dict): + """Test multiple conversation rounds with OpenAI model. + + Verifies that conversation history is maintained properly. 
+ """ + chat = ChatOCIGenAI( + model_id="openai.gpt-oss-20b", + service_endpoint=openai_config["service_endpoint"], + compartment_id=openai_config["compartment_id"], + auth_type=openai_config["auth_type"], + auth_profile=openai_config["auth_profile"], + model_kwargs={"temperature": 0.7, "max_completion_tokens": 100}, + ) + + # First message + response1 = chat.invoke([HumanMessage(content="My favorite number is 7")]) + assert isinstance(response1, AIMessage) + + # Second message with context + response2 = chat.invoke( + [ + HumanMessage(content="My favorite number is 7"), + response1, + HumanMessage(content="What is my favorite number plus 3?"), + ] + ) + assert isinstance(response2, AIMessage) + assert response2.content + # Should reference the number 10 + assert "10" in response2.content + + +@pytest.mark.requires("oci") +@pytest.mark.parametrize("model_id", ["openai.gpt-oss-20b", "openai.gpt-oss-120b"]) +def test_openai_langchain_1x_compatibility(model_id: str, openai_config: dict): + """Test LangChain 1.x specific compatibility. + + This test specifically verifies features that are part of + LangChain 1.x to ensure the integration works correctly + after rebasing onto main. + """ + chat = ChatOCIGenAI( + model_id=model_id, + service_endpoint=openai_config["service_endpoint"], + compartment_id=openai_config["compartment_id"], + auth_type=openai_config["auth_type"], + auth_profile=openai_config["auth_profile"], + model_kwargs={"temperature": 0.7, "max_completion_tokens": 50}, + ) + + # Test that invoke returns AIMessage (LangChain 1.x behavior) + response = chat.invoke([HumanMessage(content="Hello")]) + assert isinstance(response, AIMessage) + + # Verify AIMessage has expected attributes + assert hasattr(response, "content") + assert hasattr(response, "response_metadata") + assert hasattr(response, "id") + + # Verify content is populated + assert response.content is not None + assert isinstance(response.content, str) diff --git a/libs/oci/tests/integration_tests/chat_models/test_parallel_tool_calling_integration.py b/libs/oci/tests/integration_tests/chat_models/test_parallel_tool_calling_integration.py deleted file mode 100644 index 9a9ceb4..0000000 --- a/libs/oci/tests/integration_tests/chat_models/test_parallel_tool_calling_integration.py +++ /dev/null @@ -1,327 +0,0 @@ -#!/usr/bin/env python3 -""" -Integration test for parallel tool calling feature. - -This script tests parallel tool calling with actual OCI GenAI API calls. 
- -Setup: - export OCI_COMPARTMENT_ID= - export OCI_GENAI_ENDPOINT= # optional - export OCI_CONFIG_PROFILE= # optional - export OCI_AUTH_TYPE= # optional - -Run with: - python test_parallel_tool_calling_integration.py -""" - -import logging -import os -import sys -import time - -from langchain_core.messages import HumanMessage - -from langchain_oci.chat_models import ChatOCIGenAI - -# Configure logging -logging.basicConfig(level=logging.INFO, format="%(message)s") - - -def get_weather(city: str, unit: str = "fahrenheit") -> str: - """Get the current weather in a given location.""" - # Simulate API delay - time.sleep(0.5) - return f"Weather in {city}: Sunny, 72°{unit[0].upper()}" - - -def calculate_tip(amount: float, percent: float = 15.0) -> float: - """Calculate tip amount.""" - # Simulate API delay - time.sleep(0.5) - return round(amount * (percent / 100), 2) - - -def get_population(city: str) -> int: - """Get the population of a city.""" - # Simulate API delay - time.sleep(0.5) - populations = { - "tokyo": 14000000, - "new york": 8000000, - "london": 9000000, - "paris": 2000000, - "chicago": 2700000, - "los angeles": 4000000, - } - return populations.get(city.lower(), 1000000) - - -def test_parallel_tool_calling_enabled(): - """Test parallel tool calling with parallel_tool_calls=True in bind_tools.""" - logging.info("\n" + "=" * 80) - logging.info("TEST 1: Parallel Tool Calling ENABLED (via bind_tools)") - logging.info("=" * 80) - - chat = ChatOCIGenAI( - model_id=os.environ.get( - "OCI_MODEL_ID", "meta.llama-4-maverick-17b-128e-instruct-fp8" - ), - service_endpoint=os.environ.get( - "OCI_GENAI_ENDPOINT", - "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com", - ), - compartment_id=os.environ.get("OCI_COMPARTMENT_ID"), - auth_profile=os.environ.get("OCI_CONFIG_PROFILE", "DEFAULT"), - auth_type=os.environ.get("OCI_AUTH_TYPE", "SECURITY_TOKEN"), - model_kwargs={"temperature": 0, "max_tokens": 500}, - ) - - # Bind tools with parallel_tool_calls=True - chat_with_tools = chat.bind_tools( - [get_weather, calculate_tip, get_population], parallel_tool_calls=True - ) - - # Invoke with query that needs weather info - logging.info("\nQuery: 'What's the weather in New York City?'") - - start_time = time.time() - response = chat_with_tools.invoke( - [HumanMessage(content="What's the weather in New York City?")] - ) - elapsed_time = time.time() - start_time - - logging.info(f"\nResponse time: {elapsed_time:.2f}s") - content = response.content[:200] if response.content else "(empty)" - logging.info(f"Response content: {content}...") - # AIMessage has tool_calls attribute at runtime - tool_calls = getattr(response, "tool_calls", []) - logging.info(f"Tool calls count: {len(tool_calls)}") - - if tool_calls: - logging.info("\nTool calls:") - for i, tc in enumerate(tool_calls, 1): - logging.info(f" {i}. 
{tc['name']}({tc['args']})") - else: - logging.info("\n⚠️ No tool calls in response.tool_calls") - logging.info(f"Additional kwargs: {response.additional_kwargs.keys()}") - - # Verify we got tool calls - count = len(tool_calls) - assert count >= 1, f"Should have at least one tool call, got {count}" - - # Verify parallel_tool_calls was set - logging.info("\n✓ TEST 1 PASSED: Parallel tool calling enabled and working") - return elapsed_time - - -def test_parallel_tool_calling_disabled(): - """Test tool calling with parallel_tool_calls=False (sequential).""" - logging.info("\n" + "=" * 80) - logging.info("TEST 2: Parallel Tool Calling DISABLED (Sequential)") - logging.info("=" * 80) - - chat = ChatOCIGenAI( - model_id=os.environ.get( - "OCI_MODEL_ID", "meta.llama-4-maverick-17b-128e-instruct-fp8" - ), - service_endpoint=os.environ.get( - "OCI_GENAI_ENDPOINT", - "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com", - ), - compartment_id=os.environ.get("OCI_COMPARTMENT_ID"), - auth_profile=os.environ.get("OCI_CONFIG_PROFILE", "DEFAULT"), - auth_type=os.environ.get("OCI_AUTH_TYPE", "SECURITY_TOKEN"), - model_kwargs={"temperature": 0, "max_tokens": 500}, - ) - - # Bind tools without parallel_tool_calls (defaults to sequential) - chat_with_tools = chat.bind_tools([get_weather, calculate_tip, get_population]) - - # Same query as test 1 - logging.info("\nQuery: 'What's the weather in New York City?'") - - start_time = time.time() - response = chat_with_tools.invoke( - [HumanMessage(content="What's the weather in New York City?")] - ) - elapsed_time = time.time() - start_time - - logging.info(f"\nResponse time: {elapsed_time:.2f}s") - content = response.content[:200] if response.content else "(empty)" - logging.info(f"Response content: {content}...") - # AIMessage has tool_calls attribute at runtime - tool_calls = getattr(response, "tool_calls", []) - logging.info(f"Tool calls count: {len(tool_calls)}") - - if tool_calls: - logging.info("\nTool calls:") - for i, tc in enumerate(tool_calls, 1): - logging.info(f" {i}. 
{tc['name']}({tc['args']})") - - # Verify we got tool calls - count = len(tool_calls) - assert count >= 1, f"Should have at least one tool call, got {count}" - - logging.info("\n✓ TEST 2 PASSED: Sequential tool calling works") - return elapsed_time - - -def test_multiple_tool_calls(): - """Test query that should trigger multiple tool calls.""" - logging.info("\n" + "=" * 80) - logging.info("TEST 3: Multiple Tool Calls Query") - logging.info("=" * 80) - - chat = ChatOCIGenAI( - model_id=os.environ.get( - "OCI_MODEL_ID", "meta.llama-4-maverick-17b-128e-instruct-fp8" - ), - service_endpoint=os.environ.get( - "OCI_GENAI_ENDPOINT", - "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com", - ), - compartment_id=os.environ.get("OCI_COMPARTMENT_ID"), - auth_profile=os.environ.get("OCI_CONFIG_PROFILE", "DEFAULT"), - auth_type=os.environ.get("OCI_AUTH_TYPE", "SECURITY_TOKEN"), - model_kwargs={"temperature": 0, "max_tokens": 500}, - ) - - # Bind tools with parallel_tool_calls=True - chat_with_tools = chat.bind_tools( - [get_weather, get_population], parallel_tool_calls=True - ) - - logging.info("\nQuery: 'What's the weather and population of Tokyo?'") - - response = chat_with_tools.invoke( - [HumanMessage(content="What's the weather and population of Tokyo?")] - ) - - logging.info(f"\nResponse content: {response.content}") - # AIMessage has tool_calls attribute at runtime - tool_calls = getattr(response, "tool_calls", []) - logging.info(f"Tool calls count: {len(tool_calls)}") - - if tool_calls: - logging.info("\nTool calls:") - for i, tc in enumerate(tool_calls, 1): - logging.info(f" {i}. {tc['name']}({tc['args']})") - - logging.info("\n✓ TEST 3 PASSED: Multiple tool calls query works") - - -def test_cohere_model_error(): - """Test that Cohere models raise an error with parallel_tool_calls.""" - logging.info("\n" + "=" * 80) - logging.info("TEST 4: Cohere Model Error Handling") - logging.info("=" * 80) - - chat = ChatOCIGenAI( - model_id="cohere.command-r-plus", - service_endpoint=os.environ.get( - "OCI_GENAI_ENDPOINT", - "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com", - ), - compartment_id=os.environ.get("OCI_COMPARTMENT_ID"), - auth_profile=os.environ.get("OCI_CONFIG_PROFILE", "DEFAULT"), - auth_type=os.environ.get("OCI_AUTH_TYPE", "SECURITY_TOKEN"), - ) - - logging.info("\nAttempting to use parallel_tool_calls with Cohere model...") - - # Try to enable parallel tool calls with Cohere (should fail at bind_tools) - try: - chat.bind_tools([get_weather], parallel_tool_calls=True) - logging.info("❌ TEST FAILED: Should have raised ValueError") - return False - except ValueError as e: - if "not supported" in str(e): - logging.info(f"\n✓ Correctly raised error: {e}") - logging.info("\n✓ TEST 4 PASSED: Cohere validation works") - return True - else: - logging.info(f"❌ Wrong error: {e}") - return False - - -def main(): - logging.info("=" * 80) - logging.info("PARALLEL TOOL CALLING INTEGRATION TESTS") - logging.info("=" * 80) - - # Check required env vars - if not os.environ.get("OCI_COMPARTMENT_ID"): - logging.info("\n❌ ERROR: OCI_COMPARTMENT_ID environment variable not set") - logging.info("Please set: export OCI_COMPARTMENT_ID=") - sys.exit(1) - - logging.info("\nUsing configuration:") - model_id = os.environ.get( - "OCI_MODEL_ID", "meta.llama-4-maverick-17b-128e-instruct-fp8" - ) - logging.info(f" Model: {model_id}") - endpoint = os.environ.get("OCI_GENAI_ENDPOINT", "default") - logging.info(f" Endpoint: {endpoint}") - profile = os.environ.get("OCI_CONFIG_PROFILE", 
"DEFAULT") - logging.info(f" Profile: {profile}") - compartment_id = os.environ.get("OCI_COMPARTMENT_ID", "") - logging.info(f" Compartment: {compartment_id[:25]}...") - - results = [] - - try: - # Run tests - parallel_time = test_parallel_tool_calling_enabled() - results.append(("Parallel Enabled", True)) - - sequential_time = test_parallel_tool_calling_disabled() - results.append(("Sequential (Disabled)", True)) - - test_multiple_tool_calls() - results.append(("Multiple Tool Calls", True)) - - cohere_test = test_cohere_model_error() - results.append(("Cohere Validation", cohere_test)) - - # Print summary - logging.info("\n" + "=" * 80) - logging.info("TEST SUMMARY") - logging.info("=" * 80) - - for test_name, passed in results: - status = "✓ PASSED" if passed else "✗ FAILED" - logging.info(f"{status}: {test_name}") - - passed_count = sum(1 for _, passed in results if passed) - total_count = len(results) - - logging.info(f"\nTotal: {passed_count}/{total_count} tests passed") - - # Performance comparison - if parallel_time and sequential_time: - logging.info("\n" + "=" * 80) - logging.info("PERFORMANCE COMPARISON") - logging.info("=" * 80) - logging.info(f"Parallel: {parallel_time:.2f}s") - logging.info(f"Sequential: {sequential_time:.2f}s") - if sequential_time > 0: - speedup = sequential_time / parallel_time - logging.info(f"Speedup: {speedup:.2f}×") - - if passed_count == total_count: - logging.info("\n🎉 ALL TESTS PASSED!") - return 0 - else: - logging.info(f"\n⚠️ {total_count - passed_count} test(s) failed") - return 1 - - except Exception as e: - logging.info(f"\n❌ ERROR: {e}") - import traceback - - traceback.print_exc() - return 1 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/libs/oci/tests/integration_tests/chat_models/test_tool_calling.py b/libs/oci/tests/integration_tests/chat_models/test_tool_calling.py index cb65206..08d0632 100644 --- a/libs/oci/tests/integration_tests/chat_models/test_tool_calling.py +++ b/libs/oci/tests/integration_tests/chat_models/test_tool_calling.py @@ -427,8 +427,9 @@ def should_continue(state: MessagesState): comprehensive analysis.""" # Invoke agent with a diagnostic scenario + # Langgraph invoke signature is generic; passing dict is valid at runtime result = agent.invoke( - { + { # type: ignore[arg-type] "messages": [ SystemMessage(content=system_prompt), HumanMessage( diff --git a/libs/oci/tests/unit_tests/chat_models/test_oci_data_science.py b/libs/oci/tests/unit_tests/chat_models/test_oci_data_science.py index e2d3b8a..00c0d3c 100644 --- a/libs/oci/tests/unit_tests/chat_models/test_oci_data_science.py +++ b/libs/oci/tests/unit_tests/chat_models/test_oci_data_science.py @@ -152,9 +152,10 @@ def test_stream_vllm(*args: Any) -> None: if output is None: output = chunk else: - output = output + chunk + output += chunk count += 1 - assert count == 5 + # LangChain 1.x adds a final chunk with chunk_position='last', so we get 6 chunks + assert count >= 5 assert output is not None assert str(output.content).strip() == CONST_COMPLETION diff --git a/libs/oci/tests/unit_tests/chat_models/test_oci_generative_ai_responses_api.py b/libs/oci/tests/unit_tests/chat_models/test_oci_generative_ai_responses_api.py index 62d63ce..2e95db6 100644 --- a/libs/oci/tests/unit_tests/chat_models/test_oci_generative_ai_responses_api.py +++ b/libs/oci/tests/unit_tests/chat_models/test_oci_generative_ai_responses_api.py @@ -385,7 +385,7 @@ def call_model(state: AgentState): # ---- Act ---- app = workflow.compile() input_message = HumanMessage(content="What is the 
capital of France?") - result = app.invoke({"messages": [input_message]}) + result = app.invoke({"messages": [input_message]}) # type: ignore[arg-type] # ---- Assert ---- content = result["messages"][1].content[0]