diff --git a/pyproject.toml b/pyproject.toml
index b72d6d16..c965a83f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -45,14 +45,14 @@ langgraph = [
 test = [
     "black[jupyter]==25.12.0",
     "isort==7.0.0",
-    "langgraph==1.0.4",
+    "langgraph==1.0.5",
     "mypy==1.19.1",
     "pytest-asyncio==0.26.0",
     "pytest==8.4.2",
     "pytest-cov==7.0.0",
     "pytest-depends==1.0.1",
     "Pillow==12.0.0",
-    "langchain-tests==1.1.0"
+    "langchain-tests==1.1.1"
 ]
diff --git a/requirements.txt b/requirements.txt
index 7e7f921c..21530f18 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,5 +2,5 @@ google-cloud-alloydb-connector[asyncpg]==1.11.0
 google-cloud-storage==3.7.0
 numpy==2.3.5; python_version >= "3.11"
 numpy==2.2.6; python_version == "3.10"
-langgraph==1.0.4
+langgraph==1.0.5
 langchain-postgres==0.0.16
diff --git a/samples/langchain_on_vertexai/prebuilt_langchain_agent_template.py b/samples/langchain_on_vertexai/prebuilt_langchain_agent_template.py
index f4a14675..f51d03e2 100644
--- a/samples/langchain_on_vertexai/prebuilt_langchain_agent_template.py
+++ b/samples/langchain_on_vertexai/prebuilt_langchain_agent_template.py
@@ -27,7 +27,7 @@
 )
 from langchain_core.documents import Document
 from langchain_google_vertexai import VertexAIEmbeddings
-from vertexai.preview import reasoning_engines  # type: ignore
+from vertexai import agent_engines

 from langchain_google_alloydb_pg import AlloyDBEngine, AlloyDBVectorStore
@@ -75,7 +75,7 @@ def similarity_search(query: str) -> list[Document]:

 # Uncomment to test locally
-# app = reasoning_engines.LangchainAgent(
+# app = agent_engines.LangchainAgent(
 #     model="gemini-2.0-flash-001",
 #     tools=[similarity_search],
 #     model_kwargs={
@@ -87,22 +87,24 @@ def similarity_search(query: str) -> list[Document]:

 # Initialize VertexAI
 vertexai.init(project=PROJECT_ID, location="us-central1", staging_bucket=STAGING_BUCKET)
+client = vertexai.Client(project=PROJECT_ID, location="us-central1")

 # Deploy to VertexAI
 DISPLAY_NAME = os.getenv("DISPLAY_NAME") or "PrebuiltAgent"
-remote_app = reasoning_engines.ReasoningEngine.create(
-    reasoning_engines.LangchainAgent(
+remote_app = client.agent_engines.create(
+    agent=agent_engines.LangchainAgent(
         model="gemini-2.0-flash-001",
         tools=[similarity_search],  # type: ignore[list-item]
         model_kwargs={
            "temperature": 0.1,
         },
     ),
-    requirements="requirements.txt",
-    display_name="PrebuiltAgent",
-    sys_version="3.11",
-    extra_packages=["config.py"],
+    config={
+        "requirements": "requirements.txt",
+        "extra_packages": ["config.py"],
+        "display_name": "PrebuiltAgent"
+    }
 )  # type: ignore[arg-type]

 print(remote_app.query(input="movies about engineers"))  # type: ignore[attr-defined]
diff --git a/samples/langchain_on_vertexai/retriever_agent_with_history_template.py b/samples/langchain_on_vertexai/retriever_agent_with_history_template.py
index e2eea8d6..83fa2ebc 100644
--- a/samples/langchain_on_vertexai/retriever_agent_with_history_template.py
+++ b/samples/langchain_on_vertexai/retriever_agent_with_history_template.py
@@ -135,16 +135,19 @@ def set_up(self):
             history_messages_key="chat_history",
         )

-    def query(self, input: str, session_id: str, **kwargs: Any) -> str:
+    def query(self, **kwargs: Any) -> str:
         """Query the application.

         Args:
-            input: The user query.
-            session_id: The user's session id.
+            **kwargs: Keyword arguments. Expects "input" and "session_id".

         Returns:
             The LLM response dictionary.
         """
+        input = kwargs.get("input")
+        session_id = kwargs.get("session_id")
+        if not input or not session_id:
+            return "Please provide an input and a session_id."
         response = self.agent.invoke(
             {"input": input},
             config={"configurable": {"session_id": session_id}},
diff --git a/samples/langchain_on_vertexai/retriever_chain_template.py b/samples/langchain_on_vertexai/retriever_chain_template.py
index 905c41dd..a182a6e6 100644
--- a/samples/langchain_on_vertexai/retriever_chain_template.py
+++ b/samples/langchain_on_vertexai/retriever_chain_template.py
@@ -109,17 +109,19 @@ def set_up(self):
         # an LLM to generate a response
         self.chain = create_retrieval_chain(retriever, combine_docs_chain)

-    def query(self, input: str, **kwargs: Any) -> str:
+    def query(self, **kwargs: Any) -> str:
         """Query the application.

         Args:
-            input: The user query.
-            **kwargs: Additional arguments for Protocol compliance.
+            **kwargs: Keyword arguments. Expects a key "input" with the user query.

         Returns:
             The LLM response dictionary.
         """
         # Define the runtime logic that serves user queries
+        input = kwargs.get("input")
+        if not input:
+            return "Please provide an input."
         response = self.chain.invoke({"input": input})
         return response["answer"]
diff --git a/samples/requirements.txt b/samples/requirements.txt
index 55ca38fb..9ceb6f61 100644
--- a/samples/requirements.txt
+++ b/samples/requirements.txt
@@ -1,4 +1,4 @@
-google-cloud-aiplatform[reasoningengine,langchain]==1.97.0
+google-cloud-aiplatform[reasoningengine,langchain]==1.132.0
 google-cloud-resource-manager==1.15.0
 langchain-community==0.3.31
 langchain-google-alloydb-pg==0.13.0
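
Note: `prebuilt_langchain_agent_template.py` moves deployment off the preview `vertexai.preview.reasoning_engines` module and onto the `vertexai.Client` Agent Engine surface, with the deployment options (`requirements`, `extra_packages`, `display_name`) consolidated into a single `config` dict; the old `sys_version` argument has no counterpart in the new call. A minimal sketch of the new pattern in isolation, with placeholder project values:

```python
import vertexai
from vertexai import agent_engines

# Placeholder values; substitute your own project, region, and staging bucket.
PROJECT_ID = "my-project"
LOCATION = "us-central1"
STAGING_BUCKET = "gs://my-staging-bucket"

vertexai.init(project=PROJECT_ID, location=LOCATION, staging_bucket=STAGING_BUCKET)
client = vertexai.Client(project=PROJECT_ID, location=LOCATION)

# Deployment options travel in one `config` dict instead of separate
# keyword arguments (requirements=..., display_name=..., sys_version=...).
remote_app = client.agent_engines.create(
    agent=agent_engines.LangchainAgent(model="gemini-2.0-flash-001"),
    config={
        "requirements": "requirements.txt",
        "display_name": "PrebuiltAgent",
    },
)
print(remote_app.query(input="movies about engineers"))
```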
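Similarly, both retriever templates now expose a keyword-only `query(**kwargs)` that validates its expected keys before invoking the chain, so callers must pass every field by name. A short sketch of exercising the history template through a deployed handle (`remote_app` here is hypothetical):

```python
# Hypothetical handle to a deployed retriever-with-history template; both
# "input" and "session_id" are required under the new keyword-only contract.
response = remote_app.query(input="movies about engineers", session_id="session-1")
print(response)

# Omitting a required key no longer raises a TypeError; the template
# returns its validation message instead.
print(remote_app.query(input="movies about engineers"))
# -> "Please provide an input and a session_id."
```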