Merge pull request #6 from r-carroll/add-llama
Add support for local models via Ollama
rbs333 authored Feb 28, 2025
2 parents 6de2e3e + c69fde7 commit 487bba7
Showing 13 changed files with 85 additions and 14 deletions.
14 changes: 14 additions & 0 deletions Ollama.md
@@ -0,0 +1,14 @@
# Ollama setup
1. Download and install [Ollama](https://ollama.com/)
2. Once Ollama is running on your system, run `ollama pull llama3.1`
> Currently this is a ~5GB download; it's best to download it before the workshop if you plan on using it
3. Update the `MODEL_NAME` in your `dot.env` file to `ollama`
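
Under the hood, the workshop code reads `MODEL_NAME` from the environment to decide which chat model to build. A minimal sketch, mirroring the check this commit adds to `test_setup.py`:

```python
import os

from dotenv import load_dotenv
from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI

load_dotenv()

# MODEL_NAME comes from your dot.env file ("openai" or "ollama")
if os.environ.get("MODEL_NAME") == "openai":
    llm = ChatOpenAI(model="gpt-4o")
elif os.environ.get("MODEL_NAME") == "ollama":
    llm = ChatOllama(model="llama3.1")
else:
    raise Exception("Setup failed, MODEL_NAME not defined in .env")
```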

You're now ready to begin the workshop! Head back to the [Readme.md](Readme.md)

## Restarting the workshop
Mixing Llama and OpenAI on the same Redis instance can cause unexpected behavior (for example, the two providers produce embeddings of different dimensions, so an index built with one won't match the other). If you want to switch from one to the other, it is recommended to kill and re-create the instance. To do this:
1. Run `docker ps` and take note of the ID of the running Redis container
2. `docker stop <containerId>`
3. `docker rm <containerId>`
4. Start a new instance using the command from earlier: `docker run -d --name redis -p 6379:6379 -p 8001:8001 redis/redis-stack:latest`
8 changes: 8 additions & 0 deletions Readme.md
@@ -23,6 +23,10 @@ One of the game's well known lines, "You have died of dysentery," inspired this
- [docker](https://docs.docker.com/get-started/get-docker/)
- [openai api key](https://platform.openai.com/docs/quickstart)

## (Optional) Ollama
This workshop is optimized for OpenAI models. If you prefer to run locally, however, you may do so via the experimental Ollama configuration.
* [Ollama setup instructions](Ollama.md)
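
With the Ollama configuration, the model choice flows through the graph config (see `GraphConfig` in `participant_agent/graph.py`) or falls back to the `MODEL_NAME` environment variable. A minimal sketch of invoking the compiled graph with an explicit model, assuming it is exposed as `graph`:

```python
# assumes `graph` is the compiled LangGraph app from participant_agent/graph.py
result = graph.invoke(
    {"messages": "Should we take the northern or southern trail?"},  # example question
    config={"configurable": {"model_name": "ollama"}},  # or "openai"
)
print(result["messages"][-1].content)
```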

## (Optional) helpers

- [LangSmith](https://docs.smith.langchain.com/)
@@ -241,7 +245,11 @@ In our scenario we want to be able to retrieve the time-bound information that t

### Steps:
- Open [participant_agent/utils/vector_store.py](participant_agent/utils/vector_store.py)
- Take note of how `embedding_model` is instantiated. If you are using Ollama, switch this to the appropriate embedding class, passing `llama3.1` as the `model` parameter
> [OpenAI embeddings](https://python.langchain.com/docs/integrations/text_embedding/openai/) \
[Ollama embeddings](https://python.langchain.com/docs/integrations/text_embedding/ollama/)
- Where `vector_store = None`, update it to `vector_store = RedisVectorStore.from_documents(<docs>, <embedding_model>, config=<config>)` with the appropriate variables.

- Open [participant_agent/utils/tools.py](participant_agent/utils/tools.py)
- Uncomment code for retrieval tool
- Update `create_retriever_tool` to take the correct params, e.g. `create_retriever_tool(vector_store.as_retriever(), "get_directions", "meaningful doc string")`
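
For orientation, a rough sketch of what the completed pieces might look like — variable names follow the participant files, while the Redis URL default and the tool description here are illustrative assumptions rather than the official solution:

```python
import os

from langchain.tools.retriever import create_retriever_tool
from langchain_core.documents import Document
from langchain_ollama import OllamaEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain_redis import RedisConfig, RedisVectorStore

# participant_agent/utils/vector_store.py (sketch)
REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379")  # assumed default
config = RedisConfig(index_name="oregon_trail", redis_url=REDIS_URL)

doc = Document(
    page_content="the northern trail ... take the southern trail"  # abbreviated from the workshop doc
)

embedding_model = OpenAIEmbeddings()
# embedding_model = OllamaEmbeddings(model="llama3.1")  # if using Ollama

# replaces `vector_store = None`
vector_store = RedisVectorStore.from_documents([doc], embedding_model, config=config)

# participant_agent/utils/tools.py (sketch)
retriever_tool = create_retriever_tool(
    vector_store.as_retriever(),
    "get_directions",
    "Search stored notes about which trail is safe to take",  # placeholder description
)
```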
3 changes: 2 additions & 1 deletion dot.env
@@ -3,4 +3,5 @@ OPENAI_API_KEY=openai_key
LANGCHAIN_TRACING_V2=
LANGCHAIN_ENDPOINT=
LANGCHAIN_API_KEY=
LANGCHAIN_PROJECT=
LANGCHAIN_PROJECT=
MODEL_NAME=openai
2 changes: 1 addition & 1 deletion example_agent/ex_graph.py
@@ -11,7 +11,7 @@

# Define the config
class GraphConfig(TypedDict):
model_name: Literal["anthropic", "openai"]
model_name: Literal["anthropic", "openai", "ollama"]


# Define the function that determines whether to continue or not
14 changes: 12 additions & 2 deletions example_agent/utils/ex_nodes.py
@@ -1,18 +1,26 @@
import os
from functools import lru_cache

from dotenv import load_dotenv
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from langchain_ollama import ChatOllama
from langgraph.prebuilt import ToolNode

from example_agent.utils.ex_tools import tools

from .ex_state import AgentState, MultipleChoiceResponse

load_dotenv()

ENVIRON_MODEL_NAME = os.environ.get("MODEL_NAME")

@lru_cache(maxsize=4)
def _get_tool_model(model_name: str):
if model_name == "openai":
model = ChatOpenAI(temperature=0, model_name="gpt-4o")
elif model_name == "ollama":
model = ChatOllama(temperature=0, model="llama3.1", num_ctx=4096)
else:
raise ValueError(f"Unsupported model type: {model_name}")

@@ -24,6 +32,8 @@ def _get_tool_model(model_name: str):
def _get_response_model(model_name: str):
if model_name == "openai":
model = ChatOpenAI(temperature=0, model_name="gpt-4o")
elif model_name == "ollama":
model = ChatOllama(temperature=0, model="llama3.1", num_ctx=4096)
else:
raise ValueError(f"Unsupported model type: {model_name}")

@@ -36,7 +46,7 @@ def multi_choice_structured(state: AgentState, config):
# We call the model with structured output in order to return the same format to the user every time
# state['messages'][-2] is the last ToolMessage in the convo, which we convert to a HumanMessage for the model to use
# We could also pass the entire chat history, but this saves tokens since all we care to structure is the output of the tool
model_name = config.get("configurable", {}).get("model_name", "openai")
model_name = config.get("configurable", {}).get("model_name", ENVIRON_MODEL_NAME)

response = _get_response_model(model_name).invoke(
[
@@ -75,7 +85,7 @@ def call_tool_model(state: AgentState, config):
messages = [{"role": "system", "content": system_prompt}] + state["messages"]

# Get from LangGraph config
model_name = config.get("configurable", {}).get("model_name", "openai")
model_name = config.get("configurable", {}).get("model_name", ENVIRON_MODEL_NAME)

# Get our model that binds our tools
model = _get_tool_model(model_name)
16 changes: 14 additions & 2 deletions example_agent/utils/ex_vector_store.py
@@ -3,6 +3,8 @@
from dotenv import load_dotenv
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
from langchain_ollama import OllamaEmbeddings
from redis import Redis
from langchain_redis import RedisConfig, RedisVectorStore

load_dotenv()
@@ -11,20 +13,30 @@
INDEX_NAME = os.environ.get("VECTOR_INDEX_NAME", "oregon_trail")

config = RedisConfig(index_name=INDEX_NAME, redis_url=REDIS_URL)
redis_client = Redis.from_url(REDIS_URL)

doc = Document(
page_content="the northern trail, of the blue mountains, was destroyed by a flood and is no longer safe to traverse. It is recommended to take the southern trail although it is longer."
)

# TODO: participant can change to whatever desired model
embedding_model = OpenAIEmbeddings()
# embedding_model = OllamaEmbeddings(model="llama3.1")

def _clean_existing(prefix):
for key in redis_client.scan_iter(f"{prefix}:*"):
redis_client.delete(key)

def get_vector_store():
try:
config.from_existing = True
vector_store = RedisVectorStore(OpenAIEmbeddings(), config=config)
vector_store = RedisVectorStore(embedding_model, config=config)
except:
print("Init vector store with document")
print("Clean any existing data in index")
_clean_existing(config.index_name)
config.from_existing = False
vector_store = RedisVectorStore.from_documents(
[doc], OpenAIEmbeddings(), config=config
[doc], embedding_model, config=config
)
return vector_store
2 changes: 1 addition & 1 deletion participant_agent/graph.py
@@ -14,7 +14,7 @@

# The graph config can be updated with LangGraph Studio which can be helpful
class GraphConfig(TypedDict):
model_name: Literal["openai"] # could add more LLM providers here
model_name: Literal["openai", "ollama"] # could add more LLM providers here


# Define the function that determines whether to continue or not
14 changes: 11 additions & 3 deletions participant_agent/utils/nodes.py
@@ -1,13 +1,18 @@
import os
from functools import lru_cache

from dotenv import load_dotenv
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from langchain_ollama import ChatOllama
from langgraph.prebuilt import ToolNode

from participant_agent.utils.tools import tools

from .state import AgentState, MultipleChoiceResponse

load_dotenv()


# need to use this in call_tool_model function
@lru_cache(maxsize=4)
@@ -17,6 +22,8 @@ def _get_tool_model(model_name: str):
"""
if model_name == "openai":
model = ChatOpenAI(temperature=0, model_name="gpt-4o")
elif model_name == "ollama":
model = ChatOllama(temperature=0, model="llama3.1", num_ctx=4096)
else:
raise ValueError(f"Unsupported model type: {model_name}")

@@ -32,6 +39,8 @@ def _get_tool_model(model_name: str):
def _get_response_model(model_name: str):
if model_name == "openai":
model = ChatOpenAI(temperature=0, model_name="gpt-4o")
elif model_name == "ollama":
model = ChatOllama(temperature=0, model="llama3.1", num_ctx=4096)
else:
raise ValueError(f"Unsupported model type: {model_name}")

@@ -45,7 +54,7 @@ def multi_choice_structured(state: AgentState, config):
# We call the model with structured output in order to return the same format to the user every time
# state['messages'][-2] is the last ToolMessage in the convo, which we convert to a HumanMessage for the model to use
# We could also pass the entire chat history, but this saves tokens since all we care to structure is the output of the tool
model_name = config.get("configurable", {}).get("model_name", "openai")
model_name = config.get("configurable", {}).get("model_name", os.environ.get("MODEL_NAME"))

response = _get_response_model(model_name).invoke(
[
@@ -84,8 +93,7 @@ def call_tool_model(state: AgentState, config):
messages = [{"role": "system", "content": system_prompt}] + state["messages"]

# Get from LangGraph config
model_name = config.get("configurable", {}).get("model_name", "openai")

model_name = config.get("configurable", {}).get("model_name", os.environ.get("MODEL_NAME"))
# Get our model that binds our tools
model = _get_tool_model(model_name)

12 changes: 11 additions & 1 deletion participant_agent/utils/vector_store.py
@@ -3,6 +3,7 @@
from dotenv import load_dotenv
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
from langchain_ollama import OllamaEmbeddings
from langchain_redis import RedisConfig, RedisVectorStore

load_dotenv()
@@ -11,18 +12,27 @@
INDEX_NAME = os.environ.get("VECTOR_INDEX_NAME", "oregon_trail")

config = RedisConfig(index_name=INDEX_NAME, redis_url=REDIS_URL)
redis_client = Redis.from_url(REDIS_URL)

doc = Document(
page_content="the northern trail, of the blue mountains, was destroyed by a flood and is no longer safe to traverse. It is recommended to take the southern trail although it is longer."
)

# TODO: participant can change to whatever desired model
embedding_model = OpenAIEmbeddings()

def _clean_existing(prefix):
for key in redis_client.scan_iter(f"{prefix}:*"):
redis_client.delete(key)

def get_vector_store():
try:
config.from_existing = True
vector_store = RedisVectorStore(OpenAIEmbeddings(), config=config)
vector_store = RedisVectorStore(embedding_model, config=config)
except:
print("Init vector store with document")
print("Clean any existing data in index")
_clean_existing(config.index_name)
config.from_existing = False

# TODO: define vector store
1 change: 1 addition & 0 deletions requirements.txt
@@ -1,6 +1,7 @@
langgraph==0.2.56
langchain==0.3.13
langchain-openai==0.2.3
langchain-ollama==0.2.3
langchain-redis==0.1.1
pydantic==2.9.2
python-dotenv==1.0.1
2 changes: 1 addition & 1 deletion test_example_oregon_trail.py
@@ -35,7 +35,7 @@ def test_1_wagon_leader(app):

res = graph.invoke({"messages": scenario["question"]})

assert res["messages"][-1].content == scenario["answer"]
assert scenario["answer"] in res["messages"][-1].content

print(f"\n response: {scenario['answer']}")

2 changes: 1 addition & 1 deletion test_participant_oregon_trail.py
@@ -33,7 +33,7 @@ def test_1_wagon_leader(app):

res = graph.invoke({"messages": scenario["question"]})

assert res["messages"][-1].content == scenario["answer"]
assert scenario["answer"] in res["messages"][-1].content

print(f"\n response: {scenario['answer']}")

9 changes: 8 additions & 1 deletion test_setup.py
@@ -2,11 +2,18 @@

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_ollama import ChatOllama
from redis import Redis

load_dotenv()

llm = ChatOpenAI(model="gpt-4o")
if os.environ.get("MODEL_NAME") == "openai":
llm = ChatOpenAI(model="gpt-4o")
elif os.environ.get("MODEL_NAME") == "ollama":
llm = ChatOllama(model="llama3.1")
else:
raise Exception("Setup failed, MODEL_NAME not defined in .env")

client = Redis.from_url(os.environ.get("REDIS_URL"))

