diff --git a/examples/deepseek/.env.example b/examples/deepseek/.env.example
new file mode 100644
index 00000000..a3994882
--- /dev/null
+++ b/examples/deepseek/.env.example
@@ -0,0 +1 @@
+DEEPSEEK_API_KEY=your_deepseek_api_key_here
diff --git a/examples/deepseek/README.md b/examples/deepseek/README.md
new file mode 100644
index 00000000..77a956fe
--- /dev/null
+++ b/examples/deepseek/README.md
@@ -0,0 +1,42 @@
+# Memori + DeepSeek Example
+
+This example demonstrates how to use Memori with DeepSeek.
+
+## Setup
+
+1. Install dependencies:
+```bash
+pip install -r requirements.txt
+```
+
+2. Set up your DeepSeek API key:
+```bash
+export DEEPSEEK_API_KEY="your-api-key"
+```
+
+Or create a `.env` file:
+```bash
+cp .env.example .env
+# Edit .env and add your API key
+```
+
+3. Run the example:
+```bash
+python main.py
+```
+
+## How It Works
+
+This example shows Memori's memory capabilities with DeepSeek:
+
+1. **First conversation**: Establishes facts (the project being built, a code-style preference)
+2. **Second conversation**: DeepSeek automatically recalls the project
+3. **Third conversation**: DeepSeek remembers the code-style preference
+
+Memori automatically captures and stores conversation context, making it available for future interactions.
+
+## Requirements
+
+- Python 3.10+
+- DeepSeek API key
+- Memori Python SDK
diff --git a/examples/deepseek/main.py b/examples/deepseek/main.py
new file mode 100644
index 00000000..6f97b7d8
--- /dev/null
+++ b/examples/deepseek/main.py
@@ -0,0 +1,60 @@
+# Quickstart: Memori + DeepSeek + SQLite
+
+# Demonstrates how Memori adds memory across conversations with DeepSeek.
+
+import os
+
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Note: DeepSeek exposes an OpenAI-compatible API
+from openai import OpenAI
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+
+from memori import Memori
+
+# Set up the DeepSeek client (OpenAI-compatible API)
+client = OpenAI(
+    api_key=os.getenv("DEEPSEEK_API_KEY", ""),
+    base_url="https://api.deepseek.com/v1",
+)
+
+# Set up SQLite
+engine = create_engine("sqlite:///deepseek_memori.db")
+Session = sessionmaker(bind=engine)
+
+# Set up Memori with DeepSeek (uses the OpenAI-compatible API)
+mem = Memori(conn=Session).llm.register(client)
+mem.attribution(entity_id="user-123", process_id="deepseek-app")
+mem.config.storage.build()
+
+if __name__ == "__main__":
+    # First conversation - establish facts
+    prompt = "I'm working on an open-source Python project, and I prefer a concise code style."
+    print(f"You: {prompt}")
+    response1 = client.chat.completions.create(
+        model="deepseek-chat",
+        messages=[{"role": "user", "content": prompt}],
+    )
+    print(f"AI: {response1.choices[0].message.content}\n")
+
+    # Second conversation - Memori recalls context automatically
+    prompt = "What type of project did I mention I was working on?"
+    print(f"You: {prompt}")
+    response2 = client.chat.completions.create(
+        model="deepseek-chat",
+        messages=[{"role": "user", "content": prompt}],
+    )
+    print(f"AI: {response2.choices[0].message.content}\n")
+
+    # Third conversation - context is maintained
+    prompt = "What are my requirements for code style?"
+    print(f"You: {prompt}")
+    response3 = client.chat.completions.create(
+        model="deepseek-chat",
+        messages=[{"role": "user", "content": prompt}],
+    )
+    print(f"AI: {response3.choices[0].message.content}")
diff --git a/examples/deepseek/pyproject.toml b/examples/deepseek/pyproject.toml
new file mode 100644
index 00000000..363b1d0f
--- /dev/null
+++ b/examples/deepseek/pyproject.toml
@@ -0,0 +1,9 @@
+[project]
+name = "deepseek-example"
+version = "0.1.0"
+description = "Memori + DeepSeek example"
+dependencies = [
+    "memori",
+    "openai>=1.0.0",
"sqlalchemy>=2.0.0", +] diff --git a/examples/deepseek/requirements.txt b/examples/deepseek/requirements.txt new file mode 100644 index 00000000..0c41cf0a --- /dev/null +++ b/examples/deepseek/requirements.txt @@ -0,0 +1,3 @@ +memori +openai>=1.0.0 +sqlalchemy>=2.0.0 diff --git a/memori/__init__.py b/memori/__init__.py index 3b15f627..ff13634e 100644 --- a/memori/__init__.py +++ b/memori/__init__.py @@ -24,6 +24,7 @@ from memori.llm._providers import Anthropic as LlmProviderAnthropic from memori.llm._providers import Google as LlmProviderGoogle from memori.llm._providers import LangChain as LlmProviderLangChain + from memori.llm._providers import OpenAi as LlmProviderOpenAi from memori.llm._providers import PydanticAi as LlmProviderPydanticAi from memori.llm._providers import XAi as LlmProviderXAi @@ -85,6 +86,7 @@ def __init__(self, conn: Callable[[], Any] | Any | None = None): self.llm = LlmRegistry(self) self.agno = LlmProviderAgno(self) self.anthropic = LlmProviderAnthropic(self) + self.google = LlmProviderGoogle(self) self.langchain = LlmProviderLangChain(self) self.openai = LlmProviderOpenAi(self) diff --git a/memori/llm/_clients.py b/memori/llm/_clients.py index 3a99bf79..733c113d 100644 --- a/memori/llm/_clients.py +++ b/memori/llm/_clients.py @@ -404,6 +404,8 @@ def _detect_platform(client): base_url = str(client.base_url).lower() if "nebius" in base_url: return "nebius" + if "deepseek" in base_url: + return "deepseek" return None @@ -449,6 +451,9 @@ def register(self, client, _provider=None, stream=False): return self + + + @Registry.register_client( lambda client: type(client).__module__.startswith("pydantic_ai") ) diff --git a/memori/llm/_constants.py b/memori/llm/_constants.py index 9811b5a4..50d543ea 100644 --- a/memori/llm/_constants.py +++ b/memori/llm/_constants.py @@ -14,6 +14,7 @@ AGNO_OPENAI_LLM_PROVIDER = "openai" AGNO_XAI_LLM_PROVIDER = "xai" ATHROPIC_LLM_PROVIDER = "anthropic" + GOOGLE_LLM_PROVIDER = "google" LANGCHAIN_CHATBEDROCK_LLM_PROVIDER = "chatbedrock" LANGCHAIN_CHATGOOGLEGENAI_LLM_PROVIDER = "chatgooglegenai" diff --git a/memori/llm/_providers.py b/memori/llm/_providers.py index 9fc6dafe..63cd1b35 100644 --- a/memori/llm/_providers.py +++ b/memori/llm/_providers.py @@ -20,6 +20,7 @@ from memori.llm._clients import XAi as XAiMemoriClient + class Agno(BaseProvider): def register(self, openai_chat=None, claude=None, gemini=None, xai=None): warnings.warn( @@ -51,6 +52,9 @@ def register(self, client): return self.entity + + + class Google(BaseProvider): def register(self, client): warnings.warn( diff --git a/memori/llm/_utils.py b/memori/llm/_utils.py index 603a22e8..d49f1281 100644 --- a/memori/llm/_utils.py +++ b/memori/llm/_utils.py @@ -15,6 +15,7 @@ AGNO_OPENAI_LLM_PROVIDER, AGNO_XAI_LLM_PROVIDER, ATHROPIC_LLM_PROVIDER, + GOOGLE_LLM_PROVIDER, LANGCHAIN_CHATBEDROCK_LLM_PROVIDER, LANGCHAIN_CHATGOOGLEGENAI_LLM_PROVIDER, @@ -42,6 +43,9 @@ def llm_is_bedrock(provider, title): ) + + + def llm_is_google(provider, title): return title == GOOGLE_LLM_PROVIDER or ( provider_is_langchain(provider) @@ -64,6 +68,9 @@ def agno_is_anthropic(provider, title): return provider_is_agno(provider) and title == AGNO_ANTHROPIC_LLM_PROVIDER + + + def agno_is_google(provider, title): return provider_is_agno(provider) and title == AGNO_GOOGLE_LLM_PROVIDER