Skip to content
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
157 changes: 123 additions & 34 deletions docs/getting-started/quickstart.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,52 +41,137 @@ export OPENAI_API_KEY="your-api-key-here"
Create a new Python file `quickstart.py` and add the following code:

```python
import os
import sqlite3
"""

from memori import Memori
from openai import OpenAI
This Quickstart shows chat working and
the underlying storage provided by Memori.

This script performs the following steps:

def get_sqlite_connection():
    """Open and return a new connection to the local ``memori.db`` SQLite file."""
    db_path = "memori.db"
    return sqlite3.connect(db_path)
- Initializes a local SQLite database
- Adds a memory ("User likes Fried Rice")
- Immediately verifies it via Vector Search (mem.recall)
- Verifies it via SQL Inspection (Raw Logs)
- Uses the memory in a follow-up question (Recall Loop)

"""

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
import os

memori = Memori(conn=get_sqlite_connection).llm.register(client)
memori.attribution(entity_id="123456", process_id="test-ai-agent")
memori.config.storage.build()
import sqlite3
from dotenv import load_dotenv
from memori import Memori
from openai import OpenAI
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

response = client.chat.completions.create(
model="gpt-4.1-mini",
messages=[
{"role": "user", "content": "My favorite color is blue."}
]
)
print(response.choices[0].message.content + "\n")

# Advanced Augmentation runs asynchronously to efficiently
# create memories. For this example, a short lived command
# line program, we need to wait for it to finish.
# Setup
# =====

memori.augmentation.wait()
load_dotenv()
if not os.getenv("OPENAI_API_KEY"):
raise ValueError("Missing OPENAI_API_KEY in .env")

# Memori stored that your favorite color is blue in SQLite.
# Now reset everything so there's no prior context.
# 1. Initialize Database (Local Mode)
# -----------------------------------
DB_FILE = "memori_quickstart.db"
# Clean slate for the demo
if os.path.exists(DB_FILE):
os.remove(DB_FILE)

# 2. Configure Memori with SQLAlchemy
# -----------------------------------
engine = create_engine(f"sqlite:///{DB_FILE}")
session = sessionmaker(bind=engine)
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

memori = Memori(conn=get_sqlite_connection).llm.register(client)
memori.attribution(entity_id="123456", process_id="test-ai-agent")

response = client.chat.completions.create(
model="gpt-4.1-mini",
messages=[
{"role": "user", "content": "What's my favorite color?"}
]
)
print(response.choices[0].message.content + "\n")
# 3. Initialize Memory
# --------------------
# Wrap the OpenAI client so that Memori can intercept
# requests and responses to store memories.
print(f"[ MEMORI ]: Initializing agent (DB: {DB_FILE})...")
mem = Memori(conn=session).llm.register(client)

# Attribution tracks WHO said WHAT (Critical for Enterprise Context)
mem.attribution(entity_id="demo_user", process_id="quickstart_script")
mem.config.storage.build()


# Inspection tools
# ================

def inspect_memories(subject):
    """Print everything Memori currently knows about *subject*.

    Runs two independent checks, each wrapped in its own try/except so
    that a failure in one does not hide the result of the other:
      1. Vector recall via ``mem.recall`` (semantic search).
      2. Raw SQL inspection of the SQLite database file on disk.

    Args:
        subject: Topic string to search memories for (e.g. "Fried Rice").
    """
    print(f"""\nMemories on "{subject}" ---""")

    # Recall
    # ------
    # Ask Memori "What do you know about [subject]?"
    try:
        facts = mem.recall(subject, limit=5)
        if facts:
            for fact in facts:
                print(f"""- {fact["content"]} (Score: {fact["similarity"]:.2%})""")
        else:
            print(f"""[MISSING]: No memories found on "{subject}".""")
    except Exception as e:
        print(f"[ERROR]: Recall failed: {e}")

    # Raw SQL Logs
    # ------------
    # Prove the data is physically on disk.
    try:
        conn = sqlite3.connect(DB_FILE)
        try:
            cursor = conn.cursor()
            cursor.execute(
                "SELECT content FROM memori_conversation_message "
                "ORDER BY date_created DESC LIMIT 1"
            )
            row = cursor.fetchone()
            if row:
                print(f"""[SQL LOG]: Last message stored on disk: "{row[0]}" """)
        finally:
            # Fix: the original closed the connection only on the success
            # path, leaking the file handle if execute()/fetchone() raised.
            conn.close()
    except Exception as e:
        print(f"[ERROR]: SQL Inspection failed: {e}")


if __name__ == "__main__":

    # Provide the initial fact
    # ------------------------
    initial_fact = "My favorite food is Fried Rice because I grew up in Toronto."
    print(f"\nUser: {initial_fact}")

    # Route the chat through Memori's wrapped OpenAI client; the wrapper
    # triggers the memory-extraction pipeline automatically.
    first_reply = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": initial_fact}],
    )
    print(f"AI: {first_reply.choices[0].message.content}")

    # Block until the background augmentation has finished writing memories.
    print("\n[Processing] Storing memories...")
    mem.augmentation.wait()

    # Verification
    # ------------
    # Prove the memories actually landed before moving on.
    inspect_memories("Fried Rice")
    inspect_memories("Toronto")
    print("\nSUCCESS! Memories persisted and verified.")

    # Recall
    # -------
    # Ask a question that can only be answered from the memory just formed.
    print("\nRECALL TEST")
    follow_up = "Based on what you just learned, what is my favorite food?"
    print(f"User: {follow_up}")

    second_reply = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": follow_up}],
    )
    print(f"AI: {second_reply.choices[0].message.content}")

    print("\nSUCCESS: Memory persisted, verified, and retrieved.")
```

## Step 4: Run the Application
Expand All @@ -97,7 +182,11 @@ Execute your Python file:
python quickstart.py
```

You should see the AI respond to both questions, with the second response correctly recalling that your favorite color is blue!
You should see:

- The AI response to the initial statement, “My favorite food is Fried Rice because I grew up in Toronto.”
- The program displaying the memories stored about the subjects “Fried Rice” and “Toronto”, as well as the last thing you said in the chat.
- The AI response to a question about your favorite food, based on a memory provided by Memori.

## Step 5: Check the memories created

Expand Down