Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
85 changes: 85 additions & 0 deletions agents/agent1.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,19 +24,94 @@
}

# ── 2. Tools ───────────────────────────────────────────────────────────────
def get_weather(lat: float, lon: float) -> dict:
    """
    Fetch today's Open-Meteo forecast for the given coordinates.

    Returns:
        dict: {"high": max °C, "low": min °C, "conditions": str}
    """
    endpoint = (
        "https://api.open-meteo.com/v1/forecast"
        f"?latitude={lat}&longitude={lon}"
        "&daily=weathercode,temperature_2m_max,temperature_2m_min"
        "&forecast_days=1&timezone=auto"
    )
    response = requests.get(endpoint, timeout=15)
    response.raise_for_status()
    forecast = response.json()["daily"]
    # Single-day forecast: every daily series has exactly one entry.
    code = forecast["weathercode"][0]
    return {
        "high": forecast["temperature_2m_max"][0],
        "low": forecast["temperature_2m_min"][0],
        "conditions": WEATHER_CODES.get(code, "Unknown"),
    }

def convert_c_to_f(c: float) -> float:
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    scaled = c * 9 / 5
    return scaled + 32

# ── 3. LLM client ───────────────────────────────────────────────────────────
llm = ChatOllama(model="llama3.2", temperature=0.0)

# ── 4. System prompt ────────────────────────────────────────────────────────
# Runtime prompt sent verbatim to the model. The wording is load-bearing:
# run() extracts tool arguments by splitting the reply on the literal
# "Args:" marker, so do not reword that line.
SYSTEM = textwrap.dedent("""
You are an agent with two tools:

get_weather(lat:float, lon:float)
→ {"high": float, "low": float, "conditions": str}

convert_c_to_f(c:float) → float

When you plan, emit exactly:

Thought: <your thought>
Action: <tool name>
Args: {"lat":X,"lon":Y} or {"c":Z}

Do NOT output anything else.
""").strip()

# ── 5. TAO run helper ───────────────────────────────────────────────────────
def run(question: str) -> str:
    """
    Execute the two-step Thought→Action→Observation dance once, printing the
    full trace and returning the final answer string.

    Args:
        question: The user's weather question.

    Returns:
        str: A formatted answer with conditions and the °F high/low.

    Raises:
        ValueError: if the model's first reply has no "Args:" line.
        json.JSONDecodeError: if the text after "Args:" is not valid JSON.
    """
    messages = [
        {"role": "system", "content": SYSTEM},
        {"role": "user", "content": question},
    ]

    print("\n--- Thought → Action → Observation → Final ---\n")

    # Step 1: the LLM chooses coordinates for the requested location.
    reply1 = llm.invoke(messages)
    plan1 = reply1.content.strip()
    print(plan1 + "\n")

    # Parse the Args JSON robustly. The previous split("Args:")[1] raised an
    # opaque IndexError when the marker was missing, and json.loads() failed
    # whenever the model appended anything after the JSON object.
    # raw_decode() stops at the end of the first valid object and ignores
    # any trailing text.
    _, marker, rest = plan1.partition("Args:")
    if not marker:
        raise ValueError(f"Model reply missing 'Args:' line:\n{plan1}")
    coords, _ = json.JSONDecoder().raw_decode(rest.strip())
    obs1 = get_weather(**coords)
    print(f"Observation: {obs1}\n")

    # Step 2: feed the observation back so the LLM plans the °C → °F step.
    messages += [
        {"role": "assistant", "content": plan1},
        {"role": "user", "content": f"Observation: {obs1}"}
    ]
    reply2 = llm.invoke(messages)
    plan2 = reply2.content.strip()
    print(plan2 + "\n")

    # The conversion itself is done in code; the model's second plan is only
    # printed as part of the trace.
    high_f = convert_c_to_f(obs1["high"])
    low_f = convert_c_to_f(obs1["low"])
    obs2 = {"high_f": high_f, "low_f": low_f}
    print(f"Observation: {obs2}\n")

    # Assemble the final answer.
    final = (
        f"Today will be **{obs1['conditions']}** with a high of "
        f"**{high_f:.1f} °F** and a low of **{low_f:.1f} °F**."
    )
    print(f"Final: {final}\n")
    return final


# ── 6. Interactive loop ────────────────────────────────────────────────────
if __name__ == "__main__":
Expand All @@ -48,3 +123,13 @@
break

# Build the question for the agent
query = (
f"What is the predicted weather today for {loc}? "
"Include conditions plus high/low in °F."
)

try:
run(query) # prints the entire trace internally
except Exception as e:
print(f"⚠️ Error: {e}\n")

60 changes: 60 additions & 0 deletions agents/agent5.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,16 +20,76 @@ def book_flight(flight_number: str):

# Define the AI agent

# Defines the AI agents

# All three agents share the same Ollama-backed LLM (ollama_llm is defined
# earlier in the file, outside this excerpt) and print their reasoning
# traces (verbose=True).
booking_agent = Agent(
    role="Airline Booking Assistant",
    goal="Help users book flights efficiently.",
    backstory="You are an expert airline booking assistant, providing the best booking options with clear information.",
    verbose=True,
    llm=ollama_llm,
)

# New agent for travel planning tasks
travel_agent = Agent(
    role="Travel Assistant",
    goal="Assist in planning and organizing travel details.",
    backstory="You are skilled at planning and organizing travel itineraries efficiently.",
    verbose=True,
    llm=ollama_llm,
)

# New agent for customer service tasks
customer_service_agent = Agent(
    role="Customer Service Representative",
    goal="Provide excellent customer service by handling user requests and presenting options.",
    backstory="You are skilled at providing customer support and ensuring user satisfaction.",
    verbose=True,
    llm=ollama_llm,
)

# Define the tasks with expected outputs
# Create a task to extract travel details (source, destination, date) using the LLM.
extract_travel_info_task = Task(
    description=(
        "Extract destination and date from {user_request}"
        # NOTE(review): a stricter JSON output template was tried and left
        # commented out; expected_output below still asks for JSON, so the
        # format relies on the LLM honoring that hint.
        # "Return your result as a JSON object formatted as: "
        # "{\"destination\": \"<destination>\", \"date\": \"<date>\"}."
    ),
    agent=customer_service_agent,
    expected_output="A JSON object containing 'destination', and 'date'."
)

# Create a task to find suitable flights
find_flights_task = Task(
    description="Find available flights for the extracted destination and date.",
    agent=travel_agent,
    expected_output="A JSON list of available flights, including flight number, airline, departure time, and price."
)

# Create a task to present flight options
present_flights_task = Task(
    description="Present flight options in a user-friendly format and ask the user to choose one. The list of flight should not be in JSON format and should have a number by each choice.",
    agent=customer_service_agent,
    expected_output="The selected flight number chosen by the user. If no response, just use 1."
)

# Create a task to book the flight
# NOTE(review): `function` and `verbose` do not look like standard crewai
# Task parameters -- confirm the installed version honors them; tools= is
# the usual way to expose book_flight to the agent.
book_flight_task = Task(
    description="Book the selected flight and confirm booking. If more than one option, just use the first one.",
    agent=booking_agent,
    function=lambda details: book_flight(details["flight_number"]),
    expected_output="A confirmation message with flight number and booking status.",
    verbose=True
)

# Create the Crew
crew = Crew(
    agents=[booking_agent, customer_service_agent, travel_agent],
    tasks=[extract_travel_info_task, find_flights_task, present_flights_task, book_flight_task],
    process="sequential",  # tasks run one after another, in list order
    verbose=False
)

# Run the process with user input (hard-coded sample request)
user_input = "I need a flight to New York on August 11, 2025."
Expand Down
118 changes: 116 additions & 2 deletions agents/curr_conv_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,16 +7,87 @@
# -----------------------------------------------------------------------------
# MEMORY PERSISTENCE (with history)
# -----------------------------------------------------------------------------
# Path of the JSON file that persists conversions across runs.
MEMORY_FILE = "currency_memory.json"

def load_memory():
    """Load memory from disk, initializing history if missing.

    Returns:
        dict: keys "last_amount", "last_from", "last_to", "history"
        (history is a list of past conversion records).
    """
    if os.path.exists(MEMORY_FILE):
        # Use a context manager: the original json.load(open(...)) leaked
        # the file handle until garbage collection.
        with open(MEMORY_FILE) as f:
            mem = json.load(f)
        # Ensure history key exists (older files predate the history field)
        if "history" not in mem:
            mem["history"] = []
        return mem
    # Default memory structure
    return {"last_amount": None, "last_from": None, "last_to": None, "history": []}

def save_memory(mem):
    """Persist memory (including history) back to disk."""
    with open(MEMORY_FILE, "w") as f:
        json.dump(mem, f)

memory = load_memory()

# -----------------------------------------------------------------------------
# SMOLAGENTS TOOLS
# -----------------------------------------------------------------------------

# tool to get rates from a URL
@tool
def fetch_live_rate(from_currency: str, to_currency: str) -> float:
    """
    Retrieve a live exchange rate from the fawazahmed0 Exchange API.

    Args:
        from_currency (str): 3-letter source code, e.g. "USD"
        to_currency (str): 3-letter target code, e.g. "EUR"

    Returns:
        float: Target units per one source unit.

    Raises:
        RuntimeError: if the rate cannot be fetched.
    """
    src = from_currency.lower()
    dst = to_currency.lower()
    # Primary CDN endpoint first, then the fallback mirror.
    endpoints = (
        f"https://cdn.jsdelivr.net/npm/@fawazahmed0/currency-api@latest"
        f"/v1/currencies/{src}.json",
        f"https://latest.currency-api.pages.dev"
        f"/v1/currencies/{src}.json",
    )
    for endpoint in endpoints:
        try:
            response = requests.get(endpoint, timeout=5)
            response.raise_for_status()
            table = response.json().get(src, {})
            if dst in table:
                return table[dst]
        except Exception:
            # Network/JSON failure on this mirror -- try the next one.
            continue
    raise RuntimeError(f"Failed to fetch rate from {from_currency} to {to_currency}")


# tool to do basic calculations

@tool
def calculate(expression: str) -> float:
    """
    Evaluate a basic arithmetic expression safely.

    Args:
        expression (str): e.g. "100 * 0.85"

    Returns:
        float: numeric result.

    Raises:
        RuntimeError: if the expression is invalid or uses anything beyond
            plain numeric arithmetic.
    """
    # SECURITY: the previous eval(expression, {"__builtins__": {}}) is not a
    # real sandbox -- dunder attribute chains can escape an empty
    # __builtins__. Walk the AST instead and permit only numeric literals
    # and arithmetic operators.
    import ast
    import operator

    binops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv,
        ast.Mod: operator.mod,
        ast.Pow: operator.pow,
    }
    unaryops = {ast.UAdd: operator.pos, ast.USub: operator.neg}

    def _eval(node):
        # Recursively evaluate a whitelisted arithmetic AST node.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in binops:
            return binops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in unaryops:
            return unaryops[type(node.op)](_eval(node.operand))
        raise ValueError(f"unsupported expression element: {type(node).__name__}")

    try:
        return _eval(ast.parse(expression, mode="eval"))
    except Exception as e:
        # Same error contract as before: wrap any parse/eval failure.
        raise RuntimeError(f"Calculation error: {e}") from e

# -----------------------------------------------------------------------------
# AGENT CONFIGURATION
Expand All @@ -29,6 +100,11 @@
temperature=0.0, # deterministic tool use
)

# smolagents CodeAgent wired with only the two custom tools above;
# add_base_tools=False keeps the default built-in toolset out.
agent = CodeAgent(
    tools=[fetch_live_rate, calculate],
    model=model,
    add_base_tools=False
)

# -----------------------------------------------------------------------------
# QUERY PARSING + FILLING from MEMORY
Expand All @@ -46,7 +122,30 @@ def parse_and_fill(query: str):
"""
q = query.strip()
amt = frm = to = None


# 1) Full form: amount + source + "to" + target
m1 = re.match(r"(?:Convert\s+)?([\d.]+)\s*([A-Za-z]{3})\s+to\s+([A-Za-z]{3})$", q, re.IGNORECASE)
if m1:
amt, frm, to = m1.group(1), m1.group(2).upper(), m1.group(3).upper()
else:
# 2) Amount + source only
m2 = re.match(r"(?:Convert\s+)?([\d.]+)\s*([A-Za-z]{3})$", q, re.IGNORECASE)
if m2:
amt, frm = m2.group(1), m2.group(2).upper()
to = memory["last_to"]

# 3) Amount + "to" + target only
m3 = re.match(r"(?:Convert\s+)?([\d.]+)\s+to\s+([A-Za-z]{3})$", q, re.IGNORECASE)
if m3:
amt, to = m3.group(1), m3.group(2).upper()
frm = memory["last_from"]

# 4) Amount only
m4 = re.match(r"(?:Convert\s+)?([\d.]+)$", q, re.IGNORECASE)
if m4:
amt = m4.group(1)
frm = memory["last_from"]
to = memory["last_to"]

if not (amt and frm and to):
raise ValueError(
Expand Down Expand Up @@ -79,6 +178,18 @@ def parse_and_fill(query: str):

# Handle special commands - exit and history

if low in ("exit", "quit"):
print("Goodbye!")
break
if low in ("history", "show history"):
if not memory["history"]:
print("No conversion history yet.\n")
else:
print("Conversion History:")
for entry in memory["history"]:
print(f" • {entry['query']} → {entry['result']:.2f}")
print()
continue

# Normal convert request

Expand All @@ -87,9 +198,12 @@ def parse_and_fill(query: str):
prompt = f"Convert {amt} {frm} to {to}"

# Run the agent (LLM will call fetch_live_rate & calculate)

result = agent.run(prompt)

# Store and persist this interaction
memory["history"].append({"query": user_input, "amount": amt,
"from": frm, "to": to, "result": float(result)})
save_memory(memory)

# Friendly output
print(f"{amt} {frm} is approximately {float(result):.2f} {to}.\n")
Expand Down
1 change: 1 addition & 0 deletions agents/currency_memory.json
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{"last_amount": "500", "last_from": "JPY", "last_to": "INR", "history": [{"query": "Convert 100 USD to EUR", "amount": "100", "from": "USD", "to": "EUR", "result": 85.949297}, {"query": "Convert 300", "amount": "300", "from": "USD", "to": "EUR", "result": 257.703555}, {"query": "Convert 300 to INR", "amount": "300", "from": "USD", "to": "INR", "result": 26305.982415000002}, {"query": "Convert 150 JPY", "amount": "150", "from": "JPY", "to": "INR", "result": 11907.0}]}
Loading