Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,12 @@ OPENAI_API_KEY=

# Database URL
DATABASE_URL=

# Competition ID (update accordingly)
COMPETITION_ID=IQC2025S2

# Langsmith Tracing
LANGSMITH_TRACING=true
LANGSMITH_ENDPOINT="https://api.smith.langchain.com"
LANGSMITH_API_KEY=
LANGSMITH_PROJECT=
14 changes: 14 additions & 0 deletions brain/agent_config.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from langchain_core.runnables import RunnableConfig

from brain.alpha_class import Alpha

DEFAULT_CONFIG = {
"region": "USA",
"universe": "TOP3000",
Expand All @@ -17,3 +19,15 @@ def get_universe_config(config: RunnableConfig) -> dict:
**DEFAULT_CONFIG,
**{k: conf[k] for k in DEFAULT_CONFIG.keys() if k in conf},
}


def get_config(alpha: Alpha) -> dict:
    """Collect the simulation settings carried by *alpha* into a config dict."""
    setting_names = ("region", "universe", "neutralization", "truncation", "decay", "delay")
    return {name: getattr(alpha, name) for name in setting_names}
168 changes: 168 additions & 0 deletions brain/agentic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,168 @@
from langgraph.graph import END, START, StateGraph

from brain.agents import (
invoke_executor,
invoke_fine_tuner,
invoke_planner,
invoke_seeder,
invoke_tester,
)
from brain.alpha_storage import Storage
from brain.api import BrainAPI
from brain.graph_state import GraphState
from brain.score import get_score

MAX_EXPLORATION_COUNT = 3


def plateau_condition(state: GraphState) -> bool:
    """Return True when the best alpha's combined fitness+sharpe gain since
    the last snapshot is below the 0.01 improvement threshold."""
    current = state["storage"].best_alpha
    previous = state.get("old_best_alpha")

    # With fewer than two snapshots there is nothing to compare yet.
    if current is None or previous is None:
        return False

    improvement = (current.fitness - previous.fitness) + (current.sharpe - previous.sharpe)
    return improvement < 0.01


def seed_finder_node(state: GraphState) -> GraphState:
    """Pick a fresh seed alpha idea and reset all exploration bookkeeping."""
    # Iterate the database until we find some decent alpha, or some other seed idea.
    alpha_idea, config = invoke_seeder(state)
    print(f"Seed alpha: {alpha_idea}")

    # A brand-new storage so scores from the previous seed do not leak over.
    fresh_storage = Storage(score_func=get_score, max_size=50)
    next_state = {
        "alpha_idea": alpha_idea,
        "default_config": config,
        "node": "plan",
        "state": "explore",
        "explore_count": 0,
        "static_finetune": True,
        "storage": fresh_storage,
    }
    return next_state


def planner_node(state: GraphState) -> GraphState:
    """Produce the next improvement plan for the current alpha.

    Routes to the executor with the plan attached, or back to the seeder when
    the planner returns no plan.
    """
    # Build an updated state instead of mutating the input dict in place:
    # the original incremented state["explore_count"] and assigned
    # state["old_best_alpha"] directly, silently changing the caller's
    # snapshot. The merged dict below yields the exact same values for
    # invoke_planner and for every returned state.
    state = {
        **state,
        "explore_count": state["explore_count"] + 1,
        "old_best_alpha": state["storage"].best_alpha,
    }

    plan = invoke_planner(state)
    if not plan:
        # No actionable plan — restart from a new seed.
        return {**state, "node": "seed"}

    if state["storage"].best_alpha is None:
        # No baseline yet: ensure the very first step establishes one.
        plan.insert(0, "Execute initial alpha idea to obtain baseline")

    plan.append(
        "Think about previous changes and how they affected the alpha."
        " Propose new alphas based on the most successful changes."
    )

    return {**state, "node": "execute", "plan": plan}


def executor_node(state: GraphState) -> GraphState:
    """Run the executor agent, then hand off to the exploration test gate."""
    updated = invoke_executor(state)
    # TODO: Pass some summary of results from the executor to planner
    return {**updated, "node": "explore_test", "state": "explore"}


def fine_tuner_node(state: GraphState) -> GraphState:
    """Run the fine-tuning agent, then hand off to the submission test gate."""
    invoke_fine_tuner(state)
    routing = {"node": "submit_test", "state": "fine_tune", "static_finetune": False}
    return {**state, **routing}


def explore_test_node(state: GraphState) -> GraphState:
    """Decide what happens next after exploring a new alpha idea."""
    candidate = state["storage"].best_alpha
    previous = state.get("old_best_alpha")

    # Only consider promotion when a new, fully-passing best alpha appeared.
    is_new_best = candidate is not None and (
        previous is None or previous.alpha_id != candidate.alpha_id
    )

    if is_new_best and len(candidate.failing_tests) == 0:
        score = candidate.update_score()
        trade_count = candidate.long_count + candidate.short_count
        print("Best alpha score:", score, "Trade count:", trade_count)
        promising = score > -50 and trade_count > 400
        if promising and invoke_tester(state):
            return {
                **state,
                "node": "fine_tuner",
                "state": "fine_tune",
                "static_finetune": True,
                "explore_count": 0,
            }

    # TODO: Test plateau condition, compare previous best, with current best alpha
    if state["explore_count"] < MAX_EXPLORATION_COUNT and plateau_condition(state):
        return {**state, "node": "plan", "state": "explore"}

    return {**state, "node": "seed", "state": "explore"}


def submit_test_node(state: GraphState) -> GraphState:
    """Decide what happens next after fine-tuning a new alpha idea."""
    candidate = state["storage"].best_alpha
    previous = state.get("old_best_alpha")

    # Only submit when a new, fully-passing best alpha appeared.
    changed = candidate is not None and (
        previous is None or previous.alpha_id != candidate.alpha_id
    )

    if changed and len(candidate.failing_tests) == 0:
        score = candidate.update_score()
        trade_count = candidate.long_count + candidate.short_count
        print("Best alpha score:", score, "Trade count:", trade_count)
        if score > 200 and trade_count > 400 and invoke_tester(state):
            # TODO: Mark alpha as "submitted" or "ready for production"
            print("Submitting alpha! Score:", score)
            BrainAPI.submit_alpha(candidate.alpha_id)
            return {**state, "node": "seed", "state": "explore", "explore_count": 0}

    # TODO: Test plateau condition, compare previous best, with current best alpha
    if state["explore_count"] < MAX_EXPLORATION_COUNT and plateau_condition(state):
        return {**state, "node": "plan", "state": "fine_tune"}

    return {**state, "node": "seed", "state": "explore"}


builder = StateGraph(GraphState)

# Node registry: graph node name -> handler.
_NODES = {
    "seed_finder": seed_finder_node,
    "planner": planner_node,
    "executor": executor_node,
    "fine_tuner": fine_tuner_node,
    "explore_test": explore_test_node,
    "submit_test": submit_test_node,
}
for _name, _handler in _NODES.items():
    builder.add_node(_name, _handler)
# builder.add_node("stagnation_chk", stagnation_node)


# Static flow
builder.add_edge(START, "seed_finder")
builder.add_edge("executor", "explore_test")
builder.add_edge("fine_tuner", "submit_test")

# Conditional branching (the `then=` kwarg is gone as of langgraph 0.4.8).
# Each decision node writes its successor tag into state["node"]; this map
# translates the tag into the target graph node.
_PATH_MAP = {
    "plan": "planner",
    "execute": "executor",
    "seed": "seed_finder",
    "fine_tuner": "fine_tuner",
    "explore_test": "explore_test",
    "submit_test": "submit_test",
    "stop": END,
}
for _decision_node in ("seed_finder", "planner", "explore_test", "submit_test"):
    builder.add_conditional_edges(
        _decision_node,
        lambda state: state["node"],
        path_map=dict(_PATH_MAP),
    )

graph = builder.compile()  # returns a CompiledStateGraph
13 changes: 13 additions & 0 deletions brain/agents/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
"""Agent entry points re-exported under distinct names.

Each agent module exposes an ``invoke`` function; this package aliases them
so callers can import every agent from one place.
"""

__all__ = [
    "invoke_executor",
    "invoke_fine_tuner",
    "invoke_planner",
    "invoke_tester",
    "invoke_seeder",
]

from .executor import invoke as invoke_executor
from .fine_tuner import invoke as invoke_fine_tuner
from .planner import invoke as invoke_planner
from .seeder import invoke as invoke_seeder
from .tester import invoke as invoke_tester
43 changes: 43 additions & 0 deletions brain/agents/alpha_tester.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
from functools import partial

from brain.agents.executor import create_alpha_simulation
from brain.alpha_class import Alpha
from brain.genetic_algorithm import execute_alphas, genetic_algorithm
from brain.graph_state import GraphState

# Parameter grid swept one axis at a time by ``invoke`` when fine-tuning the
# current best alpha.
param_options = {
    "universe": ["TOP3000", "TOP1000", "TOP500", "TOP200"],
    "neutralization": ["INDUSTRY", "SECTOR", "MARKET", "NONE", "SUBINDUSTRY"],
    "decay": [2, 4, 6, 8, 10, 12, 14, 16, 20],
    "truncation": [0.005, 0.01, 0.05, 0.1],
    # "pasteurization": ["ON", "OFF"],
}


def get_config(alpha: Alpha) -> dict:
    """Get the configuration for the alpha.

    Returns a dict of the six simulation settings read off *alpha*.
    """
    # NOTE(review): exact duplicate of brain.agent_config.get_config —
    # consider importing it from there instead of redefining it here.
    return {
        "region": alpha.region,
        "universe": alpha.universe,
        "neutralization": alpha.neutralization,
        "truncation": alpha.truncation,
        "decay": alpha.decay,
        "delay": alpha.delay,
    }


def invoke(state: GraphState) -> GraphState:
    """Invoke fine-tuning agent.

    Sweeps each simulation parameter of the current best alpha one axis at a
    time, then runs a genetic-algorithm search seeded from the best result.
    Returns the graph state so the declared ``GraphState`` contract holds
    (previously the function implicitly returned ``None``).
    """
    storage = state.get("storage")
    # Rank, sign, change delay +-1 (check for 0), truncation +-0.01

    # One-dimensional sweep: vary a single parameter around the current best
    # alpha and simulate every variant; storage collects the results.
    for param, options in param_options.items():
        alphas = [storage.best_alpha.replace(**{param: option}) for option in options]
        list(execute_alphas(alphas, storage))

    # Fixed typo: variable was previously named `congig`.
    config = get_config(storage.best_alpha)
    plan = state.get("plan", [])
    create_alpha = partial(create_alpha_simulation, plan=plan, config=config)

    storage.reset_counter()
    genetic_algorithm(storage, create_alpha, len(plan) + 10)
    return state
Loading