-
Notifications
You must be signed in to change notification settings - Fork 2
API Reference
kyuchan edited this page Apr 4, 2026
·
1 revision
The shared blackboard for all 13 thinking tools. Located in src/sparks/state.py.
class CognitiveState(BaseModel):
goal: str # Analysis goal
phase: Phase # SEQUENTIAL | ITERATIVE | INTEGRATED
round: int # Current round number
depth: str # "quick" | "standard" | "deep"
# Lens (domain-specific observation guide)
lens: Optional[Lens]
# Nervous system signals
signals: NervousSignals
# Layer 1: Tool outputs
observations: list[Observation] # From observe, body_think, shift_dimension, transform
patterns: list[Pattern] # From recognize_patterns, form_patterns
principles: list[Principle] # From abstract
analogies: list[Analogy] # From analogize
contradictions: list[Contradiction] # From recognize_patterns
model_results: list[ModelResult] # From model
# Extended outputs
hypotheses: list[Hypothesis] # From imagine
perspective_insights: list[PerspectiveInsight] # From empathize
play_discoveries: list[PlayDiscovery] # From play
# History
snapshots: dict[int, StateSnapshot] # Per-round snapshots
forgotten_rounds: list[int] # Rounds that were clean_slated

Key methods:
-
take_snapshot()→StateSnapshot— capture current state -
clean_slate()— strategic forgetting (clear derived, keep observations)
Final output from a Sparks run.
class SynthesisOutput(BaseModel):
principles: list[Principle]
convergence_score: float # 0-1
coverage: float # 0-1
contradictions: list[Contradiction]
analogies: list[Analogy]
model_accuracy: Optional[float]
rounds_completed: int
tools_used: list[str]
total_cost: float
confidence: float # Average principle confidence
limitations: list[str]
thinking_process: dict # Detailed log of reasoning

A discovered core law.
class Principle(BaseModel):
id: str
statement: str # The principle itself
supporting_patterns: list[str] # Pattern IDs
counter_evidence: list[Evidence]
abstraction_level: int # 1-5 (higher = more abstract)
confidence: float # 0-1
round_extracted: int

Raw observation from data.
class Observation(BaseModel):
id: str
channel: str # Which lens channel
content: str # What was observed
lens_used: str
evidence: list[Evidence]
confidence: float
source_refs: list[str] # ["file.txt:42"] for provenance

class Pattern(BaseModel):
id: str
type: str # "recurring" | "absent" | "interference"
description: str
evidence: list[Evidence]
confidence: float
related_patterns: list[str]

class Analogy(BaseModel):
id: str
current: str # What we're analyzing
past_match: str # What it's analogous to
structural_mapping: str # How they correspond
prediction: str # What the analogy predicts
confidence: float

class NeuralPopulation(BaseModel):
name: str
rate: float # 0.0 - 1.0
tau: float # Time constant
threshold: float # Firing threshold
baseline: float # Resting rate
refractory: float # Cooldown timer
gain: float # Neuromodulatory multiplier
def step(input_current, dt, noise) # One time step

class Connection(BaseModel):
source: str
target: str
weight: float # 0.01 - 1.0
sign: int # +1 excitatory, -1 inhibitory
plasticity: bool # Can this learn?
@property
effective_weight -> float # weight * sign

class NeuralCircuit(BaseModel):
populations: dict[str, NeuralPopulation]
connections: list[Connection]
time_step: int
# Neuromodulators
dopamine: float # Reward signal
norepinephrine: float # Arousal
acetylcholine: float # Learning rate
# Methods
def update(sensory_input, dt) # Main integration step
def get_tool_activations() -> dict # Tool firing rates
def get_active_tools(threshold) -> list # Above-threshold tools
def get_mode() -> str # "sympathetic" | "parasympathetic" | "balanced"
def record_tool_outcome(tool, success) # Dopamine feedback
def save(path) / load(path) # Persistence
@staticmethod
def encode_state(state) -> dict # CognitiveState → sensory input

Interface for all thinking tools. Located in src/sparks/tools/base.py.
class BaseTool(ABC):
name: str
def __init__(event_bus: EventBus, tracker: CostTracker)
@abstractmethod
def run(state: CognitiveState, **kwargs) -> None
"""Execute tool, mutating state in place."""
def should_run(state: CognitiveState) -> bool
"""Local rule: should this tool run now?"""
def emit(event_type, data_id, round)
"""Publish event to the bus."""from sparks.tools.base import BaseTool
from sparks.state import CognitiveState
class MyTool(BaseTool):
name = "my_custom_tool"
def should_run(self, state: CognitiveState) -> bool:
return len(state.principles) >= 1
def run(self, state: CognitiveState, **kwargs):
# Your logic here
# Mutate state.observations, state.patterns, etc.
pass

Save to ~/.sparks/tools/my_tool.py — it's auto-discovered.
class CostTracker:
total_cost: float
total_calls: int
breakdown: dict[str, float] # tool → cost
def record(tool, model, input_tokens, output_tokens)
def remaining() -> float
def can_afford(model, estimated_tokens) -> bool
def select_model(tool) -> str # Auto-routing with budget awareness

Simple pub/sub for tool coordination:
class EventBus:
def publish(event: StateEvent)
def subscribe(event_type: str, callback: Callable)

Cognitive Sparks
Core Concepts
How It Works
Setup & Usage
Results
Reference