"""
Nexus Experience Store — Embodied Memory

SQLite-backed store for lived experiences only. The model remembers
what it perceived, what it thought, and what it did — nothing else.
Each row is one cycle of the perceive→think→act loop.

Implements the GBrain "compiled truth + timeline" pattern (#1181):
- compiled_truths: current best understanding, rewritten when evidence changes
- experiences: append-only evidence trail that never gets edited
"""
|
|
|
|
|
|
|
|
|
|
|
|
import sqlite3
|
|
|
|
|
|
import json
|
|
|
|
|
|
import time
|
|
|
|
|
|
from pathlib import Path
|
|
|
|
|
|
from typing import Optional
|
|
|
|
|
|
|
|
|
|
|
|
DEFAULT_DB = Path.home() / ".nexus" / "experience.db"
|
|
|
|
|
|
MAX_CONTEXT_EXPERIENCES = 20 # Recent experiences fed to the model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class ExperienceStore:
    """SQLite-backed store of perceive→think→act cycles and compiled truths."""

    def __init__(self, db_path: Optional[Path] = None):
        """Open (or create) the experience database and ensure the schema exists.

        Args:
            db_path: Location of the SQLite file. Defaults to DEFAULT_DB
                (~/.nexus/experience.db).
        """
        self.db_path = db_path or DEFAULT_DB
        # The parent directory may not exist on first run — create it first.
        self.db_path.parent.mkdir(parents=True, exist_ok=True)
        self.conn = sqlite3.connect(str(self.db_path))
        # WAL + NORMAL sync: a reasonable durability/throughput trade-off
        # for frequent small appends from a single writer.
        for pragma in ("journal_mode=WAL", "synchronous=NORMAL"):
            self.conn.execute(f"PRAGMA {pragma}")
        self._init_tables()
def _init_tables(self):
|
|
|
|
|
|
self.conn.executescript("""
|
|
|
|
|
|
CREATE TABLE IF NOT EXISTS experiences (
|
|
|
|
|
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
|
|
|
|
timestamp REAL NOT NULL,
|
|
|
|
|
|
perception TEXT NOT NULL,
|
|
|
|
|
|
thought TEXT,
|
|
|
|
|
|
action TEXT,
|
|
|
|
|
|
action_result TEXT,
|
|
|
|
|
|
cycle_ms INTEGER DEFAULT 0,
|
|
|
|
|
|
session_id TEXT
|
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
|
|
CREATE TABLE IF NOT EXISTS summaries (
|
|
|
|
|
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
|
|
|
|
timestamp REAL NOT NULL,
|
|
|
|
|
|
summary TEXT NOT NULL,
|
|
|
|
|
|
exp_start INTEGER NOT NULL,
|
|
|
|
|
|
exp_end INTEGER NOT NULL
|
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
|
|
CREATE INDEX IF NOT EXISTS idx_exp_ts
|
|
|
|
|
|
ON experiences(timestamp DESC);
|
|
|
|
|
|
CREATE INDEX IF NOT EXISTS idx_exp_session
|
|
|
|
|
|
ON experiences(session_id);
|
2026-04-12 12:18:55 -04:00
|
|
|
|
|
|
|
|
|
|
-- GBrain compiled truth pattern (#1181)
|
|
|
|
|
|
-- Current best understanding about an entity/topic.
|
|
|
|
|
|
-- Rewritten when new evidence changes the picture.
|
|
|
|
|
|
-- The timeline (experiences table) is the evidence trail — never edited.
|
|
|
|
|
|
CREATE TABLE IF NOT EXISTS compiled_truths (
|
|
|
|
|
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
|
|
|
|
entity TEXT NOT NULL, -- what this truth is about (person, topic, project)
|
|
|
|
|
|
truth TEXT NOT NULL, -- current best understanding
|
|
|
|
|
|
confidence REAL DEFAULT 0.5, -- 0.0–1.0
|
|
|
|
|
|
source_exp_id INTEGER, -- last experience that updated this truth
|
|
|
|
|
|
created_at REAL NOT NULL,
|
|
|
|
|
|
updated_at REAL NOT NULL,
|
|
|
|
|
|
metadata_json TEXT DEFAULT '{}',
|
|
|
|
|
|
UNIQUE(entity) -- one compiled truth per entity
|
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
|
|
CREATE INDEX IF NOT EXISTS idx_truth_entity
|
|
|
|
|
|
ON compiled_truths(entity);
|
|
|
|
|
|
CREATE INDEX IF NOT EXISTS idx_truth_updated
|
|
|
|
|
|
ON compiled_truths(updated_at DESC);
|
2026-03-25 17:19:58 +00:00
|
|
|
|
""")
|
|
|
|
|
|
self.conn.commit()
|
|
|
|
|
|
|
|
|
|
|
|
def record(
|
|
|
|
|
|
self,
|
|
|
|
|
|
perception: str,
|
|
|
|
|
|
thought: Optional[str] = None,
|
|
|
|
|
|
action: Optional[str] = None,
|
|
|
|
|
|
action_result: Optional[str] = None,
|
|
|
|
|
|
cycle_ms: int = 0,
|
|
|
|
|
|
session_id: Optional[str] = None,
|
|
|
|
|
|
) -> int:
|
|
|
|
|
|
"""Record one perceive→think→act cycle."""
|
|
|
|
|
|
cur = self.conn.execute(
|
|
|
|
|
|
"""INSERT INTO experiences
|
|
|
|
|
|
(timestamp, perception, thought, action, action_result,
|
|
|
|
|
|
cycle_ms, session_id)
|
|
|
|
|
|
VALUES (?, ?, ?, ?, ?, ?, ?)""",
|
|
|
|
|
|
(time.time(), perception, thought, action,
|
|
|
|
|
|
action_result, cycle_ms, session_id),
|
|
|
|
|
|
)
|
|
|
|
|
|
self.conn.commit()
|
|
|
|
|
|
return cur.lastrowid
|
|
|
|
|
|
|
|
|
|
|
|
def recent(self, limit: int = MAX_CONTEXT_EXPERIENCES) -> list[dict]:
|
|
|
|
|
|
"""Fetch the most recent experiences for context."""
|
|
|
|
|
|
rows = self.conn.execute(
|
|
|
|
|
|
"""SELECT id, timestamp, perception, thought, action,
|
|
|
|
|
|
action_result, cycle_ms
|
|
|
|
|
|
FROM experiences
|
|
|
|
|
|
ORDER BY timestamp DESC
|
|
|
|
|
|
LIMIT ?""",
|
|
|
|
|
|
(limit,),
|
|
|
|
|
|
).fetchall()
|
|
|
|
|
|
|
|
|
|
|
|
return [
|
|
|
|
|
|
{
|
|
|
|
|
|
"id": r[0],
|
|
|
|
|
|
"timestamp": r[1],
|
|
|
|
|
|
"perception": r[2],
|
|
|
|
|
|
"thought": r[3],
|
|
|
|
|
|
"action": r[4],
|
|
|
|
|
|
"action_result": r[5],
|
|
|
|
|
|
"cycle_ms": r[6],
|
|
|
|
|
|
}
|
|
|
|
|
|
for r in reversed(rows) # Chronological order
|
|
|
|
|
|
]
|
|
|
|
|
|
|
|
|
|
|
|
def format_for_context(self, limit: int = MAX_CONTEXT_EXPERIENCES) -> str:
|
|
|
|
|
|
"""Format recent experiences as natural language for the model."""
|
|
|
|
|
|
experiences = self.recent(limit)
|
|
|
|
|
|
if not experiences:
|
|
|
|
|
|
return "You have no memories yet. This is your first moment."
|
|
|
|
|
|
|
|
|
|
|
|
lines = []
|
|
|
|
|
|
for exp in experiences:
|
|
|
|
|
|
ago = time.time() - exp["timestamp"]
|
|
|
|
|
|
if ago < 60:
|
|
|
|
|
|
when = f"{int(ago)}s ago"
|
|
|
|
|
|
elif ago < 3600:
|
|
|
|
|
|
when = f"{int(ago / 60)}m ago"
|
|
|
|
|
|
else:
|
|
|
|
|
|
when = f"{int(ago / 3600)}h ago"
|
|
|
|
|
|
|
|
|
|
|
|
line = f"[{when}] You perceived: {exp['perception']}"
|
|
|
|
|
|
if exp["thought"]:
|
|
|
|
|
|
line += f"\n You thought: {exp['thought']}"
|
|
|
|
|
|
if exp["action"]:
|
|
|
|
|
|
line += f"\n You did: {exp['action']}"
|
|
|
|
|
|
if exp["action_result"]:
|
|
|
|
|
|
line += f"\n Result: {exp['action_result']}"
|
|
|
|
|
|
lines.append(line)
|
|
|
|
|
|
|
|
|
|
|
|
return "Your recent experiences:\n\n" + "\n\n".join(lines)
|
|
|
|
|
|
|
|
|
|
|
|
def count(self) -> int:
|
|
|
|
|
|
"""Total experiences recorded."""
|
|
|
|
|
|
return self.conn.execute(
|
|
|
|
|
|
"SELECT COUNT(*) FROM experiences"
|
|
|
|
|
|
).fetchone()[0]
|
|
|
|
|
|
|
|
|
|
|
|
def save_summary(self, summary: str, exp_start: int, exp_end: int):
|
|
|
|
|
|
"""Store a compressed summary of a range of experiences.
|
|
|
|
|
|
Used when context window fills — distill old memories."""
|
|
|
|
|
|
self.conn.execute(
|
|
|
|
|
|
"""INSERT INTO summaries (timestamp, summary, exp_start, exp_end)
|
|
|
|
|
|
VALUES (?, ?, ?, ?)""",
|
|
|
|
|
|
(time.time(), summary, exp_start, exp_end),
|
|
|
|
|
|
)
|
|
|
|
|
|
self.conn.commit()
|
|
|
|
|
|
|
|
|
|
|
|
def get_summaries(self, limit: int = 5) -> list[dict]:
|
|
|
|
|
|
"""Fetch recent experience summaries."""
|
|
|
|
|
|
rows = self.conn.execute(
|
|
|
|
|
|
"""SELECT id, timestamp, summary, exp_start, exp_end
|
|
|
|
|
|
FROM summaries ORDER BY timestamp DESC LIMIT ?""",
|
|
|
|
|
|
(limit,),
|
|
|
|
|
|
).fetchall()
|
|
|
|
|
|
return [
|
|
|
|
|
|
{"id": r[0], "timestamp": r[1], "summary": r[2],
|
|
|
|
|
|
"exp_start": r[3], "exp_end": r[4]}
|
|
|
|
|
|
for r in reversed(rows)
|
|
|
|
|
|
]
|
|
|
|
|
|
|
|
|
|
|
|
def close(self):
|
|
|
|
|
|
self.conn.close()
|
2026-04-12 12:18:55 -04:00
|
|
|
|
|
|
|
|
|
|
# ── GBrain compiled truth + timeline pattern (#1181) ────────────────
|
|
|
|
|
|
|
|
|
|
|
|
def upsert_compiled_truth(
|
|
|
|
|
|
self,
|
|
|
|
|
|
entity: str,
|
|
|
|
|
|
truth: str,
|
|
|
|
|
|
confidence: float = 0.5,
|
|
|
|
|
|
source_exp_id: Optional[int] = None,
|
|
|
|
|
|
metadata: Optional[dict] = None,
|
|
|
|
|
|
) -> int:
|
|
|
|
|
|
"""Create or update the compiled truth for an entity.
|
|
|
|
|
|
|
|
|
|
|
|
This is the 'compiled truth on top' from the GBrain pattern.
|
|
|
|
|
|
When new evidence changes our understanding, we rewrite this
|
|
|
|
|
|
record. The timeline (experiences table) preserves what led
|
|
|
|
|
|
here — it is never edited.
|
|
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
|
entity: What this truth is about (person, topic, project).
|
|
|
|
|
|
truth: Current best understanding.
|
|
|
|
|
|
confidence: 0.0–1.0 confidence score.
|
|
|
|
|
|
source_exp_id: Last experience ID that informed this truth.
|
|
|
|
|
|
metadata: Optional extra data as a dict.
|
|
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
|
The row ID of the compiled truth.
|
|
|
|
|
|
"""
|
|
|
|
|
|
now = time.time()
|
|
|
|
|
|
meta_json = json.dumps(metadata) if metadata else "{}"
|
|
|
|
|
|
|
|
|
|
|
|
self.conn.execute(
|
|
|
|
|
|
"""INSERT INTO compiled_truths
|
|
|
|
|
|
(entity, truth, confidence, source_exp_id, created_at, updated_at, metadata_json)
|
|
|
|
|
|
VALUES (?, ?, ?, ?, ?, ?, ?)
|
|
|
|
|
|
ON CONFLICT(entity) DO UPDATE SET
|
|
|
|
|
|
truth = excluded.truth,
|
|
|
|
|
|
confidence = excluded.confidence,
|
|
|
|
|
|
source_exp_id = excluded.source_exp_id,
|
|
|
|
|
|
updated_at = excluded.updated_at,
|
|
|
|
|
|
metadata_json = excluded.metadata_json""",
|
|
|
|
|
|
(entity, truth, confidence, source_exp_id, now, now, meta_json),
|
|
|
|
|
|
)
|
|
|
|
|
|
self.conn.commit()
|
|
|
|
|
|
|
|
|
|
|
|
row = self.conn.execute(
|
|
|
|
|
|
"SELECT id FROM compiled_truths WHERE entity = ?", (entity,)
|
|
|
|
|
|
).fetchone()
|
|
|
|
|
|
return row[0]
|
|
|
|
|
|
|
|
|
|
|
|
def get_compiled_truth(self, entity: str) -> Optional[dict]:
|
|
|
|
|
|
"""Get the current compiled truth for an entity."""
|
|
|
|
|
|
row = self.conn.execute(
|
|
|
|
|
|
"""SELECT id, entity, truth, confidence, source_exp_id,
|
|
|
|
|
|
created_at, updated_at, metadata_json
|
|
|
|
|
|
FROM compiled_truths WHERE entity = ?""",
|
|
|
|
|
|
(entity,),
|
|
|
|
|
|
).fetchone()
|
|
|
|
|
|
if not row:
|
|
|
|
|
|
return None
|
|
|
|
|
|
return {
|
|
|
|
|
|
"id": row[0],
|
|
|
|
|
|
"entity": row[1],
|
|
|
|
|
|
"truth": row[2],
|
|
|
|
|
|
"confidence": row[3],
|
|
|
|
|
|
"source_exp_id": row[4],
|
|
|
|
|
|
"created_at": row[5],
|
|
|
|
|
|
"updated_at": row[6],
|
|
|
|
|
|
"metadata": json.loads(row[7]) if row[7] else {},
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
def get_all_compiled_truths(
|
|
|
|
|
|
self, min_confidence: float = 0.0, limit: int = 100
|
|
|
|
|
|
) -> list[dict]:
|
|
|
|
|
|
"""Get all compiled truths, optionally filtered by minimum confidence."""
|
|
|
|
|
|
rows = self.conn.execute(
|
|
|
|
|
|
"""SELECT id, entity, truth, confidence, source_exp_id,
|
|
|
|
|
|
created_at, updated_at, metadata_json
|
|
|
|
|
|
FROM compiled_truths
|
|
|
|
|
|
WHERE confidence >= ?
|
|
|
|
|
|
ORDER BY updated_at DESC
|
|
|
|
|
|
LIMIT ?""",
|
|
|
|
|
|
(min_confidence, limit),
|
|
|
|
|
|
).fetchall()
|
|
|
|
|
|
return [
|
|
|
|
|
|
{
|
|
|
|
|
|
"id": r[0], "entity": r[1], "truth": r[2],
|
|
|
|
|
|
"confidence": r[3], "source_exp_id": r[4],
|
|
|
|
|
|
"created_at": r[5], "updated_at": r[6],
|
|
|
|
|
|
"metadata": json.loads(r[7]) if r[7] else {},
|
|
|
|
|
|
}
|
|
|
|
|
|
for r in rows
|
|
|
|
|
|
]
|
|
|
|
|
|
|
|
|
|
|
|
def search_compiled_truths(self, query: str, limit: int = 10) -> list[dict]:
|
|
|
|
|
|
"""Search compiled truths by entity name or truth content (LIKE match)."""
|
|
|
|
|
|
rows = self.conn.execute(
|
|
|
|
|
|
"""SELECT id, entity, truth, confidence, source_exp_id,
|
|
|
|
|
|
created_at, updated_at, metadata_json
|
|
|
|
|
|
FROM compiled_truths
|
|
|
|
|
|
WHERE entity LIKE ? OR truth LIKE ?
|
|
|
|
|
|
ORDER BY confidence DESC, updated_at DESC
|
|
|
|
|
|
LIMIT ?""",
|
|
|
|
|
|
(f"%{query}%", f"%{query}%", limit),
|
|
|
|
|
|
).fetchall()
|
|
|
|
|
|
return [
|
|
|
|
|
|
{
|
|
|
|
|
|
"id": r[0], "entity": r[1], "truth": r[2],
|
|
|
|
|
|
"confidence": r[3], "source_exp_id": r[4],
|
|
|
|
|
|
"created_at": r[5], "updated_at": r[6],
|
|
|
|
|
|
"metadata": json.loads(r[7]) if r[7] else {},
|
|
|
|
|
|
}
|
|
|
|
|
|
for r in rows
|
|
|
|
|
|
]
|