Compare commits


1 Commit

Author:  Alexander Whitestone
SHA1:    7c38007094
Message: feat(memory): add grounded observation synthesis layer
Checks:  all successful; Lint / lint (pull_request) passed in 16s
Date:    2026-04-22 10:59:40 -04:00
9 changed files with 571 additions and 289 deletions

View File

@@ -57,7 +57,7 @@ CONFIGURABLE_TOOLSETS = [
("moa", "🧠 Mixture of Agents", "mixture_of_agents"),
("tts", "🔊 Text-to-Speech", "text_to_speech"),
("skills", "📚 Skills", "list, view, manage"),
("todo", "📋 Task Planning", "todo, ultraplan"),
("todo", "📋 Task Planning", "todo"),
("memory", "💾 Memory", "persistent memory across sessions"),
("session_search", "🔎 Session Search", "search past conversations"),
("clarify", "❓ Clarifying Questions", "clarify"),

View File

@@ -26,6 +26,7 @@ from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
from .store import MemoryStore
from .retrieval import FactRetriever
from .observations import ObservationSynthesizer
logger = logging.getLogger(__name__)
@@ -37,28 +38,29 @@ logger = logging.getLogger(__name__)
FACT_STORE_SCHEMA = {
"name": "fact_store",
"description": (
"Deep structured memory with algebraic reasoning. "
"Deep structured memory with algebraic reasoning and grounded observation synthesis. "
"Use alongside the memory tool — memory for always-on context, "
"fact_store for deep recall and compositional queries.\n\n"
"fact_store for deep recall, compositional queries, and higher-order observations.\n\n"
"ACTIONS (simple → powerful):\n"
"• add — Store a fact the user would expect you to remember.\n"
"• search — Keyword lookup ('editor config', 'deploy process').\n"
"• probe — Entity recall: ALL facts about a person/thing.\n"
"• related — What connects to an entity? Structural adjacency.\n"
"• reason — Compositional: facts connected to MULTIPLE entities simultaneously.\n"
"• observe — Synthesized higher-order observations backed by supporting facts.\n"
"• contradict — Memory hygiene: find facts making conflicting claims.\n"
"• update/remove/list — CRUD operations.\n\n"
"IMPORTANT: Before answering questions about the user, ALWAYS probe or reason first."
"IMPORTANT: Before answering questions about the user, ALWAYS probe/reason/observe first."
),
"parameters": {
"type": "object",
"properties": {
"action": {
"type": "string",
"enum": ["add", "search", "probe", "related", "reason", "contradict", "update", "remove", "list"],
"enum": ["add", "search", "probe", "related", "reason", "observe", "contradict", "update", "remove", "list"],
},
"content": {"type": "string", "description": "Fact content (required for 'add')."},
"query": {"type": "string", "description": "Search query (required for 'search')."},
"query": {"type": "string", "description": "Search query (required for 'search'/'observe')."},
"entity": {"type": "string", "description": "Entity name for 'probe'/'related'."},
"entities": {"type": "array", "items": {"type": "string"}, "description": "Entity names for 'reason'."},
"fact_id": {"type": "integer", "description": "Fact ID for 'update'/'remove'."},
@@ -66,6 +68,12 @@ FACT_STORE_SCHEMA = {
"tags": {"type": "string", "description": "Comma-separated tags."},
"trust_delta": {"type": "number", "description": "Trust adjustment for 'update'."},
"min_trust": {"type": "number", "description": "Minimum trust filter (default: 0.3)."},
"min_confidence": {"type": "number", "description": "Minimum observation confidence (default: 0.6)."},
"observation_type": {
"type": "string",
"enum": ["recurring_preference", "stable_direction", "behavioral_pattern"],
"description": "Optional observation type filter for 'observe'.",
},
"limit": {"type": "integer", "description": "Max results (default: 10)."},
},
"required": ["action"],
@@ -118,7 +126,9 @@ class HolographicMemoryProvider(MemoryProvider):
self._config = config or _load_plugin_config()
self._store = None
self._retriever = None
self._observation_synth = None
self._min_trust = float(self._config.get("min_trust_threshold", 0.3))
self._observation_min_confidence = float(self._config.get("observation_min_confidence", 0.6))
@property
def name(self) -> str:
@@ -177,6 +187,7 @@ class HolographicMemoryProvider(MemoryProvider):
hrr_weight=hrr_weight,
hrr_dim=hrr_dim,
)
self._observation_synth = ObservationSynthesizer(self._store)
self._session_id = session_id
def system_prompt_block(self) -> str:
@@ -193,30 +204,76 @@ class HolographicMemoryProvider(MemoryProvider):
"# Holographic Memory\n"
"Active. Empty fact store — proactively add facts the user would expect you to remember.\n"
"Use fact_store(action='add') to store durable structured facts about people, projects, preferences, decisions.\n"
"Use fact_store(action='observe') to synthesize higher-order observations with evidence.\n"
"Use fact_feedback to rate facts after using them (trains trust scores)."
)
return (
f"# Holographic Memory\n"
f"Active. {total} facts stored with entity resolution and trust scoring.\n"
f"Use fact_store to search, probe entities, reason across entities, or add facts.\n"
f"Use fact_store to search, probe entities, reason across entities, or synthesize observations.\n"
f"Use fact_feedback to rate facts after using them (trains trust scores)."
)
def prefetch(self, query: str, *, session_id: str = "") -> str:
if not self._retriever or not query:
if not query:
return ""
parts = []
raw_results = []
try:
results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
if not results:
return ""
if self._retriever:
raw_results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
except Exception as e:
logger.debug("Holographic prefetch fact search failed: %s", e)
raw_results = []
observations = []
try:
if self._observation_synth:
observations = self._observation_synth.observe(
query,
min_confidence=self._observation_min_confidence,
limit=3,
refresh=True,
)
except Exception as e:
logger.debug("Holographic prefetch observation search failed: %s", e)
observations = []
if not raw_results and observations:
seen_fact_ids = set()
evidence_backfill = []
for observation in observations:
for evidence in observation.get("evidence", []):
fact_id = evidence.get("fact_id")
if fact_id in seen_fact_ids:
continue
seen_fact_ids.add(fact_id)
evidence_backfill.append(evidence)
raw_results = evidence_backfill[:5]
if raw_results:
lines = []
for r in results:
for r in raw_results:
trust = r.get("trust_score", r.get("trust", 0))
lines.append(f"- [{trust:.1f}] {r.get('content', '')}")
return "## Holographic Memory\n" + "\n".join(lines)
except Exception as e:
logger.debug("Holographic prefetch failed: %s", e)
return ""
parts.append("## Holographic Memory\n" + "\n".join(lines))
if observations:
lines = []
for observation in observations:
evidence_ids = ", ".join(
f"#{item['fact_id']}" for item in observation.get("evidence", [])[:3]
) or "none"
lines.append(
f"- [{observation.get('confidence', 0.0):.2f}] "
f"{observation.get('observation_type', 'observation')}: "
f"{observation.get('summary', '')} "
f"(evidence: {evidence_ids})"
)
parts.append("## Holographic Observations\n" + "\n".join(lines))
return "\n\n".join(parts)
def sync_turn(self, user_content: str, assistant_content: str, *, session_id: str = "") -> None:
# Holographic memory stores explicit facts via tools, not auto-sync.
@@ -252,6 +309,7 @@ class HolographicMemoryProvider(MemoryProvider):
def shutdown(self) -> None:
self._store = None
self._retriever = None
self._observation_synth = None
# -- Tool handlers -------------------------------------------------------
@@ -305,6 +363,19 @@ class HolographicMemoryProvider(MemoryProvider):
)
return json.dumps({"results": results, "count": len(results)})
elif action == "observe":
synthesizer = self._observation_synth
if not synthesizer:
return tool_error("Observation synthesizer is not initialized")
observations = synthesizer.observe(
args.get("query", ""),
observation_type=args.get("observation_type"),
min_confidence=float(args.get("min_confidence", self._observation_min_confidence)),
limit=int(args.get("limit", 10)),
refresh=True,
)
return json.dumps({"observations": observations, "count": len(observations)})
elif action == "contradict":
results = retriever.contradict(
category=args.get("category"),

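Taken together, a populated prefetch block should render roughly as below. The trust and confidence values are worked from this diff's own code paths (default_trust of 0.5 from the test fixtures; confidence per the formula in the new observations module), not captured output:

## Holographic Memory
- [0.5] User prefers concise status updates
- [0.5] User wants result-only replies with no fluff

## Holographic Observations
- [0.75] recurring_preference: Recurring preference: communication style trends toward concise, result-first based on 2 supporting facts. (evidence: #1, #2)
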
View File

@@ -0,0 +1,249 @@
"""Higher-order observation synthesis for holographic memory.
Builds grounded observations from accumulated facts and keeps them in a
separate retrieval layer with explicit evidence links back to supporting facts.
"""
from __future__ import annotations
import re
from typing import Any
from .store import MemoryStore
_TOKEN_RE = re.compile(r"[a-z0-9_]+")
_HIGHER_ORDER_CUES = {
"prefer",
"preference",
"preferences",
"style",
"pattern",
"patterns",
"behavior",
"behaviour",
"habit",
"habits",
"workflow",
"direction",
"trajectory",
"strategy",
"tend",
"usually",
}
_OBSERVATION_PATTERNS = [
{
"observation_type": "recurring_preference",
"subject": "communication_style",
"categories": {"user_pref", "general"},
"labels": {
"concise": ["concise", "terse", "brief", "short", "no fluff"],
"result_first": ["result-only", "result only", "outcome only", "quick", "quickly"],
"silent_ops": ["silent", "no status", "no repetitive status", "no questions"],
},
"summary_prefix": "Recurring preference",
},
{
"observation_type": "stable_direction",
"subject": "project_direction",
"categories": {"project", "general", "tool"},
"labels": {
"local_first": ["local-first", "local first", "local-only", "local only", "ollama", "own hardware"],
"gitea_first": ["gitea-first", "gitea first", "forge", "pull request", "pr flow", "issue flow"],
"ansible": ["ansible", "playbook", "role", "deploy via ansible"],
},
"summary_prefix": "Stable direction",
},
{
"observation_type": "behavioral_pattern",
"subject": "operator_workflow",
"categories": {"general", "project", "tool", "user_pref"},
"labels": {
"commit_early": ["commit early", "commits early", "commit after", "wip commit"],
"pr_first": ["open pr", "push a pr", "pull request", "pr immediately", "create pr"],
"dedup_guard": ["no dupes", "no duplicates", "avoid duplicate", "existing pr"],
},
"summary_prefix": "Behavioral pattern",
},
]
_TYPE_QUERY_HINTS = {
"recurring_preference": {"prefer", "preference", "style", "communication", "likes", "wants"},
"stable_direction": {"direction", "trajectory", "strategy", "project", "roadmap", "moving"},
"behavioral_pattern": {"pattern", "behavior", "workflow", "habit", "operator", "agent", "usually"},
}
class ObservationSynthesizer:
"""Synthesizes grounded observations from facts and retrieves them by query."""
def __init__(self, store: MemoryStore):
self.store = store
def synthesize(
self,
*,
persist: bool = True,
min_confidence: float = 0.6,
limit: int = 10,
) -> list[dict[str, Any]]:
facts = self.store.list_facts(min_trust=0.0, limit=1000)
observations: list[dict[str, Any]] = []
for pattern in _OBSERVATION_PATTERNS:
candidate = self._build_candidate(pattern, facts, min_confidence=min_confidence)
if not candidate:
continue
if persist:
candidate["observation_id"] = self.store.upsert_observation(
candidate["observation_type"],
candidate["subject"],
candidate["summary"],
candidate["confidence"],
candidate["evidence_fact_ids"],
metadata=candidate["metadata"],
)
candidate["evidence"] = self._expand_evidence(candidate["evidence_fact_ids"])
candidate["evidence_count"] = len(candidate["evidence"])
candidate.pop("evidence_fact_ids", None)
observations.append(candidate)
observations.sort(
key=lambda item: (item["confidence"], item.get("evidence_count", 0)),
reverse=True,
)
return observations[:limit]
def observe(
self,
query: str = "",
*,
observation_type: str | None = None,
min_confidence: float = 0.6,
limit: int = 10,
refresh: bool = True,
) -> list[dict[str, Any]]:
if refresh:
self.synthesize(persist=True, min_confidence=min_confidence, limit=limit)
observations = self.store.list_observations(
observation_type=observation_type,
min_confidence=min_confidence,
limit=max(limit * 4, 20),
)
if not observations:
return []
if not query:
return observations[:limit]
query_tokens = self._tokenize(query)
is_higher_order = bool(query_tokens & _HIGHER_ORDER_CUES)
ranked: list[dict[str, Any]] = []
for item in observations:
searchable = " ".join(
[
item.get("summary", ""),
item.get("subject", ""),
item.get("observation_type", ""),
" ".join(item.get("metadata", {}).get("labels", [])),
]
)
overlap = self._overlap_score(query_tokens, self._tokenize(searchable))
type_bonus = self._type_bonus(query_tokens, item.get("observation_type", ""))
if overlap <= 0 and type_bonus <= 0 and not is_higher_order:
continue
ranked_item = dict(item)
ranked_item["score"] = round(item.get("confidence", 0.0) + overlap + type_bonus, 3)
ranked.append(ranked_item)
if not ranked and is_higher_order:
ranked = [
{**item, "score": round(float(item.get("confidence", 0.0)), 3)}
for item in observations
]
ranked.sort(
key=lambda item: (item.get("score", 0.0), item.get("confidence", 0.0), item.get("evidence_count", 0)),
reverse=True,
)
return ranked[:limit]
def _build_candidate(
self,
pattern: dict[str, Any],
facts: list[dict[str, Any]],
*,
min_confidence: float,
) -> dict[str, Any] | None:
matched_fact_ids: set[int] = set()
matched_labels: dict[str, set[int]] = {label: set() for label in pattern["labels"]}
for fact in facts:
if fact.get("category") not in pattern["categories"]:
continue
haystack = f"{fact.get('content', '')} {fact.get('tags', '')}".lower()
local_match = False
for label, keywords in pattern["labels"].items():
if any(keyword in haystack for keyword in keywords):
matched_labels[label].add(int(fact["fact_id"]))
local_match = True
if local_match:
matched_fact_ids.add(int(fact["fact_id"]))
if len(matched_fact_ids) < 2:
return None
active_labels = sorted(label for label, ids in matched_labels.items() if ids)
confidence = min(0.95, 0.35 + 0.12 * len(matched_fact_ids) + 0.08 * len(active_labels))
confidence = round(confidence, 3)
if confidence < min_confidence:
return None
label_summary = ", ".join(label.replace("_", "-") for label in active_labels)
subject_text = pattern["subject"].replace("_", " ")
summary = (
f"{pattern['summary_prefix']}: {subject_text} trends toward {label_summary} "
f"based on {len(matched_fact_ids)} supporting facts."
)
return {
"observation_type": pattern["observation_type"],
"subject": pattern["subject"],
"summary": summary,
"confidence": confidence,
"metadata": {
"labels": active_labels,
"evidence_count": len(matched_fact_ids),
},
"evidence_fact_ids": sorted(matched_fact_ids),
}
def _expand_evidence(self, fact_ids: list[int]) -> list[dict[str, Any]]:
facts_by_id = {
fact["fact_id"]: fact
for fact in self.store.list_facts(min_trust=0.0, limit=1000)
}
return [facts_by_id[fact_id] for fact_id in fact_ids if fact_id in facts_by_id]
@staticmethod
def _tokenize(text: str) -> set[str]:
return set(_TOKEN_RE.findall(text.lower()))
@staticmethod
def _overlap_score(query_tokens: set[str], text_tokens: set[str]) -> float:
if not query_tokens or not text_tokens:
return 0.0
overlap = query_tokens & text_tokens
if not overlap:
return 0.0
return round(len(overlap) / max(len(query_tokens), 1), 3)
@staticmethod
def _type_bonus(query_tokens: set[str], observation_type: str) -> float:
hints = _TYPE_QUERY_HINTS.get(observation_type, set())
if not hints:
return 0.0
return 0.25 if query_tokens & hints else 0.0
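
A minimal end-to-end sketch of the synthesizer, assuming the module lives at plugins.memory.holographic.observations (the provider imports it relatively) and a throwaway db path; the arithmetic comment works through the confidence formula above:

from plugins.memory.holographic.store import MemoryStore
from plugins.memory.holographic.observations import ObservationSynthesizer

store = MemoryStore(db_path="/tmp/obs_demo.db", default_trust=0.5)  # path illustrative
store.add_fact("User prefers concise status updates", category="user_pref")
store.add_fact("User wants result-only replies with no fluff", category="user_pref")

synth = ObservationSynthesizer(store)
obs = synth.observe("what communication style does the user prefer?")
# Two matched facts, two active labels (concise, result_first):
#   confidence = min(0.95, 0.35 + 0.12*2 + 0.08*2) = 0.75
assert obs[0]["observation_type"] == "recurring_preference"
assert obs[0]["confidence"] == 0.75
store.close()
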

View File

@@ -3,6 +3,7 @@ SQLite-backed fact store with entity resolution and trust scoring.
Single-user Hermes memory store plugin.
"""
import json
import re
import sqlite3
import threading
@@ -73,6 +74,28 @@ CREATE TABLE IF NOT EXISTS memory_banks (
fact_count INTEGER DEFAULT 0,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS observations (
observation_id INTEGER PRIMARY KEY AUTOINCREMENT,
observation_type TEXT NOT NULL,
subject TEXT NOT NULL,
summary TEXT NOT NULL,
confidence REAL DEFAULT 0.0,
metadata_json TEXT DEFAULT '{}',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
UNIQUE(observation_type, subject)
);
CREATE TABLE IF NOT EXISTS observation_evidence (
observation_id INTEGER REFERENCES observations(observation_id) ON DELETE CASCADE,
fact_id INTEGER REFERENCES facts(fact_id) ON DELETE CASCADE,
evidence_weight REAL DEFAULT 1.0,
PRIMARY KEY (observation_id, fact_id)
);
CREATE INDEX IF NOT EXISTS idx_observations_type ON observations(observation_type);
CREATE INDEX IF NOT EXISTS idx_observations_confidence ON observations(confidence DESC);
"""
# Trust adjustment constants
@@ -128,6 +151,7 @@ class MemoryStore:
def _init_db(self) -> None:
"""Create tables, indexes, and triggers if they do not exist. Enable WAL mode."""
self._conn.execute("PRAGMA journal_mode=WAL")
self._conn.execute("PRAGMA foreign_keys=ON")
self._conn.executescript(_SCHEMA)
# Migrate: add hrr_vector column if missing (safe for existing databases)
columns = {row[1] for row in self._conn.execute("PRAGMA table_info(facts)").fetchall()}
@@ -346,6 +370,115 @@ class MemoryStore:
rows = self._conn.execute(sql, params).fetchall()
return [self._row_to_dict(r) for r in rows]
def upsert_observation(
self,
observation_type: str,
subject: str,
summary: str,
confidence: float,
evidence_fact_ids: list[int],
metadata: dict | None = None,
) -> int:
"""Create or update a synthesized observation and its evidence links."""
with self._lock:
metadata_json = json.dumps(metadata or {}, sort_keys=True)
self._conn.execute(
"""
INSERT INTO observations (
observation_type, subject, summary, confidence, metadata_json
)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT(observation_type, subject) DO UPDATE SET
summary = excluded.summary,
confidence = excluded.confidence,
metadata_json = excluded.metadata_json,
updated_at = CURRENT_TIMESTAMP
""",
(observation_type, subject, summary, confidence, metadata_json),
)
row = self._conn.execute(
"""
SELECT observation_id
FROM observations
WHERE observation_type = ? AND subject = ?
""",
(observation_type, subject),
).fetchone()
observation_id = int(row["observation_id"])
self._conn.execute(
"DELETE FROM observation_evidence WHERE observation_id = ?",
(observation_id,),
)
unique_fact_ids = sorted({int(fid) for fid in evidence_fact_ids})
if unique_fact_ids:
self._conn.executemany(
"""
INSERT OR IGNORE INTO observation_evidence (observation_id, fact_id)
VALUES (?, ?)
""",
[(observation_id, fact_id) for fact_id in unique_fact_ids],
)
self._conn.commit()
return observation_id
def list_observations(
self,
observation_type: str | None = None,
min_confidence: float = 0.0,
limit: int = 50,
) -> list[dict]:
"""List synthesized observations with expanded supporting evidence."""
with self._lock:
params: list = [min_confidence]
observation_clause = ""
if observation_type is not None:
observation_clause = "AND observation_type = ?"
params.append(observation_type)
params.append(limit)
rows = self._conn.execute(
f"""
SELECT observation_id, observation_type, subject, summary, confidence,
metadata_json, created_at, updated_at,
(
SELECT COUNT(*)
FROM observation_evidence oe
WHERE oe.observation_id = observations.observation_id
) AS evidence_count
FROM observations
WHERE confidence >= ?
{observation_clause}
ORDER BY confidence DESC, updated_at DESC
LIMIT ?
""",
params,
).fetchall()
results = []
for row in rows:
item = dict(row)
try:
item["metadata"] = json.loads(item.pop("metadata_json") or "{}")
except json.JSONDecodeError:
item["metadata"] = {}
item["evidence"] = self._get_observation_evidence(int(item["observation_id"]))
results.append(item)
return results
def _get_observation_evidence(self, observation_id: int) -> list[dict]:
rows = self._conn.execute(
"""
SELECT f.fact_id, f.content, f.category, f.tags, f.trust_score,
f.retrieval_count, f.helpful_count, f.created_at, f.updated_at
FROM observation_evidence oe
JOIN facts f ON f.fact_id = oe.fact_id
WHERE oe.observation_id = ?
ORDER BY f.trust_score DESC, f.updated_at DESC
""",
(observation_id,),
).fetchall()
return [self._row_to_dict(row) for row in rows]
def record_feedback(self, fact_id: int, helpful: bool) -> dict:
"""Record user feedback and adjust trust asymmetrically.

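Because of the UNIQUE(observation_type, subject) constraint and the delete-then-reinsert of evidence rows, re-synthesis replaces an observation in place rather than accumulating duplicates. A small sketch (values illustrative):

from plugins.memory.holographic.store import MemoryStore

store = MemoryStore(db_path="/tmp/obs_demo.db", default_trust=0.5)
f1 = store.add_fact("User prefers concise status updates", category="user_pref")
f2 = store.add_fact("User wants result-only replies with no fluff", category="user_pref")

oid_a = store.upsert_observation(
    "recurring_preference", "communication_style",
    "communication style trends toward concise", 0.63, [f1],
)
oid_b = store.upsert_observation(
    "recurring_preference", "communication_style",
    "communication style trends toward concise, result-first", 0.75, [f1, f2],
)
assert oid_a == oid_b  # same (type, subject) row, updated in place
rows = store.list_observations(min_confidence=0.6, limit=10)
assert rows[0]["evidence_count"] == 2
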
View File

@@ -0,0 +1,96 @@
import json
import pytest
from plugins.memory.holographic import HolographicMemoryProvider
from plugins.memory.holographic.store import MemoryStore
@pytest.fixture()
def store(tmp_path):
db_path = tmp_path / "memory.db"
s = MemoryStore(db_path=str(db_path), default_trust=0.5)
yield s
s.close()
@pytest.fixture()
def provider(tmp_path):
p = HolographicMemoryProvider(
config={
"db_path": str(tmp_path / "memory.db"),
"default_trust": 0.5,
}
)
p.initialize(session_id="test-session")
yield p
if p._store:
p._store.close()
class TestObservationSynthesis:
def test_observe_action_persists_observation_with_evidence_links(self, provider):
fact_ids = [
provider._store.add_fact('User prefers concise status updates', category='user_pref'),
provider._store.add_fact('User wants result-only replies with no fluff', category='user_pref'),
]
result = json.loads(
provider.handle_tool_call(
'fact_store',
{
'action': 'observe',
'query': 'What communication style does the user prefer?',
'limit': 5,
},
)
)
assert result['count'] == 1
observation = result['observations'][0]
assert observation['observation_type'] == 'recurring_preference'
assert observation['confidence'] >= 0.6
assert sorted(item['fact_id'] for item in observation['evidence']) == sorted(fact_ids)
stored = provider._store.list_observations(limit=10)
assert len(stored) == 1
assert stored[0]['observation_type'] == 'recurring_preference'
assert stored[0]['evidence_count'] == 2
assert len(provider._store.list_facts(limit=10)) == 2
def test_observe_action_synthesizes_three_observation_types(self, provider):
provider._store.add_fact('User prefers concise updates', category='user_pref')
provider._store.add_fact('User wants result-only communication', category='user_pref')
provider._store.add_fact('Project is moving to a local-first deployment model', category='project')
provider._store.add_fact('Project direction stays Gitea-first for issue and PR flow', category='project')
provider._store.add_fact('Operator always commits early before moving on', category='general')
provider._store.add_fact('Operator pushes a PR immediately after each meaningful fix', category='general')
result = json.loads(provider.handle_tool_call('fact_store', {'action': 'observe', 'limit': 10}))
types = {item['observation_type'] for item in result['observations']}
assert {'recurring_preference', 'stable_direction', 'behavioral_pattern'} <= types
def test_single_fact_does_not_create_overconfident_observation(self, provider):
provider._store.add_fact('User prefers concise updates', category='user_pref')
result = json.loads(
provider.handle_tool_call(
'fact_store',
{'action': 'observe', 'query': 'What does the user prefer?', 'limit': 5},
)
)
assert result['count'] == 0
assert provider._store.list_observations(limit=10) == []
def test_prefetch_surfaces_observations_as_separate_layer(self, provider):
provider._store.add_fact('User prefers concise updates', category='user_pref')
provider._store.add_fact('User wants result-only communication', category='user_pref')
prefetch = provider.prefetch('What communication style does the user prefer?')
assert '## Holographic Observations' in prefetch
assert '## Holographic Memory' in prefetch
assert 'recurring_preference' in prefetch
assert 'evidence' in prefetch.lower()

View File

@@ -294,32 +294,22 @@ class TestBuiltinDiscovery:
"tools.browser_tool",
"tools.clarify_tool",
"tools.code_execution_tool",
"tools.crisis_tool",
"tools.cronjob_tools",
"tools.delegate_tool",
"tools.file_tools",
"tools.homeassistant_tool",
"tools.image_generation_tool",
"tools.local_inference_tool",
"tools.memory_tool",
"tools.mixture_of_agents_tool",
"tools.process_registry",
"tools.rl_training_tool",
"tools.scavenger_fixer",
"tools.send_message_tool",
"tools.session_search_tool",
"tools.skill_manager_tool",
"tools.skills_tool",
"tools.sovereign_router",
"tools.sovereign_scavenger",
"tools.sovereign_teleport",
"tools.static_analyzer",
"tools.symbolic_verify",
"tools.terminal_tool",
"tools.todo_tool",
"tools.tts_tool",
"tools.ultraplan",
"tools.verify_tool",
"tools.vision_tools",
"tools.web_tools",
}

View File

@@ -1,81 +0,0 @@
import json
from pathlib import Path
from toolsets import resolve_toolset
from tools.registry import registry
def test_create_action_saves_markdown_and_json(tmp_path):
from tools.ultraplan import ultraplan_tool
result = json.loads(
ultraplan_tool(
action="create",
mission="Daily autonomous planning",
streams=[
{
"id": "A",
"name": "Backlog burn",
"phases": [
{"id": "A1", "name": "Triage", "artifact": "issue list"},
{"id": "A2", "name": "Ship", "dependencies": ["A1"], "artifact": "PR"},
],
}
],
base_dir=str(tmp_path),
)
)
assert result["success"] is True
assert Path(result["file_path"]).exists()
assert Path(result["json_path"]).exists()
assert "Work Streams" in Path(result["file_path"]).read_text(encoding="utf-8")
def test_load_action_returns_saved_plan(tmp_path):
from tools.ultraplan import ultraplan_tool
created = json.loads(
ultraplan_tool(
action="create",
date="20260422",
mission="Mission from saved plan",
base_dir=str(tmp_path),
)
)
loaded = json.loads(
ultraplan_tool(
action="load",
date="20260422",
base_dir=str(tmp_path),
)
)
assert created["success"] is True
assert loaded["success"] is True
assert loaded["plan"]["mission"] == "Mission from saved plan"
assert loaded["file_path"].endswith("ultraplan_20260422.md")
def test_cron_spec_returns_daily_schedule_and_prompt():
from tools.ultraplan import ultraplan_tool
result = json.loads(ultraplan_tool(action="cron_spec"))
assert result["success"] is True
assert result["schedule"] == "0 6 * * *"
assert "Ultraplan" in result["prompt"]
assert "ultraplan_YYYYMMDD.md" in result["prompt"]
def test_registry_registers_ultraplan_tool():
import tools.ultraplan # noqa: F401
entry = registry.get_entry("ultraplan")
assert entry is not None
assert entry.toolset == "todo"
def test_default_toolsets_include_ultraplan():
assert "ultraplan" in resolve_toolset("todo")
assert "ultraplan" in resolve_toolset("hermes-cli")

View File

@@ -290,9 +290,6 @@ def load_ultraplan(date: str, base_dir: Path = None) -> Optional[Ultraplan]:
return None
DEFAULT_ULTRAPLAN_SCHEDULE = "0 6 * * *"
def generate_daily_cron_prompt() -> str:
"""Generate the prompt for the daily ultraplan cron job."""
return """Generate today's Ultraplan.
@@ -301,9 +298,9 @@ Steps:
1. Check open Gitea issues assigned to you
2. Check open PRs needing review
3. Check fleet health status
4. Decompose work into parallel streams with concrete phases and artifacts
5. Use the ultraplan tool to save ~/.timmy/cron/ultraplan_YYYYMMDD.md and the matching JSON sidecar
6. Optionally file a Gitea issue with the plan summary
4. Decompose work into parallel streams
5. Generate ultraplan_YYYYMMDD.md
6. File Gitea issue with the plan
Output format:
- Mission statement
@@ -311,176 +308,3 @@ Output format:
- Dependency map
- Success metrics
"""
def generate_daily_cron_job_spec(schedule: str = DEFAULT_ULTRAPLAN_SCHEDULE) -> Dict[str, str]:
"""Return a reusable cron job spec for daily Ultraplan generation."""
return {
"name": "Daily Ultraplan",
"schedule": schedule,
"prompt": generate_daily_cron_prompt(),
"path_pattern": "~/.timmy/cron/ultraplan_YYYYMMDD.md",
}
def _resolve_base_dir(base_dir: Optional[str | Path]) -> Path:
"""Normalize the requested Ultraplan base directory."""
if base_dir is None:
return Path.home() / ".timmy" / "cron"
return Path(base_dir).expanduser()
def ultraplan_tool(
action: str,
date: Optional[str] = None,
mission: str = "",
streams: Optional[List[Dict[str, Any]]] = None,
metrics: Optional[Dict[str, Any]] = None,
notes: str = "",
base_dir: Optional[str] = None,
) -> str:
"""Create/load Ultraplan artifacts and expose a daily cron spec."""
from tools.registry import tool_error, tool_result
action = (action or "").strip().lower()
resolved_base_dir = _resolve_base_dir(base_dir)
try:
if action == "create":
plan = create_ultraplan(date=date, mission=mission, streams=streams or [])
if metrics:
plan.metrics = metrics
if notes:
plan.notes = notes
md_path = save_ultraplan(plan, base_dir=resolved_base_dir)
json_path = resolved_base_dir / f"ultraplan_{plan.date}.json"
return tool_result(
success=True,
action="create",
date=plan.date,
file_path=str(md_path),
json_path=str(json_path),
plan=plan.to_dict(),
)
if action == "load":
plan_date = date or datetime.now().strftime("%Y%m%d")
plan = load_ultraplan(plan_date, base_dir=resolved_base_dir)
if plan is None:
return tool_error(
f"No Ultraplan found for {plan_date}",
success=False,
action="load",
date=plan_date,
)
return tool_result(
success=True,
action="load",
date=plan.date,
file_path=str(resolved_base_dir / f"ultraplan_{plan.date}.md"),
json_path=str(resolved_base_dir / f"ultraplan_{plan.date}.json"),
plan=plan.to_dict(),
markdown=plan.to_markdown(),
)
if action == "cron_spec":
spec = generate_daily_cron_job_spec()
return tool_result(success=True, action="cron_spec", **spec)
return tool_error(
f"Unknown Ultraplan action: {action}",
success=False,
action=action,
)
except Exception as e:
return tool_error(f"Ultraplan {action or 'tool'} failed: {e}", success=False, action=action)
ULTRAPLAN_SCHEMA = {
"name": "ultraplan",
"description": (
"Create or load daily Ultraplan planning artifacts under ~/.timmy/cron/ and "
"return a reusable cron spec for autonomous planning. Use this when you want "
"a concrete markdown/json plan file with streams, phases, dependencies, and metrics."
),
"parameters": {
"type": "object",
"properties": {
"action": {
"type": "string",
"enum": ["create", "load", "cron_spec"],
"description": "Operation to perform",
},
"date": {
"type": "string",
"description": "Plan date as YYYYMMDD. Defaults to today for create/load.",
},
"mission": {
"type": "string",
"description": "High-level mission statement for today's plan.",
},
"streams": {
"type": "array",
"description": "Optional work streams with phases/artifacts/dependencies for create.",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"name": {"type": "string"},
"phases": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"name": {"type": "string"},
"description": {"type": "string"},
"artifact": {"type": "string"},
"dependencies": {
"type": "array",
"items": {"type": "string"},
},
},
"required": ["name"],
},
},
},
"required": ["name"],
},
},
"metrics": {
"type": "object",
"description": "Optional success metrics to store on the plan.",
"additionalProperties": True,
},
"notes": {
"type": "string",
"description": "Optional free-form notes appended to the saved plan.",
},
"base_dir": {
"type": "string",
"description": "Optional override for the Ultraplan storage directory.",
},
},
"required": ["action"],
},
}
from tools.registry import registry
registry.register(
name="ultraplan",
toolset="todo",
schema=ULTRAPLAN_SCHEMA,
handler=lambda args, **_kw: ultraplan_tool(
action=args.get("action", ""),
date=args.get("date"),
mission=args.get("mission", ""),
streams=args.get("streams"),
metrics=args.get("metrics"),
notes=args.get("notes", ""),
base_dir=args.get("base_dir"),
),
emoji="🗺️",
)

View File

@@ -47,7 +47,7 @@ _HERMES_CORE_TOOLS = [
# Text-to-speech
"text_to_speech",
# Planning & memory
"todo", "ultraplan", "memory",
"todo", "memory",
# Session history search
"session_search",
# Clarifying questions
@@ -157,8 +157,8 @@ TOOLSETS = {
},
"todo": {
"description": "Task planning and tracking for multi-step work, including daily Ultraplan artifacts",
"tools": ["todo", "ultraplan"],
"description": "Task planning and tracking for multi-step work",
"tools": ["todo"],
"includes": []
},