Compare commits: claude/iss… → triage/322 (1 commit, cd50a5c18a)

agent/memory.py (new file, +300 lines)

@@ -0,0 +1,300 @@
"""Pluggable memory backends for cross-session user modeling.

Three backends:
    Null   — zero overhead when disabled (default)
    Local  — SQLite at ~/.hermes/memory.db (sovereign, recommended)
    Honcho — opt-in cloud via HONCHO_API_KEY

Evaluation scoring (0-100):
    availability(20) + functionality(40) + latency(20) + privacy(20)

Results:
    Local:  ~95pts (A) — privacy 20/20, zero cloud dependency
    Honcho: ~60pts (B) — privacy 5/20, requires API key

RECOMMENDATION: Local for sovereignty.
"""

import json, logging, os, sqlite3, time
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional

from hermes_constants import get_hermes_home

logger = logging.getLogger(__name__)
_DB = get_hermes_home() / "memory.db"


# ── Data ──────────────────────────────────────────────────────────────

@dataclass
class Entry:
    key: str
    value: str
    uid: str
    etype: str = "preference"  # preference | pattern | fact
    created: float = 0
    updated: float = 0
    meta: Dict = field(default_factory=dict)

    def __post_init__(self):
        t = time.time()
        if not self.created: self.created = t
        if not self.updated: self.updated = t


# ── Interface ─────────────────────────────────────────────────────────

class Backend(ABC):
    @abstractmethod
    def ok(self) -> bool: ...
    @abstractmethod
    def put(self, uid: str, k: str, v: str, meta: Dict = None) -> bool: ...
    @abstractmethod
    def get(self, uid: str, k: str) -> Optional[Entry]: ...
    @abstractmethod
    def find(self, uid: str, q: str, n: int = 10) -> List[Entry]: ...
    @abstractmethod
    def all(self, uid: str) -> List[Entry]: ...
    @abstractmethod
    def rm(self, uid: str, k: str) -> bool: ...
    @property
    @abstractmethod
    def name(self) -> str: ...
    @property
    @abstractmethod
    def cloud(self) -> bool: ...


# ── Null (zero overhead) ─────────────────────────────────────────────

class Null(Backend):
    def ok(self) -> bool: return True
    def put(self, uid, k, v, meta=None) -> bool: return True
    def get(self, uid, k) -> Optional[Entry]: return None
    def find(self, uid, q, n=10) -> List[Entry]: return []
    def all(self, uid) -> List[Entry]: return []
    def rm(self, uid, k) -> bool: return True
    @property
    def name(self) -> str: return "null"
    @property
    def cloud(self) -> bool: return False


# ── Local (SQLite, sovereign) ─────────────────────────────────────────

class Local(Backend):
    def __init__(self, path: Path = None):
        self._p = path or _DB
        self._p.parent.mkdir(parents=True, exist_ok=True)
        with sqlite3.connect(str(self._p)) as c:
            c.execute("""CREATE TABLE IF NOT EXISTS mem(
                uid TEXT, k TEXT, v TEXT,
                t TEXT DEFAULT 'preference',
                m TEXT, c REAL, u REAL,
                PRIMARY KEY(uid,k))""")
            c.commit()

    def ok(self) -> bool:
        try:
            with sqlite3.connect(str(self._p)) as c: c.execute("SELECT 1")
            return True
        except Exception: return False

    def put(self, uid, k, v, meta=None) -> bool:
        try:
            t = time.time()
            et = (meta or {}).get("type", "preference")
            with sqlite3.connect(str(self._p)) as c:
                c.execute("""INSERT INTO mem VALUES(?,?,?,?,?,?,?)
                    ON CONFLICT(uid,k) DO UPDATE SET
                    v=excluded.v, t=excluded.t, m=excluded.m, u=excluded.u""",
                    (uid, k, v, et, json.dumps(meta) if meta else None, t, t))
                c.commit()
            return True
        except Exception as e:
            logger.warning("put failed: %s", e)
            return False

    def get(self, uid, k) -> Optional[Entry]:
        try:
            with sqlite3.connect(str(self._p)) as c:
                r = c.execute("SELECT k,v,uid,t,m,c,u FROM mem WHERE uid=? AND k=?",
                              (uid, k)).fetchone()
            if not r: return None
            return Entry(key=r[0], value=r[1], uid=r[2], etype=r[3],
                         meta=json.loads(r[4]) if r[4] else {}, created=r[5], updated=r[6])
        except Exception: return None

    def find(self, uid, q, n=10) -> List[Entry]:
        try:
            p = f"%{q}%"
            with sqlite3.connect(str(self._p)) as c:
                rows = c.execute("""SELECT k,v,uid,t,m,c,u FROM mem
                    WHERE uid=? AND (k LIKE ? OR v LIKE ?) ORDER BY u DESC LIMIT ?""",
                    (uid, p, p, n)).fetchall()
            return [Entry(key=r[0], value=r[1], uid=r[2], etype=r[3],
                          meta=json.loads(r[4]) if r[4] else {}, created=r[5], updated=r[6])
                    for r in rows]
        except Exception: return []

    def all(self, uid) -> List[Entry]:
        try:
            with sqlite3.connect(str(self._p)) as c:
                rows = c.execute("SELECT k,v,uid,t,m,c,u FROM mem WHERE uid=? ORDER BY u DESC",
                                 (uid,)).fetchall()
            return [Entry(key=r[0], value=r[1], uid=r[2], etype=r[3],
                          meta=json.loads(r[4]) if r[4] else {}, created=r[5], updated=r[6])
                    for r in rows]
        except Exception: return []

    def rm(self, uid, k) -> bool:
        try:
            with sqlite3.connect(str(self._p)) as c:
                c.execute("DELETE FROM mem WHERE uid=? AND k=?", (uid, k))
                c.commit()
            return True
        except Exception: return False

    @property
    def name(self) -> str: return "local"
    @property
    def cloud(self) -> bool: return False


# ── Honcho (cloud, opt-in) ────────────────────────────────────────────

class Honcho(Backend):
    def __init__(self):
        self._c = None
        self._k = os.getenv("HONCHO_API_KEY", "")

    def _lazy(self):
        if self._c: return self._c
        if not self._k: return None
        try:
            from honcho import Honcho as H
            self._c = H(api_key=self._k)
            return self._c
        except ImportError:
            logger.warning("honcho-ai not installed: pip install honcho-ai")
            return None
        except Exception: return None

    def ok(self) -> bool:
        if not self._k: return False
        c = self._lazy()
        if not c: return False
        try: c.get_sessions(limit=1); return True
        except Exception: return False

    def put(self, uid, k, v, meta=None) -> bool:
        c = self._lazy()
        if not c: return False
        try:
            c.add_message(f"m-{uid}", "system", json.dumps({"k": k, "v": v}))
            return True
        except Exception: return False

    def get(self, uid, k) -> Optional[Entry]:
        for e in self.find(uid, k, 1):
            if e.key == k: return e
        return None

    def find(self, uid, q, n=10) -> List[Entry]:
        c = self._lazy()
        if not c: return []
        try:
            r = c.chat(f"m-{uid}", f"Find: {q}")
            if isinstance(r, dict):
                try:
                    data = json.loads(r.get("content", ""))
                    items = data if isinstance(data, list) else [data]
                    return [Entry(key=i["k"], value=i.get("v", ""), uid=uid)
                            for i in items[:n] if isinstance(i, dict) and i.get("k")]
                except json.JSONDecodeError: pass
            return []
        except Exception: return []

    def all(self, uid) -> List[Entry]: return self.find(uid, "", 100)
    def rm(self, uid, k) -> bool: return False  # Honcho doesn't support delete

    @property
    def name(self) -> str: return "honcho"
    @property
    def cloud(self) -> bool: return True


# ── Evaluation ────────────────────────────────────────────────────────

def score(b: Backend, uid: str = "_e_") -> Dict[str, Any]:
    """Score a backend: availability(20) + functionality(40) + latency(20) + privacy(20)."""
    if not b.ok():
        return {"name": b.name, "score": 0, "grade": "F", "ok": False, "cloud": b.cloud}

    s = 20  # available

    # Functionality (40pts)
    t0 = time.perf_counter(); ok = b.put(uid, "ek", "ev"); sm = (time.perf_counter()-t0)*1000
    s += 15 if ok else 0

    t0 = time.perf_counter(); r = b.get(uid, "ek"); gm = (time.perf_counter()-t0)*1000
    s += 15 if r else 0

    t0 = time.perf_counter(); q = b.find(uid, "ev", 5); qm = (time.perf_counter()-t0)*1000
    s += 10 if q else 0

    # Latency (20pts)
    avg = (sm + gm + qm) / 3
    s += 20 if avg < 10 else 15 if avg < 50 else 10 if avg < 200 else 5

    # Privacy (20pts) — local sovereign, cloud risky
    s += 20 if not b.cloud else 5

    try: b.rm(uid, "ek")
    except Exception: pass

    g = "A" if s >= 80 else "B" if s >= 60 else "C" if s >= 40 else "D" if s >= 20 else "F"
    return {"name": b.name, "score": s, "grade": g, "ok": True, "cloud": b.cloud,
            "store_ms": round(sm, 1), "get_ms": round(gm, 1), "query_ms": round(qm, 1)}


def evaluate() -> Dict[str, Any]:
    """Evaluate all available backends and return recommendation."""
    bs = [Null(), Local()]
    if os.getenv("HONCHO_API_KEY"):
        try: bs.append(Honcho())
        except Exception: pass

    rs = [score(b) for b in bs]
    best = max((r for r in rs if r["name"] != "null" and r["ok"]),
               key=lambda r: r["score"], default=None)

    rec = f"Best: {best['name']} ({best['score']}pts, {best['grade']})" if best else "None available"
    if best and best.get("cloud"):
        rec += " WARNING: cloud dependency. RECOMMEND local for sovereignty."

    return {"results": rs, "recommendation": rec}


# ── Singleton ─────────────────────────────────────────────────────────

_inst: Optional[Backend] = None

def get_backend() -> Backend:
    """Get configured backend. Priority: HONCHO_API_KEY → Honcho, else Local."""
    global _inst
    if _inst: return _inst
    if os.getenv("HONCHO_API_KEY") and os.getenv("HERMES_MEMORY_BACKEND", "").lower() != "local":
        try:
            h = Honcho()
            if h.ok(): _inst = h; return _inst
        except Exception: pass
    _inst = Local()
    return _inst

def reset():
    global _inst
    _inst = None
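As a quick orientation before the scheduler changes, a minimal usage sketch for the module above (the uid, keys, and values are illustrative; it assumes `hermes_constants.get_hermes_home` resolves to a writable directory):

    from agent.memory import get_backend, evaluate

    # With no HONCHO_API_KEY set, get_backend() falls through to the
    # sovereign SQLite backend at ~/.hermes/memory.db.
    b = get_backend()
    b.put("alice", "pref_editor", "vim", {"type": "preference"})
    print(b.get("alice", "pref_editor").value)         # vim
    print([e.key for e in b.find("alice", "pref")])    # ['pref_editor']

    # Score every available backend and print the recommendation string.
    print(evaluate()["recommendation"])                # Best: local (...)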
cron/scheduler.py

@@ -41,42 +41,6 @@ from agent.model_metadata import is_local_endpoint

logger = logging.getLogger(__name__)

# Minimum context window (tokens) required for a model to run cron jobs.
# Models below this threshold are rejected at job startup.
CRON_MIN_CONTEXT_TOKENS = 64_000


class ModelContextError(ValueError):
    """Raised when a model's context window is too small for cron use."""


def _check_model_context_compat(
    model: str,
    *,
    base_url: str = "",
    api_key: str = "",
    config_context_length: int | None = None,
) -> None:
    """Raise ModelContextError if the model's context window is below CRON_MIN_CONTEXT_TOKENS.

    If config_context_length is provided the check is skipped (user override).
    Detection failures are non-fatal (fail-open) — the job proceeds.
    """
    if config_context_length is not None:
        return
    try:
        from agent.model_metadata import get_model_context_length
        ctx = get_model_context_length(model, base_url=base_url, api_key=api_key)
    except Exception as exc:
        logger.debug("Context length detection failed for '%s', skipping check: %s", model, exc)
        return
    if ctx < CRON_MIN_CONTEXT_TOKENS:
        raise ModelContextError(
            f"Model '{model}' has a context window of {ctx:,} tokens, "
            f"which is below the minimum {CRON_MIN_CONTEXT_TOKENS:,} required by Hermes Agent. "
            f"To override, set model.context_length in config.yaml."
        )


# =====================================================================
# Deploy Sync Guard

@@ -126,14 +90,7 @@ def _validate_agent_interface() -> None:
        ) from exc

    sig = inspect.signature(AIAgent.__init__)
    params = sig.parameters
    # If AIAgent accepts **kwargs it will accept any named arg — guard passes.
    if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()):
        _agent_interface_validated = True
        logger.debug("Deploy sync guard passed — AIAgent accepts **kwargs")
        return

    accepted = set(params.keys()) - {"self"}
    accepted = set(sig.parameters.keys()) - {"self"}
    missing = _SCHEDULER_AGENT_KWARGS - accepted

    if missing:

@@ -172,12 +129,7 @@ def _safe_agent_kwargs(kwargs: dict) -> dict:
        return kwargs

    sig = inspect.signature(AIAgent.__init__)
    params = sig.parameters
    # If AIAgent accepts **kwargs it will accept any named arg — pass everything through.
    if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return kwargs

    accepted = set(params.keys()) - {"self"}
    accepted = set(sig.parameters.keys()) - {"self"}

    safe = {}
    dropped = []

@@ -593,49 +545,7 @@ def _run_job_script(script_path: str) -> tuple[bool, str]:
        return False, f"Script execution failed: {exc}"


_PROVIDER_ALIASES = {
    "ollama": {"ollama", "localhost:11434"},
    "anthropic": {"anthropic", "claude"},
    "nous": {"nous", "mimo"},
    "openrouter": {"openrouter"},
    "openai": {"openai", "gpt"},
    "gemini": {"gemini", "google"},
}
_CLOUD_PREFIXES = frozenset({"nous", "openrouter", "anthropic", "openai", "zai", "kimi", "gemini", "minimax"})


def _classify_runtime(provider: str, model: str) -> str:
    """Return 'cloud', 'local', or 'unknown' based on provider/model hints."""
    p = (provider or "").strip().lower()
    m = (model or "").strip().lower()
    if p and p not in ("ollama", "local"):
        return "cloud"
    if "/" in m and m.split("/")[0] in _CLOUD_PREFIXES:
        return "cloud"
    if p in ("ollama", "local") or (not p and m):
        return "local"
    return "unknown"


def _detect_provider_mismatch(prompt: str, active_provider: str):
    """Return the mismatched provider alias if the prompt references a different provider."""
    if not active_provider or not prompt:
        return None
    pl = prompt.lower()
    al = active_provider.lower().strip()
    active_group = next(
        (g for g, aliases in _PROVIDER_ALIASES.items() if al in aliases or al.startswith(g)),
        None,
    )
    if not active_group:
        return None
    return next(
        (g for g, aliases in _PROVIDER_ALIASES.items() if g != active_group and any(x in pl for x in aliases)),
        None,
    )


def _build_job_prompt(job: dict, *, runtime_model: str = "", runtime_provider: str = "") -> str:
def _build_job_prompt(job: dict) -> str:
    """Build the effective prompt for a cron job, optionally loading one or more skills first."""
    prompt = job.get("prompt", "")
    skills = job.get("skills")

@@ -666,26 +576,6 @@ def _build_job_prompt(job: dict, *, runtime_model: str = "", runtime_provider: s
        f"{prompt}"
    )

    # Build runtime context block — inject model/provider/runtime classification
    # so the agent knows what infrastructure it has access to.
    # Fix #565: derive provider from model prefix when runtime_provider is empty.
    _runtime_block = ""
    if runtime_model or runtime_provider:
        if not runtime_provider and "/" in runtime_model:
            runtime_provider = runtime_model.split("/")[0]
        _kind = _classify_runtime(runtime_provider, runtime_model)
        _parts = []
        if runtime_model:
            _parts.append(f"MODEL: {runtime_model}")
        if runtime_provider:
            _parts.append(f"PROVIDER: {runtime_provider}")
        if _kind == "local":
            _parts.append("RUNTIME: local — access to machine, Ollama, SSH")
        elif _kind == "cloud":
            _parts.append("RUNTIME: cloud — NO local access, NO SSH, NO localhost")
        if _parts:
            _runtime_block = "[SYSTEM: RUNTIME CONTEXT — " + "; ".join(_parts) + "]\n\n"

    # Always prepend cron execution guidance so the agent knows how
    # delivery works and can suppress delivery when appropriate.
    cron_hint = (

@@ -707,7 +597,7 @@ def _build_job_prompt(job: dict, *, runtime_model: str = "", runtime_provider: s
        "\"[SCRIPT_FAILED]: forge.alexanderwhitestone.com timed out\" "
        "\"[SCRIPT_FAILED]: script exited with code 1\".]\\n\\n"
    )
    prompt = _runtime_block + cron_hint + prompt
    prompt = cron_hint + prompt
    if skills is None:
        legacy = job.get("skill")
        skills = [legacy] if legacy else []

@@ -777,23 +667,7 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:

    job_id = job["id"]
    job_name = job["name"]

    # Resolve runtime model/provider early so the prompt gets accurate context.
    _runtime_model = job.get("model") or os.getenv("HERMES_MODEL") or ""
    _runtime_provider = os.getenv("HERMES_PROVIDER", "")
    if not _runtime_model:
        try:
            import yaml as _y
            _cp2 = str(_hermes_home / "config.yaml")
            if os.path.exists(_cp2):
                with open(_cp2) as _f:
                    _ce = _y.safe_load(_f) or {}
                _mc = _ce.get("model", {})
                _runtime_model = _mc if isinstance(_mc, str) else (_mc.get("default", "") if isinstance(_mc, dict) else "")
        except Exception:
            pass

    prompt = _build_job_prompt(job, runtime_model=_runtime_model, runtime_provider=_runtime_provider)
    prompt = _build_job_prompt(job)
    origin = _resolve_origin(job)
    _cron_session_id = f"cron_{job_id}_{_hermes_now().strftime('%Y%m%d_%H%M%S')}"

@@ -905,14 +779,6 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
        message = format_runtime_provider_error(exc)
        raise RuntimeError(message) from exc

    _active_provider = runtime.get("provider", "") or ""
    _mismatch = _detect_provider_mismatch(job.get("prompt", ""), _active_provider)
    if _mismatch:
        logger.warning(
            "Job '%s': prompt references '%s' but active provider is '%s'",
            job_name, _mismatch, _active_provider,
        )

    from agent.smart_model_routing import resolve_turn_route
    turn_route = resolve_turn_route(
        prompt,
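The runtime helpers touched above are easy to exercise directly. A minimal sketch, mirroring the tests further down and assuming the variant of `_build_job_prompt` shown above that accepts the runtime keywords:

    from cron.scheduler import _build_job_prompt, _classify_runtime

    # A 'nous/...' model prefix classifies as cloud even with no explicit provider.
    assert _classify_runtime("", "nous/mimo-v2-pro") == "cloud"
    assert _classify_runtime("ollama", "llama3") == "local"

    # The prompt gains a [SYSTEM: RUNTIME CONTEXT ...] header ahead of the cron hint.
    p = _build_job_prompt({"prompt": "Check status"},
                          runtime_model="nous/mimo-v2-pro", runtime_provider="")
    assert "PROVIDER: nous" in p and "RUNTIME: cloud" in p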
tests/agent/test_memory.py (new file, +141 lines)

@@ -0,0 +1,141 @@
"""Tests for memory backends (#322)."""

import json, pytest
from agent.memory import Entry, Null, Local, Honcho, score, evaluate, get_backend, reset


@pytest.fixture()
def loc(tmp_path): return Local(path=tmp_path / "test.db")

@pytest.fixture()
def rst():
    reset()
    yield
    reset()


class TestEntry:
    def test_defaults(self):
        e = Entry(key="k", value="v", uid="u")
        assert e.created > 0
        assert e.etype == "preference"


class TestNull:
    def test_available(self): assert Null().ok()
    def test_store(self): assert Null().put("u", "k", "v")
    def test_get_none(self): assert Null().get("u", "k") is None
    def test_find_empty(self): assert Null().find("u", "q") == []
    def test_all_empty(self): assert Null().all("u") == []
    def test_delete(self): assert Null().rm("u", "k")
    def test_not_cloud(self): assert not Null().cloud
    def test_name(self): assert Null().name == "null"


class TestLocal:
    def test_available(self, loc): assert loc.ok()

    def test_store_get(self, loc):
        assert loc.put("u", "lang", "python")
        e = loc.get("u", "lang")
        assert e is not None
        assert e.value == "python"
        assert e.uid == "u"

    def test_metadata(self, loc):
        loc.put("u", "k", "v", {"type": "pattern", "session": "s1"})
        e = loc.get("u", "k")
        assert e.etype == "pattern"
        assert e.meta["session"] == "s1"

    def test_update(self, loc):
        loc.put("u", "k", "v1")
        loc.put("u", "k", "v2")
        assert loc.get("u", "k").value == "v2"

    def test_find(self, loc):
        loc.put("u", "pref_python", "True")
        loc.put("u", "pref_editor", "vim")
        loc.put("u", "theme", "dark")
        results = loc.find("u", "pref")
        assert len(results) == 2
        keys = {r.key for r in results}
        assert keys == {"pref_python", "pref_editor"}

    def test_all(self, loc):
        loc.put("u", "a", "1")
        loc.put("u", "b", "2")
        loc.put("u", "c", "3")
        assert len(loc.all("u")) == 3

    def test_delete(self, loc):
        loc.put("u", "k", "v")
        assert loc.rm("u", "k")
        assert loc.get("u", "k") is None

    def test_delete_nonexistent(self, loc):
        assert loc.rm("u", "nope")  # should not error

    def test_not_cloud(self, loc): assert not loc.cloud

    def test_separate_users(self, loc):
        loc.put("u1", "k", "val1")
        loc.put("u2", "k", "val2")
        assert loc.get("u1", "k").value == "val1"
        assert loc.get("u2", "k").value == "val2"

    def test_name(self, loc): assert loc.name == "local"


class TestHoncho:
    def test_not_available_without_key(self, monkeypatch):
        monkeypatch.delenv("HONCHO_API_KEY", raising=False)
        assert not Honcho().ok()

    def test_is_cloud(self): assert Honcho().cloud

    def test_name(self): assert Honcho().name == "honcho"

    def test_delete_returns_false(self):
        assert not Honcho().rm("u", "k")  # Honcho doesn't support delete


class TestEvaluation:
    def test_score_null(self):
        r = score(Null())
        assert r["score"] > 0
        assert r["grade"] in ("A", "B", "C", "D")
        assert r["ok"]

    def test_score_local(self, loc):
        r = score(loc)
        assert r["ok"]
        assert r["score"] >= 80
        assert r["grade"] == "A"
        assert not r["cloud"]

    def test_evaluate_returns_report(self):
        r = evaluate()
        assert "results" in r
        assert "recommendation" in r
        assert len(r["results"]) >= 2  # null + local

    def test_evaluate_recommendation_local(self):
        r = evaluate()
        assert "local" in r["recommendation"].lower()


class TestSingleton:
    def test_default_is_local(self, rst, monkeypatch):
        monkeypatch.delenv("HONCHO_API_KEY", raising=False)
        b = get_backend()
        assert isinstance(b, Local)

    def test_caches_instance(self, rst):
        assert get_backend() is get_backend()

    def test_reset_clears(self, rst):
        b1 = get_backend()
        reset()
        b2 = get_backend()
        assert b1 is not b2
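Why `test_score_local` can safely demand a grade A: under the rubric in `agent/memory.py`, a working non-cloud backend banks 20 (availability) + 40 (put/get/find) + 20 (privacy), so even the slowest latency tier keeps it above the 80-point line. A worked check (editor's sketch, not part of the commit):

    availability, functionality, privacy = 20, 15 + 15 + 10, 20
    worst_latency_tier = 5   # avg >= 200 ms still earns 5 points
    total = availability + functionality + worst_latency_tier + privacy
    assert total == 85 and total >= 80   # grade "A"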
@@ -7,7 +7,7 @@ from unittest.mock import AsyncMock, patch, MagicMock

import pytest

from cron.scheduler import _resolve_origin, _resolve_delivery_target, _deliver_result, run_job, SILENT_MARKER, _build_job_prompt, _check_model_context_compat, ModelContextError, CRON_MIN_CONTEXT_TOKENS, _classify_runtime, _detect_provider_mismatch
from cron.scheduler import _resolve_origin, _resolve_delivery_target, _deliver_result, run_job, SILENT_MARKER, _build_job_prompt, _check_model_context_compat, ModelContextError, CRON_MIN_CONTEXT_TOKENS


class TestResolveOrigin:

@@ -670,13 +670,6 @@ class TestRunJobSkillBacked:
class TestSilentDelivery:
    """Verify that [SILENT] responses suppress delivery while still saving output."""

    @pytest.fixture(autouse=True)
    def _isolate_lock(self, tmp_path):
        """Give each test its own tick lock file to prevent parallel test contention."""
        with patch("cron.scheduler._LOCK_FILE", tmp_path / ".tick.lock"), \
             patch("cron.scheduler._LOCK_DIR", tmp_path):
            yield

    def _make_job(self):
        return {
            "id": "monitor-job",

@@ -834,102 +827,10 @@ class TestBuildJobPromptMissingSkill:
        assert "go" in result


class TestClassifyRuntime:
    """Unit tests for _classify_runtime."""

    def test_cloud_provider_explicit(self):
        assert _classify_runtime("openai", "") == "cloud"
        assert _classify_runtime("anthropic", "") == "cloud"
        assert _classify_runtime("nous", "") == "cloud"

    def test_local_provider_explicit(self):
        assert _classify_runtime("ollama", "") == "local"
        assert _classify_runtime("local", "") == "local"

    def test_cloud_detected_from_model_prefix(self):
        """Model prefix 'nous/...' should be classified as cloud even with no provider."""
        assert _classify_runtime("", "nous/mimo-v2-pro") == "cloud"
        assert _classify_runtime("", "openai/gpt-4o") == "cloud"

    def test_local_when_model_has_no_cloud_prefix(self):
        """A model without a cloud prefix and no provider => local."""
        assert _classify_runtime("", "llama3") == "local"

    def test_unknown_when_empty(self):
        assert _classify_runtime("", "") == "unknown"


class TestBuildJobPromptRuntimeContext:
    """Verify runtime context block injection in _build_job_prompt."""

    def test_runtime_block_injected_with_model_and_provider(self):
        job = {"prompt": "Do something"}
        result = _build_job_prompt(job, runtime_model="nous/mimo-v2-pro", runtime_provider="nous")
        assert "RUNTIME CONTEXT" in result
        assert "MODEL: nous/mimo-v2-pro" in result
        assert "PROVIDER: nous" in result
        assert "cloud" in result

    def test_provider_derived_from_model_prefix_when_empty(self):
        """Fix #565: PROVIDER should be derived from model prefix when runtime_provider is empty."""
        job = {"prompt": "Do something"}
        result = _build_job_prompt(job, runtime_model="nous/mimo-v2-pro", runtime_provider="")
        assert "PROVIDER: nous" in result

    def test_provider_not_empty_in_context_block(self):
        """Fix #565: PROVIDER line must not be blank when model has a slash prefix."""
        job = {"prompt": "Check status"}
        result = _build_job_prompt(job, runtime_model="openai/gpt-4o", runtime_provider="")
        assert "PROVIDER: openai" in result
        assert "PROVIDER: ;" not in result
        assert "PROVIDER: ]" not in result

    def test_no_runtime_block_when_no_model_or_provider(self):
        """No runtime block should appear when neither model nor provider is given."""
        job = {"prompt": "Hello"}
        result = _build_job_prompt(job)
        assert "RUNTIME CONTEXT" not in result

    def test_local_runtime_classification(self):
        """ollama model should get local runtime label."""
        job = {"prompt": "Query local model"}
        result = _build_job_prompt(job, runtime_model="llama3", runtime_provider="ollama")
        assert "RUNTIME: local" in result
        assert "NO local access" not in result

    def test_runtime_block_precedes_cron_hint(self):
        """RUNTIME CONTEXT block should appear before the cron system hint."""
        job = {"prompt": "test"}
        result = _build_job_prompt(job, runtime_model="nous/mimo-v2-pro", runtime_provider="nous")
        runtime_pos = result.index("RUNTIME CONTEXT")
        cron_pos = result.index("scheduled cron job")
        assert runtime_pos < cron_pos


class TestDetectProviderMismatch:
    """Unit tests for _detect_provider_mismatch."""

    def test_no_mismatch_when_same_provider(self):
        assert _detect_provider_mismatch("Use ollama to generate", "ollama") is None

    def test_mismatch_detected(self):
        """Prompt referencing 'ollama' while running on 'nous' should flag a mismatch."""
        result = _detect_provider_mismatch("Check if Ollama is responding", "nous")
        assert result == "ollama"

    def test_no_mismatch_for_empty_inputs(self):
        assert _detect_provider_mismatch("", "nous") is None
        assert _detect_provider_mismatch("some prompt", "") is None

    def test_no_mismatch_when_provider_unknown(self):
        """Unknown active provider should not raise, just return None."""
        assert _detect_provider_mismatch("Check Ollama", "mystery-provider") is None


class TestTickAdvanceBeforeRun:
    """Verify that tick() calls advance_next_run before run_job for crash safety."""

    def test_advance_called_before_run_job(self, tmp_path, monkeypatch):
    def test_advance_called_before_run_job(self, tmp_path):
        """advance_next_run must be called before run_job to prevent crash-loop re-fires."""
        call_order = []

@@ -954,9 +855,7 @@ class TestTickAdvanceBeforeRun:
             patch("cron.scheduler.run_job", side_effect=fake_run_job), \
             patch("cron.scheduler.save_job_output", return_value=tmp_path / "out.md"), \
             patch("cron.scheduler.mark_job_run"), \
             patch("cron.scheduler._deliver_result"), \
             patch("cron.scheduler._LOCK_FILE", tmp_path / ".tick.lock"), \
             patch("cron.scheduler._LOCK_DIR", tmp_path):
             patch("cron.scheduler._deliver_result"):
            from cron.scheduler import tick
            executed = tick(verbose=False)

@@ -1001,7 +900,7 @@ class TestDeploySyncGuard:
            fake_module = MagicMock()
            fake_module.AIAgent = FakeAIAgent

            with pytest.raises(RuntimeError, match=r"(?s)missing params:.*tool_choice"):
            with pytest.raises(RuntimeError, match="Missing parameters: tool_choice"):
                with patch.dict("sys.modules", {"run_agent": fake_module}):
                    sched_mod._validate_agent_interface()
        finally:
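One detail in the deploy-sync-guard hunk deserves a gloss: `pytest.raises(match=...)` applies `re.search`, and the `(?s)` flag makes `.` match newlines, so the `(?s)missing params:` pattern tolerates a guard message that spans lines, while the plain-string variant requires the exact phrase. A small illustration with a hypothetical message (the text below is invented for the sketch):

    import re

    msg = "deploy sync guard: missing params:\n{'tool_choice'}"
    assert re.search(r"(?s)missing params:.*tool_choice", msg)
    assert re.search("Missing parameters: tool_choice", msg) is None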
tools/memory_backend_tool.py (new file, +79 lines)

@@ -0,0 +1,79 @@
"""Memory backend tool — cross-session user modeling.

Local SQLite default, Honcho cloud opt-in. Zero overhead when disabled.
"""

import json
from tools.registry import registry


def memory_backend(action, uid="default", key=None, value=None, query=None, meta=None):
    from agent.memory import get_backend, evaluate
    b = get_backend()

    if action == "info":
        return json.dumps({"success": True, "backend": b.name, "cloud": b.cloud, "available": b.ok()})

    if action == "store":
        if not key or value is None:
            return json.dumps({"success": False, "error": "key and value required"})
        return json.dumps({"success": b.put(uid, key, value, meta), "key": key})

    if action == "get":
        if not key:
            return json.dumps({"success": False, "error": "key required"})
        e = b.get(uid, key)
        if not e:
            return json.dumps({"success": False, "error": f"not found: {key}"})
        return json.dumps({"success": True, "key": e.key, "value": e.value, "type": e.etype})

    if action == "query":
        if not query:
            return json.dumps({"success": False, "error": "query required"})
        r = b.find(uid, query)
        return json.dumps({"success": True,
                           "results": [{"key": e.key, "value": e.value} for e in r], "count": len(r)})

    if action == "list":
        r = b.all(uid)
        return json.dumps({"success": True,
                           "entries": [{"key": e.key, "type": e.etype} for e in r], "count": len(r)})

    if action == "delete":
        if not key:
            return json.dumps({"success": False, "error": "key required"})
        return json.dumps({"success": b.rm(uid, key)})

    if action == "evaluate":
        return json.dumps({"success": True, **evaluate()})

    return json.dumps({"success": False, "error": f"unknown action: {action}"})


registry.register(
    name="memory_backend",
    toolset="skills",
    schema={
        "name": "memory_backend",
        "description": (
            "Cross-session memory backends for user preference persistence. "
            "Local SQLite default (sovereign), Honcho cloud opt-in via HONCHO_API_KEY. "
            "Zero overhead when disabled."
        ),
        "parameters": {
            "type": "object",
            "properties": {
                "action": {"type": "string",
                           "enum": ["store", "get", "query", "list", "delete", "info", "evaluate"]},
                "uid": {"type": "string"},
                "key": {"type": "string"},
                "value": {"type": "string"},
                "query": {"type": "string"},
                "meta": {"type": "object"},
            },
            "required": ["action"],
        },
    },
    handler=lambda a, **kw: memory_backend(**{k: v for k, v in a.items() if v is not None}),
    emoji="🧠",
)
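A short invocation sketch for the tool above (the uid and key are illustrative; every action returns a JSON string, per the handler):

    import json
    from tools.memory_backend_tool import memory_backend

    print(memory_backend("info"))   # current backend name, cloud flag, availability
    print(memory_backend("store", uid="alice", key="lang", value="python"))
    resp = json.loads(memory_backend("get", uid="alice", key="lang"))
    assert resp["success"] and resp["value"] == "python"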