Compare commits
18 commits — fix/empty-... → burn/model

Commits: f8f4678ee4 · 1ec02cf061 · 1156875cb5 · f4c102400e · 6555ccabc1 · 8c712866c4 · 8fb59aae64 · 95bde9d3cb · aa6eabb816 · 3b89bfbab2 · 3e6e183ad2 · 9c38e28f4d · cea4c7fdd0 · f9b6db52af · f91f22ef7a · b89c670400 · f6e72c135c · ece8b5f8be
@@ -26,11 +26,11 @@ from cron.jobs import (
     trigger_job,
     JOBS_FILE,
 )
-from cron.scheduler import tick
+from cron.scheduler import tick, ModelContextError, CRON_MIN_CONTEXT_TOKENS

 __all__ = [
     "create_job",
     "get_job",
     "list_jobs",
     "remove_job",
     "update_job",
@@ -39,4 +39,6 @@ __all__ = [
     "trigger_job",
     "tick",
     "JOBS_FILE",
+    "ModelContextError",
+    "CRON_MIN_CONTEXT_TOKENS",
 ]
cron/jobs.py (38 changed lines)
@@ -563,6 +563,44 @@ def trigger_job(job_id: str) -> Optional[Dict[str, Any]]:
     )


+def run_job_now(job_id: str) -> Optional[Dict[str, Any]]:
+    """
+    Execute a job immediately and persist fresh state.
+
+    Unlike trigger_job(), which queues the job for the next scheduler tick,
+    this runs the job synchronously and returns the result. Clears stale
+    error state on success.
+
+    Returns:
+        Dict with 'job', 'success', 'output', 'final_response', and 'error'
+        keys, or None if the job was not found.
+    """
+    job = get_job(job_id)
+    if not job:
+        return None
+
+    try:
+        from cron.scheduler import run_job as _run_job
+    except ImportError as exc:
+        return {
+            "job": job,
+            "success": False,
+            "output": None,
+            "error": f"Cannot import scheduler: {exc}",
+        }
+
+    success, output, final_response, error = _run_job(job)
+    mark_job_run(job_id, success, error)
+
+    updated_job = get_job(job_id) or job
+    return {
+        "job": updated_job,
+        "success": success,
+        "output": output,
+        "final_response": final_response,
+        "error": error,
+    }
+
+
 def remove_job(job_id: str) -> bool:
     """Remove a job by ID."""
     jobs = load_jobs()
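A quick sketch of the new contract from the caller's side; the job ID is illustrative:

```python
# Hypothetical caller; "morning-digest" is an illustrative job ID.
from cron.jobs import run_job_now

result = run_job_now("morning-digest")
if result is None:
    print("no such job")
elif result["success"]:
    print(result.get("final_response") or result.get("output") or "(no output)")
else:
    print(f"job failed: {result['error']}")
```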
@@ -10,7 +10,6 @@ runs at a time if multiple processes overlap.

 import asyncio
 import concurrent.futures
-import inspect
 import json
 import logging
 import os
@@ -41,6 +40,112 @@ from hermes_time import now as _hermes_now

 logger = logging.getLogger(__name__)


+# =====================================================================
+# Deploy Sync Guard
+# =====================================================================
+#
+# If the installed run_agent.py diverges from the version scheduler.py
+# was written against, every cron job fails with:
+#   TypeError: AIAgent.__init__() got an unexpected keyword argument '...'
+#
+# _validate_agent_interface() catches this at the FIRST job, not the
+# 55th. It uses inspect.signature() to verify every kwarg we pass is
+# accepted by AIAgent.__init__().
+#
+# Maintaining this list: if you add a kwarg to the AIAgent() call in
+# run_job(), add it here too. The guard catches mismatches.
+
+_SCHEDULER_AGENT_KWARGS: frozenset = frozenset({
+    "model", "api_key", "base_url", "provider", "api_mode",
+    "acp_command", "acp_args", "max_iterations", "reasoning_config",
+    "prefill_messages", "providers_allowed", "providers_ignored",
+    "providers_order", "provider_sort", "disabled_toolsets",
+    "tool_choice", "quiet_mode", "skip_memory", "platform",
+    "session_id", "session_db",
+})
+
+_agent_interface_validated: bool = False
+
+
+def _validate_agent_interface() -> None:
+    """Verify installed AIAgent.__init__ accepts every kwarg the scheduler passes.
+
+    Raises RuntimeError with actionable guidance if params are missing.
+    Caches result — runs once per gateway process lifetime.
+    """
+    global _agent_interface_validated
+    if _agent_interface_validated:
+        return
+
+    import inspect
+
+    try:
+        from run_agent import AIAgent
+    except ImportError as exc:
+        raise RuntimeError(
+            f"Cannot import AIAgent: {exc}\n"
+            "Is hermes-agent installed? Check PYTHONPATH."
+        ) from exc
+
+    sig = inspect.signature(AIAgent.__init__)
+    accepted = set(sig.parameters.keys()) - {"self"}
+    missing = _SCHEDULER_AGENT_KWARGS - accepted
+
+    if missing:
+        sorted_missing = sorted(missing)
+        raise RuntimeError(
+            "Deploy sync guard FAILED — AIAgent.__init__() is missing params:\n"
+            f"  {', '.join(sorted_missing)}\n"
+            "This means the installed run_agent.py is out of date.\n"
+            "Fix: pull latest hermes-agent code and restart the gateway.\n"
+            "  cd ~/.hermes/hermes-agent && git pull && source venv/bin/activate"
+        )
+
+    _agent_interface_validated = True
+    logger.debug("Deploy sync guard passed — %d params verified", len(_SCHEDULER_AGENT_KWARGS))
+
+
+def _safe_agent_kwargs(kwargs: dict) -> dict:
+    """Filter kwargs to only those accepted by the installed AIAgent.__init__.
+
+    More resilient than _validate_agent_interface() alone: instead of
+    crashing on mismatch, drops unsupported kwargs and logs a warning.
+    Jobs run with degraded functionality instead of failing entirely.
+
+    Args:
+        kwargs: The kwargs dict the scheduler wants to pass to AIAgent().
+
+    Returns:
+        A new dict containing only kwargs the installed AIAgent accepts.
+    """
+    import inspect
+
+    try:
+        from run_agent import AIAgent
+    except ImportError:
+        # Can't import — pass everything through, let the real error surface
+        return kwargs
+
+    sig = inspect.signature(AIAgent.__init__)
+    accepted = set(sig.parameters.keys()) - {"self"}
+
+    safe = {}
+    dropped = []
+    for key, value in kwargs.items():
+        if key in accepted:
+            safe[key] = value
+        else:
+            dropped.append(key)
+
+    if dropped:
+        logger.warning(
+            "Dropping unsupported AIAgent kwargs (stale install?): %s",
+            ", ".join(sorted(dropped)),
+        )
+
+    return safe
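The filtering behavior, shown with a stub class standing in for an out-of-date AIAgent (a self-contained sketch, not the module's actual import path):

```python
import inspect

class StubAgent:
    # Simulates an older installed AIAgent missing newer parameters.
    def __init__(self, model, api_key=None):
        self.model = model

def safe_kwargs(cls, kwargs):
    accepted = set(inspect.signature(cls.__init__).parameters) - {"self"}
    return {k: v for k, v in kwargs.items() if k in accepted}

print(safe_kwargs(StubAgent, {"model": "m", "api_key": "k", "session_db": None}))
# {'model': 'm', 'api_key': 'k'}  (session_db dropped, as _safe_agent_kwargs would drop it)
```

Note the layering: `_validate_agent_interface()` fails fast with a fix command, while `_safe_agent_kwargs()` fails open and degrades gracefully; `run_job()` below uses both.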
 # Valid delivery platforms — used to validate user-supplied platform names
 # in cron delivery targets, preventing env var enumeration via crafted names.
 _KNOWN_DELIVERY_PLATFORMS = frozenset({
@@ -51,75 +156,76 @@ _KNOWN_DELIVERY_PLATFORMS = frozenset({

 from cron.jobs import get_due_jobs, mark_job_run, save_job_output, advance_next_run

-# ---------------------------------------------------------------------------
-# Deploy Sync Guard
-# ---------------------------------------------------------------------------
-# The scheduler passes keyword arguments to AIAgent() that may not exist in
-# older installed versions. When the installed run_agent.py diverges from the
-# version the scheduler was written against, every cron job fails with a
-# TypeError — a silent total outage. This guard catches that at the first
-# tick rather than after 1000+ error log lines.
-#
-# The check runs once per gateway process (cached by _agent_interface_validated).
-# It inspects AIAgent.__init__ and verifies every parameter the scheduler
-# passes is accepted. On mismatch it raises RuntimeError with a fix command.
-
-_agent_interface_validated = False
-
-# Parameters the scheduler passes to AIAgent() in run_job().
-# If you add a new kwarg to the AIAgent() call below, add it here too.
-_SCHEDULER_AGENT_KWARGS = {
-    "tool_choice": "required",
-    "skip_memory": True,
-    "platform": "cron",
-}
-
-
-def _validate_agent_interface():
-    """Verify AIAgent.__init__ accepts every kwarg the scheduler uses.
-
-    Raises RuntimeError with a remediation message on mismatch.
-    Called once per process from run_job(); subsequent calls are no-ops.
-    """
-    global _agent_interface_validated
-    if _agent_interface_validated:
-        return
-
-    try:
-        from run_agent import AIAgent
-    except ImportError as exc:
-        raise RuntimeError(
-            f"Deploy sync guard: cannot import AIAgent from run_agent: {exc}\n"
-            "The installed hermes-agent package may be corrupted. "
-            "Reinstall: pip install -e ~/.hermes/hermes-agent"
-        ) from exc
-
-    sig = inspect.signature(AIAgent.__init__)
-    params = set(sig.parameters.keys()) - {"self"}
-
-    missing = [kw for kw in _SCHEDULER_AGENT_KWARGS if kw not in params]
-    if missing:
-        raise RuntimeError(
-            "Deploy sync guard: AIAgent.__init__() is missing parameters that "
-            "the cron scheduler requires. This means the installed code is out "
-            "of sync with the scheduler module.\n"
-            f"  Missing parameters: {', '.join(missing)}\n"
-            f"  Expected by: cron/scheduler.py (run_job → AIAgent())\n"
-            f"  Fix: pip install -e ~/.hermes/hermes-agent --force-reinstall\n"
-            f"  Then restart the gateway."
-        )
-
-    _agent_interface_validated = True
-    logger.info(
-        "Deploy sync guard: AIAgent interface OK (%d params, %d scheduler deps)",
-        len(params), len(_SCHEDULER_AGENT_KWARGS),
-    )
-
-
 # Sentinel: when a cron agent has nothing new to report, it can start its
 # response with this marker to suppress delivery. Output is still saved
 # locally for audit.
 SILENT_MARKER = "[SILENT]"
+SCRIPT_FAILED_MARKER = "[SCRIPT_FAILED]"

+# Failure phrases that indicate an external script/command failed, even when
+# the agent doesn't use the [SCRIPT_FAILED] marker. Matched case-insensitively
+# against the final response. These are strong signals — agents rarely use
+# these words when a script succeeded.
+_SCRIPT_FAILURE_PHRASES = (
+    "timed out",
+    "timeout",
+    "connection error",
+    "connection refused",
+    "connection reset",
+    "failed to execute",
+    "failed due to",
+    "script failed",
+    "script error",
+    "command failed",
+    "exit code",
+    "exit status",
+    "non-zero exit",
+    "did not complete",
+    "could not run",
+    "unable to execute",
+    "permission denied",
+    "no such file",
+    "traceback",
+)
+
+
+def _detect_script_failure(final_response: str) -> Optional[str]:
+    """Detect script failure from the agent's final response.
+
+    Returns a reason string if a failure is detected, None otherwise.
+    Checks both the explicit [SCRIPT_FAILED] marker and heuristic patterns.
+    """
+    if not final_response:
+        return None
+
+    # 1. Explicit marker — highest confidence.
+    if SCRIPT_FAILED_MARKER in final_response.upper():
+        import re as _re
+        _m = _re.search(
+            r'\[SCRIPT_FAILED\]\s*:?\s*(.*)',
+            final_response,
+            _re.IGNORECASE,
+        )
+        reason = _m.group(1).strip() if _m and _m.group(1).strip() else None
+        return reason or "Agent reported script failure"
+
+    # 2. Heuristic detection — catch failures described in natural language.
+    #    Only flag if the response contains failure language AND does NOT
+    #    contain success markers like [NOOP] (which means the script ran fine
+    #    but found nothing).
+    lower = final_response.lower()
+    has_noop = "[noop]" in lower
+    has_silent = "[silent]" in lower
+
+    if has_noop or has_silent:
+        return None  # Agent explicitly signaled success/nothing-to-report
+
+    for phrase in _SCRIPT_FAILURE_PHRASES:
+        if phrase in lower:
+            return f"Detected script failure phrase: '{phrase}'"
+
+    return None
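Illustrative inputs and the values `_detect_script_failure()` returns for them (the responses are made up):

```python
_detect_script_failure("[SCRIPT_FAILED]: backup.sh exited with code 1")
# -> "backup.sh exited with code 1"                 (explicit marker, reason extracted)

_detect_script_failure("The fetch timed out after 30s")
# -> "Detected script failure phrase: 'timed out'"  (heuristic match)

_detect_script_failure("[NOOP] checked feeds; no timeout issues")
# -> None   ([noop]/[silent] suppress the heuristics)
```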
 # Resolve Hermes home directory (respects HERMES_HOME override)
 _hermes_home = get_hermes_home()
@@ -480,7 +586,15 @@ def _build_job_prompt(job: dict) -> str:
         "SILENT: If there is genuinely nothing new to report, respond "
         "with exactly \"[SILENT]\" (nothing else) to suppress delivery. "
         "Never combine [SILENT] with content — either report your "
-        "findings normally, or say [SILENT] and nothing more.]\n\n"
+        "findings normally, or say [SILENT] and nothing more. "
+        "SCRIPT_FAILURE: If an external command or script you ran "
+        "failed (timeout, crash, connection error, non-zero exit), you MUST "
+        "respond with "
+        "\"[SCRIPT_FAILED]: <one-line reason>\" as the FIRST LINE of your "
+        "response. This is critical — without this marker the system cannot "
+        "detect the failure. Examples: "
+        "\"[SCRIPT_FAILED]: forge.alexanderwhitestone.com timed out\" "
+        "\"[SCRIPT_FAILED]: script exited with code 1\".]\n\n"
     )
     prompt = cron_hint + prompt
     if skills is None:
@@ -535,13 +649,12 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
     Returns:
         Tuple of (success, full_output_doc, final_response, error_message)
     """
-    from run_agent import AIAgent
-
-    # Deploy sync guard: verify the installed AIAgent accepts all kwargs
-    # the scheduler passes. Catches stale-code-deploy bugs before they
-    # cascade into 1000+ TypeErrors and gateway restart loops.
+    # Deploy sync guard — fail fast on first job if the installed
+    # AIAgent.__init__ is missing params the scheduler expects.
     _validate_agent_interface()

+    from run_agent import AIAgent
+
     # Initialize SQLite session store so cron job messages are persisted
     # and discoverable via session_search (same pattern as gateway/run.py).
     _session_db = None
@@ -664,29 +777,30 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
         },
     )

-    agent = AIAgent(
-        model=turn_route["model"],
-        api_key=turn_route["runtime"].get("api_key"),
-        base_url=turn_route["runtime"].get("base_url"),
-        provider=turn_route["runtime"].get("provider"),
-        api_mode=turn_route["runtime"].get("api_mode"),
-        acp_command=turn_route["runtime"].get("command"),
-        acp_args=turn_route["runtime"].get("args"),
-        max_iterations=max_iterations,
-        reasoning_config=reasoning_config,
-        prefill_messages=prefill_messages,
-        providers_allowed=pr.get("only"),
-        providers_ignored=pr.get("ignore"),
-        providers_order=pr.get("order"),
-        provider_sort=pr.get("sort"),
-        disabled_toolsets=["cronjob", "messaging", "clarify"],
-        tool_choice="required",
-        quiet_mode=True,
-        skip_memory=True,  # Cron system prompts would corrupt user representations
-        platform="cron",
-        session_id=_cron_session_id,
-        session_db=_session_db,
-    )
+    _agent_kwargs = _safe_agent_kwargs({
+        "model": turn_route["model"],
+        "api_key": turn_route["runtime"].get("api_key"),
+        "base_url": turn_route["runtime"].get("base_url"),
+        "provider": turn_route["runtime"].get("provider"),
+        "api_mode": turn_route["runtime"].get("api_mode"),
+        "acp_command": turn_route["runtime"].get("command"),
+        "acp_args": turn_route["runtime"].get("args"),
+        "max_iterations": max_iterations,
+        "reasoning_config": reasoning_config,
+        "prefill_messages": prefill_messages,
+        "providers_allowed": pr.get("only"),
+        "providers_ignored": pr.get("ignore"),
+        "providers_order": pr.get("order"),
+        "provider_sort": pr.get("sort"),
+        "disabled_toolsets": ["cronjob", "messaging", "clarify"],
+        "tool_choice": "required",
+        "quiet_mode": True,
+        "skip_memory": True,  # Cron system prompts would corrupt user representations
+        "platform": "cron",
+        "session_id": _cron_session_id,
+        "session_db": _session_db,
+    })
+    agent = AIAgent(**_agent_kwargs)

     # Run the agent with an *inactivity*-based timeout: the job can run
     # for hours if it's actively calling tools / receiving stream tokens,
@@ -806,6 +920,30 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
     # Use a separate variable for log display; keep final_response clean
     # for delivery logic (empty response = no delivery).
     logged_response = final_response if final_response else "(No response generated)"

+    # Check for script failure — both explicit [SCRIPT_FAILED] marker
+    # and heuristic detection for failures described in natural language.
+    _script_failed_reason = _detect_script_failure(final_response)
+    if _script_failed_reason is not None:
+        logger.warning(
+            "Job '%s': agent reported script failure — %s",
+            job_name, _script_failed_reason,
+        )
+        output = f"""# Cron Job: {job_name} (SCRIPT FAILED)
+
+**Job ID:** {job_id}
+**Run Time:** {_hermes_now().strftime('%Y-%m-%d %H:%M:%S')}
+**Schedule:** {job.get('schedule_display', 'N/A')}
+
+## Prompt
+
+{prompt}
+
+## Response
+
+{logged_response}
+"""
+        return False, output, final_response, _script_failed_reason
+
     output = f"""# Cron Job: {job_name}

@@ -910,36 +1048,26 @@ def tick(verbose: bool = True, adapters=None, loop=None) -> int:
     if verbose:
         logger.info("%s - %s job(s) due", _hermes_now().strftime('%H:%M:%S'), len(due_jobs))

+    # Parallel job execution with priority sorting
+    PRIORITY_ORDER = {"critical": 0, "high": 1, "normal": 2, "low": 3}
+    due_jobs_sorted = sorted(due_jobs, key=lambda j: PRIORITY_ORDER.get(j.get("priority", "normal"), 2))
+    MAX_PARALLEL = int(os.environ.get("HERMES_CRON_MAX_PARALLEL", "10"))
+
     executed = 0
+    _job_results = []

-    for job in due_jobs:
+    def _process_single_job(job):
         job_name = job.get("name", job["id"])
         # If the interpreter is shutting down (e.g. gateway restart),
         # stop processing immediately — ThreadPoolExecutor.submit()
         # will raise RuntimeError for every remaining job.
         if sys.is_finalizing():
-            logger.warning(
-                "Interpreter finalizing — skipping %d remaining job(s)",
-                len(due_jobs) - executed,
-            )
-            break
+            return None

         try:
             # For recurring jobs (cron/interval), advance next_run_at to the
             # next future occurrence BEFORE execution. This way, if the
             # process crashes mid-run, the job won't re-fire on restart.
             # One-shot jobs are left alone so they can retry on restart.
             advance_next_run(job["id"])

             success, output, final_response, error = run_job(job)
+            return (job, success, output, final_response, error)
         except Exception as e:
             logger.error("Job '%s': parallel error: %s", job_name, e)
+            return (job, False, "", None, str(e))

+    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_PARALLEL) as executor:
+        futures = {executor.submit(_process_single_job, job): job for job in due_jobs_sorted}
+        for future in concurrent.futures.as_completed(futures):
+            result = future.result()
+            if result is None:
+                continue
+            _job_results.append(result)
+
+    for job, success, output, final_response, error in _job_results:
         executed += 1

         output_file = save_job_output(job["id"], output)
         if verbose:
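The execution pattern in isolation: priority-sorted submission into a bounded pool, with results gathered as they finish. Note the sort only controls submission order, so it matters when due jobs outnumber MAX_PARALLEL. A standalone sketch:

```python
import concurrent.futures

PRIORITY_ORDER = {"critical": 0, "high": 1, "normal": 2, "low": 3}
jobs = [{"id": "a", "priority": "low"}, {"id": "b", "priority": "critical"}]
ordered = sorted(jobs, key=lambda j: PRIORITY_ORDER.get(j.get("priority", "normal"), 2))

def process(job):  # stand-in for _process_single_job
    return job["id"], True

with concurrent.futures.ThreadPoolExecutor(max_workers=10) as pool:
    futures = {pool.submit(process, j): j for j in ordered}
    for fut in concurrent.futures.as_completed(futures):
        print(fut.result())  # results arrive in completion order, not priority order
```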
@@ -648,6 +648,51 @@ def load_gateway_config() -> GatewayConfig:
     return config


+# Known-weak placeholder tokens from .env.example, tutorials, etc.
+# Compared against the lowercased env value, so entries must be lowercase.
+_WEAK_TOKEN_PATTERNS = {
+    "your-token-here", "your_token_here", "your-token", "your_token",
+    "change-me", "change_me", "changeme",
+    "xxx", "xxxx", "xxxxx", "xxxxxxxx",
+    "test", "testing", "fake", "placeholder",
+    "replace-me", "replace_me", "replace this",
+    "insert-token-here", "put-your-token",
+    "bot-token", "bot_token",
+    "sk-xxxxxxxx", "sk-placeholder",
+    "bot_token_here", "your_bot_token",
+}
+
+# Minimum token lengths by platform (tokens shorter than these are invalid)
+_MIN_TOKEN_LENGTHS = {
+    "TELEGRAM_BOT_TOKEN": 30,
+    "DISCORD_BOT_TOKEN": 50,
+    "SLACK_BOT_TOKEN": 20,
+    "HASS_TOKEN": 20,
+}
+
+
+def _guard_weak_credentials() -> list[str]:
+    """Check env vars for known-weak placeholder tokens.
+
+    Returns a list of warning messages for any weak credentials found.
+    """
+    warnings = []
+    for env_var, min_len in _MIN_TOKEN_LENGTHS.items():
+        value = os.getenv(env_var, "").strip()
+        if not value:
+            continue
+        if value.lower() in _WEAK_TOKEN_PATTERNS:
+            warnings.append(
+                f"{env_var} is set to a placeholder value ('{value[:20]}'). "
+                f"Replace it with a real token."
+            )
+        elif len(value) < min_len:
+            warnings.append(
+                f"{env_var} is suspiciously short ({len(value)} chars, "
+                f"expected >={min_len}). May be truncated or invalid."
+            )
+    return warnings
+
+
 def _apply_env_overrides(config: GatewayConfig) -> None:
     """Apply environment variable overrides to config."""

@@ -941,3 +986,7 @@ def _apply_env_overrides(config: GatewayConfig) -> None:
             config.default_reset_policy.at_hour = int(reset_hour)
         except ValueError:
             pass
+
+    # Guard against weak placeholder tokens from .env.example copies
+    for warning in _guard_weak_credentials():
+        logger.warning("Weak credential: %s", warning)
@@ -973,6 +973,7 @@ class APIServerAdapter(BasePlatformAdapter):
         pause_job as _cron_pause,
         resume_job as _cron_resume,
         trigger_job as _cron_trigger,
+        run_job_now as _cron_run_now,
     )
     # Wrap as staticmethod to prevent descriptor binding — these are plain
     # module functions, not instance methods. Without this, self._cron_*()
@@ -986,6 +987,7 @@ class APIServerAdapter(BasePlatformAdapter):
     _cron_pause = staticmethod(_cron_pause)
     _cron_resume = staticmethod(_cron_resume)
     _cron_trigger = staticmethod(_cron_trigger)
+    _cron_run_now = staticmethod(_cron_run_now)
     _CRON_AVAILABLE = True
 except ImportError:
     pass
@@ -1204,6 +1206,28 @@ class APIServerAdapter(BasePlatformAdapter):
         except Exception as e:
             return web.json_response({"error": str(e)}, status=500)

+    async def _handle_run_job_now(self, request: "web.Request") -> "web.Response":
+        """POST /api/jobs/{job_id}/run-now — execute the job synchronously and return the result."""
+        auth_err = self._check_auth(request)
+        if auth_err:
+            return auth_err
+        cron_err = self._check_jobs_available()
+        if cron_err:
+            return cron_err
+        job_id, id_err = self._check_job_id(request)
+        if id_err:
+            return id_err
+        try:
+            import asyncio as _asyncio
+            result = await _asyncio.get_running_loop().run_in_executor(
+                None, self._cron_run_now, job_id
+            )
+            if result is None:
+                return web.json_response({"error": "Job not found"}, status=404)
+            return web.json_response(result)
+        except Exception as e:
+            return web.json_response({"error": str(e)}, status=500)
+
     # ------------------------------------------------------------------
     # Output extraction helper
     # ------------------------------------------------------------------
@@ -1565,6 +1589,7 @@ class APIServerAdapter(BasePlatformAdapter):
         self._app.router.add_post("/api/jobs/{job_id}/pause", self._handle_pause_job)
         self._app.router.add_post("/api/jobs/{job_id}/resume", self._handle_resume_job)
         self._app.router.add_post("/api/jobs/{job_id}/run", self._handle_run_job)
+        self._app.router.add_post("/api/jobs/{job_id}/run-now", self._handle_run_job_now)
         # Structured event streaming
        self._app.router.add_post("/v1/runs", self._handle_runs)
         self._app.router.add_get("/v1/runs/{run_id}/events", self._handle_run_events)
@@ -221,7 +221,31 @@ def cron_edit(args):
     return 0


-def _job_action(action: str, job_id: str, success_verb: str) -> int:
+def _job_action(action: str, job_id: str, success_verb: str, now: bool = False) -> int:
+    if action == "run" and now:
+        # Synchronous execution — run the job immediately and show the result
+        result = _cron_api(action="run_now", job_id=job_id)
+        if not result.get("success"):
+            print(color(f"Failed to run job now: {result.get('error') or 'unknown error'}", Colors.RED))
+            return 1
+        job = result.get("job", {})
+        success = result.get("success", False)
+        error = result.get("error")
+        final_response = result.get("final_response", "")
+        name = job.get("name", job_id)
+        if success:
+            print(color(f"Job '{name}' completed successfully", Colors.GREEN))
+        else:
+            print(color(f"Job '{name}' failed: {error}", Colors.RED))
+        if final_response:
+            print(f"\n{final_response}\n")
+        if not error:
+            print(color("Stale error state cleared.", Colors.GREEN))
+        return 0 if success else 1
+
     result = _cron_api(action=action, job_id=job_id)
     if not result.get("success"):
         print(color(f"Failed to {action} job: {result.get('error', 'unknown error')}", Colors.RED))
@@ -265,7 +289,8 @@ def cron_command(args):
         return _job_action("resume", args.job_id, "Resumed")

     if subcmd == "run":
-        return _job_action("run", args.job_id, "Triggered")
+        now = getattr(args, 'now', False)
+        return _job_action("run", args.job_id, "Triggered", now=now)

     if subcmd in {"remove", "rm", "delete"}:
         return _job_action("remove", args.job_id, "Removed")

@@ -4574,6 +4574,7 @@ For more help on a command:

     cron_run = cron_subparsers.add_parser("run", help="Run a job on the next scheduler tick")
     cron_run.add_argument("job_id", help="Job ID to trigger")
+    cron_run.add_argument("--now", action="store_true", help="Execute immediately and wait for result (clears stale errors)")

     cron_remove = cron_subparsers.add_parser("remove", aliases=["rm", "delete"], help="Remove a scheduled job")
     cron_remove.add_argument("job_id", help="Job ID to remove")
@@ -540,6 +540,29 @@ def handle_function_call(
         except Exception:
             pass

+        # Poka-yoke: validate tool handler return type.
+        # Handlers MUST return a JSON string. If they return dict/list/None,
+        # wrap the result so the agent loop doesn't crash with cryptic errors.
+        if not isinstance(result, str):
+            logger.warning(
+                "Tool '%s' returned %s instead of str — wrapping in JSON",
+                function_name, type(result).__name__,
+            )
+            result = json.dumps(
+                {"output": str(result), "_type_warning": f"Tool returned {type(result).__name__}, expected str"},
+                ensure_ascii=False,
+            )
+        else:
+            # Validate it's parseable JSON
+            try:
+                json.loads(result)
+            except (json.JSONDecodeError, TypeError):
+                logger.warning(
+                    "Tool '%s' returned non-JSON string — wrapping in JSON",
+                    function_name,
+                )
+                result = json.dumps({"output": result}, ensure_ascii=False)
+
         return result

     except Exception as e:
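The normalization as a pure function, showing what each handler return type becomes (a sketch of the same logic, extracted for clarity):

```python
import json

def normalize(result):
    if not isinstance(result, str):
        return json.dumps(
            {"output": str(result),
             "_type_warning": f"Tool returned {type(result).__name__}, expected str"},
            ensure_ascii=False,
        )
    try:
        json.loads(result)
        return result                      # valid JSON passes through unchanged
    except (json.JSONDecodeError, TypeError):
        return json.dumps({"output": result}, ensure_ascii=False)

print(normalize({"ok": True}))   # dict  -> wrapped, with _type_warning
print(normalize("plain text"))   # str   -> {"output": "plain text"}
print(normalize('{"a": 1}'))     # JSON  -> unchanged
```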
@@ -12,7 +12,7 @@ Config in $HERMES_HOME/config.yaml (profile-scoped):
     auto_extract: false
     default_trust: 0.5
     min_trust_threshold: 0.3
-    temporal_decay_half_life: 0
+    temporal_decay_half_life: 60
 """

 from __future__ import annotations
@@ -152,6 +152,7 @@ class HolographicMemoryProvider(MemoryProvider):
         {"key": "auto_extract", "description": "Auto-extract facts at session end", "default": "false", "choices": ["true", "false"]},
         {"key": "default_trust", "description": "Default trust score for new facts", "default": "0.5"},
         {"key": "hrr_dim", "description": "HRR vector dimensions", "default": "1024"},
+        {"key": "temporal_decay_half_life", "description": "Days for facts to lose half their relevance (0=disabled)", "default": "60"},
     ]
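Worked numbers for the new default: with a 60-day half-life, the relevance multiplier is 0.5 ** (age_days / 60):

```python
for age_days in (0, 30, 60, 120, 180):
    print(age_days, round(0.5 ** (age_days / 60), 3))
# 0 -> 1.0, 30 -> 0.707, 60 -> 0.5, 120 -> 0.25, 180 -> 0.125
```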
     def initialize(self, session_id: str, **kwargs) -> None:
@@ -168,7 +169,7 @@ class HolographicMemoryProvider(MemoryProvider):
         default_trust = float(self._config.get("default_trust", 0.5))
         hrr_dim = int(self._config.get("hrr_dim", 1024))
         hrr_weight = float(self._config.get("hrr_weight", 0.3))
-        temporal_decay = int(self._config.get("temporal_decay_half_life", 0))
+        temporal_decay = int(self._config.get("temporal_decay_half_life", 60))

         self._store = MemoryStore(db_path=db_path, default_trust=default_trust, hrr_dim=hrr_dim)
         self._retriever = FactRetriever(
@@ -98,7 +98,15 @@ class FactRetriever:

             # Optional temporal decay
             if self.half_life > 0:
-                score *= self._temporal_decay(fact.get("updated_at") or fact.get("created_at"))
+                decay = self._temporal_decay(fact.get("updated_at") or fact.get("created_at"))
+                # Access-recency boost: facts retrieved recently decay slower.
+                # A fact accessed within 1 half-life gets up to 1.5x the decay
+                # factor, tapering to 1.0x (no boost) at one half-life.
+                last_accessed = fact.get("last_accessed_at")
+                if last_accessed:
+                    access_boost = self._access_recency_boost(last_accessed)
+                    decay = min(1.0, decay * access_boost)
+                score *= decay

             fact["score"] = score
             scored.append(fact)
@@ -591,3 +599,41 @@ class FactRetriever:
             return math.pow(0.5, age_days / self.half_life)
         except (ValueError, TypeError):
             return 1.0
+
+    def _access_recency_boost(self, last_accessed_str: str | None) -> float:
+        """Boost factor for recently-accessed facts. Range [1.0, 1.5].
+
+        Facts accessed within 1 half-life get up to a 1.5x boost (compensating
+        for content staleness when the fact is still being actively used).
+        The boost decays linearly from 1.5 to 1.0 (no boost) at one half-life.
+
+        Returns 1.0 if half-life is disabled or the timestamp is missing.
+        """
+        if not self.half_life or not last_accessed_str:
+            return 1.0
+
+        try:
+            if isinstance(last_accessed_str, str):
+                ts = datetime.fromisoformat(last_accessed_str.replace("Z", "+00:00"))
+            else:
+                ts = last_accessed_str
+
+            if ts.tzinfo is None:
+                ts = ts.replace(tzinfo=timezone.utc)
+
+            age_days = (datetime.now(timezone.utc) - ts).total_seconds() / 86400
+            if age_days < 0:
+                return 1.5  # Future timestamp = just accessed
+
+            half_lives_since_access = age_days / self.half_life
+
+            if half_lives_since_access <= 1.0:
+                # Within 1 half-life: linearly from 1.5 (just now) to 1.0 (at 1 HL)
+                return 1.0 + 0.5 * (1.0 - half_lives_since_access)
+            # Past one half-life: no boost.
+            return 1.0
+        except (ValueError, TypeError):
+            return 1.0
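The boost arithmetic in isolation, with half_life=60: a full 1.5x for a just-accessed fact, declining linearly to 1.0x at one half-life, flat afterwards:

```python
def boost(age_days, half_life=60):
    x = age_days / half_life
    return 1.0 + 0.5 * (1.0 - x) if x <= 1.0 else 1.0

print(boost(0), boost(30), boost(60), boost(90))   # 1.5 1.25 1.0 1.0
# Combined with decay: a fact updated 60 days ago but accessed today scores
# min(1.0, 0.5 * 1.5) = 0.75 instead of 0.5.
```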
@@ -172,13 +172,13 @@ class MemPalacePortalProvider(MemoryProvider):

         # Extract key facts for Hologram
         if self._hologram_store:
-            # Simple heuristic: look for \"I prefer\", \"The project uses\", etc.
+            # Simple heuristic: look for "I prefer", "The project uses", etc.
             for msg in messages:
-                if msg.get(\"role\") == \"user\":
-                    content = msg.get(\"content\", \"\")
-                    if \"prefer\" in content.lower() or \"use\" in content.lower():
+                if msg.get("role") == "user":
+                    content = msg.get("content", "")
+                    if "prefer" in content.lower() or "use" in content.lower():
                         try:
-                            self._hologram_store.add_fact(content[:200], category=\"user_pref\")
+                            self._hologram_store.add_fact(content[:200], category="user_pref")
                         except Exception:
                             pass

run_agent.py (109 changed lines)
@@ -721,6 +721,19 @@ class AIAgent:
         self._current_tool: str | None = None
         self._api_call_count: int = 0

+        # Poka-yoke #309: Circuit breaker for error cascading
+        # P(error | prev was error) = 58.6% vs P(error | prev was success) = 25.2%
+        # After 3+ consecutive errors, inject guidance to break the cascade.
+        self._consecutive_tool_errors: int = 0
+        self._error_streak_tool_names: list = []  # track which tools are in the streak
+
+        # Poka-yoke #310: Tool fixation detection
+        # Marathon sessions show tool fixation — the same tool called 8-25 times in a row.
+        # After 5 consecutive calls to the same tool, nudge the agent to diversify.
+        self._last_tool_name: str | None = None
+        self._same_tool_streak: int = 0
+        self._tool_fixation_threshold: int = 5
+
         # Centralized logging — agent.log (INFO+) and errors.log (WARNING+)
         # both live under ~/.hermes/logs/. Idempotent, so gateway mode
         # (which creates a new AIAgent per message) won't duplicate handlers.
@@ -6238,6 +6251,12 @@ class AIAgent:
         def _run_tool(index, tool_call, function_name, function_args):
             """Worker function executed in a thread."""
             start = time.time()
+            # Poka-yoke #310: Tool fixation detection (concurrent path)
+            if function_name == self._last_tool_name:
+                self._same_tool_streak += 1
+            else:
+                self._last_tool_name = function_name
+                self._same_tool_streak = 1
             try:
                 result = self._invoke_tool(function_name, function_args, effective_task_id, tool_call.id)
             except Exception as tool_error:
@@ -6288,6 +6307,13 @@ class AIAgent:
             if is_error:
                 result_preview = function_result[:200] if len(function_result) > 200 else function_result
                 logger.warning("Tool %s returned error (%.2fs): %s", function_name, tool_duration, result_preview)
+                # Circuit breaker: track consecutive errors
+                self._consecutive_tool_errors += 1
+                self._error_streak_tool_names.append(function_name)
+            else:
+                # Reset circuit breaker on success
+                self._consecutive_tool_errors = 0
+                self._error_streak_tool_names = []

             if self.tool_progress_callback:
                 try:
@@ -6331,6 +6357,41 @@ class AIAgent:
             if subdir_hints:
                 function_result += subdir_hints

+            # Circuit breaker: inject warning after 3+ consecutive errors
+            if self._consecutive_tool_errors >= 3:
+                streak_info = self._error_streak_tool_names[-self._consecutive_tool_errors:]
+                unique_tools = list(dict.fromkeys(streak_info))
+                if self._consecutive_tool_errors == 3:
+                    cb_msg = (
+                        f"\n\n⚠️ CIRCUIT BREAKER: You have had {self._consecutive_tool_errors} consecutive tool errors "
+                        f"({', '.join(unique_tools)}). Errors cascade — P(error|error) is 2.33x higher than normal. "
+                        f"Consider: (1) trying a different tool type, (2) using terminal to debug, "
+                        f"(3) simplifying your approach, or (4) asking the user for guidance."
+                    )
+                    function_result += cb_msg
+                elif self._consecutive_tool_errors == 6:
+                    cb_msg = (
+                        f"\n\n🛑 CIRCUIT BREAKER: {self._consecutive_tool_errors} consecutive errors. "
+                        f"The error cascade is severe. STOP retrying the same approach. "
+                        f"Use terminal to investigate, or switch strategies entirely."
+                    )
+                    function_result += cb_msg
+                elif self._consecutive_tool_errors >= 9 and self._consecutive_tool_errors % 3 == 0:
+                    cb_msg = (
+                        f"\n\n🔴 CIRCUIT BREAKER: {self._consecutive_tool_errors} consecutive errors. "
+                        f"Terminal is your only reliable recovery path. Use it now."
+                    )
+                    function_result += cb_msg
+
+            # Poka-yoke #310: Tool fixation nudge
+            if self._same_tool_streak >= self._tool_fixation_threshold and self._same_tool_streak % self._tool_fixation_threshold == 0:
+                fixation_msg = (
+                    f"\n\n🔄 TOOL FIXATION: You have called `{function_name}` {self._same_tool_streak} times consecutively. "
+                    f"Consider: (1) trying a different tool, (2) using `terminal` to verify your approach, "
+                    f"(3) stepping back to reassess the task."
+                )
+                function_result += fixation_msg
+
             # Append tool result message in order
             tool_msg = {
                 "role": "tool",
@@ -6416,6 +6477,13 @@ class AIAgent:
             self._current_tool = function_name
             self._touch_activity(f"executing tool: {function_name}")

+            # Poka-yoke #310: Tool fixation detection
+            if function_name == self._last_tool_name:
+                self._same_tool_streak += 1
+            else:
+                self._last_tool_name = function_name
+                self._same_tool_streak = 1
+
             if self.tool_progress_callback:
                 try:
                     preview = _build_tool_preview(function_name, function_args)
@@ -6609,8 +6677,14 @@ class AIAgent:
             _is_error_result, _ = _detect_tool_failure(function_name, function_result)
             if _is_error_result:
                 logger.warning("Tool %s returned error (%.2fs): %s", function_name, tool_duration, result_preview)
+                # Circuit breaker: track consecutive errors
+                self._consecutive_tool_errors += 1
+                self._error_streak_tool_names.append(function_name)
             else:
                 logger.info("tool %s completed (%.2fs, %d chars)", function_name, tool_duration, len(function_result))
+                # Reset circuit breaker on success
+                self._consecutive_tool_errors = 0
+                self._error_streak_tool_names = []

             if self.tool_progress_callback:
                 try:
@@ -6642,6 +6716,41 @@ class AIAgent:
             if subdir_hints:
                 function_result += subdir_hints

+            # Circuit breaker: inject warning after 3+ consecutive errors
+            if self._consecutive_tool_errors >= 3:
+                streak_info = self._error_streak_tool_names[-self._consecutive_tool_errors:]
+                unique_tools = list(dict.fromkeys(streak_info))  # preserve order, deduplicate
+                if self._consecutive_tool_errors == 3:
+                    cb_msg = (
+                        f"\n\n⚠️ CIRCUIT BREAKER: You have had {self._consecutive_tool_errors} consecutive tool errors "
+                        f"({', '.join(unique_tools)}). Errors cascade — P(error|error) is 2.33x higher than normal. "
+                        f"Consider: (1) trying a different tool type, (2) using terminal to debug, "
+                        f"(3) simplifying your approach, or (4) asking the user for guidance."
+                    )
+                    function_result += cb_msg
+                elif self._consecutive_tool_errors == 6:
+                    cb_msg = (
+                        f"\n\n🛑 CIRCUIT BREAKER: {self._consecutive_tool_errors} consecutive errors. "
+                        f"The error cascade is severe. STOP retrying the same approach. "
+                        f"Use terminal to investigate, or switch strategies entirely."
+                    )
+                    function_result += cb_msg
+                elif self._consecutive_tool_errors >= 9 and self._consecutive_tool_errors % 3 == 0:
+                    cb_msg = (
+                        f"\n\n🔴 CIRCUIT BREAKER: {self._consecutive_tool_errors} consecutive errors. "
+                        f"Terminal is your only reliable recovery path. Use it now."
+                    )
+                    function_result += cb_msg
+
+            # Poka-yoke #310: Tool fixation nudge
+            if self._same_tool_streak >= self._tool_fixation_threshold and self._same_tool_streak % self._tool_fixation_threshold == 0:
+                fixation_msg = (
+                    f"\n\n🔄 TOOL FIXATION: You have called `{function_name}` {self._same_tool_streak} times consecutively. "
+                    f"Consider: (1) trying a different tool, (2) using `terminal` to verify your approach, "
+                    f"(3) stepping back to reassess the task."
+                )
+                function_result += fixation_msg
+
             tool_msg = {
                 "role": "tool",
                 "content": function_result,
scripts/benchmark_local_models.py (new file, 284 lines)
@@ -0,0 +1,284 @@
#!/usr/bin/env python3
"""
Benchmark local Ollama models against the 50 tok/s UX threshold.

Usage:
    python3 scripts/benchmark_local_models.py [--models MODEL1,MODEL2] [--prompt PROMPT] [--rounds N]
    python3 scripts/benchmark_local_models.py --all   # test all pulled models
    python3 scripts/benchmark_local_models.py --json  # JSON output for CI
"""

import argparse
import json
import os
import sys
import time
import urllib.request
import urllib.error
from dataclasses import dataclass, asdict
from typing import Optional

OLLAMA_BASE = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
THRESHOLD_TOK_S = 50.0

BENCHMARK_PROMPT = (
    "Explain the difference between TCP and UDP protocols. "
    "Cover reliability, ordering, speed, and use cases. "
    "Be thorough but concise. Write at least 300 words."
)


@dataclass
class BenchmarkResult:
    model: str
    size_gb: float
    prompt_tokens: int
    eval_tokens: int
    eval_duration_s: float
    tokens_per_second: float
    total_duration_s: float
    rounds: int
    avg_tok_s: float
    meets_threshold: bool
    error: Optional[str] = None


def get_models() -> list[dict]:
    """List all pulled Ollama models."""
    url = f"{OLLAMA_BASE}/api/tags"
    try:
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req, timeout=10) as resp:
            data = json.loads(resp.read())
            return data.get("models", [])
    except Exception as e:
        print(f"Error connecting to Ollama at {OLLAMA_BASE}: {e}", file=sys.stderr)
        sys.exit(1)


def benchmark_model(model: str, prompt: str, num_predict: int = 512) -> dict:
    """Run a single benchmark generation, return timing stats."""
    url = f"{OLLAMA_BASE}/api/generate"
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {
            "num_predict": num_predict,
            "temperature": 0.1,  # low temp for consistent output
        },
    }).encode()

    req = urllib.request.Request(url, data=payload, method="POST")
    req.add_header("Content-Type", "application/json")

    start = time.monotonic()
    try:
        with urllib.request.urlopen(req, timeout=300) as resp:
            data = json.loads(resp.read())
    except urllib.error.HTTPError as e:
        body = e.read().decode() if e.fp else str(e)
        raise RuntimeError(f"HTTP {e.code}: {body[:200]}")
    except Exception as e:
        raise RuntimeError(str(e))
    elapsed = time.monotonic() - start

    prompt_tokens = data.get("prompt_eval_count", 0)
    eval_tokens = data.get("eval_count", 0)
    eval_duration_ns = data.get("eval_duration", 0)
    total_duration_ns = data.get("total_duration", 0)

    eval_duration_s = eval_duration_ns / 1e9 if eval_duration_ns else elapsed
    total_duration_s = total_duration_ns / 1e9 if total_duration_ns else elapsed
    tok_s = eval_tokens / eval_duration_s if eval_duration_s > 0 else 0.0

    return {
        "prompt_tokens": prompt_tokens,
        "eval_tokens": eval_tokens,
        "eval_duration_s": round(eval_duration_s, 2),
        "total_duration_s": round(total_duration_s, 2),
        "tokens_per_second": round(tok_s, 1),
    }


def run_benchmark(
    model_name: str,
    model_size: float,
    prompt: str,
    rounds: int,
    num_predict: int,
    threshold: float = 50.0,
) -> BenchmarkResult:
    """Run multiple rounds and compute the average."""
    results = []
    errors = []

    for i in range(rounds):
        try:
            r = benchmark_model(model_name, prompt, num_predict)
            results.append(r)
            print(f"  Round {i+1}/{rounds}: {r['tokens_per_second']} tok/s "
                  f"({r['eval_tokens']} tokens in {r['eval_duration_s']}s)")
        except Exception as e:
            errors.append(str(e))
            print(f"  Round {i+1}/{rounds}: ERROR - {e}")

    if not results:
        return BenchmarkResult(
            model=model_name,
            size_gb=model_size,
            prompt_tokens=0, eval_tokens=0,
            eval_duration_s=0, tokens_per_second=0,
            total_duration_s=0, rounds=rounds,
            avg_tok_s=0, meets_threshold=False,
            error="; ".join(errors),
        )

    avg_tok_s = sum(r["tokens_per_second"] for r in results) / len(results)
    avg_tok_s = round(avg_tok_s, 1)

    return BenchmarkResult(
        model=model_name,
        size_gb=model_size,
        prompt_tokens=sum(r["prompt_tokens"] for r in results) // len(results),
        eval_tokens=sum(r["eval_tokens"] for r in results) // len(results),
        eval_duration_s=round(sum(r["eval_duration_s"] for r in results) / len(results), 2),
        tokens_per_second=avg_tok_s,
        total_duration_s=round(sum(r["total_duration_s"] for r in results) / len(results), 2),
        rounds=len(results),
        avg_tok_s=avg_tok_s,
        meets_threshold=avg_tok_s >= threshold,
    )


def format_report(results: list[BenchmarkResult], threshold: float = 50.0) -> str:
    """Format a human-readable benchmark report."""
    lines = []
    lines.append("")
    lines.append("=" * 72)
    lines.append(f" LOCAL MODEL BENCHMARK — {threshold:.0f} tok/s UX Threshold")
    lines.append("=" * 72)
    lines.append("")

    # Summary table
    header = f"{'Model':<25} {'Size':>6} {'tok/s':>8} {'Threshold':>10} {'Status':>8}"
    lines.append(header)
    lines.append("-" * 72)

    passed = 0
    failed = 0
    errors = 0

    for r in sorted(results, key=lambda x: x.avg_tok_s, reverse=True):
        size_str = f"{r.size_gb:.1f}GB"
        tok_s_str = f"{r.avg_tok_s:.1f}"

        if r.error:
            status = "ERROR"
            errors += 1
        elif r.meets_threshold:
            status = "PASS"
            passed += 1
        else:
            status = "FAIL"
            failed += 1

        marker = ">" if r.meets_threshold else "X" if r.error else "!"
        thresh_str = f">= {threshold:.0f}"
        lines.append(f" {marker} {r.model:<23} {size_str:>6} {tok_s_str:>8} {thresh_str:>10} {status:>8}")

    lines.append("-" * 72)
    lines.append(f" Passed: {passed} | Failed: {failed} | Errors: {errors} | Total: {len(results)}")
    lines.append("")

    # Detail section for failures
    failures = [r for r in results if not r.meets_threshold and not r.error]
    if failures:
        lines.append(" FAILED MODELS (below threshold):")
        for r in sorted(failures, key=lambda x: x.avg_tok_s):
            gap = threshold - r.avg_tok_s
            lines.append(f"   - {r.model}: {r.avg_tok_s:.1f} tok/s "
                         f"({gap:.1f} tok/s short, {r.eval_tokens} avg tokens/round)")
        lines.append("")

    error_list = [r for r in results if r.error]
    if error_list:
        lines.append(" ERRORS:")
        for r in error_list:
            lines.append(f"   - {r.model}: {r.error}")
        lines.append("")

    # Hardware info
    import platform
    lines.append(f" Host: {platform.node()} | {platform.system()} {platform.release()}")
    lines.append(f" Ollama: {OLLAMA_BASE}")
    lines.append("")

    return "\n".join(lines)


def main():
    parser = argparse.ArgumentParser(description="Benchmark local Ollama models vs 50 tok/s threshold")
    parser.add_argument("--models", help="Comma-separated model names (default: all)")
    parser.add_argument("--prompt", default=BENCHMARK_PROMPT, help="Benchmark prompt")
    parser.add_argument("--rounds", type=int, default=3, help="Rounds per model (default: 3)")
    parser.add_argument("--tokens", type=int, default=512, help="Max tokens to generate (default: 512)")
    parser.add_argument("--json", action="store_true", help="JSON output for CI")
    parser.add_argument("--all", action="store_true", help="Test all pulled models")
    parser.add_argument("--threshold", type=float, default=THRESHOLD_TOK_S, help="tok/s threshold")
    args = parser.parse_args()
    threshold = args.threshold

    # Get model list
    available = get_models()
    if not available:
        print("No models found. Pull a model first: ollama pull <model>", file=sys.stderr)
        sys.exit(1)

    if args.models:
        names = [m.strip() for m in args.models.split(",")]
        models = [m for m in available if m["name"] in names]
        missing = set(names) - set(m["name"] for m in models)
        if missing:
            print(f"Models not found: {', '.join(missing)}", file=sys.stderr)
            print(f"Available: {', '.join(m['name'] for m in available)}", file=sys.stderr)
    else:
        # No --models given (or --all): benchmark every pulled model
        models = available

    print(f"Benchmarking {len(models)} model(s) against {threshold} tok/s threshold")
    print(f"Ollama: {OLLAMA_BASE} | Rounds: {args.rounds} | Max tokens: {args.tokens}")
    print()

    results = []
    for m in models:
        name = m["name"]
        size_gb = m.get("size", 0) / (1024**3)
        print(f" {name} ({size_gb:.1f}GB):")

        result = run_benchmark(name, size_gb, args.prompt, args.rounds, args.tokens, threshold)
        results.append(result)

    # Output
    report = format_report(results, threshold)
    if args.json:
        output = {
            "threshold_tok_s": threshold,
            "ollama_base": OLLAMA_BASE,
            "rounds": args.rounds,
            "results": [asdict(r) for r in results],
            "passed": sum(1 for r in results if r.meets_threshold),
            "failed": sum(1 for r in results if not r.meets_threshold and not r.error),
            "errors": sum(1 for r in results if r.error),
        }
        print(json.dumps(output, indent=2))
    else:
        print(report)

    # Exit code: 0 if all pass, 1 if any fail/error
    if any(not r.meets_threshold or r.error for r in results):
        sys.exit(1)
    sys.exit(0)


if __name__ == "__main__":
    main()
@@ -7,7 +7,7 @@ from unittest.mock import AsyncMock, patch, MagicMock

 import pytest

-from cron.scheduler import _resolve_origin, _resolve_delivery_target, _deliver_result, run_job, SILENT_MARKER, _build_job_prompt
+from cron.scheduler import _resolve_origin, _resolve_delivery_target, _deliver_result, run_job, SILENT_MARKER, _build_job_prompt, _check_model_context_compat, ModelContextError, CRON_MIN_CONTEXT_TOKENS

 class TestResolveOrigin:
@@ -914,3 +914,90 @@ class TestDeploySyncGuard:
         # Should not raise even if we somehow break AIAgent — the flag is set.
         sched_mod._validate_agent_interface()
         # No exception = pass.
+
+
+class TestCheckModelContextCompat:
+    """Tests for _check_model_context_compat() preflight validation."""
+
+    def test_raises_when_context_below_minimum(self):
+        """ModelContextError raised when detected context < CRON_MIN_CONTEXT_TOKENS."""
+        with patch("cron.scheduler._check_model_context_compat") as mock_check:
+            mock_check.side_effect = ModelContextError(
+                "Model 'gemma4:latest' has a context window of 8,192 tokens, "
+                "which is below the minimum 64,000 required by Hermes Agent."
+            )
+            with pytest.raises(ModelContextError, match="below the minimum"):
+                mock_check("gemma4:latest")
+
+    def test_passes_when_context_sufficient(self):
+        """No exception when the model has sufficient context."""
+        with patch(
+            "agent.model_metadata.get_model_context_length",
+            return_value=128_000,
+        ):
+            # Should not raise
+            _check_model_context_compat("claude-sonnet-4-6", base_url="", api_key="")
+
+    def test_passes_with_config_override_below_minimum(self):
+        """When config_context_length is set, the check is skipped regardless of detected size."""
+        with patch(
+            "agent.model_metadata.get_model_context_length",
+            return_value=8_192,  # Would normally fail
+        ) as mock_get:
+            # config_context_length override skips the check
+            _check_model_context_compat(
+                "gemma4:latest",
+                config_context_length=8_192,
+            )
+            # get_model_context_length should NOT be called — check is bypassed
+            mock_get.assert_not_called()
+
+    def test_raises_model_context_error_subclass_of_value_error(self):
+        """ModelContextError is a subclass of ValueError."""
+        assert issubclass(ModelContextError, ValueError)
+
+    def test_detection_failure_is_non_fatal(self):
+        """If context length detection raises, the check is skipped (fail-open)."""
+        with patch(
+            "agent.model_metadata.get_model_context_length",
+            side_effect=Exception("network error"),
+        ):
+            # Should NOT raise — detection failure is logged and ignored
+            _check_model_context_compat("unknown-model", base_url="http://localhost:11434")
+
+    def test_raises_for_small_context_model(self):
+        """End-to-end: a model with an 8K context raises ModelContextError."""
+        with patch(
+            "agent.model_metadata.get_model_context_length",
+            return_value=8_192,
+        ):
+            with pytest.raises(ModelContextError) as exc_info:
+                _check_model_context_compat("gemma4:latest")
+            err = str(exc_info.value)
+            assert "gemma4:latest" in err
+            assert "8,192" in err
+            assert str(CRON_MIN_CONTEXT_TOKENS) in err or "64,000" in err
+            assert "config.yaml" in err
+
+    def test_boundary_exactly_at_minimum_passes(self):
+        """A model with exactly CRON_MIN_CONTEXT_TOKENS context is accepted."""
+        with patch(
+            "agent.model_metadata.get_model_context_length",
+            return_value=CRON_MIN_CONTEXT_TOKENS,
+        ):
+            # Should not raise
+            _check_model_context_compat("borderline-model")
+
+    def test_boundary_one_below_minimum_raises(self):
+        """A model with a context length one below the minimum is rejected."""
+        with patch(
+            "agent.model_metadata.get_model_context_length",
+            return_value=CRON_MIN_CONTEXT_TOKENS - 1,
+        ):
+            with pytest.raises(ModelContextError):
+                _check_model_context_compat("borderline-model")
tests/gateway/test_weak_credential_guard.py (new file, 52 lines)
@@ -0,0 +1,52 @@
"""Tests for weak credential guard in gateway/config.py."""
|
||||
|
||||
import os
|
||||
import pytest
|
||||
|
||||
from gateway.config import _guard_weak_credentials, _WEAK_TOKEN_PATTERNS, _MIN_TOKEN_LENGTHS
|
||||
|
||||
|
||||
class TestWeakCredentialGuard:
|
||||
"""Tests for _guard_weak_credentials()."""
|
||||
|
||||
def test_no_tokens_set(self, monkeypatch):
|
||||
"""When no relevant tokens are set, no warnings."""
|
||||
for var in _MIN_TOKEN_LENGTHS:
|
||||
monkeypatch.delenv(var, raising=False)
|
||||
warnings = _guard_weak_credentials()
|
||||
assert warnings == []
|
||||
|
||||
def test_placeholder_token_detected(self, monkeypatch):
|
||||
"""Known-weak placeholder tokens are flagged."""
|
||||
monkeypatch.setenv("TELEGRAM_BOT_TOKEN", "your-token-here")
|
||||
warnings = _guard_weak_credentials()
|
||||
assert len(warnings) == 1
|
||||
assert "TELEGRAM_BOT_TOKEN" in warnings[0]
|
||||
assert "placeholder" in warnings[0].lower()
|
||||
|
||||
def test_case_insensitive_match(self, monkeypatch):
|
||||
"""Placeholder detection is case-insensitive."""
|
||||
monkeypatch.setenv("DISCORD_BOT_TOKEN", "FAKE")
|
||||
warnings = _guard_weak_credentials()
|
||||
assert len(warnings) == 1
|
||||
assert "DISCORD_BOT_TOKEN" in warnings[0]
|
||||
|
||||
def test_short_token_detected(self, monkeypatch):
|
||||
"""Suspiciously short tokens are flagged."""
|
||||
monkeypatch.setenv("TELEGRAM_BOT_TOKEN", "abc123") # 6 chars, min is 30
|
||||
warnings = _guard_weak_credentials()
|
||||
assert len(warnings) == 1
|
||||
assert "short" in warnings[0].lower()
|
||||
|
||||
def test_valid_token_passes(self, monkeypatch):
|
||||
"""A long, non-placeholder token produces no warnings."""
|
||||
monkeypatch.setenv("TELEGRAM_BOT_TOKEN", "1234567890:ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567")
|
||||
warnings = _guard_weak_credentials()
|
||||
assert warnings == []
|
||||
|
||||
def test_multiple_weak_tokens(self, monkeypatch):
|
||||
"""Multiple weak tokens each produce a warning."""
|
||||
monkeypatch.setenv("TELEGRAM_BOT_TOKEN", "change-me")
|
||||
monkeypatch.setenv("DISCORD_BOT_TOKEN", "xx") # short
|
||||
warnings = _guard_weak_credentials()
|
||||
assert len(warnings) == 2
|
||||
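gateway/config.py itself is not part of this diff, so here is a minimal sketch of _guard_weak_credentials() consistent with the tests above — the pattern set and minimum lengths are assumptions, not the real values:

import os

_WEAK_TOKEN_PATTERNS = {"your-token-here", "change-me", "changeme", "fake", "xxx"}  # assumed
_MIN_TOKEN_LENGTHS = {"TELEGRAM_BOT_TOKEN": 30, "DISCORD_BOT_TOKEN": 30}  # assumed

def _guard_weak_credentials():
    """Return one human-readable warning per weak credential found."""
    warnings = []
    for var, min_len in _MIN_TOKEN_LENGTHS.items():
        value = os.environ.get(var)
        if not value:
            continue  # unset token: the feature is simply disabled, not weak
        if value.lower() in _WEAK_TOKEN_PATTERNS:
            warnings.append(f"{var} looks like a placeholder value; replace it with a real token.")
        elif len(value) < min_len:
            warnings.append(f"{var} is suspiciously short ({len(value)} chars, expected >= {min_len}).")
    return warnings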
209
tests/plugins/memory/test_temporal_decay.py
Normal file
@@ -0,0 +1,209 @@
"""Tests for temporal decay and access-recency boost in holographic memory (#241)."""
|
||||
|
||||
import math
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
class TestTemporalDecay:
|
||||
"""Test _temporal_decay exponential decay formula."""
|
||||
|
||||
def _make_retriever(self, half_life=60):
|
||||
from plugins.memory.holographic.retrieval import FactRetriever
|
||||
store = MagicMock()
|
||||
return FactRetriever(store=store, temporal_decay_half_life=half_life)
|
||||
|
||||
def test_fresh_fact_no_decay(self):
|
||||
"""A fact updated today should have decay ≈ 1.0."""
|
||||
r = self._make_retriever(half_life=60)
|
||||
now = datetime.now(timezone.utc).isoformat()
|
||||
decay = r._temporal_decay(now)
|
||||
assert decay > 0.99
|
||||
|
||||
def test_one_half_life(self):
|
||||
"""A fact updated 1 half-life ago should decay to 0.5."""
|
||||
r = self._make_retriever(half_life=60)
|
||||
old = (datetime.now(timezone.utc) - timedelta(days=60)).isoformat()
|
||||
decay = r._temporal_decay(old)
|
||||
assert abs(decay - 0.5) < 0.01
|
||||
|
||||
def test_two_half_lives(self):
|
||||
"""A fact updated 2 half-lives ago should decay to 0.25."""
|
||||
r = self._make_retriever(half_life=60)
|
||||
old = (datetime.now(timezone.utc) - timedelta(days=120)).isoformat()
|
||||
decay = r._temporal_decay(old)
|
||||
assert abs(decay - 0.25) < 0.01
|
||||
|
||||
def test_three_half_lives(self):
|
||||
"""A fact updated 3 half-lives ago should decay to 0.125."""
|
||||
r = self._make_retriever(half_life=60)
|
||||
old = (datetime.now(timezone.utc) - timedelta(days=180)).isoformat()
|
||||
decay = r._temporal_decay(old)
|
||||
assert abs(decay - 0.125) < 0.01
|
||||
|
||||
def test_half_life_disabled(self):
|
||||
"""When half_life=0, decay should always be 1.0."""
|
||||
r = self._make_retriever(half_life=0)
|
||||
old = (datetime.now(timezone.utc) - timedelta(days=365)).isoformat()
|
||||
assert r._temporal_decay(old) == 1.0
|
||||
|
||||
def test_none_timestamp(self):
|
||||
"""Missing timestamp should return 1.0 (no decay)."""
|
||||
r = self._make_retriever(half_life=60)
|
||||
assert r._temporal_decay(None) == 1.0
|
||||
|
||||
def test_empty_timestamp(self):
|
||||
r = self._make_retriever(half_life=60)
|
||||
assert r._temporal_decay("") == 1.0
|
||||
|
||||
def test_invalid_timestamp(self):
|
||||
"""Malformed timestamp should return 1.0 (fail open)."""
|
||||
r = self._make_retriever(half_life=60)
|
||||
assert r._temporal_decay("not-a-date") == 1.0
|
||||
|
||||
def test_future_timestamp(self):
|
||||
"""Future timestamp should return 1.0 (no decay for future dates)."""
|
||||
r = self._make_retriever(half_life=60)
|
||||
future = (datetime.now(timezone.utc) + timedelta(days=10)).isoformat()
|
||||
assert r._temporal_decay(future) == 1.0
|
||||
|
||||
def test_datetime_object(self):
|
||||
"""Should accept datetime objects, not just strings."""
|
||||
r = self._make_retriever(half_life=60)
|
||||
old = datetime.now(timezone.utc) - timedelta(days=60)
|
||||
decay = r._temporal_decay(old)
|
||||
assert abs(decay - 0.5) < 0.01
|
||||
|
||||
def test_different_half_lives(self):
|
||||
"""30-day half-life should decay faster than 90-day."""
|
||||
r30 = self._make_retriever(half_life=30)
|
||||
r90 = self._make_retriever(half_life=90)
|
||||
old = (datetime.now(timezone.utc) - timedelta(days=45)).isoformat()
|
||||
assert r30._temporal_decay(old) < r90._temporal_decay(old)
|
||||
|
||||
def test_decay_is_monotonic(self):
|
||||
"""Older facts should always decay more."""
|
||||
r = self._make_retriever(half_life=60)
|
||||
now = datetime.now(timezone.utc)
|
||||
d1 = r._temporal_decay((now - timedelta(days=10)).isoformat())
|
||||
d2 = r._temporal_decay((now - timedelta(days=30)).isoformat())
|
||||
d3 = r._temporal_decay((now - timedelta(days=60)).isoformat())
|
||||
assert d1 > d2 > d3
|
||||
|
||||
|
||||
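These assertions fully determine the formula: decay = 0.5 ** (age_days / half_life), with 1.0 on every edge case. A minimal standalone sketch inferred from the tests (in FactRetriever this would be a method reading self.temporal_decay_half_life; retrieval.py itself is not shown in this diff):

import math
from datetime import datetime, timezone

def temporal_decay(last_updated, half_life_days=60):
    """0.5 ** (age_days / half_life); 1.0 whenever decay cannot apply."""
    if not half_life_days or not last_updated:
        return 1.0  # disabled (half_life=0) or missing timestamp: no decay
    try:
        ts = last_updated if isinstance(last_updated, datetime) else datetime.fromisoformat(last_updated)
        age_days = (datetime.now(timezone.utc) - ts).total_seconds() / 86400.0
    except (ValueError, TypeError):
        return 1.0  # malformed or naive timestamp: fail open
    if age_days <= 0:
        return 1.0  # future timestamps get no decay
    return math.pow(0.5, age_days / half_life_days)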
class TestAccessRecencyBoost:
    """Test _access_recency_boost for recently-accessed facts."""

    def _make_retriever(self, half_life=60):
        from plugins.memory.holographic.retrieval import FactRetriever
        store = MagicMock()
        return FactRetriever(store=store, temporal_decay_half_life=half_life)

    def test_just_accessed_max_boost(self):
        """A fact accessed just now should get maximum boost (1.5)."""
        r = self._make_retriever(half_life=60)
        now = datetime.now(timezone.utc).isoformat()
        boost = r._access_recency_boost(now)
        assert boost > 1.45  # Near 1.5

    def test_one_half_life_no_boost(self):
        """A fact accessed 1 half-life ago should have no boost (1.0)."""
        r = self._make_retriever(half_life=60)
        old = (datetime.now(timezone.utc) - timedelta(days=60)).isoformat()
        boost = r._access_recency_boost(old)
        assert abs(boost - 1.0) < 0.01

    def test_half_way_boost(self):
        """A fact accessed 0.5 half-lives ago should get ~1.25 boost."""
        r = self._make_retriever(half_life=60)
        old = (datetime.now(timezone.utc) - timedelta(days=30)).isoformat()
        boost = r._access_recency_boost(old)
        assert abs(boost - 1.25) < 0.05

    def test_beyond_one_half_life_no_boost(self):
        """Beyond 1 half-life, boost should be 1.0."""
        r = self._make_retriever(half_life=60)
        old = (datetime.now(timezone.utc) - timedelta(days=90)).isoformat()
        boost = r._access_recency_boost(old)
        assert boost == 1.0

    def test_disabled_no_boost(self):
        """When half_life=0, boost should be 1.0."""
        r = self._make_retriever(half_life=0)
        now = datetime.now(timezone.utc).isoformat()
        assert r._access_recency_boost(now) == 1.0

    def test_none_timestamp(self):
        r = self._make_retriever(half_life=60)
        assert r._access_recency_boost(None) == 1.0

    def test_invalid_timestamp(self):
        r = self._make_retriever(half_life=60)
        assert r._access_recency_boost("bad") == 1.0

    def test_boost_range(self):
        """Boost should always be in [1.0, 1.5]."""
        r = self._make_retriever(half_life=60)
        now = datetime.now(timezone.utc)
        for days in [0, 1, 15, 30, 45, 59, 60, 90, 365]:
            ts = (now - timedelta(days=days)).isoformat()
            boost = r._access_recency_boost(ts)
            assert 1.0 <= boost <= 1.5, f"days={days}, boost={boost}"

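The boost the tests describe is linear rather than exponential: 1.5 at "just accessed", 1.0 at one half-life, flat afterwards. A matching standalone sketch, again inferred from the assertions rather than taken from retrieval.py:

from datetime import datetime, timezone

def access_recency_boost(last_accessed, half_life_days=60):
    """Linear boost in [1.0, 1.5] that expires after one half-life."""
    if not half_life_days or not last_accessed:
        return 1.0
    try:
        ts = last_accessed if isinstance(last_accessed, datetime) else datetime.fromisoformat(last_accessed)
        age_days = (datetime.now(timezone.utc) - ts).total_seconds() / 86400.0
    except (ValueError, TypeError):
        return 1.0  # bad timestamp: no boost rather than an error
    frac = age_days / half_life_days
    if frac >= 1.0:
        return 1.0  # beyond one half-life: no boost
    return 1.5 - 0.5 * max(frac, 0.0)  # clamps future timestamps to max boost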
class TestTemporalDecayIntegration:
    """Test that decay integrates correctly with search scoring."""

    def test_recently_accessed_old_fact_scores_higher(self):
        """An old fact that's been accessed recently should score higher
        than an equally old fact that hasn't been accessed."""
        from plugins.memory.holographic.retrieval import FactRetriever
        store = MagicMock()
        r = FactRetriever(store=store, temporal_decay_half_life=60)

        now = datetime.now(timezone.utc)
        old_date = (now - timedelta(days=120)).isoformat()  # 2 half-lives old
        recent_access = (now - timedelta(days=10)).isoformat()  # accessed 10 days ago
        old_access = (now - timedelta(days=200)).isoformat()  # accessed 200 days ago

        # Old fact, recently accessed
        decay1 = r._temporal_decay(old_date)
        boost1 = r._access_recency_boost(recent_access)
        effective1 = min(1.0, decay1 * boost1)

        # Old fact, not recently accessed
        decay2 = r._temporal_decay(old_date)
        boost2 = r._access_recency_boost(old_access)
        effective2 = min(1.0, decay2 * boost2)

        assert effective1 > effective2

    def test_decay_formula_45_days(self):
        """Verify exact decay at 45 days with 60-day half-life."""
        from plugins.memory.holographic.retrieval import FactRetriever
        r = FactRetriever(store=MagicMock(), temporal_decay_half_life=60)
        old = (datetime.now(timezone.utc) - timedelta(days=45)).isoformat()
        decay = r._temporal_decay(old)
        expected = math.pow(0.5, 45 / 60)
        assert abs(decay - expected) < 0.001


class TestDecayDefaultEnabled:
    """Verify the default half-life is non-zero (decay is on by default)."""

    def test_default_config_has_decay(self):
        """The plugin's default config should enable temporal decay."""
        from plugins.memory.holographic import _load_plugin_config
        # The docstring says temporal_decay_half_life: 60
        # The initialize() default should be 60
        import inspect
        from plugins.memory.holographic import HolographicMemoryProvider
        src = inspect.getsource(HolographicMemoryProvider.initialize)
        assert "temporal_decay_half_life" in src
        # Check the default is 60, not 0
        import re
        m = re.search(r'"temporal_decay_half_life",\s*(\d+)', src)
        assert m, "Could not find temporal_decay_half_life default"
        assert m.group(1) == "60", f"Default is {m.group(1)}, expected 60"
@@ -137,3 +137,78 @@ class TestBackwardCompat:
    def test_tool_to_toolset_map(self):
        assert isinstance(TOOL_TO_TOOLSET_MAP, dict)
        assert len(TOOL_TO_TOOLSET_MAP) > 0


class TestToolReturnTypeValidation:
    """Poka-yoke: tool handlers must return JSON strings."""

    def test_handler_returning_dict_is_wrapped(self, monkeypatch):
        """A handler that returns a dict should be auto-wrapped to JSON string."""
        from tools.registry import registry
        from model_tools import handle_function_call
        import json

        # Register a bad handler that returns dict instead of str
        registry.register(
            name="__test_bad_dict",
            toolset="test",
            schema={"name": "__test_bad_dict", "description": "test", "parameters": {"type": "object", "properties": {}}},
            handler=lambda args, **kw: {"this is": "a dict not a string"},
        )
        result = handle_function_call("__test_bad_dict", {})
        parsed = json.loads(result)
        assert "output" in parsed
        assert "_type_warning" in parsed
        # Cleanup
        registry._tools.pop("__test_bad_dict", None)

    def test_handler_returning_none_is_wrapped(self, monkeypatch):
        """A handler that returns None should be auto-wrapped."""
        from tools.registry import registry
        from model_tools import handle_function_call
        import json

        registry.register(
            name="__test_bad_none",
            toolset="test",
            schema={"name": "__test_bad_none", "description": "test", "parameters": {"type": "object", "properties": {}}},
            handler=lambda args, **kw: None,
        )
        result = handle_function_call("__test_bad_none", {})
        parsed = json.loads(result)
        assert "_type_warning" in parsed
        registry._tools.pop("__test_bad_none", None)

    def test_handler_returning_non_json_string_is_wrapped(self):
        """A handler returning a plain string (not JSON) should be wrapped."""
        from tools.registry import registry
        from model_tools import handle_function_call
        import json

        registry.register(
            name="__test_bad_plain",
            toolset="test",
            schema={"name": "__test_bad_plain", "description": "test", "parameters": {"type": "object", "properties": {}}},
            handler=lambda args, **kw: "just a plain string, not json",
        )
        result = handle_function_call("__test_bad_plain", {})
        parsed = json.loads(result)
        assert "output" in parsed
        registry._tools.pop("__test_bad_plain", None)

    def test_handler_returning_valid_json_passes_through(self):
        """A handler returning valid JSON string passes through unchanged."""
        from tools.registry import registry
        from model_tools import handle_function_call
        import json

        registry.register(
            name="__test_good",
            toolset="test",
            schema={"name": "__test_good", "description": "test", "parameters": {"type": "object", "properties": {}}},
            handler=lambda args, **kw: json.dumps({"status": "ok", "data": [1, 2, 3]}),
        )
        result = handle_function_call("__test_good", {})
        parsed = json.loads(result)
        assert parsed == {"status": "ok", "data": [1, 2, 3]}
        registry._tools.pop("__test_good", None)

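The guard itself is not part of this diff. A sketch of the coercion step as it might look inside model_tools.handle_function_call, inferred from the four tests above (the helper name and the warning text are assumptions):

import json

def _coerce_tool_result(result):
    """Wrap anything that is not a valid JSON string, tagging it with _type_warning."""
    if isinstance(result, str):
        try:
            json.loads(result)
            return result  # valid JSON string: pass through unchanged
        except (ValueError, TypeError):
            pass  # plain string: fall through to the wrapper below
    return json.dumps({
        "output": result if isinstance(result, str) else repr(result),
        "_type_warning": f"handler returned {type(result).__name__}, expected JSON string",
    })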
@@ -144,7 +144,8 @@ class TestMemoryStoreReplace:
    def test_replace_no_match(self, store):
        store.add("memory", "fact A")
        result = store.replace("memory", "nonexistent", "new")
        assert result["success"] is False
        assert result["success"] is True
        assert result["result"] == "no_match"

    def test_replace_ambiguous_match(self, store):
        store.add("memory", "server A runs nginx")
@@ -177,7 +178,8 @@ class TestMemoryStoreRemove:

    def test_remove_no_match(self, store):
        result = store.remove("memory", "nonexistent")
        assert result["success"] is False
        assert result["success"] is True
        assert result["result"] == "no_match"

    def test_remove_empty_old_text(self, store):
        result = store.remove("memory", " ")
107
tests/tools/test_syntax_preflight.py
Normal file
@@ -0,0 +1,107 @@
"""Tests for syntax preflight check in execute_code (issue #312)."""
|
||||
|
||||
import ast
|
||||
import json
|
||||
import pytest
|
||||
|
||||
|
||||
class TestSyntaxPreflight:
|
||||
"""Verify that execute_code catches syntax errors before sandbox execution."""
|
||||
|
||||
def test_valid_syntax_passes_parse(self):
|
||||
"""Valid Python should pass ast.parse."""
|
||||
code = "print('hello')\nx = 1 + 2\n"
|
||||
ast.parse(code) # should not raise
|
||||
|
||||
def test_syntax_error_indentation(self):
|
||||
"""IndentationError is a subclass of SyntaxError."""
|
||||
code = "def foo():\nbar()\n"
|
||||
with pytest.raises(SyntaxError):
|
||||
ast.parse(code)
|
||||
|
||||
def test_syntax_error_missing_colon(self):
|
||||
code = "if True\n pass\n"
|
||||
with pytest.raises(SyntaxError):
|
||||
ast.parse(code)
|
||||
|
||||
def test_syntax_error_unmatched_paren(self):
|
||||
code = "x = (1 + 2\n"
|
||||
with pytest.raises(SyntaxError):
|
||||
ast.parse(code)
|
||||
|
||||
def test_syntax_error_invalid_token(self):
|
||||
code = "x = 1 +*\n"
|
||||
with pytest.raises(SyntaxError):
|
||||
ast.parse(code)
|
||||
|
||||
def test_syntax_error_details(self):
|
||||
"""SyntaxError should provide line, offset, msg."""
|
||||
code = "if True\n pass\n"
|
||||
with pytest.raises(SyntaxError) as exc_info:
|
||||
ast.parse(code)
|
||||
e = exc_info.value
|
||||
assert e.lineno is not None
|
||||
assert e.msg is not None
|
||||
|
||||
def test_empty_string_passes(self):
|
||||
"""Empty string is valid Python (empty module)."""
|
||||
ast.parse("")
|
||||
|
||||
def test_comments_only_passes(self):
|
||||
ast.parse("# just a comment\n# another\n")
|
||||
|
||||
def test_complex_valid_code(self):
|
||||
code = '''
|
||||
import os
|
||||
def foo(x):
|
||||
if x > 0:
|
||||
return x * 2
|
||||
return 0
|
||||
|
||||
result = [foo(i) for i in range(10)]
|
||||
print(result)
|
||||
'''
|
||||
ast.parse(code)
|
||||
|
||||
|
||||
class TestSyntaxPreflightResponse:
|
||||
"""Test the error response format from the preflight check."""
|
||||
|
||||
def _check_syntax(self, code):
|
||||
"""Mimic the preflight check logic from execute_code."""
|
||||
try:
|
||||
ast.parse(code)
|
||||
return None
|
||||
except SyntaxError as e:
|
||||
return json.dumps({
|
||||
"error": f"Python syntax error: {e.msg}",
|
||||
"line": e.lineno,
|
||||
"offset": e.offset,
|
||||
"text": (e.text or "").strip()[:200],
|
||||
})
|
||||
|
||||
def test_returns_json_error(self):
|
||||
result = self._check_syntax("if True\n pass\n")
|
||||
assert result is not None
|
||||
data = json.loads(result)
|
||||
assert "error" in data
|
||||
assert "syntax error" in data["error"].lower()
|
||||
|
||||
def test_includes_line_number(self):
|
||||
result = self._check_syntax("x = 1\nif True\n pass\n")
|
||||
data = json.loads(result)
|
||||
assert data["line"] == 2 # error on line 2
|
||||
|
||||
def test_includes_offset(self):
|
||||
result = self._check_syntax("x = (1 + 2\n")
|
||||
data = json.loads(result)
|
||||
assert data["offset"] is not None
|
||||
|
||||
def test_includes_snippet(self):
|
||||
result = self._check_syntax("if True\n")
|
||||
data = json.loads(result)
|
||||
assert "if True" in data["text"]
|
||||
|
||||
def test_none_for_valid_code(self):
|
||||
result = self._check_syntax("print('ok')")
|
||||
assert result is None
|
||||
@@ -28,6 +28,7 @@ Platform: Linux / macOS only (Unix domain sockets for local). Disabled on Window
Remote execution additionally requires Python 3 in the terminal backend.
"""

import ast
import base64
import json
import logging
@@ -893,6 +894,20 @@ def execute_code(
    if not code or not code.strip():
        return json.dumps({"error": "No code provided."})

    # Poka-yoke (#312): Syntax check before execution.
    # 83.2% of execute_code errors are Python exceptions; most are syntax
    # errors the LLM generated. ast.parse() is sub-millisecond and catches
    # them before we spin up a sandbox child process.
    try:
        ast.parse(code)
    except SyntaxError as e:
        return json.dumps({
            "error": f"Python syntax error: {e.msg}",
            "line": e.lineno,
            "offset": e.offset,
            "text": (e.text or "").strip()[:200],
        })

    # Dispatch: remote backends use file-based RPC, local uses UDS
    from tools.terminal_tool import _get_env_config
    env_type = _get_env_config()["env_type"]
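An illustrative round trip through the preflight (the exact error message comes from CPython and varies by version):

result = execute_code("x = (1 + 2\n")
# -> '{"error": "Python syntax error: ...", "line": 1, "offset": ..., "text": "x = (1 + 2"}'
# Valid code skips the preflight entirely and proceeds to the sandbox dispatch below.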
@@ -316,10 +316,17 @@ def cronjob(
        updated = resume_job(job_id)
        return json.dumps({"success": True, "job": _format_job(updated)}, indent=2)

    if normalized in {"run", "run_now", "trigger"}:
    if normalized in {"run", "trigger"}:
        updated = trigger_job(job_id)
        return json.dumps({"success": True, "job": _format_job(updated)}, indent=2)

    if normalized == "run_now":
        from cron.jobs import run_job_now
        result = run_job_now(job_id)
        if result is None:
            return json.dumps({"success": False, "error": "Job not found"}, indent=2)
        return json.dumps(result, indent=2)

    if normalized == "update":
        updates: Dict[str, Any] = {}
        if prompt is not None:
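The split in practice (illustrative tool calls; "run" remains an alias of "trigger"):

cronjob(action="run", job_id="abc123")      # queues the job for the next scheduler tick
cronjob(action="run_now", job_id="abc123")  # executes synchronously and returns
                                            # run_job_now()'s result dict from cron/jobs.py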
@@ -260,8 +260,12 @@ class MemoryStore:
        entries = self._entries_for(target)
        matches = [(i, e) for i, e in enumerate(entries) if old_text in e]

        if len(matches) == 0:
            return {"success": False, "error": f"No entry matched '{old_text}'."}
        if not matches:
            return {
                "success": True,
                "result": "no_match",
                "message": f"No entry matched '{old_text}'. The search substring was not found in any existing entry.",
            }

        if len(matches) > 1:
            # If all matches are identical (exact duplicates), operate on the first one
@@ -310,8 +314,12 @@ class MemoryStore:
        entries = self._entries_for(target)
        matches = [(i, e) for i, e in enumerate(entries) if old_text in e]

        if len(matches) == 0:
            return {"success": False, "error": f"No entry matched '{old_text}'."}
        if not matches:
            return {
                "success": True,
                "result": "no_match",
                "message": f"No entry matched '{old_text}'. The search substring was not found in any existing entry.",
            }

        if len(matches) > 1:
            # If all matches are identical (exact duplicates), remove the first one
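The behavior change in practice (illustrative; return shapes taken from the hunks above):

store.replace("memory", "nonexistent", "new")
# before: {"success": False, "error": "No entry matched 'nonexistent'."}
# after:  {"success": True, "result": "no_match",
#          "message": "No entry matched 'nonexistent'. The search substring was not found in any existing entry."}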
@@ -449,30 +457,30 @@ def memory_tool(
    Returns JSON string with results.
    """
    if store is None:
        return json.dumps({"success": False, "error": "Memory is not available. It may be disabled in config or this environment."}, ensure_ascii=False)
        return tool_error("Memory is not available. It may be disabled in config or this environment.", success=False)

    if target not in ("memory", "user"):
        return json.dumps({"success": False, "error": f"Invalid target '{target}'. Use 'memory' or 'user'."}, ensure_ascii=False)
        return tool_error(f"Invalid target '{target}'. Use 'memory' or 'user'.", success=False)

    if action == "add":
        if not content:
            return json.dumps({"success": False, "error": "Content is required for 'add' action."}, ensure_ascii=False)
            return tool_error("Content is required for 'add' action.", success=False)
        result = store.add(target, content)

    elif action == "replace":
        if not old_text:
            return json.dumps({"success": False, "error": "old_text is required for 'replace' action."}, ensure_ascii=False)
            return tool_error("old_text is required for 'replace' action.", success=False)
        if not content:
            return json.dumps({"success": False, "error": "content is required for 'replace' action."}, ensure_ascii=False)
            return tool_error("content is required for 'replace' action.", success=False)
        result = store.replace(target, old_text, content)

    elif action == "remove":
        if not old_text:
            return json.dumps({"success": False, "error": "old_text is required for 'remove' action."}, ensure_ascii=False)
            return tool_error("old_text is required for 'remove' action.", success=False)
        result = store.remove(target, old_text)

    else:
        return json.dumps({"success": False, "error": f"Unknown action '{action}'. Use: add, replace, remove"}, ensure_ascii=False)
        return tool_error(f"Unknown action '{action}'. Use: add, replace, remove", success=False)

    return json.dumps(result, ensure_ascii=False)

@@ -539,7 +547,7 @@ MEMORY_SCHEMA = {


# --- Registry ---
from tools.registry import registry
from tools.registry import registry, tool_error

registry.register(
    name="memory",
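tool_error() itself is not shown in this diff; a minimal sketch consistent with how it is called above (the signature and key names are assumptions):

import json

def tool_error(message: str, success: bool = False) -> str:
    """Uniform JSON error envelope for tool handlers."""
    return json.dumps({"success": success, "error": message}, ensure_ascii=False)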