Compare commits
1 Commits
eab5635a7a

agent/account_usage.py (new file, 326 lines)
@@ -0,0 +1,326 @@
from __future__ import annotations

from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Any, Optional

import httpx

from agent.anthropic_adapter import _is_oauth_token, resolve_anthropic_token
from hermes_cli.auth import _read_codex_tokens, resolve_codex_runtime_credentials
from hermes_cli.runtime_provider import resolve_runtime_provider


def _utc_now() -> datetime:
    return datetime.now(timezone.utc)


@dataclass(frozen=True)
class AccountUsageWindow:
    label: str
    used_percent: Optional[float] = None
    reset_at: Optional[datetime] = None
    detail: Optional[str] = None


@dataclass(frozen=True)
class AccountUsageSnapshot:
    provider: str
    source: str
    fetched_at: datetime
    title: str = "Account limits"
    plan: Optional[str] = None
    windows: tuple[AccountUsageWindow, ...] = ()
    details: tuple[str, ...] = ()
    unavailable_reason: Optional[str] = None

    @property
    def available(self) -> bool:
        return bool(self.windows or self.details) and not self.unavailable_reason


def _title_case_slug(value: Optional[str]) -> Optional[str]:
    cleaned = str(value or "").strip()
    if not cleaned:
        return None
    return cleaned.replace("_", " ").replace("-", " ").title()


def _parse_dt(value: Any) -> Optional[datetime]:
    if value in (None, ""):
        return None
    if isinstance(value, (int, float)):
        return datetime.fromtimestamp(float(value), tz=timezone.utc)
    if isinstance(value, str):
        text = value.strip()
        if not text:
            return None
        if text.endswith("Z"):
            text = text[:-1] + "+00:00"
        try:
            dt = datetime.fromisoformat(text)
            return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)
        except ValueError:
            return None
    return None


def _format_reset(dt: Optional[datetime]) -> str:
    if not dt:
        return "unknown"
    local_dt = dt.astimezone()
    delta = dt - _utc_now()
    total_seconds = int(delta.total_seconds())
    if total_seconds <= 0:
        return f"now ({local_dt.strftime('%Y-%m-%d %H:%M %Z')})"
    hours, rem = divmod(total_seconds, 3600)
    minutes = rem // 60
    if hours >= 24:
        days, hours = divmod(hours, 24)
        rel = f"in {days}d {hours}h"
    elif hours > 0:
        rel = f"in {hours}h {minutes}m"
    else:
        rel = f"in {minutes}m"
    return f"{rel} ({local_dt.strftime('%Y-%m-%d %H:%M %Z')})"


def render_account_usage_lines(snapshot: Optional[AccountUsageSnapshot], *, markdown: bool = False) -> list[str]:
    if not snapshot:
        return []
    header = f"📈 {'**' if markdown else ''}{snapshot.title}{'**' if markdown else ''}"
    lines = [header]
    if snapshot.plan:
        lines.append(f"Provider: {snapshot.provider} ({snapshot.plan})")
    else:
        lines.append(f"Provider: {snapshot.provider}")
    for window in snapshot.windows:
        if window.used_percent is None:
            base = f"{window.label}: unavailable"
        else:
            remaining = max(0, round(100 - float(window.used_percent)))
            used = max(0, round(float(window.used_percent)))
            base = f"{window.label}: {remaining}% remaining ({used}% used)"
        if window.reset_at:
            base += f" • resets {_format_reset(window.reset_at)}"
        elif window.detail:
            base += f" • {window.detail}"
        lines.append(base)
    for detail in snapshot.details:
        lines.append(detail)
    if snapshot.unavailable_reason:
        lines.append(f"Unavailable: {snapshot.unavailable_reason}")
    return lines


def _resolve_codex_usage_url(base_url: str) -> str:
    normalized = (base_url or "").strip().rstrip("/")
    if not normalized:
        normalized = "https://chatgpt.com/backend-api/codex"
    if normalized.endswith("/codex"):
        normalized = normalized[: -len("/codex")]
    if "/backend-api" in normalized:
        return normalized + "/wham/usage"
    return normalized + "/api/codex/usage"


def _fetch_codex_account_usage() -> Optional[AccountUsageSnapshot]:
    creds = resolve_codex_runtime_credentials(refresh_if_expiring=True)
    token_data = _read_codex_tokens()
    tokens = token_data.get("tokens") or {}
    account_id = str(tokens.get("account_id", "") or "").strip() or None
    headers = {
        "Authorization": f"Bearer {creds['api_key']}",
        "Accept": "application/json",
        "User-Agent": "codex-cli",
    }
    if account_id:
        headers["ChatGPT-Account-Id"] = account_id
    with httpx.Client(timeout=15.0) as client:
        response = client.get(_resolve_codex_usage_url(creds.get("base_url", "")), headers=headers)
        response.raise_for_status()
        payload = response.json() or {}
    rate_limit = payload.get("rate_limit") or {}
    windows: list[AccountUsageWindow] = []
    for key, label in (("primary_window", "Session"), ("secondary_window", "Weekly")):
        window = rate_limit.get(key) or {}
        used = window.get("used_percent")
        if used is None:
            continue
        windows.append(
            AccountUsageWindow(
                label=label,
                used_percent=float(used),
                reset_at=_parse_dt(window.get("reset_at")),
            )
        )
    details: list[str] = []
    credits = payload.get("credits") or {}
    if credits.get("has_credits"):
        balance = credits.get("balance")
        if isinstance(balance, (int, float)):
            details.append(f"Credits balance: ${float(balance):.2f}")
        elif credits.get("unlimited"):
            details.append("Credits balance: unlimited")
    return AccountUsageSnapshot(
        provider="openai-codex",
        source="usage_api",
        fetched_at=_utc_now(),
        plan=_title_case_slug(payload.get("plan_type")),
        windows=tuple(windows),
        details=tuple(details),
    )


def _fetch_anthropic_account_usage() -> Optional[AccountUsageSnapshot]:
    token = (resolve_anthropic_token() or "").strip()
    if not token:
        return None
    if not _is_oauth_token(token):
        return AccountUsageSnapshot(
            provider="anthropic",
            source="oauth_usage_api",
            fetched_at=_utc_now(),
            unavailable_reason="Anthropic account limits are only available for OAuth-backed Claude accounts.",
        )
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
        "Content-Type": "application/json",
        "anthropic-beta": "oauth-2025-04-20",
        "User-Agent": "claude-code/2.1.0",
    }
    with httpx.Client(timeout=15.0) as client:
        response = client.get("https://api.anthropic.com/api/oauth/usage", headers=headers)
        response.raise_for_status()
        payload = response.json() or {}
    windows: list[AccountUsageWindow] = []
    mapping = (
        ("five_hour", "Current session"),
        ("seven_day", "Current week"),
        ("seven_day_opus", "Opus week"),
        ("seven_day_sonnet", "Sonnet week"),
    )
    for key, label in mapping:
        window = payload.get(key) or {}
        util = window.get("utilization")
        if util is None:
            continue
        used = float(util) * 100 if float(util) <= 1 else float(util)
        windows.append(
            AccountUsageWindow(
                label=label,
                used_percent=used,
                reset_at=_parse_dt(window.get("resets_at")),
            )
        )
    details: list[str] = []
    extra = payload.get("extra_usage") or {}
    if extra.get("is_enabled"):
        used_credits = extra.get("used_credits")
        monthly_limit = extra.get("monthly_limit")
        currency = extra.get("currency") or "USD"
        if isinstance(used_credits, (int, float)) and isinstance(monthly_limit, (int, float)):
            details.append(
                f"Extra usage: {used_credits:.2f} / {monthly_limit:.2f} {currency}"
            )
    return AccountUsageSnapshot(
        provider="anthropic",
        source="oauth_usage_api",
        fetched_at=_utc_now(),
        windows=tuple(windows),
        details=tuple(details),
    )


def _fetch_openrouter_account_usage(base_url: Optional[str], api_key: Optional[str]) -> Optional[AccountUsageSnapshot]:
    runtime = resolve_runtime_provider(
        requested="openrouter",
        explicit_base_url=base_url,
        explicit_api_key=api_key,
    )
    token = str(runtime.get("api_key", "") or "").strip()
    if not token:
        return None
    normalized = str(runtime.get("base_url", "") or "").rstrip("/")
    credits_url = f"{normalized}/credits"
    key_url = f"{normalized}/key"
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
    }
    with httpx.Client(timeout=10.0) as client:
        credits_resp = client.get(credits_url, headers=headers)
        credits_resp.raise_for_status()
        credits = (credits_resp.json() or {}).get("data") or {}
        try:
            key_resp = client.get(key_url, headers=headers)
            key_resp.raise_for_status()
            key_data = (key_resp.json() or {}).get("data") or {}
        except Exception:
            key_data = {}
    total_credits = float(credits.get("total_credits") or 0.0)
    total_usage = float(credits.get("total_usage") or 0.0)
    details = [f"Credits balance: ${max(0.0, total_credits - total_usage):.2f}"]
    windows: list[AccountUsageWindow] = []
    limit = key_data.get("limit")
    limit_remaining = key_data.get("limit_remaining")
    limit_reset = str(key_data.get("limit_reset") or "").strip()
    usage = key_data.get("usage")
    if (
        isinstance(limit, (int, float))
        and float(limit) > 0
        and isinstance(limit_remaining, (int, float))
        and 0 <= float(limit_remaining) <= float(limit)
    ):
        limit_value = float(limit)
        remaining_value = float(limit_remaining)
        used_percent = ((limit_value - remaining_value) / limit_value) * 100
        detail_parts = [f"${remaining_value:.2f} of ${limit_value:.2f} remaining"]
        if limit_reset:
            detail_parts.append(f"resets {limit_reset}")
        windows.append(
            AccountUsageWindow(
                label="API key quota",
                used_percent=used_percent,
                detail=" • ".join(detail_parts),
            )
        )
    if isinstance(usage, (int, float)):
        usage_parts = [f"API key usage: ${float(usage):.2f} total"]
        for value, label in (
            (key_data.get("usage_daily"), "today"),
            (key_data.get("usage_weekly"), "this week"),
            (key_data.get("usage_monthly"), "this month"),
        ):
            if isinstance(value, (int, float)) and float(value) > 0:
                usage_parts.append(f"${float(value):.2f} {label}")
        details.append(" • ".join(usage_parts))
    return AccountUsageSnapshot(
        provider="openrouter",
        source="credits_api",
        fetched_at=_utc_now(),
        windows=tuple(windows),
        details=tuple(details),
    )


def fetch_account_usage(
    provider: Optional[str],
    *,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
) -> Optional[AccountUsageSnapshot]:
    normalized = str(provider or "").strip().lower()
    if normalized in {"", "auto", "custom"}:
        return None
    try:
        if normalized == "openai-codex":
            return _fetch_codex_account_usage()
        if normalized == "anthropic":
            return _fetch_anthropic_account_usage()
        if normalized == "openrouter":
            return _fetch_openrouter_account_usage(base_url, api_key)
    except Exception:
        return None
    return None
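Downstream callers only need the two public helpers above. A minimal sketch of the intended composition (illustrative; the provider slug and the printing loop are not part of this commit):

    from agent.account_usage import fetch_account_usage, render_account_usage_lines

    # Returns None for unknown/auto/custom providers or on any fetch error.
    snapshot = fetch_account_usage("openrouter")
    if snapshot is not None and snapshot.available:
        for line in render_account_usage_lines(snapshot):
            print(line)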
cli.py (25 lines added)
@@ -13,6 +13,7 @@ Usage:
     python cli.py --list-tools  # List available tools and exit
 """

+import concurrent.futures
 import logging
 import os
 import shutil
@@ -63,6 +64,7 @@ from agent.usage_pricing import (
     format_duration_compact,
     format_token_count_compact,
 )
+from agent.account_usage import fetch_account_usage, render_account_usage_lines
 from hermes_cli.banner import _format_context_length, format_banner_version_label

 _COMMAND_SPINNER_FRAMES = ("⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏")
@@ -6471,6 +6473,29 @@ class HermesCLI:
         if cost_result.status == "unknown":
             print(f"  Note: Pricing unknown for {agent.model}")

+        # Account limits -- fetched off-thread with a hard timeout so slow
+        # provider APIs don't hang the prompt.
+        provider = getattr(agent, "provider", None) or getattr(self, "provider", None)
+        base_url = getattr(agent, "base_url", None) or getattr(self, "base_url", None)
+        api_key = getattr(agent, "api_key", None) or getattr(self, "api_key", None)
+        account_snapshot = None
+        if provider:
+            with concurrent.futures.ThreadPoolExecutor(max_workers=1) as _pool:
+                try:
+                    account_snapshot = _pool.submit(
+                        fetch_account_usage,
+                        provider,
+                        base_url=base_url,
+                        api_key=api_key,
+                    ).result(timeout=10.0)
+                except (concurrent.futures.TimeoutError, Exception):
+                    account_snapshot = None
+        account_lines = [f"  {line}" for line in render_account_usage_lines(account_snapshot)]
+        if account_lines:
+            print()
+            for line in account_lines:
+                print(line)

         if self.verbose:
             logging.getLogger().setLevel(logging.DEBUG)
             for noisy in ('openai', 'openai._base_client', 'httpx', 'httpcore', 'asyncio', 'hpack', 'grpc', 'modal'):
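When a snapshot comes back, the new block prints after the pricing note, each line indented two spaces. Sample output, reusing values from this commit's tests (illustrative, not captured from a real run):

      📈 Account limits
      Provider: openai-codex (Pro)
      Session: 85% remaining (15% used)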
gateway/run.py

@@ -28,6 +28,8 @@ from pathlib import Path
 from datetime import datetime
 from typing import Dict, Optional, Any, List

+from agent.account_usage import fetch_account_usage, render_account_usage_lines
+
 # ---------------------------------------------------------------------------
 # SSL certificate auto-detection for NixOS and other non-standard systems.
 # Must run BEFORE any HTTP library (discord, aiohttp, etc.) is imported.
@@ -6481,6 +6483,38 @@ class GatewayRunner:
         if cached:
             agent = cached[0]

+        # Resolve provider/base_url/api_key for the account-usage fetch.
+        # Prefer the live agent; fall back to persisted billing data on the
+        # SessionDB row so `/usage` still returns account info between turns
+        # when no agent is resident.
+        provider = getattr(agent, "provider", None) if agent and agent is not _AGENT_PENDING_SENTINEL else None
+        base_url = getattr(agent, "base_url", None) if agent and agent is not _AGENT_PENDING_SENTINEL else None
+        api_key = getattr(agent, "api_key", None) if agent and agent is not _AGENT_PENDING_SENTINEL else None
+        if not provider and getattr(self, "_session_db", None) is not None:
+            try:
+                _entry_for_billing = self.session_store.get_or_create_session(source)
+                persisted = self._session_db.get_session(_entry_for_billing.session_id) or {}
+            except Exception:
+                persisted = {}
+            provider = provider or persisted.get("billing_provider")
+            base_url = base_url or persisted.get("billing_base_url")
+
+        # Fetch account usage off the event loop so slow provider APIs don't
+        # block the gateway. Failures are non-fatal -- account_lines stays [].
+        account_lines: list[str] = []
+        if provider:
+            try:
+                account_snapshot = await asyncio.to_thread(
+                    fetch_account_usage,
+                    provider,
+                    base_url=base_url,
+                    api_key=api_key,
+                )
+            except Exception:
+                account_snapshot = None
+            if account_snapshot:
+                account_lines = render_account_usage_lines(account_snapshot, markdown=True)
+
         if agent and hasattr(agent, "session_total_tokens") and agent.session_api_calls > 0:
             lines = []

@@ -6538,6 +6572,10 @@ class GatewayRunner:
             if ctx.compression_count:
                 lines.append(f"Compressions: {ctx.compression_count}")

+            if account_lines:
+                lines.append("")
+                lines.extend(account_lines)
+
             return "\n".join(lines)

         # No agent at all -- check session history for a rough count
@@ -6547,12 +6585,18 @@ class GatewayRunner:
             from agent.model_metadata import estimate_messages_tokens_rough
             msgs = [m for m in history if m.get("role") in ("user", "assistant") and m.get("content")]
             approx = estimate_messages_tokens_rough(msgs)
-            return (
-                f"📊 **Session Info**\n"
-                f"Messages: {len(msgs)}\n"
-                f"Estimated context: ~{approx:,} tokens\n"
-                f"_(Detailed usage available after the first agent response)_"
-            )
+            lines = [
+                "📊 **Session Info**",
+                f"Messages: {len(msgs)}",
+                f"Estimated context: ~{approx:,} tokens",
+                "_(Detailed usage available after the first agent response)_",
+            ]
+            if account_lines:
+                lines.append("")
+                lines.extend(account_lines)
+            return "\n".join(lines)
+        if account_lines:
+            return "\n".join(account_lines)
         return "No usage data available for this session."

     async def _handle_insights_command(self, event: MessageEvent) -> str:
@@ -26,7 +26,6 @@ from agent.memory_provider import MemoryProvider
 from tools.registry import tool_error
 from .store import MemoryStore
 from .retrieval import FactRetriever
-from .observations import ObservationSynthesizer

 logger = logging.getLogger(__name__)

@@ -38,29 +37,28 @@ logger = logging.getLogger(__name__)
 FACT_STORE_SCHEMA = {
     "name": "fact_store",
     "description": (
-        "Deep structured memory with algebraic reasoning and grounded observation synthesis. "
+        "Deep structured memory with algebraic reasoning. "
         "Use alongside the memory tool — memory for always-on context, "
-        "fact_store for deep recall, compositional queries, and higher-order observations.\n\n"
+        "fact_store for deep recall and compositional queries.\n\n"
         "ACTIONS (simple → powerful):\n"
         "• add — Store a fact the user would expect you to remember.\n"
         "• search — Keyword lookup ('editor config', 'deploy process').\n"
         "• probe — Entity recall: ALL facts about a person/thing.\n"
         "• related — What connects to an entity? Structural adjacency.\n"
         "• reason — Compositional: facts connected to MULTIPLE entities simultaneously.\n"
-        "• observe — Synthesized higher-order observations backed by supporting facts.\n"
         "• contradict — Memory hygiene: find facts making conflicting claims.\n"
         "• update/remove/list — CRUD operations.\n\n"
-        "IMPORTANT: Before answering questions about the user, ALWAYS probe/reason/observe first."
+        "IMPORTANT: Before answering questions about the user, ALWAYS probe or reason first."
     ),
     "parameters": {
         "type": "object",
         "properties": {
             "action": {
                 "type": "string",
-                "enum": ["add", "search", "probe", "related", "reason", "observe", "contradict", "update", "remove", "list"],
+                "enum": ["add", "search", "probe", "related", "reason", "contradict", "update", "remove", "list"],
             },
             "content": {"type": "string", "description": "Fact content (required for 'add')."},
-            "query": {"type": "string", "description": "Search query (required for 'search'/'observe')."},
+            "query": {"type": "string", "description": "Search query (required for 'search')."},
             "entity": {"type": "string", "description": "Entity name for 'probe'/'related'."},
             "entities": {"type": "array", "items": {"type": "string"}, "description": "Entity names for 'reason'."},
             "fact_id": {"type": "integer", "description": "Fact ID for 'update'/'remove'."},
@@ -68,12 +66,6 @@ FACT_STORE_SCHEMA = {
             "tags": {"type": "string", "description": "Comma-separated tags."},
             "trust_delta": {"type": "number", "description": "Trust adjustment for 'update'."},
             "min_trust": {"type": "number", "description": "Minimum trust filter (default: 0.3)."},
-            "min_confidence": {"type": "number", "description": "Minimum observation confidence (default: 0.6)."},
-            "observation_type": {
-                "type": "string",
-                "enum": ["recurring_preference", "stable_direction", "behavioral_pattern"],
-                "description": "Optional observation type filter for 'observe'.",
-            },
             "limit": {"type": "integer", "description": "Max results (default: 10)."},
         },
         "required": ["action"],
@@ -126,9 +118,7 @@ class HolographicMemoryProvider(MemoryProvider):
         self._config = config or _load_plugin_config()
         self._store = None
         self._retriever = None
-        self._observation_synth = None
         self._min_trust = float(self._config.get("min_trust_threshold", 0.3))
-        self._observation_min_confidence = float(self._config.get("observation_min_confidence", 0.6))

     @property
     def name(self) -> str:
@@ -187,7 +177,6 @@ class HolographicMemoryProvider(MemoryProvider):
             hrr_weight=hrr_weight,
             hrr_dim=hrr_dim,
         )
-        self._observation_synth = ObservationSynthesizer(self._store)
         self._session_id = session_id

     def system_prompt_block(self) -> str:
@@ -204,76 +193,30 @@ class HolographicMemoryProvider(MemoryProvider):
                 "# Holographic Memory\n"
                 "Active. Empty fact store — proactively add facts the user would expect you to remember.\n"
                 "Use fact_store(action='add') to store durable structured facts about people, projects, preferences, decisions.\n"
-                "Use fact_store(action='observe') to synthesize higher-order observations with evidence.\n"
                 "Use fact_feedback to rate facts after using them (trains trust scores)."
             )
         return (
             f"# Holographic Memory\n"
             f"Active. {total} facts stored with entity resolution and trust scoring.\n"
-            f"Use fact_store to search, probe entities, reason across entities, or synthesize observations.\n"
+            f"Use fact_store to search, probe entities, reason across entities, or add facts.\n"
             f"Use fact_feedback to rate facts after using them (trains trust scores)."
         )

     def prefetch(self, query: str, *, session_id: str = "") -> str:
-        if not query:
+        if not self._retriever or not query:
             return ""
-        parts = []
-        raw_results = []
-        try:
-            if self._retriever:
-                raw_results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
-        except Exception as e:
-            logger.debug("Holographic prefetch fact search failed: %s", e)
-            raw_results = []
-        observations = []
-        try:
-            if self._observation_synth:
-                observations = self._observation_synth.observe(
-                    query,
-                    min_confidence=self._observation_min_confidence,
-                    limit=3,
-                    refresh=True,
-                )
-        except Exception as e:
-            logger.debug("Holographic prefetch observation search failed: %s", e)
-            observations = []
-        if not raw_results and observations:
-            seen_fact_ids = set()
-            evidence_backfill = []
-            for observation in observations:
-                for evidence in observation.get("evidence", []):
-                    fact_id = evidence.get("fact_id")
-                    if fact_id in seen_fact_ids:
-                        continue
-                    seen_fact_ids.add(fact_id)
-                    evidence_backfill.append(evidence)
-            raw_results = evidence_backfill[:5]
-        if raw_results:
-            lines = []
-            for r in raw_results:
-                trust = r.get("trust_score", r.get("trust", 0))
-                lines.append(f"- [{trust:.1f}] {r.get('content', '')}")
-            parts.append("## Holographic Memory\n" + "\n".join(lines))
-        if observations:
-            lines = []
-            for observation in observations:
-                evidence_ids = ", ".join(
-                    f"#{item['fact_id']}" for item in observation.get("evidence", [])[:3]
-                ) or "none"
-                lines.append(
-                    f"- [{observation.get('confidence', 0.0):.2f}] "
-                    f"{observation.get('observation_type', 'observation')}: "
-                    f"{observation.get('summary', '')} "
-                    f"(evidence: {evidence_ids})"
-                )
-            parts.append("## Holographic Observations\n" + "\n".join(lines))
-        return "\n\n".join(parts)
+        try:
+            results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
+            if not results:
+                return ""
+            lines = []
+            for r in results:
+                trust = r.get("trust_score", r.get("trust", 0))
+                lines.append(f"- [{trust:.1f}] {r.get('content', '')}")
+            return "## Holographic Memory\n" + "\n".join(lines)
+        except Exception as e:
+            logger.debug("Holographic prefetch failed: %s", e)
+            return ""

     def sync_turn(self, user_content: str, assistant_content: str, *, session_id: str = "") -> None:
         # Holographic memory stores explicit facts via tools, not auto-sync.
@@ -309,7 +252,6 @@ class HolographicMemoryProvider(MemoryProvider):
     def shutdown(self) -> None:
         self._store = None
         self._retriever = None
-        self._observation_synth = None

     # -- Tool handlers -------------------------------------------------------

@@ -363,19 +305,6 @@ class HolographicMemoryProvider(MemoryProvider):
             )
             return json.dumps({"results": results, "count": len(results)})

-        elif action == "observe":
-            synthesizer = self._observation_synth
-            if not synthesizer:
-                return tool_error("Observation synthesizer is not initialized")
-            observations = synthesizer.observe(
-                args.get("query", ""),
-                observation_type=args.get("observation_type"),
-                min_confidence=float(args.get("min_confidence", self._observation_min_confidence)),
-                limit=int(args.get("limit", 10)),
-                refresh=True,
-            )
-            return json.dumps({"observations": observations, "count": len(observations)})
-
         elif action == "contradict":
             results = retriever.contradict(
                 category=args.get("category"),
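With `observe` gone, the surviving call shapes against this schema look like the following sketch (fact content is hypothetical; action names and parameters come from the enum and properties above):

    # Hypothetical examples of the surviving fact_store actions.
    provider.handle_tool_call('fact_store', {'action': 'add', 'content': 'User prefers concise updates'})
    provider.handle_tool_call('fact_store', {'action': 'probe', 'entity': 'User'})
    provider.handle_tool_call('fact_store', {'action': 'reason', 'entities': ['User', 'Hermes'], 'limit': 5})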
plugins/memory/holographic/observations.py (deleted, 249 lines)

@@ -1,249 +0,0 @@
"""Higher-order observation synthesis for holographic memory.

Builds grounded observations from accumulated facts and keeps them in a
separate retrieval layer with explicit evidence links back to supporting facts.
"""

from __future__ import annotations

import re
from typing import Any

from .store import MemoryStore

_TOKEN_RE = re.compile(r"[a-z0-9_]+")
_HIGHER_ORDER_CUES = {
    "prefer",
    "preference",
    "preferences",
    "style",
    "pattern",
    "patterns",
    "behavior",
    "behaviour",
    "habit",
    "habits",
    "workflow",
    "direction",
    "trajectory",
    "strategy",
    "tend",
    "usually",
}

_OBSERVATION_PATTERNS = [
    {
        "observation_type": "recurring_preference",
        "subject": "communication_style",
        "categories": {"user_pref", "general"},
        "labels": {
            "concise": ["concise", "terse", "brief", "short", "no fluff"],
            "result_first": ["result-only", "result only", "outcome only", "quick", "quickly"],
            "silent_ops": ["silent", "no status", "no repetitive status", "no questions"],
        },
        "summary_prefix": "Recurring preference",
    },
    {
        "observation_type": "stable_direction",
        "subject": "project_direction",
        "categories": {"project", "general", "tool"},
        "labels": {
            "local_first": ["local-first", "local first", "local-only", "local only", "ollama", "own hardware"],
            "gitea_first": ["gitea-first", "gitea first", "forge", "pull request", "pr flow", "issue flow"],
            "ansible": ["ansible", "playbook", "role", "deploy via ansible"],
        },
        "summary_prefix": "Stable direction",
    },
    {
        "observation_type": "behavioral_pattern",
        "subject": "operator_workflow",
        "categories": {"general", "project", "tool", "user_pref"},
        "labels": {
            "commit_early": ["commit early", "commits early", "commit after", "wip commit"],
            "pr_first": ["open pr", "push a pr", "pull request", "pr immediately", "create pr"],
            "dedup_guard": ["no dupes", "no duplicates", "avoid duplicate", "existing pr"],
        },
        "summary_prefix": "Behavioral pattern",
    },
]

_TYPE_QUERY_HINTS = {
    "recurring_preference": {"prefer", "preference", "style", "communication", "likes", "wants"},
    "stable_direction": {"direction", "trajectory", "strategy", "project", "roadmap", "moving"},
    "behavioral_pattern": {"pattern", "behavior", "workflow", "habit", "operator", "agent", "usually"},
}


class ObservationSynthesizer:
    """Synthesizes grounded observations from facts and retrieves them by query."""

    def __init__(self, store: MemoryStore):
        self.store = store

    def synthesize(
        self,
        *,
        persist: bool = True,
        min_confidence: float = 0.6,
        limit: int = 10,
    ) -> list[dict[str, Any]]:
        facts = self.store.list_facts(min_trust=0.0, limit=1000)
        observations: list[dict[str, Any]] = []

        for pattern in _OBSERVATION_PATTERNS:
            candidate = self._build_candidate(pattern, facts, min_confidence=min_confidence)
            if not candidate:
                continue

            if persist:
                candidate["observation_id"] = self.store.upsert_observation(
                    candidate["observation_type"],
                    candidate["subject"],
                    candidate["summary"],
                    candidate["confidence"],
                    candidate["evidence_fact_ids"],
                    metadata=candidate["metadata"],
                )

            candidate["evidence"] = self._expand_evidence(candidate["evidence_fact_ids"])
            candidate["evidence_count"] = len(candidate["evidence"])
            candidate.pop("evidence_fact_ids", None)
            observations.append(candidate)

        observations.sort(
            key=lambda item: (item["confidence"], item.get("evidence_count", 0)),
            reverse=True,
        )
        return observations[:limit]

    def observe(
        self,
        query: str = "",
        *,
        observation_type: str | None = None,
        min_confidence: float = 0.6,
        limit: int = 10,
        refresh: bool = True,
    ) -> list[dict[str, Any]]:
        if refresh:
            self.synthesize(persist=True, min_confidence=min_confidence, limit=limit)

        observations = self.store.list_observations(
            observation_type=observation_type,
            min_confidence=min_confidence,
            limit=max(limit * 4, 20),
        )
        if not observations:
            return []

        if not query:
            return observations[:limit]

        query_tokens = self._tokenize(query)
        is_higher_order = bool(query_tokens & _HIGHER_ORDER_CUES)
        ranked: list[dict[str, Any]] = []

        for item in observations:
            searchable = " ".join(
                [
                    item.get("summary", ""),
                    item.get("subject", ""),
                    item.get("observation_type", ""),
                    " ".join(item.get("metadata", {}).get("labels", [])),
                ]
            )
            overlap = self._overlap_score(query_tokens, self._tokenize(searchable))
            type_bonus = self._type_bonus(query_tokens, item.get("observation_type", ""))
            if overlap <= 0 and type_bonus <= 0 and not is_higher_order:
                continue
            ranked_item = dict(item)
            ranked_item["score"] = round(item.get("confidence", 0.0) + overlap + type_bonus, 3)
            ranked.append(ranked_item)

        if not ranked and is_higher_order:
            ranked = [
                {**item, "score": round(float(item.get("confidence", 0.0)), 3)}
                for item in observations
            ]

        ranked.sort(
            key=lambda item: (item.get("score", 0.0), item.get("confidence", 0.0), item.get("evidence_count", 0)),
            reverse=True,
        )
        return ranked[:limit]

    def _build_candidate(
        self,
        pattern: dict[str, Any],
        facts: list[dict[str, Any]],
        *,
        min_confidence: float,
    ) -> dict[str, Any] | None:
        matched_fact_ids: set[int] = set()
        matched_labels: dict[str, set[int]] = {label: set() for label in pattern["labels"]}

        for fact in facts:
            if fact.get("category") not in pattern["categories"]:
                continue
            haystack = f"{fact.get('content', '')} {fact.get('tags', '')}".lower()
            local_match = False
            for label, keywords in pattern["labels"].items():
                if any(keyword in haystack for keyword in keywords):
                    matched_labels[label].add(int(fact["fact_id"]))
                    local_match = True
            if local_match:
                matched_fact_ids.add(int(fact["fact_id"]))

        if len(matched_fact_ids) < 2:
            return None

        active_labels = sorted(label for label, ids in matched_labels.items() if ids)
        confidence = min(0.95, 0.35 + 0.12 * len(matched_fact_ids) + 0.08 * len(active_labels))
        confidence = round(confidence, 3)
        if confidence < min_confidence:
            return None

        label_summary = ", ".join(label.replace("_", "-") for label in active_labels)
        subject_text = pattern["subject"].replace("_", " ")
        summary = (
            f"{pattern['summary_prefix']}: {subject_text} trends toward {label_summary} "
            f"based on {len(matched_fact_ids)} supporting facts."
        )
        return {
            "observation_type": pattern["observation_type"],
            "subject": pattern["subject"],
            "summary": summary,
            "confidence": confidence,
            "metadata": {
                "labels": active_labels,
                "evidence_count": len(matched_fact_ids),
            },
            "evidence_fact_ids": sorted(matched_fact_ids),
        }

    def _expand_evidence(self, fact_ids: list[int]) -> list[dict[str, Any]]:
        facts_by_id = {
            fact["fact_id"]: fact
            for fact in self.store.list_facts(min_trust=0.0, limit=1000)
        }
        return [facts_by_id[fact_id] for fact_id in fact_ids if fact_id in facts_by_id]

    @staticmethod
    def _tokenize(text: str) -> set[str]:
        return set(_TOKEN_RE.findall(text.lower()))

    @staticmethod
    def _overlap_score(query_tokens: set[str], text_tokens: set[str]) -> float:
        if not query_tokens or not text_tokens:
            return 0.0
        overlap = query_tokens & text_tokens
        if not overlap:
            return 0.0
        return round(len(overlap) / max(len(query_tokens), 1), 3)

    @staticmethod
    def _type_bonus(query_tokens: set[str], observation_type: str) -> float:
        hints = _TYPE_QUERY_HINTS.get(observation_type, set())
        if not hints:
            return 0.0
        return 0.25 if query_tokens & hints else 0.0
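For the record, the deleted `_build_candidate` required at least two matching facts before scoring, and its confidence formula saturated at 0.95. A worked check (fact counts hypothetical):

    # Two facts matching two labels clear the 0.6 default threshold:
    assert round(min(0.95, 0.35 + 0.12 * 2 + 0.08 * 2), 3) == 0.75
    # A single fact never reaches the formula -- the len(matched_fact_ids) < 2
    # guard returns None first, which the removed tests further down relied on.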
plugins/memory/holographic/store.py

@@ -3,7 +3,6 @@ SQLite-backed fact store with entity resolution and trust scoring.
 Single-user Hermes memory store plugin.
 """

 import json
 import re
 import sqlite3
 import threading
@@ -74,28 +73,6 @@ CREATE TABLE IF NOT EXISTS memory_banks (
     fact_count INTEGER DEFAULT 0,
     updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
 );

-CREATE TABLE IF NOT EXISTS observations (
-    observation_id INTEGER PRIMARY KEY AUTOINCREMENT,
-    observation_type TEXT NOT NULL,
-    subject TEXT NOT NULL,
-    summary TEXT NOT NULL,
-    confidence REAL DEFAULT 0.0,
-    metadata_json TEXT DEFAULT '{}',
-    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-    UNIQUE(observation_type, subject)
-);
-
-CREATE TABLE IF NOT EXISTS observation_evidence (
-    observation_id INTEGER REFERENCES observations(observation_id) ON DELETE CASCADE,
-    fact_id INTEGER REFERENCES facts(fact_id) ON DELETE CASCADE,
-    evidence_weight REAL DEFAULT 1.0,
-    PRIMARY KEY (observation_id, fact_id)
-);
-
-CREATE INDEX IF NOT EXISTS idx_observations_type ON observations(observation_type);
-CREATE INDEX IF NOT EXISTS idx_observations_confidence ON observations(confidence DESC);
 """

 # Trust adjustment constants
@@ -151,7 +128,6 @@ class MemoryStore:
     def _init_db(self) -> None:
         """Create tables, indexes, and triggers if they do not exist. Enable WAL mode."""
         self._conn.execute("PRAGMA journal_mode=WAL")
         self._conn.execute("PRAGMA foreign_keys=ON")
         self._conn.executescript(_SCHEMA)
         # Migrate: add hrr_vector column if missing (safe for existing databases)
         columns = {row[1] for row in self._conn.execute("PRAGMA table_info(facts)").fetchall()}
@@ -370,115 +346,6 @@ class MemoryStore:
         rows = self._conn.execute(sql, params).fetchall()
         return [self._row_to_dict(r) for r in rows]

-    def upsert_observation(
-        self,
-        observation_type: str,
-        subject: str,
-        summary: str,
-        confidence: float,
-        evidence_fact_ids: list[int],
-        metadata: dict | None = None,
-    ) -> int:
-        """Create or update a synthesized observation and its evidence links."""
-        with self._lock:
-            metadata_json = json.dumps(metadata or {}, sort_keys=True)
-            self._conn.execute(
-                """
-                INSERT INTO observations (
-                    observation_type, subject, summary, confidence, metadata_json
-                )
-                VALUES (?, ?, ?, ?, ?)
-                ON CONFLICT(observation_type, subject) DO UPDATE SET
-                    summary = excluded.summary,
-                    confidence = excluded.confidence,
-                    metadata_json = excluded.metadata_json,
-                    updated_at = CURRENT_TIMESTAMP
-                """,
-                (observation_type, subject, summary, confidence, metadata_json),
-            )
-            row = self._conn.execute(
-                """
-                SELECT observation_id
-                FROM observations
-                WHERE observation_type = ? AND subject = ?
-                """,
-                (observation_type, subject),
-            ).fetchone()
-            observation_id = int(row["observation_id"])
-
-            self._conn.execute(
-                "DELETE FROM observation_evidence WHERE observation_id = ?",
-                (observation_id,),
-            )
-            unique_fact_ids = sorted({int(fid) for fid in evidence_fact_ids})
-            if unique_fact_ids:
-                self._conn.executemany(
-                    """
-                    INSERT OR IGNORE INTO observation_evidence (observation_id, fact_id)
-                    VALUES (?, ?)
-                    """,
-                    [(observation_id, fact_id) for fact_id in unique_fact_ids],
-                )
-            self._conn.commit()
-            return observation_id
-
-    def list_observations(
-        self,
-        observation_type: str | None = None,
-        min_confidence: float = 0.0,
-        limit: int = 50,
-    ) -> list[dict]:
-        """List synthesized observations with expanded supporting evidence."""
-        with self._lock:
-            params: list = [min_confidence]
-            observation_clause = ""
-            if observation_type is not None:
-                observation_clause = "AND observation_type = ?"
-                params.append(observation_type)
-            params.append(limit)
-            rows = self._conn.execute(
-                f"""
-                SELECT observation_id, observation_type, subject, summary, confidence,
-                       metadata_json, created_at, updated_at,
-                       (
-                           SELECT COUNT(*)
-                           FROM observation_evidence oe
-                           WHERE oe.observation_id = observations.observation_id
-                       ) AS evidence_count
-                FROM observations
-                WHERE confidence >= ?
-                {observation_clause}
-                ORDER BY confidence DESC, updated_at DESC
-                LIMIT ?
-                """,
-                params,
-            ).fetchall()
-
-            results = []
-            for row in rows:
-                item = dict(row)
-                try:
-                    item["metadata"] = json.loads(item.pop("metadata_json") or "{}")
-                except json.JSONDecodeError:
-                    item["metadata"] = {}
-                item["evidence"] = self._get_observation_evidence(int(item["observation_id"]))
-                results.append(item)
-            return results
-
-    def _get_observation_evidence(self, observation_id: int) -> list[dict]:
-        rows = self._conn.execute(
-            """
-            SELECT f.fact_id, f.content, f.category, f.tags, f.trust_score,
-                   f.retrieval_count, f.helpful_count, f.created_at, f.updated_at
-            FROM observation_evidence oe
-            JOIN facts f ON f.fact_id = oe.fact_id
-            WHERE oe.observation_id = ?
-            ORDER BY f.trust_score DESC, f.updated_at DESC
-            """,
-            (observation_id,),
-        ).fetchall()
-        return [self._row_to_dict(row) for row in rows]

     def record_feedback(self, fact_id: int, helpful: bool) -> dict:
         """Record user feedback and adjust trust asymmetrically.
@@ -175,3 +175,79 @@ class TestUsageCachedAgent:
         result = await runner._handle_usage_command(event)

         assert "Cost: included" in result
+
+
+class TestUsageAccountSection:
+    """Account-limits section appended to /usage output."""
+
+    @pytest.mark.asyncio
+    async def test_usage_command_includes_account_section(self, monkeypatch):
+        agent = _make_mock_agent(provider="openai-codex")
+        agent.base_url = "https://chatgpt.com/backend-api/codex"
+        agent.api_key = "unused"
+        runner = _make_runner(SK, cached_agent=agent)
+        event = MagicMock()
+
+        monkeypatch.setattr(
+            "gateway.run.fetch_account_usage",
+            lambda provider, base_url=None, api_key=None: object(),
+        )
+        monkeypatch.setattr(
+            "gateway.run.render_account_usage_lines",
+            lambda snapshot, markdown=False: [
+                "📈 **Account limits**",
+                "Provider: openai-codex (Pro)",
+                "Session: 85% remaining (15% used)",
+            ],
+        )
+        with patch("agent.rate_limit_tracker.format_rate_limit_compact", return_value="RPM: 50/60"), \
+             patch("agent.usage_pricing.estimate_usage_cost") as mock_cost:
+            mock_cost.return_value = MagicMock(amount_usd=None, status="included")
+            result = await runner._handle_usage_command(event)
+
+        assert "📊 **Session Token Usage**" in result
+        assert "📈 **Account limits**" in result
+        assert "Provider: openai-codex (Pro)" in result
+
+    @pytest.mark.asyncio
+    async def test_usage_command_uses_persisted_provider_when_agent_not_running(self, monkeypatch):
+        runner = _make_runner(SK)
+        runner._session_db = MagicMock()
+        runner._session_db.get_session.return_value = {
+            "billing_provider": "openai-codex",
+            "billing_base_url": "https://chatgpt.com/backend-api/codex",
+        }
+        session_entry = MagicMock()
+        session_entry.session_id = "sess-1"
+        runner.session_store.get_or_create_session.return_value = session_entry
+        runner.session_store.load_transcript.return_value = [
+            {"role": "user", "content": "earlier"},
+        ]
+
+        calls = {}
+
+        async def _fake_to_thread(fn, *args, **kwargs):
+            calls["args"] = args
+            calls["kwargs"] = kwargs
+            return fn(*args, **kwargs)
+
+        monkeypatch.setattr("gateway.run.asyncio.to_thread", _fake_to_thread)
+        monkeypatch.setattr(
+            "gateway.run.fetch_account_usage",
+            lambda provider, base_url=None, api_key=None: object(),
+        )
+        monkeypatch.setattr(
+            "gateway.run.render_account_usage_lines",
+            lambda snapshot, markdown=False: [
+                "📈 **Account limits**",
+                "Provider: openai-codex (Pro)",
+            ],
+        )
+
+        event = MagicMock()
+        result = await runner._handle_usage_command(event)
+
+        assert calls["args"] == ("openai-codex",)
+        assert calls["kwargs"]["base_url"] == "https://chatgpt.com/backend-api/codex"
+        assert "📊 **Session Info**" in result
+        assert "📈 **Account limits**" in result
@@ -1,96 +0,0 @@
import json

import pytest

from plugins.memory.holographic import HolographicMemoryProvider
from plugins.memory.holographic.store import MemoryStore


@pytest.fixture()
def store(tmp_path):
    db_path = tmp_path / "memory.db"
    s = MemoryStore(db_path=str(db_path), default_trust=0.5)
    yield s
    s.close()


@pytest.fixture()
def provider(tmp_path):
    p = HolographicMemoryProvider(
        config={
            "db_path": str(tmp_path / "memory.db"),
            "default_trust": 0.5,
        }
    )
    p.initialize(session_id="test-session")
    yield p
    if p._store:
        p._store.close()


class TestObservationSynthesis:
    def test_observe_action_persists_observation_with_evidence_links(self, provider):
        fact_ids = [
            provider._store.add_fact('User prefers concise status updates', category='user_pref'),
            provider._store.add_fact('User wants result-only replies with no fluff', category='user_pref'),
        ]

        result = json.loads(
            provider.handle_tool_call(
                'fact_store',
                {
                    'action': 'observe',
                    'query': 'What communication style does the user prefer?',
                    'limit': 5,
                },
            )
        )

        assert result['count'] == 1
        observation = result['observations'][0]
        assert observation['observation_type'] == 'recurring_preference'
        assert observation['confidence'] >= 0.6
        assert sorted(item['fact_id'] for item in observation['evidence']) == sorted(fact_ids)

        stored = provider._store.list_observations(limit=10)
        assert len(stored) == 1
        assert stored[0]['observation_type'] == 'recurring_preference'
        assert stored[0]['evidence_count'] == 2
        assert len(provider._store.list_facts(limit=10)) == 2

    def test_observe_action_synthesizes_three_observation_types(self, provider):
        provider._store.add_fact('User prefers concise updates', category='user_pref')
        provider._store.add_fact('User wants result-only communication', category='user_pref')
        provider._store.add_fact('Project is moving to a local-first deployment model', category='project')
        provider._store.add_fact('Project direction stays Gitea-first for issue and PR flow', category='project')
        provider._store.add_fact('Operator always commits early before moving on', category='general')
        provider._store.add_fact('Operator pushes a PR immediately after each meaningful fix', category='general')

        result = json.loads(provider.handle_tool_call('fact_store', {'action': 'observe', 'limit': 10}))
        types = {item['observation_type'] for item in result['observations']}

        assert {'recurring_preference', 'stable_direction', 'behavioral_pattern'} <= types

    def test_single_fact_does_not_create_overconfident_observation(self, provider):
        provider._store.add_fact('User prefers concise updates', category='user_pref')

        result = json.loads(
            provider.handle_tool_call(
                'fact_store',
                {'action': 'observe', 'query': 'What does the user prefer?', 'limit': 5},
            )
        )

        assert result['count'] == 0
        assert provider._store.list_observations(limit=10) == []

    def test_prefetch_surfaces_observations_as_separate_layer(self, provider):
        provider._store.add_fact('User prefers concise updates', category='user_pref')
        provider._store.add_fact('User wants result-only communication', category='user_pref')

        prefetch = provider.prefetch('What communication style does the user prefer?')

        assert '## Holographic Observations' in prefetch
        assert '## Holographic Memory' in prefetch
        assert 'recurring_preference' in prefetch
        assert 'evidence' in prefetch.lower()
tests/test_account_usage.py (new file, 203 lines)
@@ -0,0 +1,203 @@
from datetime import datetime, timezone

from agent.account_usage import (
    AccountUsageSnapshot,
    AccountUsageWindow,
    fetch_account_usage,
    render_account_usage_lines,
)


class _Response:
    def __init__(self, payload, status_code=200):
        self._payload = payload
        self.status_code = status_code

    def raise_for_status(self):
        if self.status_code >= 400:
            raise RuntimeError(f"HTTP {self.status_code}")

    def json(self):
        return self._payload


class _Client:
    def __init__(self, payload):
        self._payload = payload

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        return False

    def get(self, url, headers=None):
        return _Response(self._payload)


class _RoutingClient:
    def __init__(self, payloads):
        self._payloads = payloads

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        return False

    def get(self, url, headers=None):
        return _Response(self._payloads[url])


def test_fetch_account_usage_codex(monkeypatch):
    monkeypatch.setattr(
        "agent.account_usage.resolve_codex_runtime_credentials",
        lambda refresh_if_expiring=True: {
            "provider": "openai-codex",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "***",
        },
    )
    monkeypatch.setattr(
        "agent.account_usage._read_codex_tokens",
        lambda: {"tokens": {"account_id": "acct_123"}},
    )
    monkeypatch.setattr(
        "agent.account_usage.httpx.Client",
        lambda timeout=15.0: _Client(
            {
                "plan_type": "pro",
                "rate_limit": {
                    "primary_window": {
                        "used_percent": 15,
                        "reset_at": 1_900_000_000,
                        "limit_window_seconds": 18000,
                    },
                    "secondary_window": {
                        "used_percent": 40,
                        "reset_at": 1_900_500_000,
                        "limit_window_seconds": 604800,
                    },
                },
                "credits": {"has_credits": True, "balance": 12.5},
            }
        ),
    )

    snapshot = fetch_account_usage("openai-codex")

    assert snapshot is not None
    assert snapshot.plan == "Pro"
    assert len(snapshot.windows) == 2
    assert snapshot.windows[0].label == "Session"
    assert snapshot.windows[0].used_percent == 15.0
    assert snapshot.windows[0].reset_at == datetime.fromtimestamp(1_900_000_000, tz=timezone.utc)
    assert "Credits balance: $12.50" in snapshot.details


def test_render_account_usage_lines_includes_reset_and_provider():
    snapshot = AccountUsageSnapshot(
        provider="openai-codex",
        source="usage_api",
        fetched_at=datetime.now(timezone.utc),
        plan="Pro",
        windows=(
            AccountUsageWindow(
                label="Session",
                used_percent=25,
                reset_at=datetime.now(timezone.utc),
            ),
        ),
        details=("Credits balance: $9.99",),
    )
    lines = render_account_usage_lines(snapshot)

    assert lines[0] == "📈 Account limits"
    assert "openai-codex (Pro)" in lines[1]
    assert "Session: 75% remaining (25% used)" in lines[2]
    assert "Credits balance: $9.99" in lines[3]


def test_fetch_account_usage_openrouter_uses_limit_remaining_and_ignores_deprecated_rate_limit(monkeypatch):
    monkeypatch.setattr(
        "agent.account_usage.resolve_runtime_provider",
        lambda requested, explicit_base_url=None, explicit_api_key=None: {
            "provider": "openrouter",
            "base_url": "https://openrouter.ai/api/v1",
            "api_key": "***",
        },
    )
    monkeypatch.setattr(
        "agent.account_usage.httpx.Client",
        lambda timeout=10.0: _RoutingClient(
            {
                "https://openrouter.ai/api/v1/credits": {
                    "data": {"total_credits": 300.0, "total_usage": 10.92}
                },
                "https://openrouter.ai/api/v1/key": {
                    "data": {
                        "limit": 100.0,
                        "limit_remaining": 70.0,
                        "limit_reset": "monthly",
                        "usage": 12.5,
                        "usage_daily": 0.5,
                        "usage_weekly": 2.0,
                        "usage_monthly": 8.0,
                        "rate_limit": {"requests": -1, "interval": "10s"},
                    }
                },
            }
        ),
    )

    snapshot = fetch_account_usage("openrouter")

    assert snapshot is not None
    assert snapshot.windows == (
        AccountUsageWindow(
            label="API key quota",
            used_percent=30.0,
            detail="$70.00 of $100.00 remaining • resets monthly",
        ),
    )
    assert "Credits balance: $289.08" in snapshot.details
    assert "API key usage: $12.50 total • $0.50 today • $2.00 this week • $8.00 this month" in snapshot.details
    assert all("-1 requests / 10s" not in line for line in render_account_usage_lines(snapshot))


def test_fetch_account_usage_openrouter_omits_quota_window_when_key_has_no_limit(monkeypatch):
    monkeypatch.setattr(
        "agent.account_usage.resolve_runtime_provider",
        lambda requested, explicit_base_url=None, explicit_api_key=None: {
            "provider": "openrouter",
            "base_url": "https://openrouter.ai/api/v1",
            "api_key": "***",
        },
    )
    monkeypatch.setattr(
        "agent.account_usage.httpx.Client",
        lambda timeout=10.0: _RoutingClient(
            {
                "https://openrouter.ai/api/v1/credits": {
                    "data": {"total_credits": 100.0, "total_usage": 25.5}
                },
                "https://openrouter.ai/api/v1/key": {
                    "data": {
                        "limit": None,
                        "limit_remaining": None,
                        "usage": 25.5,
                        "usage_daily": 1.25,
                        "usage_weekly": 4.5,
                        "usage_monthly": 18.0,
                    }
                },
            }
        ),
    )

    snapshot = fetch_account_usage("openrouter")

    assert snapshot is not None
    assert snapshot.windows == ()
    assert "Credits balance: $74.50" in snapshot.details
    assert "API key usage: $25.50 total • $1.25 today • $4.50 this week • $18.00 this month" in snapshot.details